<?xml version="1.0" encoding="UTF-8"?>
<rdf:RDF xmlns="http://purl.org/rss/1.0/"
 xmlns:dc="http://purl.org/dc/elements/1.1/"
 xmlns:dcterms="http://purl.org/dc/terms/"
 xmlns:cc="http://web.resource.org/cc/"
 xmlns:prism="http://prismstandard.org/namespaces/basic/2.0/"
 xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
 xmlns:admin="http://webns.net/mvcb/"
 xmlns:content="http://purl.org/rss/1.0/modules/content/">
    <channel rdf:about="https://www.mdpi.com/rss/journal/econometrics">
		<title>Econometrics</title>
		<description>Latest open access articles published in Econometrics at https://www.mdpi.com/journal/econometrics</description>
		<link>https://www.mdpi.com/journal/econometrics</link>
		<admin:generatorAgent rdf:resource="https://www.mdpi.com/journal/econometrics"/>
		<admin:errorReportsTo rdf:resource="mailto:support@mdpi.com"/>
		<dc:publisher>MDPI</dc:publisher>
		<dc:language>en</dc:language>
		<dc:rights>Creative Commons Attribution (CC-BY)</dc:rights>
						<prism:copyright>MDPI</prism:copyright>
		<prism:rightsAgent>support@mdpi.com</prism:rightsAgent>
		<image rdf:resource="https://pub.mdpi-res.com/img/design/mdpi-pub-logo.png?13cf3b5bd783e021&amp;1778678334"/>
				<items>
			<rdf:Seq>
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/14/2/24" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/14/2/23" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/14/2/22" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/14/2/21" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/14/2/20" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/14/2/19" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/14/2/18" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/14/2/17" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/14/1/16" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/14/1/15" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/14/1/14" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/14/1/13" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/14/1/12" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/14/1/11" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/14/1/10" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/14/1/9" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/14/1/8" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/14/1/7" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/14/1/6" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/14/1/5" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/14/1/4" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/14/1/3" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/14/1/2" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/14/1/1" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/13/4/52" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/13/4/51" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/13/4/50" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/13/4/49" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/13/4/48" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/13/4/47" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/13/4/46" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/13/4/45" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/13/4/44" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/13/4/43" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/13/4/42" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/13/4/41" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/13/4/40" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/13/4/39" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/13/4/38" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/13/3/37" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/13/3/36" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/13/3/35" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/13/3/34" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/13/3/33" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/13/3/32" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/13/3/31" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/13/3/30" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/13/3/29" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/13/3/28" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/13/3/27" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/13/3/26" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/13/3/25" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/13/3/24" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/13/2/23" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/13/2/22" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/13/2/21" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/13/2/20" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/13/2/19" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/13/2/18" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/13/2/17" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/13/2/16" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/13/2/15" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/13/2/14" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/13/2/13" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/13/1/12" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/13/1/11" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/13/1/10" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/13/1/9" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/13/1/8" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/13/1/7" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/13/1/6" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/13/1/5" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/13/1/4" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/13/1/3" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/13/1/2" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/13/1/1" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/12/4/39" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/12/4/38" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/12/4/37" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/12/4/36" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/12/4/35" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/12/4/34" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/12/4/33" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/12/4/32" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/12/4/31" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/12/4/30" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/12/4/29" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/12/4/28" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/12/4/27" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/12/3/26" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/12/3/25" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/12/3/24" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/12/3/23" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/12/3/22" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/12/3/21" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/12/3/20" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/12/3/19" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/12/2/18" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/12/2/17" />
            				<rdf:li rdf:resource="https://www.mdpi.com/2225-1146/12/2/16" />
                    	</rdf:Seq>
		</items>
				<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/" />
	</channel>

        <item rdf:about="https://www.mdpi.com/2225-1146/14/2/24">

	<title>Econometrics, Vol. 14, Pages 24: Measuring the Return to Online Advertising: Estimation and Inference of Endogenous Treatment Effects</title>
	<link>https://www.mdpi.com/2225-1146/14/2/24</link>
	<description>In this paper we aim to conduct inference on the &amp;ldquo;lift&amp;rdquo; effect generated by an online advertisement display: specifically we want to analyze if the presence of the brand ad among the advertisements on the page increases the overall number of consumer clicks on that page. A distinctive feature of online advertising is that the ad displays are highly targeted&amp;mdash;the advertising platform evaluates the (unconditional) probability of each consumer clicking on a given ad, which leads to a higher probability of displaying the ads that have a higher a priori estimated probability of click. As a result, inferring the causal effect of the ad display on the page clicks by a given consumer from typical observational data is difficult. To address this we propose a multi-step estimator that focuses on the tails of the consumer distribution to estimate the true causal effect of an ad display. This &amp;ldquo;identification at infinity&amp;rdquo; approach alleviates the need for independent experimental randomization but results in nonstandard asymptotic theory, motivating our novel inference method. To validate our results, we use a set of large-scale randomized controlled experiments that Microsoft has run on its advertising platform. Our dataset has a large number of observations and a large number of variables and we employ LASSO to perform variable selection. Providing a basis for comparison with our estimates, we use a study conducted by Microsoft with approximately 9.3 million search sessions focusing on consumer click behavior across search result pages of a major search engine. Randomized experiments indicate that displaying a brand advertisement increases the probability of visiting the advertiser&amp;rsquo;s website by about 2.27 percentage points relative to a baseline visit rate of roughly 78 percent. 
Our non-experimental estimates exhibit broadly similar patterns to those obtained from randomized controlled trials, suggesting that the proposed observational estimator can recover qualitatively comparable treatment effects in large-scale advertising data.</description>
	<pubDate>2026-05-12</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 14, Pages 24: Measuring the Return to Online Advertising: Estimation and Inference of Endogenous Treatment Effects</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/14/2/24">doi: 10.3390/econometrics14020024</a></p>
	<p>Authors:
		Shakeeb Khan
		Denis Nekipelov
		Justin Rao
		</p>
	<p>In this paper we aim to conduct inference on the &ldquo;lift&rdquo; effect generated by an online advertisement display: specifically we want to analyze if the presence of the brand ad among the advertisements on the page increases the overall number of consumer clicks on that page. A distinctive feature of online advertising is that the ad displays are highly targeted&mdash;the advertising platform evaluates the (unconditional) probability of each consumer clicking on a given ad, which leads to a higher probability of displaying the ads that have a higher a priori estimated probability of click. As a result, inferring the causal effect of the ad display on the page clicks by a given consumer from typical observational data is difficult. To address this we propose a multi-step estimator that focuses on the tails of the consumer distribution to estimate the true causal effect of an ad display. This &ldquo;identification at infinity&rdquo; approach alleviates the need for independent experimental randomization but results in nonstandard asymptotic theory, motivating our novel inference method. To validate our results, we use a set of large-scale randomized controlled experiments that Microsoft has run on its advertising platform. Our dataset has a large number of observations and a large number of variables and we employ LASSO to perform variable selection. Providing a basis for comparison with our estimates, we use a study conducted by Microsoft with approximately 9.3 million search sessions focusing on consumer click behavior across search result pages of a major search engine. Randomized experiments indicate that displaying a brand advertisement increases the probability of visiting the advertiser&rsquo;s website by about 2.27 percentage points relative to a baseline visit rate of roughly 78 percent. 
Our non-experimental estimates exhibit broadly similar patterns to those obtained from randomized controlled trials, suggesting that the proposed observational estimator can recover qualitatively comparable treatment effects in large-scale advertising data.</p>
	]]></content:encoded>

	<dc:title>Measuring the Return to Online Advertising: Estimation and Inference of Endogenous Treatment Effects</dc:title>
			<dc:creator>Shakeeb Khan</dc:creator>
			<dc:creator>Denis Nekipelov</dc:creator>
			<dc:creator>Justin Rao</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics14020024</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2026-05-12</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2026-05-12</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>24</prism:startingPage>
		<prism:doi>10.3390/econometrics14020024</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/14/2/24</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/14/2/23">

	<title>Econometrics, Vol. 14, Pages 23: Internationalization and Financing Decisions of Chinese Enterprises: Evidence from Hong Kong Listings</title>
	<link>https://www.mdpi.com/2225-1146/14/2/23</link>
	<description>This study explores the impact of internationalization on the financing decisions and finance costs of Chinese enterprises listed in Hong Kong, extending the pecking order theory to an international context. Utilizing data from 785 companies from 2010 to 2020, the research investigates how the degree of internationalization influences corporate finance strategies, with a focus on the mediating role of the pecking order and the moderating effects of international business factors. The findings reveal that while broader internationalization increases finance costs, deeper internationalization reduces them. Legal distance is found to negatively moderate this relationship, whereas the structure of the financial system positively influences it. The results suggest that multinational enterprises with extensive overseas resource allocation demonstrate greater flexibility in financing decisions, particularly in foreign markets characterized by strong investor protection and efficient direct finance mechanisms. Managers should be cautious about pursuing wide geographic expansion without adequate operating depth because a broad but shallow international presence may increase financing frictions. By contrast, deeper resource commitment abroad can strengthen financing flexibility and improve access to lower-cost funds, especially when institutional conditions in the financing market are favorable.</description>
	<pubDate>2026-05-07</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 14, Pages 23: Internationalization and Financing Decisions of Chinese Enterprises: Evidence from Hong Kong Listings</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/14/2/23">doi: 10.3390/econometrics14020023</a></p>
	<p>Authors:
		Pujie Lin
		Tsz Leung Yip
		</p>
	<p>This study explores the impact of internationalization on the financing decisions and finance costs of Chinese enterprises listed in Hong Kong, extending the pecking order theory to an international context. Utilizing data from 785 companies from 2010 to 2020, the research investigates how the degree of internationalization influences corporate finance strategies, with a focus on the mediating role of the pecking order and the moderating effects of international business factors. The findings reveal that while broader internationalization increases finance costs, deeper internationalization reduces them. Legal distance is found to negatively moderate this relationship, whereas the structure of the financial system positively influences it. The results suggest that multinational enterprises with extensive overseas resource allocation demonstrate greater flexibility in financing decisions, particularly in foreign markets characterized by strong investor protection and efficient direct finance mechanisms. Managers should be cautious about pursuing wide geographic expansion without adequate operating depth because a broad but shallow international presence may increase financing frictions. By contrast, deeper resource commitment abroad can strengthen financing flexibility and improve access to lower-cost funds, especially when institutional conditions in the financing market are favorable.</p>
	]]></content:encoded>

	<dc:title>Internationalization and Financing Decisions of Chinese Enterprises: Evidence from Hong Kong Listings</dc:title>
			<dc:creator>Pujie Lin</dc:creator>
			<dc:creator>Tsz Leung Yip</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics14020023</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2026-05-07</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2026-05-07</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>23</prism:startingPage>
		<prism:doi>10.3390/econometrics14020023</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/14/2/23</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/14/2/22">

	<title>Econometrics, Vol. 14, Pages 22: Estimation of Two-States Proportional Hazard Rates Models with Unobserved Heterogeneity</title>
	<link>https://www.mdpi.com/2225-1146/14/2/22</link>
	<description>This article examines two-state proportional hazard rate models with unobserved heterogeneity specific to each state, a framework that is especially relevant for labor market transitions. To make estimation feasible in large longitudinal datasets, we implement hshaz2s, a Stata routine that uses analytical expressions for the gradient vector and Hessian matrix of the log-likelihood function through the dual second-order moment (d2 ml) method. The empirical application estimates a discrete-time duration model for transitions between employment and unemployment using Spanish labor market microdata for young low-skilled workers over 2000&amp;ndash;2019. The results show that apprenticeship contracts are associated with lower exit rates from employment than other temporary contracts, but not with faster transitions from unemployment back into employment. The estimates also reveal substantial state-specific unobserved heterogeneity, with a large latent group characterized by persistent spells in both states. Analytical second-order information also markedly reduces convergence time under richer heterogeneity structures. Overall, the article makes this class of two-state hazard models operational for applied research and provides new evidence on apprenticeship and temporary contracts in Spain.</description>
	<pubDate>2026-04-28</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 14, Pages 22: Estimation of Two-States Proportional Hazard Rates Models with Unobserved Heterogeneity</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/14/2/22">doi: 10.3390/econometrics14020022</a></p>
	<p>Authors:
		Emilio Congregado
		David Troncoso-Ponce
		Nicola Rubino
		Alejandro Morales-Kirioukhina
		</p>
	<p>This article examines two-state proportional hazard rate models with unobserved heterogeneity specific to each state, a framework that is especially relevant for labor market transitions. To make estimation feasible in large longitudinal datasets, we implement hshaz2s, a Stata routine that uses analytical expressions for the gradient vector and Hessian matrix of the log-likelihood function through the dual second-order moment (d2 ml) method. The empirical application estimates a discrete-time duration model for transitions between employment and unemployment using Spanish labor market microdata for young low-skilled workers over 2000&ndash;2019. The results show that apprenticeship contracts are associated with lower exit rates from employment than other temporary contracts, but not with faster transitions from unemployment back into employment. The estimates also reveal substantial state-specific unobserved heterogeneity, with a large latent group characterized by persistent spells in both states. Analytical second-order information also markedly reduces convergence time under richer heterogeneity structures. Overall, the article makes this class of two-state hazard models operational for applied research and provides new evidence on apprenticeship and temporary contracts in Spain.</p>
	]]></content:encoded>

	<dc:title>Estimation of Two-States Proportional Hazard Rates Models with Unobserved Heterogeneity</dc:title>
			<dc:creator>Emilio Congregado</dc:creator>
			<dc:creator>David Troncoso-Ponce</dc:creator>
			<dc:creator>Nicola Rubino</dc:creator>
			<dc:creator>Alejandro Morales-Kirioukhina</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics14020022</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2026-04-28</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2026-04-28</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>22</prism:startingPage>
		<prism:doi>10.3390/econometrics14020022</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/14/2/22</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/14/2/21">

	<title>Econometrics, Vol. 14, Pages 21: Edgeworth Expansions When the Parameter Dimension Increases with Sample Size</title>
	<link>https://www.mdpi.com/2225-1146/14/2/21</link>
	<description>Suppose that we have a statistical model with q unknown parameters w, and an estimate w^, based on a sample of size n. A basic question is: what is the covariance of the estimate? The covariance is needed for the Central Limit Theorem (CLT). This gives a first approximation for the distribution of w^. But what if qn=n increases with n? How fast can it increase and the CLT still hold? An answer has so far only been given for the sample mean. The same is true for the Edgeworth expansions. These are expansions in powers of n&amp;minus;1/2 for the density and distribution of w^. For fixed q, these expansions are important, as they show how small n can be for the CLT to apply. When it does, they can greatly improve the accuracy of the CLT. I give conditions that allow for the Edgeworth expansions to remain valid when qn=q increases with n. Earlier Edgeworth expansions when qn=q increases, have only been done for a sample mean, and only for a 2nd order Edgeworth expansion. In contrast, I consider a very large class of estimates, the class of non-lattice standard estimates. An estimate is said to be a standard estimate if its mean converges to its true value as n increases, and for r&amp;ge;1, its rth order cumulants have magnitude n1&amp;minus;r and can be expanded in powers of n&amp;minus;1. For this class of estimates, I show that the Edgeworth expansions hold if qn grows as a power of n less than 1/6. That is, I give these expansions in powers of n&amp;minus;1/2qn3. This large class of estimates has a huge range of potential applications, as estimates of high dimension are common in nearly all areas of applied statistics. The most important type of standard estimate is when w^ is a smooth function of a sample mean, of dimension p say. 
When either or both qn=q and pn=p increase with n, I give conditions on their growth for the Edgeworth expansions for w^ to remain valid: the eighth power of p times the sixth power of q cannot grow as fast as n. This holds for fixed q=qn if pn grows less than a power of n less than 1/8. This appears to be the first time when Edgeworth expansions have been given when not one, but two dimensions, are allowed to increase to &amp;infin; with n. This gives two different pathways for allowing an increase in dimensionality. When q=1, I give 5th order Edgeworth-Cornish-Fisher expansions for the standardized distribution and its quantiles of any smooth function of a sample mean of dimension pn, when pn is a power of n less than 1/2. However for the special case when this function is linear, there is no restriction whatever on how fast pn can increase! If also the components of the sample mean are independent, then these expansions are in powers of (np)&amp;minus;1/2. I also give a method that greatly reduces the number of terms needed for the 2nd and 3rd order terms in the Edgeworth expansions, that is, for the 1st and 2nd order corrections to the CLTs. I also extend these results to the case where w^&amp;isin;Rq is a function of several independent sample means, each of dimension increasing with n, with total dimension p.</description>
	<pubDate>2026-04-27</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 14, Pages 21: Edgeworth Expansions When the Parameter Dimension Increases with Sample Size</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/14/2/21">doi: 10.3390/econometrics14020021</a></p>
	<p>Authors:
		Christopher Stroude Withers
		</p>
	<p>Suppose that we have a statistical model with q unknown parameters w, and an estimate w^, based on a sample of size n. A basic question is: what is the covariance of the estimate? The covariance is needed for the Central Limit Theorem (CLT). This gives a first approximation for the distribution of w^. But what if qn=n increases with n? How fast can it increase and the CLT still hold? An answer has so far only been given for the sample mean. The same is true for the Edgeworth expansions. These are expansions in powers of n&minus;1/2 for the density and distribution of w^. For fixed q, these expansions are important, as they show how small n can be for the CLT to apply. When it does, they can greatly improve the accuracy of the CLT. I give conditions that allow for the Edgeworth expansions to remain valid when qn=q increases with n. Earlier Edgeworth expansions when qn=q increases, have only been done for a sample mean, and only for a 2nd order Edgeworth expansion. In contrast, I consider a very large class of estimates, the class of non-lattice standard estimates. An estimate is said to be a standard estimate if its mean converges to its true value as n increases, and for r&ge;1, its rth order cumulants have magnitude n1&minus;r and can be expanded in powers of n&minus;1. For this class of estimates, I show that the Edgeworth expansions hold if qn grows as a power of n less than 1/6. That is, I give these expansions in powers of n&minus;1/2qn3. This large class of estimates has a huge range of potential applications, as estimates of high dimension are common in nearly all areas of applied statistics. The most important type of standard estimate is when w^ is a smooth function of a sample mean, of dimension p say. When either or both qn=q and pn=p increase with n, I give conditions on their growth for the Edgeworth expansions for w^ to remain valid: the eighth power of p times the sixth power of q cannot grow as fast as n. 
This holds for fixed q=qn if pn grows less than a power of n less than 1/8. This appears to be the first time when Edgeworth expansions have been given when not one, but two dimensions, are allowed to increase to &infin; with n. This gives two different pathways for allowing an increase in dimensionality. When q=1, I give 5th order Edgeworth-Cornish-Fisher expansions for the standardized distribution and its quantiles of any smooth function of a sample mean of dimension pn, when pn is a power of n less than 1/2. However for the special case when this function is linear, there is no restriction whatever on how fast pn can increase! If also the components of the sample mean are independent, then these expansions are in powers of (np)&minus;1/2. I also give a method that greatly reduces the number of terms needed for the 2nd and 3rd order terms in the Edgeworth expansions, that is, for the 1st and 2nd order corrections to the CLTs. I also extend these results to the case where w^&isin;Rq is a function of several independent sample means, each of dimension increasing with n, with total dimension p.</p>
	]]></content:encoded>

	<dc:title>Edgeworth Expansions When the Parameter Dimension Increases with Sample Size</dc:title>
			<dc:creator>Christopher Stroude Withers</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics14020021</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2026-04-27</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2026-04-27</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>21</prism:startingPage>
		<prism:doi>10.3390/econometrics14020021</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/14/2/21</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/14/2/20">

	<title>Econometrics, Vol. 14, Pages 20: Fuzzy Approach to Analysis of Investment Alternatives</title>
	<link>https://www.mdpi.com/2225-1146/14/2/20</link>
	<description>With significant market unsureness, &amp;ldquo;static&amp;rdquo; methods fail to account for economic uncertainty, may be less precise and, accordingly, less helpful when selecting investment alternatives. Methods that take into account the current economic situation and allow for adapting the alternative selection to external uncertainty are becoming more relevant. One of such methods is the fuzzy set theory. This article addresses the mathematical framework of such an approach for the economic analysis of investment project selection. A step-by-step scheme for implementing the fuzzy set method for investment projects is presented. Studies performed on the example of three investment alternatives give grounds for asserting the compatibility and feasibility of using two methods (the fuzzy set method may be partly based on the results of pairwise comparisons of experts according to the Saaty method) and confirmation or refutation of previous intuitive decisions of investors based on a comprehensive analysis of the criterion composition and the use of mathematical grounded technique.</description>
	<pubDate>2026-04-13</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 14, Pages 20: Fuzzy Approach to Analysis of Investment Alternatives</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/14/2/20">doi: 10.3390/econometrics14020020</a></p>
	<p>Authors:
		Tamara Kyrylych
		Yuriy Povstenko
		</p>
	<p>With significant market unsureness, “static” methods fail to account for economic uncertainty, may be less precise and, accordingly, less helpful when selecting investment alternatives. Methods that take into account the current economic situation and allow for adapting the alternative selection to external uncertainty are becoming more relevant. One of such methods is the fuzzy set theory. This article addresses the mathematical framework of such an approach for the economic analysis of investment project selection. A step-by-step scheme for implementing the fuzzy set method for investment projects is presented. Studies performed on the example of three investment alternatives give grounds for asserting the compatibility and feasibility of using two methods (the fuzzy set method may be partly based on the results of pairwise comparisons of experts according to the Saaty method) and confirmation or refutation of previous intuitive decisions of investors based on a comprehensive analysis of the criterion composition and the use of mathematical grounded technique.</p>
	]]></content:encoded>

	<dc:title>Fuzzy Approach to Analysis of Investment Alternatives</dc:title>
			<dc:creator>Tamara Kyrylych</dc:creator>
			<dc:creator>Yuriy Povstenko</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics14020020</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2026-04-13</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2026-04-13</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>20</prism:startingPage>
		<prism:doi>10.3390/econometrics14020020</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/14/2/20</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/14/2/19">

	<title>Econometrics, Vol. 14, Pages 19: When Better Prediction Reduces Overlap: The Predictability Paradox in Propensity Score Matching with Machine Learning</title>
	<link>https://www.mdpi.com/2225-1146/14/2/19</link>
	<description>Evidence from observational studies plays a central role in shaping public policy in health, education, and financial regulation, where randomized experiments are rarely feasible. Propensity score matching (PSM) is a widely used method to approximate fair comparisons between treatment and control groups. Incorporating machine learning into the estimation of propensity scores can strengthen prediction and enhance the credibility of findings. However, stronger predictive models create a “predictability paradox”. As predictive accuracy improves, estimated propensity scores for treated and control units become more distinct when treatment assignment is strongly predictable from observed covariates, revealing limited overlap between groups. In the limit, near-perfect prediction produces near-complete separation between groups, rendering traditional matching infeasible and confining inference to a narrow subset of units near the boundary of the propensity score distribution, a setting analogous to a regression discontinuity design (RDD). Researchers thus face perverse incentives to use weaker models for statistically significant but spurious results. These dynamics jeopardize the reliability of evidence for policy. To safeguard decision-making, we propose a simple reform: require that studies using PSM disclose model error rates, including false positive and false negative rates, along with information on overlap and effective sample size.</description>
	<pubDate>2026-04-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 14, Pages 19: When Better Prediction Reduces Overlap: The Predictability Paradox in Propensity Score Matching with Machine Learning</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/14/2/19">doi: 10.3390/econometrics14020019</a></p>
	<p>Authors:
		Foong Soon Cheong
		</p>
	<p>Evidence from observational studies plays a central role in shaping public policy in health, education, and financial regulation, where randomized experiments are rarely feasible. Propensity score matching (PSM) is a widely used method to approximate fair comparisons between treatment and control groups. Incorporating machine learning into the estimation of propensity scores can strengthen prediction and enhance the credibility of findings. However, stronger predictive models create a “predictability paradox”. As predictive accuracy improves, estimated propensity scores for treated and control units become more distinct when treatment assignment is strongly predictable from observed covariates, revealing limited overlap between groups. In the limit, near-perfect prediction produces near-complete separation between groups, rendering traditional matching infeasible and confining inference to a narrow subset of units near the boundary of the propensity score distribution, a setting analogous to a regression discontinuity design (RDD). Researchers thus face perverse incentives to use weaker models for statistically significant but spurious results. These dynamics jeopardize the reliability of evidence for policy. To safeguard decision-making, we propose a simple reform: require that studies using PSM disclose model error rates, including false positive and false negative rates, along with information on overlap and effective sample size.</p>
	]]></content:encoded>

	<dc:title>When Better Prediction Reduces Overlap: The Predictability Paradox in Propensity Score Matching with Machine Learning</dc:title>
			<dc:creator>Foong Soon Cheong</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics14020019</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2026-04-01</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2026-04-01</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>19</prism:startingPage>
		<prism:doi>10.3390/econometrics14020019</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/14/2/19</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/14/2/18">

	<title>Econometrics, Vol. 14, Pages 18: Propensity Score and the Double Robust Estimator in the Tails</title>
	<link>https://www.mdpi.com/2225-1146/14/2/18</link>
	<description>This study analyzes the performance of the double robust estimator to compute the treatment effect, not only at the mean but also in the tails in a Monte Carlo experiment. While previous research focused on shifting the regression component of the double robust estimator toward the tail, here we focus on the behavior of the propensity score away from the mean. Investigating the tails of the regression outcome allows for a closer look at the observations that are either highly or poorly responsive to treatment. Examining the tails of the propensity score distribution scrutinizes the observations with a higher or lower probability of being treated, which can be non-constant and even asymmetric. The goal is to assess the behavior of the double robust estimator when both components are computed away from the sample mean, in the tails of the treatment and control distributions. A case study on Italian education concludes the analysis. We find a positive double robust difference in higher education across regions, larger at the top location, due to the significant internal migration of qualified workers toward the northern regions. Women’s employment is higher for highly educated women, and gender has a significant impact: the analysis of the mismatch between probabilities and outcomes signals that women achieve higher education at rates exceeding their probabilities; they are more likely to exceed their predicted likelihood of attaining higher education.</description>
	<pubDate>2026-03-31</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 14, Pages 18: Propensity Score and the Double Robust Estimator in the Tails</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/14/2/18">doi: 10.3390/econometrics14020018</a></p>
	<p>Authors:
		Marilena Furno
		</p>
	<p>This study analyzes the performance of the double robust estimator to compute the treatment effect, not only at the mean but also in the tails in a Monte Carlo experiment. While previous research focused on shifting the regression component of the double robust estimator toward the tail, here we focus on the behavior of the propensity score away from the mean. Investigating the tails of the regression outcome allows for a closer look at the observations that are either highly or poorly responsive to treatment. Examining the tails of the propensity score distribution scrutinizes the observations with a higher or lower probability of being treated, which can be non-constant and even asymmetric. The goal is to assess the behavior of the double robust estimator when both components are computed away from the sample mean, in the tails of the treatment and control distributions. A case study on Italian education concludes the analysis. We find a positive double robust difference in higher education across regions, larger at the top location, due to the significant internal migration of qualified workers toward the northern regions. Women’s employment is higher for highly educated women, and gender has a significant impact: the analysis of the mismatch between probabilities and outcomes signals that women achieve higher education at rates exceeding their probabilities; they are more likely to exceed their predicted likelihood of attaining higher education.</p>
	]]></content:encoded>

	<dc:title>Propensity Score and the Double Robust Estimator in the Tails</dc:title>
			<dc:creator>Marilena Furno</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics14020018</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2026-03-31</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2026-03-31</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>18</prism:startingPage>
		<prism:doi>10.3390/econometrics14020018</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/14/2/18</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/14/2/17">

	<title>Econometrics, Vol. 14, Pages 17: Nonparametric Autoregressive Copula Forecasting via Boundary-Reflected Kernel Estimation</title>
	<link>https://www.mdpi.com/2225-1146/14/2/17</link>
	<description>We propose a fully nonparametric empirical autoregressive copula framework for univariate time series, designed to capture nonlinear and asymmetric serial dependence while exactly preserving the empirical marginal distribution. The method decouples marginal behavior from temporal dependence by (i) constructing a shape-preserving empirical marginal via monotone interpolation and mapping observations to the unit interval, and (ii) estimating the lag–lead dependence through a nonparametric conditional AR(1) copula density on (0,1)2. To ensure stable estimation near the boundaries, we employ reflection-based kernel methods that mitigate edge effects and yield well-behaved conditional densities on the unit support. Forecasts are obtained from the implied conditional predictive density: we compute point forecasts either as conditional modes (maximum a posteriori) on the copula scale or as conditional means, and then back-transform exactly using the empirical quantile function, guaranteeing marginal fidelity and support-respecting predictions. Empirically, we evaluate the approach on three CBOE volatility indices (VIX, VXD, and RVX) and benchmark it against linear ARMA models, copula-based parametric competitors, and state-space/heteroskedasticity baselines (Local level, TVP–AR, and ARMA–GARCH). The results highlight that modeling the full conditional transition density nonparametrically can deliver competitive—often best or near-best—forecast accuracy across horizons, particularly in the presence of pronounced volatility regimes and asymmetric adjustments.</description>
	<pubDate>2026-03-28</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 14, Pages 17: Nonparametric Autoregressive Copula Forecasting via Boundary-Reflected Kernel Estimation</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/14/2/17">doi: 10.3390/econometrics14020017</a></p>
	<p>Authors:
		Guilherme Colombo Soares
		Márcio Poletti Laurini
		</p>
	<p>We propose a fully nonparametric empirical autoregressive copula framework for univariate time series, designed to capture nonlinear and asymmetric serial dependence while exactly preserving the empirical marginal distribution. The method decouples marginal behavior from temporal dependence by (i) constructing a shape-preserving empirical marginal via monotone interpolation and mapping observations to the unit interval, and (ii) estimating the lag–lead dependence through a nonparametric conditional AR(1) copula density on (0,1)2. To ensure stable estimation near the boundaries, we employ reflection-based kernel methods that mitigate edge effects and yield well-behaved conditional densities on the unit support. Forecasts are obtained from the implied conditional predictive density: we compute point forecasts either as conditional modes (maximum a posteriori) on the copula scale or as conditional means, and then back-transform exactly using the empirical quantile function, guaranteeing marginal fidelity and support-respecting predictions. Empirically, we evaluate the approach on three CBOE volatility indices (VIX, VXD, and RVX) and benchmark it against linear ARMA models, copula-based parametric competitors, and state-space/heteroskedasticity baselines (Local level, TVP–AR, and ARMA–GARCH). The results highlight that modeling the full conditional transition density nonparametrically can deliver competitive—often best or near-best—forecast accuracy across horizons, particularly in the presence of pronounced volatility regimes and asymmetric adjustments.</p>
	]]></content:encoded>

	<dc:title>Nonparametric Autoregressive Copula Forecasting via Boundary-Reflected Kernel Estimation</dc:title>
			<dc:creator>Guilherme Colombo Soares</dc:creator>
			<dc:creator>Márcio Poletti Laurini</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics14020017</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2026-03-28</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2026-03-28</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>17</prism:startingPage>
		<prism:doi>10.3390/econometrics14020017</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/14/2/17</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/14/1/16">

	<title>Econometrics, Vol. 14, Pages 16: Navigating Extreme Market Fluctuations: Asset Allocation Strategies in Developed vs. Emerging Economies</title>
	<link>https://www.mdpi.com/2225-1146/14/1/16</link>
	<description>This paper examines how assets from emerging and developed stock markets can be efficiently allocated during periods of financial crisis by integrating traditional portfolio theory with Extreme Value Theory (EVT), using the Generalized Pareto Distribution (GPD) and Generalized Extreme Value (GEV) approaches to model tail risks. This study evaluates mean-variance portfolios constructed under each EVT framework and finds that portfolios based on GPD estimates consistently favour emerging market assets, which outperform both developed market and internationally diversified portfolios during extreme market conditions. In contrast, GEV-based portfolios indicate superior performance for developed market assets, highlighting the distinct behaviour of returns in the upper and lower tails of the distribution. These contrasting results reveal the unique nature of safe-haven characteristics associated with developed economies, the assets of which demonstrate greater stability and resilience during episodes of financial stress. By showing how tail-risk modelling alters optimal portfolio weights across market types, this paper contributes new evidence to the literature on crisis-informed asset allocation and offers practical insights for investors seeking robust diversification strategies under extreme market fluctuations.</description>
	<pubDate>2026-03-17</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 14, Pages 16: Navigating Extreme Market Fluctuations: Asset Allocation Strategies in Developed vs. Emerging Economies</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/14/1/16">doi: 10.3390/econometrics14010016</a></p>
	<p>Authors:
		Lumengo Bonga-Bonga
		</p>
	<p>This paper examines how assets from emerging and developed stock markets can be efficiently allocated during periods of financial crisis by integrating traditional portfolio theory with Extreme Value Theory (EVT), using the Generalized Pareto Distribution (GPD) and Generalized Extreme Value (GEV) approaches to model tail risks. This study evaluates mean-variance portfolios constructed under each EVT framework and finds that portfolios based on GPD estimates consistently favour emerging market assets, which outperform both developed market and internationally diversified portfolios during extreme market conditions. In contrast, GEV-based portfolios indicate superior performance for developed market assets, highlighting the distinct behaviour of returns in the upper and lower tails of the distribution. These contrasting results reveal the unique nature of safe-haven characteristics associated with developed economies, the assets of which demonstrate greater stability and resilience during episodes of financial stress. By showing how tail-risk modelling alters optimal portfolio weights across market types, this paper contributes new evidence to the literature on crisis-informed asset allocation and offers practical insights for investors seeking robust diversification strategies under extreme market fluctuations.</p>
	]]></content:encoded>

	<dc:title>Navigating Extreme Market Fluctuations: Asset Allocation Strategies in Developed vs. Emerging Economies</dc:title>
			<dc:creator>Lumengo Bonga-Bonga</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics14010016</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2026-03-17</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2026-03-17</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>16</prism:startingPage>
		<prism:doi>10.3390/econometrics14010016</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/14/1/16</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/14/1/15">

	<title>Econometrics, Vol. 14, Pages 15: Double-Edged Sword of Diversification: Commodities and African Equity Indices in Robust vs. Optimal Portfolio Strategies</title>
	<link>https://www.mdpi.com/2225-1146/14/1/15</link>
	<description>This study empirically investigates a central tension in quantitative finance: the divergence between theoretically optimal and robust portfolio construction under real-world estimation uncertainty. Using a dynamic, time-varying optimization framework, we compare the performance of three distinct strategies: the Maximum Sharpe ratio (P1), Minimum Variance (P2), and Maximum Entropy (P3) portfolios, with and without commodity proxy inclusion (gold and oil) in a multi-asset universe featuring prominent African equity indices. Our key finding challenges classical theory: the robust Maximum Entropy portfolio (P3) achieved superior realized risk-adjusted returns (Sharpe ratio: 1.164) compared to the theoretically optimal Maximum Sharpe portfolio (P1, Sharpe: 0.788). This result validates the “estimation-error maximization” critique, as P1’s performance was undermined by its sensitivity to noisy inputs. Conversely, the Minimum Variance portfolio (P2) successfully fulfilled its objective, achieving the lowest volatility (~5%) at the cost of modest returns (3.01–3.64%), illustrating the classic risk–return trade-off. Euler decomposition revealed that even this low-volatility portfolio exhibited significant concentration risk, with over 40% of its risk attributable to just three assets. The role of commodities is proven to be strategy contingent. They significantly enhanced returns and the Sharpe ratio for the aggressive P1 but were marginally detrimental to the robust P3. African market indices played specialized roles: Egypt and Nigeria acted as return drivers in P1, Morocco became a major risk contributor within the concentrated P2 strategy, and South Africa provided key diversification in the well-balanced P3. 
Ultimately, the study demonstrates that portfolio risk is determined more by asset concentration and diversification quality than by geographic labels, and that robust diversification methodologies outperform fragile theoretical optima in practice. We conclude that portfolio construction must prioritize robustness to estimation error and explicit risk-balancing to ensure stable, real-world performance.</description>
	<pubDate>2026-03-16</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 14, Pages 15: Double-Edged Sword of Diversification: Commodities and African Equity Indices in Robust vs. Optimal Portfolio Strategies</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/14/1/15">doi: 10.3390/econometrics14010015</a></p>
	<p>Authors:
		Anaclet K. Kitenge
		John W. M. Mwamba
		Jules C. Mba
		</p>
	<p>This study empirically investigates a central tension in quantitative finance: the divergence between theoretically optimal and robust portfolio construction under real-world estimation uncertainty. Using a dynamic, time-varying optimization framework, we compare the performance of three distinct strategies: the Maximum Sharpe ratio (P1), Minimum Variance (P2), and Maximum Entropy (P3) portfolios, with and without commodity proxy inclusion (gold and oil) in a multi-asset universe featuring prominent African equity indices. Our key finding challenges classical theory: the robust Maximum Entropy portfolio (P3) achieved superior realized risk-adjusted returns (Sharpe ratio: 1.164) compared to the theoretically optimal Maximum Sharpe portfolio (P1, Sharpe: 0.788). This result validates the “estimation-error maximization” critique, as P1’s performance was undermined by its sensitivity to noisy inputs. Conversely, the Minimum Variance portfolio (P2) successfully fulfilled its objective, achieving the lowest volatility (~5%) at the cost of modest returns (3.01–3.64%), illustrating the classic risk–return trade-off. Euler decomposition revealed that even this low-volatility portfolio exhibited significant concentration risk, with over 40% of its risk attributable to just three assets. The role of commodities is proven to be strategy contingent. They significantly enhanced returns and the Sharpe ratio for the aggressive P1 but were marginally detrimental to the robust P3. African market indices played specialized roles: Egypt and Nigeria acted as return drivers in P1, Morocco became a major risk contributor within the concentrated P2 strategy, and South Africa provided key diversification in the well-balanced P3. 
Ultimately, the study demonstrates that portfolio risk is determined more by asset concentration and diversification quality than by geographic labels, and that robust diversification methodologies outperform fragile theoretical optima in practice. We conclude that portfolio construction must prioritize robustness to estimation error and explicit risk-balancing to ensure stable, real-world performance.</p>
	]]></content:encoded>

	<dc:title>Double-Edged Sword of Diversification: Commodities and African Equity Indices in Robust vs. Optimal Portfolio Strategies</dc:title>
			<dc:creator>Anaclet K. Kitenge</dc:creator>
			<dc:creator>John W. M. Mwamba</dc:creator>
			<dc:creator>Jules C. Mba</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics14010015</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2026-03-16</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2026-03-16</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>15</prism:startingPage>
		<prism:doi>10.3390/econometrics14010015</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/14/1/15</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/14/1/14">

	<title>Econometrics, Vol. 14, Pages 14: A New Functional Setting for Term Structure Modeling Using the Heath–Jarrow–Morton Framework</title>
	<link>https://www.mdpi.com/2225-1146/14/1/14</link>
	<description>The well-known Heath–Jarrow–Morton (HJM) framework provides a universal and efficacious instrument for modeling the stochastic evolution of an entire yield curve by explaining the interest rate dynamics in continuous time under no-arbitrage conditions. Existing implementations involve exponentially weighted function spaces as theoretical settings for the former stochastic evolution. While the choice of weight can have a drastic effect on model calibration and subsequent forecasting, it cannot be estimated from market data and does not allow for any objective interpretation. The proposed approach does not have this shortcoming as it adopts a suitably designed unweighted function space. The HJM equation is discretized using a finite difference approach. The resulting semiparametric model is then calibrated on real-world yield data with a new type of functional principal component analysis (PCA)-based approach. Backtesting and benchmarking are conducted against the one-factor Vasicek model using historical data to illustrate its simulation capabilities for prediction and uncertainty quantification. Additionally, in contrast to widely studied US treasuries, negative interest rates are observed for AAA Euro Bonds during the sample period employed for this study. Accordingly, the framework allows for the possibility of negative yields.</description>
	<pubDate>2026-03-11</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 14, Pages 14: A New Functional Setting for Term Structure Modeling Using the Heath–Jarrow–Morton Framework</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/14/1/14">doi: 10.3390/econometrics14010014</a></p>
	<p>Authors:
		Michael Pokojovy
		Ebenezer Nkum
		Thomas M. Fullerton
		</p>
	<p>The well-known Heath–Jarrow–Morton (HJM) framework provides a universal and efficacious instrument for modeling the stochastic evolution of an entire yield curve by explaining the interest rate dynamics in continuous time under no-arbitrage conditions. Existing implementations involve exponentially weighted function spaces as theoretical settings for the former stochastic evolution. While the choice of weight can have a drastic effect on model calibration and subsequent forecasting, it cannot be estimated from market data and does not allow for any objective interpretation. The proposed approach does not have this shortcoming as it adopts a suitably designed unweighted function space. The HJM equation is discretized using a finite difference approach. The resulting semiparametric model is then calibrated on real-world yield data with a new type of functional principal component analysis (PCA)-based approach. Backtesting and benchmarking are conducted against the one-factor Vasicek model using historical data to illustrate its simulation capabilities for prediction and uncertainty quantification. Additionally, in contrast to widely studied US treasuries, negative interest rates are observed for AAA Euro Bonds during the sample period employed for this study. Accordingly, the framework allows for the possibility of negative yields.</p>
	]]></content:encoded>

	<dc:title>A New Functional Setting for Term Structure Modeling Using the Heath–Jarrow–Morton Framework</dc:title>
			<dc:creator>Michael Pokojovy</dc:creator>
			<dc:creator>Ebenezer Nkum</dc:creator>
			<dc:creator>Thomas M. Fullerton</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics14010014</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2026-03-11</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2026-03-11</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>14</prism:startingPage>
		<prism:doi>10.3390/econometrics14010014</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/14/1/14</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/14/1/13">

	<title>Econometrics, Vol. 14, Pages 13: Analysis of School Absenteeism for Single- vs. Two-Parent Families: A Finite Mixture Roy Approach</title>
	<link>https://www.mdpi.com/2225-1146/14/1/13</link>
	<description>This paper analyzes factors affecting school absenteeism due to an injury or illness among the US school student population between 6 and 15 years of age. The number of missed school days displays overdispersion and is modeled using the Finite Mixture Roy (FMR) model for count variables. The married/single parent family status (treatment) is potentially endogenous to the dependent variable (missed days). The Roy structure controls observed heterogeneity due to the mother’s marital status. Finite mixtures are intended to control unobserved heterogeneity due to healthy and unhealthy children in the sample. This approach facilitates identification of latent subpopulations in which treatment and marginal effects are relatively homogeneous. The model also incorporates two application-driven extensions. First, probabilities of the latent components are modeled as functions of regressors. Secondly, the mother’s income affects treatment nonparametrically. The FMR model is estimated with two latent components in each state, corresponding to healthy and unhealthy students. The results indicate that maternal marital status decreases annual missed school days by approximately 13 percent for a randomly drawn child; however, this increases absenteeism by about 14 percent among families that self-select into two-parent households, which is evidence of adverse selection.</description>
	<pubDate>2026-03-09</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 14, Pages 13: Analysis of School Absenteeism for Single- vs. Two-Parent Families: A Finite Mixture Roy Approach</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/14/1/13">doi: 10.3390/econometrics14010013</a></p>
	<p>Authors:
		Murat K. Munkin
		David Zimmer
		</p>
	<p>This paper analyzes factors affecting school absenteeism due to an injury or illness among the US school student population between 6 and 15 years of age. The number of missed school days displays overdispersion and is modeled using the Finite Mixture Roy (FMR) model for count variables. The married/single parent family status (treatment) is potentially endogenous to the dependent variable (missed days). The Roy structure controls observed heterogeneity due to the mother&amp;amp;rsquo;s marital status. Finite mixtures are intended to control unobserved heterogeneity due to healthy and unhealthy children in the sample. This approach facilitates identification of latent subpopulations in which treatment and marginal effects are relatively homogeneous. The model also incorporates two application-driven extensions. First, probabilities of the latent components are modeled as functions of regressors. Secondly, the mother&amp;amp;rsquo;s income affects treatment nonparametrically. The FMR model is estimated with two latent components in each state, corresponding to healthy and unhealthy students. The results indicate that maternal marital status decreases annual missed school days by approximately 13 percent for a randomly drawn child; however, this increases absenteeism by about 14 percent among families that self-select into two-parent households, which is evidence of adverse selection.</p>
	]]></content:encoded>

	<dc:title>Analysis of School Absenteeism for Single- vs. Two-Parent Families: A Finite Mixture Roy Approach</dc:title>
			<dc:creator>Murat K. Munkin</dc:creator>
			<dc:creator>David Zimmer</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics14010013</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2026-03-09</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2026-03-09</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>13</prism:startingPage>
		<prism:doi>10.3390/econometrics14010013</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/14/1/13</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/14/1/12">

	<title>Econometrics, Vol. 14, Pages 12: Using Subspace Algorithms for the Estimation of Linear State Space Models for Over-Differenced Processes</title>
	<link>https://www.mdpi.com/2225-1146/14/1/12</link>
	<description>Subspace algorithms like canonical variate analysis (CVA) are regression-based methods for the estimation of linear dynamic state space models. They have been shown to deliver accurate (consistent and asymptotically equivalent to quasi-maximum likelihood estimation using the Gaussian likelihood) estimators for stably invertible stationary autoregressive moving average (ARMA) processes. These results use the assumption that there are no zeros of the spectral density on the unit circle corresponding to the state space system. In this technical study, we consider vector processes made stationary by applying differencing to all variables, ignoring potential co-integrating relations. This leads to spectral zeros violating the above mentioned assumptions. We show consistency for the CVA estimators, closing a gap in the literature. However, a simulation exercise shows that over-differencing (while leading to consistent estimation of the transfer function) also complicates inference for CVA estimators, not just maximum likelihood-based estimators. This is also demonstrated in a real-world data example. The result also applies to seasonal differencing. The present paper hence suggests working with original data, not working in differences.</description>
	<pubDate>2026-02-28</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 14, Pages 12: Using Subspace Algorithms for the Estimation of Linear State Space Models for Over-Differenced Processes</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/14/1/12">doi: 10.3390/econometrics14010012</a></p>
	<p>Authors:
		Dietmar Bauer
		</p>
	<p>Subspace algorithms like canonical variate analysis (CVA) are regression-based methods for the estimation of linear dynamic state space models. They have been shown to deliver accurate (consistent and asymptotically equivalent to quasi-maximum likelihood estimation using the Gaussian likelihood) estimators for stably invertible stationary autoregressive moving average (ARMA) processes. These results use the assumption that there are no zeros of the spectral density on the unit circle corresponding to the state space system. In this technical study, we consider vector processes made stationary by applying differencing to all variables, ignoring potential co-integrating relations. This leads to spectral zeros violating the above mentioned assumptions. We show consistency for the CVA estimators, closing a gap in the literature. However, a simulation exercise shows that over-differencing (while leading to consistent estimation of the transfer function) also complicates inference for CVA estimators, not just maximum likelihood-based estimators. This is also demonstrated in a real-world data example. The result also applies to seasonal differencing. The present paper hence suggests working with original data, not working in differences.</p>
	]]></content:encoded>

	<dc:title>Using Subspace Algorithms for the Estimation of Linear State Space Models for Over-Differenced Processes</dc:title>
			<dc:creator>Dietmar Bauer</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics14010012</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2026-02-28</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2026-02-28</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>12</prism:startingPage>
		<prism:doi>10.3390/econometrics14010012</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/14/1/12</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/14/1/11">

	<title>Econometrics, Vol. 14, Pages 11: Graph Attention Networks in Exchange Rate Forecasting</title>
	<link>https://www.mdpi.com/2225-1146/14/1/11</link>
	<description>Exchange rate forecasting is an important issue in financial market analysis. Currency rates form a dynamic network of connections that can be efficiently modeled using graph neural networks (GNNs). The key mechanism of GNNs is the message passing between nodes, allowing for better modeling of currency interactions. Each node updates its representation by aggregating features from its neighbors and combining them with its own. In convolutional graph neural networks (GCNs), all neighboring nodes are treated equally, but in reality, some may have a greater influence than others. To account for this changing importance of neighbors, graph attention networks (GAT) have been introduced. The aim of the study was to evaluate the effectiveness of GAT in forecasting exchange rates. The analysis covered time series of major world currencies from 2020 to 2024. The forecasting results obtained using GAT were compared with those obtained from benchmark models such as ARIMA, GARCH, MLP, GCN, and LSTM-GCN. The study showed that GAT networks outperform numerous methods. The results may have practical applications, supporting investors and analysts in decision-making.</description>
	<pubDate>2026-02-25</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 14, Pages 11: Graph Attention Networks in Exchange Rate Forecasting</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/14/1/11">doi: 10.3390/econometrics14010011</a></p>
	<p>Authors:
		Joanna Landmesser-Rusek
		Arkadiusz Orłowski
		</p>
	<p>Exchange rate forecasting is an important issue in financial market analysis. Currency rates form a dynamic network of connections that can be efficiently modeled using graph neural networks (GNNs). The key mechanism of GNNs is the message passing between nodes, allowing for better modeling of currency interactions. Each node updates its representation by aggregating features from its neighbors and combining them with its own. In convolutional graph neural networks (GCNs), all neighboring nodes are treated equally, but in reality, some may have a greater influence than others. To account for this changing importance of neighbors, graph attention networks (GAT) have been introduced. The aim of the study was to evaluate the effectiveness of GAT in forecasting exchange rates. The analysis covered time series of major world currencies from 2020 to 2024. The forecasting results obtained using GAT were compared with those obtained from benchmark models such as ARIMA, GARCH, MLP, GCN, and LSTM-GCN. The study showed that GAT networks outperform numerous methods. The results may have practical applications, supporting investors and analysts in decision-making.</p>
	]]></content:encoded>

	<dc:title>Graph Attention Networks in Exchange Rate Forecasting</dc:title>
			<dc:creator>Joanna Landmesser-Rusek</dc:creator>
			<dc:creator>Arkadiusz Orłowski</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics14010011</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2026-02-25</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2026-02-25</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>11</prism:startingPage>
		<prism:doi>10.3390/econometrics14010011</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/14/1/11</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/14/1/10">

	<title>Econometrics, Vol. 14, Pages 10: Application of Resolution Regression and Resolution Graphs in Evaluating Probability Forecasts Generated Using Binary Choice Models</title>
	<link>https://www.mdpi.com/2225-1146/14/1/10</link>
	<description>Binary choice models are widely used in econometric modeling when the dependent variable corresponds to discrete outcomes. With appropriate decision rules, these models provide predictions of binary choices generated from predicted probabilities. The accuracy of these predictions in terms of classifying probabilities to events that occurred versus those that did not is a key issue. The use of expectation-prediction success at present is the standard method used to assess the accuracy of these predictions. However, this method is limited in its ability to correctly classify probabilities in the absence of appropriate predetermined cut-off levels. We propose alternative methods to classify probabilities generated through binary choice models, namely resolution graphs and resolution regressions that measure the ability to sort predicted probabilities against observed outcomes. Using probabilities generated from the use of logit models applied to purchasing decisions of various non-alcoholic beverages made by U.S. households, we compare probability sorting power using expectation-prediction success as well as resolution graphs and resolution regressions. Based on expectation-prediction success, the logit models performed better at classifying outcomes related to purchasing isotonic drinks, regular soft drinks, diet drinks, bottled water, and tea. Based on resolution regressions, the null hypothesis of perfect sorting of probabilities was rejected for all non-alcoholic beverages. Although the logit models generated upward-sloping resolution graphs as expected, they were relatively flat compared to the 45-degree perfect sorting line. Going forward, we recommend using resolution regression and resolution graphs to capture sorting of probabilities in addition to the conventional metrics used in ascertaining the ability of binary choice models to predict out-of-sample behavior.</description>
	<pubDate>2026-02-24</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 14, Pages 10: Application of Resolution Regression and Resolution Graphs in Evaluating Probability Forecasts Generated Using Binary Choice Models</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/14/1/10">doi: 10.3390/econometrics14010010</a></p>
	<p>Authors:
		Senarath Dharmasena
		David A. Bessler
		Oral Capps
		</p>
	<p>Binary choice models are widely used in econometric modeling when the dependent variable corresponds to discrete outcomes. With appropriate decision rules, these models provide predictions of binary choices generated from predicted probabilities. The accuracy of these predictions in terms of classifying probabilities to events that occurred versus those that did not is a key issue. The use of expectation-prediction success at present is the standard method used to assess the accuracy of these predictions. However, this method is limited in its ability to correctly classify probabilities in the absence of appropriate predetermined cut-off levels. We propose alternative methods to classify probabilities generated through binary choice models, namely resolution graphs and resolution regressions that measure the ability to sort predicted probabilities against observed outcomes. Using probabilities generated from the use of logit models applied to purchasing decisions of various non-alcoholic beverages made by U.S. households, we compare probability sorting power using expectation-prediction success as well as resolution graphs and resolution regressions. Based on expectation-prediction success, the logit models performed better at classifying outcomes related to purchasing isotonic drinks, regular soft drinks, diet drinks, bottled water, and tea. Based on resolution regressions, the null hypothesis of perfect sorting of probabilities was rejected for all non-alcoholic beverages. Although the logit models generated upward-sloping resolution graphs as expected, they were relatively flat compared to the 45-degree perfect sorting line. Going forward, we recommend using resolution regression and resolution graphs to capture sorting of probabilities in addition to the conventional metrics used in ascertaining the ability of binary choice models to predict out-of-sample behavior.</p>
	]]></content:encoded>

	<dc:title>Application of Resolution Regression and Resolution Graphs in Evaluating Probability Forecasts Generated Using Binary Choice Models</dc:title>
			<dc:creator>Senarath Dharmasena</dc:creator>
			<dc:creator>David A. Bessler</dc:creator>
			<dc:creator>Oral Capps</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics14010010</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2026-02-24</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2026-02-24</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>10</prism:startingPage>
		<prism:doi>10.3390/econometrics14010010</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/14/1/10</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/14/1/9">

	<title>Econometrics, Vol. 14, Pages 9: Econometric Analysis and Forecasts on Exports of Emerging Economies from Central and Eastern Europe</title>
	<link>https://www.mdpi.com/2225-1146/14/1/9</link>
	<description>This study examines the evolution, heterogeneity, and short-term prospects of export performance in seven Central and Eastern European (CEE) economies&amp;mdash;Croatia, Czech Republic, Hungary, Poland, Romania, Bulgaria, and Slovakia&amp;mdash;over the period 1995&amp;ndash;2024. Using annual World Bank data, exports are modeled as a share of GDP to ensure cross-country comparability and to capture differences in trade dependence. The analysis combines descriptive and inferential statistics with Augmented Dickey&amp;ndash;Fuller tests, non-parametric comparisons, Granger causality analysis, and country-specific ARIMA models to investigate export dynamics, the role of foreign direct investment (FDI), and future export trajectories. The results reveal a common long-term upward trend in export intensity across all countries, driven by European integration and structural transformation, but with pronounced cross-country differences in export dependence and volatility. Highly open economies such as Slovakia, Hungary, and the Czech Republic exhibit strong export performance alongside greater exposure to external shocks, while larger domestic markets such as Poland and Romania display lower export intensity and greater stabilization. Granger causality tests indicate that FDI contributes to export growth in several economies, often with multi-year lags, highlighting the importance of absorptive capacity and institutional quality in translating investment inflows into export competitiveness. ARIMA-based forecasts for 2025&amp;ndash;2027 suggest continued export expansion and relative stabilization despite recent global disruptions. This study&amp;rsquo;s primary contribution lies in integrating comparative export analysis, causality testing, and short-term forecasting within a unified econometric framework, offering policy-relevant insights into export-led growth and economic convergence in post-transition European economies.</description>
	<pubDate>2026-02-14</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 14, Pages 9: Econometric Analysis and Forecasts on Exports of Emerging Economies from Central and Eastern Europe</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/14/1/9">doi: 10.3390/econometrics14010009</a></p>
	<p>Authors:
		Liviu Popescu
		Mirela Găman
		Laurențiu Stelian Mihai
		Cristian Ovidiu Drăgan
		Daniel Militaru
		Ion Buligiu
		</p>
	<p>This study examines the evolution, heterogeneity, and short-term prospects of export performance in seven Central and Eastern European (CEE) economies&amp;amp;mdash;Croatia, Czech Republic, Hungary, Poland, Romania, Bulgaria, and Slovakia&amp;amp;mdash;over the period 1995&amp;amp;ndash;2024. Using annual World Bank data, exports are modeled as a share of GDP to ensure cross-country comparability and to capture differences in trade dependence. The analysis combines descriptive and inferential statistics with Augmented Dickey&amp;amp;ndash;Fuller tests, non-parametric comparisons, Granger causality analysis, and country-specific ARIMA models to investigate export dynamics, the role of foreign direct investment (FDI), and future export trajectories. The results reveal a common long-term upward trend in export intensity across all countries, driven by European integration and structural transformation, but with pronounced cross-country differences in export dependence and volatility. Highly open economies such as Slovakia, Hungary, and the Czech Republic exhibit strong export performance alongside greater exposure to external shocks, while larger domestic markets such as Poland and Romania display lower export intensity and greater stabilization. Granger causality tests indicate that FDI contributes to export growth in several economies, often with multi-year lags, highlighting the importance of absorptive capacity and institutional quality in translating investment inflows into export competitiveness. ARIMA-based forecasts for 2025&amp;amp;ndash;2027 suggest continued export expansion and relative stabilization despite recent global disruptions. This study&amp;amp;rsquo;s primary contribution lies in integrating comparative export analysis, causality testing, and short-term forecasting within a unified econometric framework, offering policy-relevant insights into export-led growth and economic convergence in post-transition European economies.</p>
	]]></content:encoded>

	<dc:title>Econometric Analysis and Forecasts on Exports of Emerging Economies from Central and Eastern Europe</dc:title>
			<dc:creator>Liviu Popescu</dc:creator>
			<dc:creator>Mirela Găman</dc:creator>
			<dc:creator>Laurențiu Stelian Mihai</dc:creator>
			<dc:creator>Cristian Ovidiu Drăgan</dc:creator>
			<dc:creator>Daniel Militaru</dc:creator>
			<dc:creator>Ion Buligiu</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics14010009</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2026-02-14</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2026-02-14</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>9</prism:startingPage>
		<prism:doi>10.3390/econometrics14010009</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/14/1/9</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/14/1/8">

	<title>Econometrics, Vol. 14, Pages 8: Posterior Probabilities of Dominance for Wealth Distributions</title>
	<link>https://www.mdpi.com/2225-1146/14/1/8</link>
	<description>Probability distributions, which are typically used to describe income distributions, are not suitable to describe a population&amp;amp;rsquo;s distribution of wealth because of the existence of negative observations and a large concentration of values close to zero. To overcome these problems, we describe how the asymmetric Laplace distribution can be used for modelling wealth distributions and illustrate how it can be used to compute the posterior probabilities of first- and second-order stochastic dominance. Stochastic dominance concepts are useful for comparing wealth distributions and assessing whether changes in welfare have increased or decreased welfare in society. We use three distributions to make two such comparisons. The results are such that, in one comparison, one distribution clearly dominates the other. There is more uncertainty about dominance in the other comparison, with no dominance being the most likely outcome.</description>
	<pubDate>2026-02-12</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 14, Pages 8: Posterior Probabilities of Dominance for Wealth Distributions</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/14/1/8">doi: 10.3390/econometrics14010008</a></p>
	<p>Authors:
		William Griffiths
		Duangkamon Chotikapanich
		</p>
	<p>Probability distributions, which are typically used to describe income distributions, are not suitable to describe a population&amp;amp;rsquo;s distribution of wealth because of the existence of negative observations and a large concentration of values close to zero. To overcome these problems, we describe how the asymmetric Laplace distribution can be used for modelling wealth distributions and illustrate how it can be used to compute the posterior probabilities of first- and second-order stochastic dominance. Stochastic dominance concepts are useful for comparing wealth distributions and assessing whether changes in welfare have increased or decreased welfare in society. We use three distributions to make two such comparisons. The results are such that, in one comparison, one distribution clearly dominates the other. There is more uncertainty about dominance in the other comparison, with no dominance being the most likely outcome.</p>
	]]></content:encoded>

	<dc:title>Posterior Probabilities of Dominance for Wealth Distributions</dc:title>
			<dc:creator>William Griffiths</dc:creator>
			<dc:creator>Duangkamon Chotikapanich</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics14010008</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2026-02-12</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2026-02-12</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>8</prism:startingPage>
		<prism:doi>10.3390/econometrics14010008</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/14/1/8</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/14/1/7">

	<title>Econometrics, Vol. 14, Pages 7: Social Security Transfers and Fiscal Sustainability in Turkey: Evidence from 1984&amp;ndash;2024</title>
	<link>https://www.mdpi.com/2225-1146/14/1/7</link>
	<description>Social security systems constitute a structurally significant component of public finance in developing economies and often generate persistent fiscal pressures through budgetary transfers. Demographic transformation, widespread informality in labor markets, and weaknesses in contribution-based financing increase the dependence of social security systems on public resources. The objective of this study is to examine whether budget transfers to the social security system affect fiscal sustainability in Turkey by analyzing their relationship with the budget deficit and the public sector borrowing requirement. The analysis employs annual data for Turkey covering the period of 1984&amp;amp;ndash;2024. A comprehensive time-series econometric framework is adopted, incorporating conventional and structural-break unit root tests, the ARDL bounds testing approach with error correction modeling, and the Toda&amp;amp;ndash;Yamamoto causality method. The empirical findings provide evidence of a stable long-run relationship among the variables. The results indicate that social security budget transfers exert a statistically significant and persistent effect on the public sector borrowing requirement, while no direct long-run effect on the headline budget deficit is detected. Causality results further confirm that fiscal pressures associated with social security financing materialize primarily through borrowing dynamics rather than short-term budgetary imbalances. By explicitly modelling social security budget transfers as an independent fiscal channel over a long historical horizon, this study contributes to the literature by offering new empirical insights into the fiscal sustainability implications of social security financing in Turkey. The findings also provide policy-relevant evidence for developing economies facing similar institutional, demographic, and fiscal challenges.</description>
	<pubDate>2026-01-31</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 14, Pages 7: Social Security Transfers and Fiscal Sustainability in Turkey: Evidence from 1984&ndash;2024</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/14/1/7">doi: 10.3390/econometrics14010007</a></p>
	<p>Authors:
		Huriye Gonca Diler
		Nurgül E. Barın
		Ercan Özen
		Simon Grima
		</p>
	<p>Social security systems constitute a structurally significant component of public finance in developing economies and often generate persistent fiscal pressures through budgetary transfers. Demographic transformation, widespread informality in labor markets, and weaknesses in contribution-based financing increase the dependence of social security systems on public resources. The objective of this study is to examine whether budget transfers to the social security system affect fiscal sustainability in Turkey by analyzing their relationship with the budget deficit and the public sector borrowing requirement. The analysis employs annual data for Turkey covering the period of 1984&amp;amp;ndash;2024. A comprehensive time-series econometric framework is adopted, incorporating conventional and structural-break unit root tests, the ARDL bounds testing approach with error correction modeling, and the Toda&amp;amp;ndash;Yamamoto causality method. The empirical findings provide evidence of a stable long-run relationship among the variables. The results indicate that social security budget transfers exert a statistically significant and persistent effect on the public sector borrowing requirement, while no direct long-run effect on the headline budget deficit is detected. Causality results further confirm that fiscal pressures associated with social security financing materialize primarily through borrowing dynamics rather than short-term budgetary imbalances. By explicitly modelling social security budget transfers as an independent fiscal channel over a long historical horizon, this study contributes to the literature by offering new empirical insights into the fiscal sustainability implications of social security financing in Turkey. The findings also provide policy-relevant evidence for developing economies facing similar institutional, demographic, and fiscal challenges.</p>
	]]></content:encoded>

	<dc:title>Social Security Transfers and Fiscal Sustainability in Turkey: Evidence from 1984&amp;ndash;2024</dc:title>
			<dc:creator>Huriye Gonca Diler</dc:creator>
			<dc:creator>Nurgül E. Barın</dc:creator>
			<dc:creator>Ercan Özen</dc:creator>
			<dc:creator>Simon Grima</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics14010007</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2026-01-31</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2026-01-31</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>7</prism:startingPage>
		<prism:doi>10.3390/econometrics14010007</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/14/1/7</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/14/1/6">

	<title>Econometrics, Vol. 14, Pages 6: Binance USD Delisting and Stablecoins Repercussions: A Local Projections Approach</title>
	<link>https://www.mdpi.com/2225-1146/14/1/6</link>
	<description>The delisting of Binance USD (BUSD) constitutes a major regulatory intervention in the stablecoin market and provides a unique opportunity to examine how targeted regulation affects liquidity allocation, market concentration, and short-run systemic risk in crypto-asset markets. Using daily data for 2023 and a linear and nonlinear Local Projections event-study framework, this paper analyzes the dynamic market responses to the BUSD delisting across major stablecoins and cryptocurrencies. The results show that liquidity displaced from BUSD is reallocated primarily toward USDT and USDC, leading to a measurable increase in stablecoin market concentration, while decentralized and algorithmic stablecoins absorb only a limited share of the shock. At the same time, Bitcoin and Ethereum experience temporary liquidity contractions followed by a relatively rapid recovery, suggesting conditional resilience of core crypto-assets. Overall, the findings document how a regulatory-induced exit of a major stablecoin reshapes short-run market dynamics and concentration patterns, highlighting potential trade-offs between regulatory enforcement and market structure. The paper contributes to the literature by providing the first empirical analysis of the BUSD delisting and by illustrating the usefulness of Local Projections for studying regulatory shocks in cryptocurrency markets.</description>
	<pubDate>2026-01-16</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 14, Pages 6: Binance USD Delisting and Stablecoins Repercussions: A Local Projections Approach</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/14/1/6">doi: 10.3390/econometrics14010006</a></p>
	<p>Authors:
		Papa Ousseynou Diop
		Julien Chevallier
		</p>
	<p>The delisting of Binance USD (BUSD) constitutes a major regulatory intervention in the stablecoin market and provides a unique opportunity to examine how targeted regulation affects liquidity allocation, market concentration, and short-run systemic risk in crypto-asset markets. Using daily data for 2023 and a linear and nonlinear Local Projections event-study framework, this paper analyzes the dynamic market responses to the BUSD delisting across major stablecoins and cryptocurrencies. The results show that liquidity displaced from BUSD is reallocated primarily toward USDT and USDC, leading to a measurable increase in stablecoin market concentration, while decentralized and algorithmic stablecoins absorb only a limited share of the shock. At the same time, Bitcoin and Ethereum experience temporary liquidity contractions followed by a relatively rapid recovery, suggesting conditional resilience of core crypto-assets. Overall, the findings document how a regulatory-induced exit of a major stablecoin reshapes short-run market dynamics and concentration patterns, highlighting potential trade-offs between regulatory enforcement and market structure. The paper contributes to the literature by providing the first empirical analysis of the BUSD delisting and by illustrating the usefulness of Local Projections for studying regulatory shocks in cryptocurrency markets.</p>
	]]></content:encoded>

	<dc:title>Binance USD Delisting and Stablecoins Repercussions: A Local Projections Approach</dc:title>
			<dc:creator>Papa Ousseynou Diop</dc:creator>
			<dc:creator>Julien Chevallier</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics14010006</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2026-01-16</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2026-01-16</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>6</prism:startingPage>
		<prism:doi>10.3390/econometrics14010006</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/14/1/6</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/14/1/5">

	<title>Econometrics, Vol. 14, Pages 5: Shock Next Door: Geographic Spillovers in FinTech Lending After Natural Disasters</title>
	<link>https://www.mdpi.com/2225-1146/14/1/5</link>
	<description>We examine geographic spillovers in digital credit markets by studying how natural disasters affect borrowing behavior in adjacent, physically undamaged regions. Using granular loan-level data from Indonesia&amp;rsquo;s largest FinTech lender (2021&amp;ndash;2023) and leveraging quasi-random variation in disaster timing and location, we estimate fixed-effects specifications that incorporate spatially lagged disaster exposure (an SLX-type spatial approach) to quantify spillovers. Disasters generate economically significant spillovers in neighboring provinces: a 1% increase in disaster frequency raises local borrowing by 0.036%, approximately 20% of the direct effect. Spillovers vary sharply with geographic connectivity&amp;mdash;land-connected provinces experience effects about 6.6 times larger than sea-connected provinces. These results highlight that digital lending platforms can transmit geographically proximate risks beyond directly affected areas through channels that differ from traditional banking networks. The systematic nature of these spillovers suggests that disaster-response strategies may be more effective when they consider adjacent regions, and that platform risk management can be strengthened by integrating spatial disaster exposure and connectivity into credit monitoring and decision rules.</description>
	<pubDate>2026-01-15</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 14, Pages 5: Shock Next Door: Geographic Spillovers in FinTech Lending After Natural Disasters</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/14/1/5">doi: 10.3390/econometrics14010005</a></p>
	<p>Authors:
		David Kuo Chuen Lee
		Weibiao Xu
		Jianzheng Shi
		Yue Wang
		Ding Ding
		</p>
	<p>We examine geographic spillovers in digital credit markets by studying how natural disasters affect borrowing behavior in adjacent, physically undamaged regions. Using granular loan-level data from Indonesia&rsquo;s largest FinTech lender (2021&ndash;2023) and leveraging quasi-random variation in disaster timing and location, we estimate fixed-effects specifications that incorporate spatially lagged disaster exposure (an SLX-type spatial approach) to quantify spillovers. Disasters generate economically significant spillovers in neighboring provinces: a 1% increase in disaster frequency raises local borrowing by 0.036%, approximately 20% of the direct effect. Spillovers vary sharply with geographic connectivity&mdash;land-connected provinces experience effects about 6.6 times larger than sea-connected provinces. These results highlight that digital lending platforms can transmit geographically proximate risks beyond directly affected areas through channels that differ from traditional banking networks. The systematic nature of these spillovers suggests that disaster-response strategies may be more effective when they consider adjacent regions, and that platform risk management can be strengthened by integrating spatial disaster exposure and connectivity into credit monitoring and decision rules.</p>
	]]></content:encoded>

	<dc:title>Shock Next Door: Geographic Spillovers in FinTech Lending After Natural Disasters</dc:title>
			<dc:creator>David Kuo Chuen Lee</dc:creator>
			<dc:creator>Weibiao Xu</dc:creator>
			<dc:creator>Jianzheng Shi</dc:creator>
			<dc:creator>Yue Wang</dc:creator>
			<dc:creator>Ding Ding</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics14010005</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2026-01-15</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2026-01-15</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>5</prism:startingPage>
		<prism:doi>10.3390/econometrics14010005</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/14/1/5</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/14/1/4">

	<title>Econometrics, Vol. 14, Pages 4: A Theory-Based Formal-Econometric Interpretation of an Econometric Model</title>
	<link>https://www.mdpi.com/2225-1146/14/1/4</link>
	<description>The references of most of the observations that econometricians have are ill defined. To use such data in an empirical analysis, the econometrician in charge must find a way to give them economic meaning. In this paper, I have data and an econometric model, and I set out to show how economic theory can be used to interpret the variables and parameters of my econometric model. According to Ragnar Frisch, that is a difficult task. Economic theories reside in a Model World and the econometrician&amp;rsquo;s data reside in the Real World; the rational laws in the model world are fundamentally different from the empirical laws in the real world; and between the two worlds there is a gap that can never be bridged. To accomplish my task, I build a bridge between Frisch&amp;rsquo;s two worlds with applied formal-econometric arguments, invent a pertinent model-world economic theory, walk the bridge with the invented theory, and use it to give economic meaning to the variables and parameters of my econometric model. At the end I demonstrate that the invented theory and the bridge I use in my analysis are empirically relevant in the empirical context of my econometric model.</description>
	<pubDate>2026-01-06</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 14, Pages 4: A Theory-Based Formal-Econometric Interpretation of an Econometric Model</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/14/1/4">doi: 10.3390/econometrics14010004</a></p>
	<p>Authors:
		Bernt Petter Stigum
		</p>
	<p>The references of most of the observations that econometricians have are ill defined. To use such data in an empirical analysis, the econometrician in charge must find a way to give them economic meaning. In this paper, I have data and an econometric model, and I set out to show how economic theory can be used to interpret the variables and parameters of my econometric model. According to Ragnar Frisch, that is a difficult task. Economic theories reside in a Model World and the econometrician&rsquo;s data reside in the Real World; the rational laws in the model world are fundamentally different from the empirical laws in the real world; and between the two worlds there is a gap that can never be bridged. To accomplish my task, I build a bridge between Frisch&rsquo;s two worlds with applied formal-econometric arguments, invent a pertinent model-world economic theory, walk the bridge with the invented theory, and use it to give economic meaning to the variables and parameters of my econometric model. At the end I demonstrate that the invented theory and the bridge I use in my analysis are empirically relevant in the empirical context of my econometric model.</p>
	]]></content:encoded>

	<dc:title>A Theory-Based Formal-Econometric Interpretation of an Econometric Model</dc:title>
			<dc:creator>Bernt Petter Stigum</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics14010004</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2026-01-06</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2026-01-06</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>4</prism:startingPage>
		<prism:doi>10.3390/econometrics14010004</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/14/1/4</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/14/1/3">

	<title>Econometrics, Vol. 14, Pages 3: Bayesian Panel Variable Selection Under Model Uncertainty for High-Dimensional Data</title>
	<link>https://www.mdpi.com/2225-1146/14/1/3</link>
	<description>Selecting the relevant covariates in high-dimensional panel data remains a central challenge in applied econometrics. Conventional fixed effects and random effects models are not designed for systematic variable selection under model uncertainty. In addition, many existing models such as LASSO in machine learning or Bayesian approaches like model averaging, Bayesian Additive Regression Trees, and Bayesian Variable Selection with Shrinking and Diffusing Priors have been primarily developed for time series analysis. This paper develops and applies Bayesian Panel Variable Selection (BPVS) models to simulation and empirical applications. These models are designed to assist researchers in identifying which input covariates matter most, while also determining whether their effects should be treated as fixed or random through Bayesian hierarchical modeling and posterior inference, which jointly accounts for variable importance ranking. Both the simulation studies and the empirical application to socioeconomic determinants of subjective well-being show that Bayesian panel models outperform classical models, especially in terms of convergence stability, predictive accuracy, and reliable variable selection. Classical panel models, in contrast, remain attractive for their computational efficiency and simplicity. The Hausman test is used as a robustness check. The study adds an econometric approach for dealing with model uncertainty in high-dimensional panel analysis and offers open-source R 4.5.1 code to support future applications.</description>
	<pubDate>2026-01-04</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 14, Pages 3: Bayesian Panel Variable Selection Under Model Uncertainty for High-Dimensional Data</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/14/1/3">doi: 10.3390/econometrics14010003</a></p>
	<p>Authors:
		Pathairat Pastpipatkul
		Htwe Ko
		</p>
	<p>Selecting the relevant covariates in high-dimensional panel data remains a central challenge in applied econometrics. Conventional fixed effects and random effects models are not designed for systematic variable selection under model uncertainty. In addition, many existing models such as LASSO in machine learning or Bayesian approaches like model averaging, Bayesian Additive Regression Trees, and Bayesian Variable Selection with Shrinking and Diffusing Priors have been primarily developed for time series analysis. This paper develops and applies Bayesian Panel Variable Selection (BPVS) models to simulation and empirical applications. These models are designed to assist researchers in identifying which input covariates matter most, while also determining whether their effects should be treated as fixed or random through Bayesian hierarchical modeling and posterior inference, which jointly accounts for variable importance ranking. Both the simulation studies and the empirical application to socioeconomic determinants of subjective well-being show that Bayesian panel models outperform classical models, especially in terms of convergence stability, predictive accuracy, and reliable variable selection. Classical panel models, in contrast, remain attractive for their computational efficiency and simplicity. The Hausman test is used as a robustness check. The study adds an econometric approach for dealing with model uncertainty in high-dimensional panel analysis and offers open-source R 4.5.1 code to support future applications.</p>
	]]></content:encoded>

	<dc:title>Bayesian Panel Variable Selection Under Model Uncertainty for High-Dimensional Data</dc:title>
			<dc:creator>Pathairat Pastpipatkul</dc:creator>
			<dc:creator>Htwe Ko</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics14010003</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2026-01-04</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2026-01-04</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>3</prism:startingPage>
		<prism:doi>10.3390/econometrics14010003</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/14/1/3</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/14/1/2">

	<title>Econometrics, Vol. 14, Pages 2: I(2) Cointegration in Macroeconometric Modelling: Tourism Price and Inflation Dynamics</title>
	<link>https://www.mdpi.com/2225-1146/14/1/2</link>
	<description>This study enhances macroeconometric modelling by utilising an I(2) cointegration framework to analyse the dynamic link between tourism prices and inflation in Slovenia and the Eurozone. Using monthly data from 2000 to 2017, we estimate cointegrated VAR models that capture long-run equilibria, short-run adjustments, and persistent deviations inherent in I(2) processes. The results reveal strong spillover effects from Slovenian tourism and input prices to Eurozone inflation and hospitality prices in the short run, while Eurozone-wide shocks dominate the long-run dynamics. By explicitly accounting for nonstationarity, structural breaks, and seasonal patterns, the I(2) model provides a more reliable framework than traditional I(1)-based approaches, which are often prone to misspecification when higher-order integration and persistent deviations are ignored. The findings contribute to macroeconometric theory by demonstrating the value of I(2) cointegration in modelling complex price systems and offer policy insights into inflation management and competitiveness in tourism-dependent economies.</description>
	<pubDate>2026-01-04</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 14, Pages 2: I(2) Cointegration in Macroeconometric Modelling: Tourism Price and Inflation Dynamics</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/14/1/2">doi: 10.3390/econometrics14010002</a></p>
	<p>Authors:
		Sergej Gričar
		Štefan Bojnec
		Bjørnar Karlsen Kivedal
		</p>
	<p>This study enhances macroeconometric modelling by utilising an I(2) cointegration framework to analyse the dynamic link between tourism prices and inflation in Slovenia and the Eurozone. Using monthly data from 2000 to 2017, we estimate cointegrated VAR models that capture long-run equilibria, short-run adjustments, and persistent deviations inherent in I(2) processes. The results reveal strong spillover effects from Slovenian tourism and input prices to Eurozone inflation and hospitality prices in the short run, while Eurozone-wide shocks dominate the long-run dynamics. By explicitly accounting for nonstationarity, structural breaks, and seasonal patterns, the I(2) model provides a more reliable framework than traditional I(1)-based approaches, which are often prone to misspecification when higher-order integration and persistent deviations are ignored. The findings contribute to macroeconometric theory by demonstrating the value of I(2) cointegration in modelling complex price systems and offer policy insights into inflation management and competitiveness in tourism-dependent economies.</p>
	]]></content:encoded>

	<dc:title>I(2) Cointegration in Macroeconometric Modelling: Tourism Price and Inflation Dynamics</dc:title>
			<dc:creator>Sergej Gričar</dc:creator>
			<dc:creator>Štefan Bojnec</dc:creator>
			<dc:creator>Bjørnar Karlsen Kivedal</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics14010002</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2026-01-04</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2026-01-04</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2</prism:startingPage>
		<prism:doi>10.3390/econometrics14010002</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/14/1/2</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/14/1/1">

	<title>Econometrics, Vol. 14, Pages 1: Complexity-Aware Vector-Valued Machine Learning of State-Level Bond Returns: Evidence on South African Trade Spillovers Under SALT and OBBBA</title>
	<link>https://www.mdpi.com/2225-1146/14/1/1</link>
	<description>This study examines the impact of international trade shocks from South Africa and recent U.S. federal tax reforms on state-level municipal bond returns within the United States. Employing a unique transaction-level dataset comprising more than 50 million municipal bond trades from 2020 to 2024, the empirical approach integrates machine learning estimators with econometric volatility models to examine daily nonlinear spillovers and structural complexity across twenty U.S. states. The study introduces and extends the application of a vector radial basis function neural network framework, leveraging its universal approximation capacity to jointly model multiple state-level outcomes and uncover complex response patterns. The empirical results reveal substantial cross-state heterogeneity in bond-return resilience, influenced by variation in state tax regimes, economic complexity, and differential exposure to external financial forces. States exhibiting higher economic adaptability demonstrate faster recovery and weaker shock amplification, whereas structurally rigid states experience persistent volatility and slower mean reversion. These findings demonstrate that complexity-aware predictive modeling, when combined with granular fiscal and trade-linkage data, provides valuable insight into the pathways through which global and domestic shocks propagate into U.S. municipal bond markets and shape subnational financial stability.</description>
	<pubDate>2025-12-23</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 14, Pages 1: Complexity-Aware Vector-Valued Machine Learning of State-Level Bond Returns: Evidence on South African Trade Spillovers Under SALT and OBBBA</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/14/1/1">doi: 10.3390/econometrics14010001</a></p>
	<p>Authors:
		Gordon Dash
		Nina Kajiji
		Domenic Vonella
		Helper Zhou
		</p>
	<p>This study examines the impact of international trade shocks from South Africa and recent U.S. federal tax reforms on state-level municipal bond returns within the United States. Employing a unique transaction-level dataset comprising more than 50 million municipal bond trades from 2020 to 2024, the empirical approach integrates machine learning estimators with econometric volatility models to examine daily nonlinear spillovers and structural complexity across twenty U.S. states. The study introduces and extends the application of a vector radial basis function neural network framework, leveraging its universal approximation capacity to jointly model multiple state-level outcomes and uncover complex response patterns. The empirical results reveal substantial cross-state heterogeneity in bond-return resilience, influenced by variation in state tax regimes, economic complexity, and differential exposure to external financial forces. States exhibiting higher economic adaptability demonstrate faster recovery and weaker shock amplification, whereas structurally rigid states experience persistent volatility and slower mean reversion. These findings demonstrate that complexity-aware predictive modeling, when combined with granular fiscal and trade-linkage data, provides valuable insight into the pathways through which global and domestic shocks propagate into U.S. municipal bond markets and shape subnational financial stability.</p>
	]]></content:encoded>

	<dc:title>Complexity-Aware Vector-Valued Machine Learning of State-Level Bond Returns: Evidence on South African Trade Spillovers Under SALT and OBBBA</dc:title>
			<dc:creator>Gordon Dash</dc:creator>
			<dc:creator>Nina Kajiji</dc:creator>
			<dc:creator>Domenic Vonella</dc:creator>
			<dc:creator>Helper Zhou</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics14010001</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2025-12-23</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2025-12-23</prism:publicationDate>
	<prism:volume>14</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1</prism:startingPage>
		<prism:doi>10.3390/econometrics14010001</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/14/1/1</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/13/4/52">

	<title>Econometrics, Vol. 13, Pages 52: Econometric and Python-Based Forecasting Tools for Global Market Price Prediction in the Context of Economic Security</title>
	<link>https://www.mdpi.com/2225-1146/13/4/52</link>
	<description>Debate persists over whether classical econometric or modern machine learning (ML) approaches provide superior forecasts for volatile monthly price series. Despite extensive research, no systematic cross-domain comparison exists to guide model selection across diverse asset types. In this study, we compare traditional econometric models with classical ML baselines and hybrid approaches across financial assets, futures, commodities, and market index domains. Universal Python-based forecasting tools include month-end preprocessing, automated ARIMA order selection, Fourier terms for seasonality, circular terms, and ML frameworks for forecasting and residual corrections. Performance is assessed via anchored rolling-origin backtests with expanding windows and a fixed 12-month horizon. MAPE comparisons show that ARIMA-based models provide stable, transparent benchmarks but often fail to capture the nonlinear structure of high-volatility series. ML tools can enhance accuracy in these cases, but they are susceptible to stability and overfitting on monthly histories. The most accurate and reliable forecasts come from models that combine ARIMA-based methods with Fourier transformation and a slight enhancement using machine learning residual correction. ARIMA-based approaches achieve about 30% lower forecast errors than pure ML (18.5% vs. 26.2% average MAPE and 11.6% vs. 16.8% median MAPE), with hybrid models offering only marginal gains (0.1 pp median improvement) at significantly higher computational cost. This work demonstrates the domain-specific nature of model performance, clarifying when hybridization is effective and providing reproducible Python pipelines suited for economic security applications.</description>
	<pubDate>2025-12-15</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 13, Pages 52: Econometric and Python-Based Forecasting Tools for Global Market Price Prediction in the Context of Economic Security</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/13/4/52">doi: 10.3390/econometrics13040052</a></p>
	<p>Authors:
		Dmytro Zherlitsyn
		Volodymyr Kravchenko
		Oleksiy Mints
		Oleh Kolodiziev
		Olena Khadzhynova
		Oleksandr Shchepka
		</p>
	<p>Debate persists over whether classical econometric or modern machine learning (ML) approaches provide superior forecasts for volatile monthly price series. Despite extensive research, no systematic cross-domain comparison exists to guide model selection across diverse asset types. In this study, we compare traditional econometric models with classical ML baselines and hybrid approaches across financial assets, futures, commodities, and market index domains. Universal Python-based forecasting tools include month-end preprocessing, automated ARIMA order selection, Fourier terms for seasonality, circular terms, and ML frameworks for forecasting and residual corrections. Performance is assessed via anchored rolling-origin backtests with expanding windows and a fixed 12-month horizon. MAPE comparisons show that ARIMA-based models provide stable, transparent benchmarks but often fail to capture the nonlinear structure of high-volatility series. ML tools can enhance accuracy in these cases, but they are susceptible to stability and overfitting on monthly histories. The most accurate and reliable forecasts come from models that combine ARIMA-based methods with Fourier transformation and a slight enhancement using machine learning residual correction. ARIMA-based approaches achieve about 30% lower forecast errors than pure ML (18.5% vs. 26.2% average MAPE and 11.6% vs. 16.8% median MAPE), with hybrid models offering only marginal gains (0.1 pp median improvement) at significantly higher computational cost. This work demonstrates the domain-specific nature of model performance, clarifying when hybridization is effective and providing reproducible Python pipelines suited for economic security applications.</p>
	]]></content:encoded>

	<dc:title>Econometric and Python-Based Forecasting Tools for Global Market Price Prediction in the Context of Economic Security</dc:title>
			<dc:creator>Dmytro Zherlitsyn</dc:creator>
			<dc:creator>Volodymyr Kravchenko</dc:creator>
			<dc:creator>Oleksiy Mints</dc:creator>
			<dc:creator>Oleh Kolodiziev</dc:creator>
			<dc:creator>Olena Khadzhynova</dc:creator>
			<dc:creator>Oleksandr Shchepka</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics13040052</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2025-12-15</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2025-12-15</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>52</prism:startingPage>
		<prism:doi>10.3390/econometrics13040052</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/13/4/52</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/13/4/51">

	<title>Econometrics, Vol. 13, Pages 51: Credit Rationing, Its Determinants and Non-Performing Loans: An Empirical Analysis of Credit Markets in Polish Banking Sector</title>
	<link>https://www.mdpi.com/2225-1146/13/4/51</link>
	<description>In a situation where the number of non-performing loans (NPLs) increases, lenders may raise interest rates to compensate for potential losses, and the amount of credit granted in the market may decrease, leading to credit rationing. Such actions may become vital based on their potential consequences for the economy, entrepreneurs and consumers, which makes this topic extremely important. This study, by using an empirical VAR analysis, has strived to determine whether credit rationing by banks operating in the Polish banking sector is driven by risky loans (which are the main determinant of credit rationing and are represented by the ratio of NPLs to total loans). According to the results, it has been found that credit rationing, made by Polish banks, is not statistically significant when the risk in the credit market rises due to non-performing loans. Therefore, it can be claimed that the risky structure due to NPL in the credit market may not be one of the determinant factors of credit rationing in the Polish banking sector. The low sensitivity of the Polish banking sector to the risky structure of the credit market may result from the relatively low share of loans in total assets compared to debt instruments. Furthermore, restrictive lending policies and the predominance of mortgage loans secured directly by real estate limit portfolio risk, which may reduce the need for a risk-sensitive lending strategy.</description>
	<pubDate>2025-12-08</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 13, Pages 51: Credit Rationing, Its Determinants and Non-Performing Loans: An Empirical Analysis of Credit Markets in Polish Banking Sector</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/13/4/51">doi: 10.3390/econometrics13040051</a></p>
	<p>Authors:
		Cenap Mengü Tunçay
		Elżbieta Grzegorczyk-Akın
		</p>
	<p>In a situation where the number of non-performing loans (NPLs) increases, lenders may raise interest rates to compensate for potential losses, and the amount of credit granted in the market may decrease, leading to credit rationing. Such actions may become vital based on their potential consequences for the economy, entrepreneurs and consumers, which makes this topic extremely important. This study, by using an empirical VAR analysis, has strived to determine whether credit rationing by banks operating in the Polish banking sector is driven by risky loans (which are the main determinant of credit rationing and are represented by the ratio of NPLs to total loans). According to the results, it has been found that credit rationing, made by Polish banks, is not statistically significant when the risk in the credit market rises due to non-performing loans. Therefore, it can be claimed that the risky structure due to NPL in the credit market may not be one of the determinant factors of credit rationing in the Polish banking sector. The low sensitivity of the Polish banking sector to the risky structure of the credit market may result from the relatively low share of loans in total assets compared to debt instruments. Furthermore, restrictive lending policies and the predominance of mortgage loans secured directly by real estate limit portfolio risk, which may reduce the need for a risk-sensitive lending strategy.</p>
	]]></content:encoded>

	<dc:title>Credit Rationing, Its Determinants and Non-Performing Loans: An Empirical Analysis of Credit Markets in Polish Banking Sector</dc:title>
			<dc:creator>Cenap Mengü Tunçay</dc:creator>
			<dc:creator>Elżbieta Grzegorczyk-Akın</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics13040051</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2025-12-08</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2025-12-08</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>51</prism:startingPage>
		<prism:doi>10.3390/econometrics13040051</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/13/4/51</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/13/4/50">

	<title>Econometrics, Vol. 13, Pages 50: Exploring Poverty and SDG Indicators in Italy: An Identity Spline Approach to Partial Least Squares Regression</title>
	<link>https://www.mdpi.com/2225-1146/13/4/50</link>
	<description>Poverty is a complex global issue, closely linked to economic and social inequalities. It encompasses not only a lack of financial resources but also disparities in access to education, healthcare, employment, and social participation. In alignment with the United Nations&amp;rsquo; Sustainable Development Goals&amp;mdash;specifically SDGs 3 (Good Health and Well-being), 4 (Quality Education), and 8 (Decent Work and Economic Growth)&amp;mdash;this study investigates the relationship between poverty and a set of socioeconomic indicators across Italy&amp;rsquo;s 20 regions. To explore how poverty levels respond to different predictors, we apply an identity spline transformation to simulate controlled changes in the poverty indicator. The resulting scenarios are analyzed using partial least squares regression, enabling the identification of the most influential variables. The findings offer insights into regional disparities and contribute to evidence-based strategies aimed at reducing poverty and promoting inclusive, sustainable development.</description>
	<pubDate>2025-12-08</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 13, Pages 50: Exploring Poverty and SDG Indicators in Italy: An Identity Spline Approach to Partial Least Squares Regression</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/13/4/50">doi: 10.3390/econometrics13040050</a></p>
	<p>Authors:
		Rosaria Lombardo
		Jean-François Durand
		Ida Camminatiello
		Corrado Cuccurullo
		</p>
	<p>Poverty is a complex global issue, closely linked to economic and social inequalities. It encompasses not only a lack of financial resources but also disparities in access to education, healthcare, employment, and social participation. In alignment with the United Nations&rsquo; Sustainable Development Goals&mdash;specifically SDGs 3 (Good Health and Well-being), 4 (Quality Education), and 8 (Decent Work and Economic Growth)&mdash;this study investigates the relationship between poverty and a set of socioeconomic indicators across Italy&rsquo;s 20 regions. To explore how poverty levels respond to different predictors, we apply an identity spline transformation to simulate controlled changes in the poverty indicator. The resulting scenarios are analyzed using partial least squares regression, enabling the identification of the most influential variables. The findings offer insights into regional disparities and contribute to evidence-based strategies aimed at reducing poverty and promoting inclusive, sustainable development.</p>
	]]></content:encoded>

	<dc:title>Exploring Poverty and SDG Indicators in Italy: An Identity Spline Approach to Partial Least Squares Regression</dc:title>
			<dc:creator>Rosaria Lombardo</dc:creator>
			<dc:creator>Jean-François Durand</dc:creator>
			<dc:creator>Ida Camminatiello</dc:creator>
			<dc:creator>Corrado Cuccurullo</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics13040050</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2025-12-08</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2025-12-08</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>50</prism:startingPage>
		<prism:doi>10.3390/econometrics13040050</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/13/4/50</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/13/4/49">

	<title>Econometrics, Vol. 13, Pages 49: Choosing Right Bayesian Tools: A Comparative Study of Modern Bayesian Methods in Spatial Econometric Models</title>
	<link>https://www.mdpi.com/2225-1146/13/4/49</link>
	<description>We compare three modern Bayesian approaches, Hamiltonian Monte Carlo (HMC), Variational Bayes (VB), and Integrated Nested Laplace Approximation (INLA), for two classic spatial econometric specifications: the spatial lag model and spatial error model. Our Monte Carlo experiments span a range of sample sizes and spatial neighborhood structures to assess accuracy and computational efficiency. Overall, posterior means exhibit minimal bias for most parameters, with precision improving as sample size grows. VB and INLA deliver substantial computational gains over HMC, with VB typically fastest at small and moderate samples and INLA showing excellent scalability at larger samples. However, INLA can be sensitive to dense spatial weight matrices, showing elevated bias and error dispersion for variance and some regression parameters. Two empirical illustrations underscore these findings: a municipal expenditure reaction function for &amp;Icirc;le-de-France and a hedonic price for housing in Ames, Iowa. Our results yield actionable guidance. HMC remains a gold standard for accuracy when computation permits; VB is a strong, scalable default; and INLA is attractive for large samples provided the weight matrix is not overly dense. These insights help practitioners select Bayesian tools aligned with data size, spatial neighborhood structure, and time constraints.</description>
	<pubDate>2025-12-04</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 13, Pages 49: Choosing Right Bayesian Tools: A Comparative Study of Modern Bayesian Methods in Spatial Econometric Models</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/13/4/49">doi: 10.3390/econometrics13040049</a></p>
	<p>Authors:
		Yuheng Ling
		Julie Le Gallo
		</p>
	<p>We compare three modern Bayesian approaches, Hamiltonian Monte Carlo (HMC), Variational Bayes (VB), and Integrated Nested Laplace Approximation (INLA), for two classic spatial econometric specifications: the spatial lag model and spatial error model. Our Monte Carlo experiments span a range of sample sizes and spatial neighborhood structures to assess accuracy and computational efficiency. Overall, posterior means exhibit minimal bias for most parameters, with precision improving as sample size grows. VB and INLA deliver substantial computational gains over HMC, with VB typically fastest at small and moderate samples and INLA showing excellent scalability at larger samples. However, INLA can be sensitive to dense spatial weight matrices, showing elevated bias and error dispersion for variance and some regression parameters. Two empirical illustrations underscore these findings: a municipal expenditure reaction function for &Icirc;le-de-France and a hedonic price for housing in Ames, Iowa. Our results yield actionable guidance. HMC remains a gold standard for accuracy when computation permits; VB is a strong, scalable default; and INLA is attractive for large samples provided the weight matrix is not overly dense. These insights help practitioners select Bayesian tools aligned with data size, spatial neighborhood structure, and time constraints.</p>
	]]></content:encoded>

	<dc:title>Choosing Right Bayesian Tools: A Comparative Study of Modern Bayesian Methods in Spatial Econometric Models</dc:title>
			<dc:creator>Yuheng Ling</dc:creator>
			<dc:creator>Julie Le Gallo</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics13040049</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2025-12-04</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2025-12-04</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>49</prism:startingPage>
		<prism:doi>10.3390/econometrics13040049</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/13/4/49</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/13/4/48">

	<title>Econometrics, Vol. 13, Pages 48: Construction and Applications of a Composite Model Based on Skew-Normal and Skew-t Distributions</title>
	<link>https://www.mdpi.com/2225-1146/13/4/48</link>
	<description>Financial return distributions often exhibit central asymmetry and heavy-tailed extremes, challenging standard parametric models. We propose a novel composite distribution integrating a skew-normal center with skew-t tails, partitioning the support into three regions with smooth junctions. The skew-normal component captures moderate central asymmetry, while the skew-t tails model extreme events with power-law decay, with tail weights determined by continuity constraints and thresholds selected via Hill plots. Monte Carlo simulations show that the composite model achieves superior global fit, lower-tail KS statistics, and stable parameter estimation compared with skew-normal and skew-t benchmarks. We further conduct simulation-based and empirical backtesting of risk measures, including Value-at-Risk (VaR) and Expected Shortfall (ES), using generated datasets and 2083 TSLA daily log returns (2017&amp;ndash;2025), demonstrating accurate tail risk capture and reliable risk forecasts. Empirical fitting also yields improved log-likelihood and diagnostic measures (P&amp;ndash;P, Q&amp;ndash;Q, and negative log P&amp;ndash;P plots). Overall, the proposed composite distribution provides a flexible theoretically grounded framework for modeling asymmetric and heavy-tailed financial returns, with practical advantages in risk assessment, extreme event analysis, and financial risk management.</description>
	<pubDate>2025-12-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 13, Pages 48: Construction and Applications of a Composite Model Based on Skew-Normal and Skew-t Distributions</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/13/4/48">doi: 10.3390/econometrics13040048</a></p>
	<p>Authors:
		Jingjie Yuan
		Zuoquan Zhang
		</p>
	<p>Financial return distributions often exhibit central asymmetry and heavy-tailed extremes, challenging standard parametric models. We propose a novel composite distribution integrating a skew-normal center with skew-t tails, partitioning the support into three regions with smooth junctions. The skew-normal component captures moderate central asymmetry, while the skew-t tails model extreme events with power-law decay, with tail weights determined by continuity constraints and thresholds selected via Hill plots. Monte Carlo simulations show that the composite model achieves superior global fit, lower-tail KS statistics, and stable parameter estimation compared with skew-normal and skew-t benchmarks. We further conduct simulation-based and empirical backtesting of risk measures, including Value-at-Risk (VaR) and Expected Shortfall (ES), using generated datasets and 2083 TSLA daily log returns (2017&ndash;2025), demonstrating accurate tail risk capture and reliable risk forecasts. Empirical fitting also yields improved log-likelihood and diagnostic measures (P&ndash;P, Q&ndash;Q, and negative log P&ndash;P plots). Overall, the proposed composite distribution provides a flexible theoretically grounded framework for modeling asymmetric and heavy-tailed financial returns, with practical advantages in risk assessment, extreme event analysis, and financial risk management.</p>
	]]></content:encoded>

	<dc:title>Construction and Applications of a Composite Model Based on Skew-Normal and Skew-t Distributions</dc:title>
			<dc:creator>Jingjie Yuan</dc:creator>
			<dc:creator>Zuoquan Zhang</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics13040048</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2025-12-02</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2025-12-02</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>48</prism:startingPage>
		<prism:doi>10.3390/econometrics13040048</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/13/4/48</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/13/4/47">

	<title>Econometrics, Vol. 13, Pages 47: Robust Learning of Tail Dependence</title>
	<link>https://www.mdpi.com/2225-1146/13/4/47</link>
	<description>Accurate estimation of tail dependence is difficult due to model misspecification and data contamination. This paper introduces a class of minimum f-divergence estimators for the tail dependence coefficient that unifies robust estimation with extreme value theory. I establish strong consistency and derive the semiparametric efficiency bound for estimating extremal dependence, the extremal Cram&amp;eacute;r&amp;ndash;Rao bound. I show that the estimator achieves this bound if and only if the second derivative of its generating function at unity equals one, formally characterizing the trade-off between robustness and asymptotic efficiency. An empirical application to systemic risk in the US banking sector shows that the robust Hellinger estimator provides stability during crises, while the efficient maximum likelihood estimator offers precision during normal periods.</description>
	<pubDate>2025-11-20</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 13, Pages 47: Robust Learning of Tail Dependence</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/13/4/47">doi: 10.3390/econometrics13040047</a></p>
	<p>Authors:
		Omid M. Ardakani
		</p>
	<p>Accurate estimation of tail dependence is difficult due to model misspecification and data contamination. This paper introduces a class of minimum f-divergence estimators for the tail dependence coefficient that unifies robust estimation with extreme value theory. I establish strong consistency and derive the semiparametric efficiency bound for estimating extremal dependence, the extremal Cram&eacute;r&ndash;Rao bound. I show that the estimator achieves this bound if and only if the second derivative of its generating function at unity equals one, formally characterizing the trade-off between robustness and asymptotic efficiency. An empirical application to systemic risk in the US banking sector shows that the robust Hellinger estimator provides stability during crises, while the efficient maximum likelihood estimator offers precision during normal periods.</p>
	]]></content:encoded>

	<dc:title>Robust Learning of Tail Dependence</dc:title>
			<dc:creator>Omid M. Ardakani</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics13040047</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2025-11-20</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2025-11-20</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>47</prism:startingPage>
		<prism:doi>10.3390/econometrics13040047</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/13/4/47</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/13/4/46">

	<title>Econometrics, Vol. 13, Pages 46: A Model of the Impact of Government Revenue and Quality of Governance on the Pupil/Teacher Ratio for Every Country in the World</title>
	<link>https://www.mdpi.com/2225-1146/13/4/46</link>
	<description>This study explores the relationship between government revenue per capita, governance quality, and the supply of teachers&amp;mdash;an indicator under Sustainable Development Goal 4 (Target 4.c). Using annual data from 217 countries spanning 1980 to 2022, we apply a non-linear panel model with a logistic function that incorporates country-specific governance measures. Our findings reveal that increased government revenue is positively associated with teacher supply, and that improvements in governance amplify this effect. The model provides predictive insights into how changes in revenue may influence progress toward education-related SDG targets at the country level.</description>
	<pubDate>2025-11-19</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 13, Pages 46: A Model of the Impact of Government Revenue and Quality of Governance on the Pupil/Teacher Ratio for Every Country in the World</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/13/4/46">doi: 10.3390/econometrics13040046</a></p>
	<p>Authors:
		Stephen G. Hall
		Bernadette O’Hare
		</p>
	<p>This study explores the relationship between government revenue per capita, governance quality, and the supply of teachers&mdash;an indicator under Sustainable Development Goal 4 (Target 4.c). Using annual data from 217 countries spanning 1980 to 2022, we apply a non-linear panel model with a logistic function that incorporates country-specific governance measures. Our findings reveal that increased government revenue is positively associated with teacher supply, and that improvements in governance amplify this effect. The model provides predictive insights into how changes in revenue may influence progress toward education-related SDG targets at the country level.</p>
	]]></content:encoded>

	<dc:title>A Model of the Impact of Government Revenue and Quality of Governance on the Pupil/Teacher Ratio for Every Country in the World</dc:title>
			<dc:creator>Stephen G. Hall</dc:creator>
			<dc:creator>Bernadette O’Hare</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics13040046</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2025-11-19</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2025-11-19</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>46</prism:startingPage>
		<prism:doi>10.3390/econometrics13040046</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/13/4/46</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/13/4/45">

	<title>Econometrics, Vol. 13, Pages 45: Dynamic Volatility Spillovers Among G20 Economies During the Global Crisis Periods&amp;mdash;A TVP VAR Analysis</title>
	<link>https://www.mdpi.com/2225-1146/13/4/45</link>
	<description>Previous research on financial contagion has mostly looked at volatility spillovers using static or fixed parameter models. These models don&amp;rsquo;t always take into account how inter-market links change and depend on frequency during big crises. This study fills in that gap by looking at how changes in volatility in the G20 equity markets affected four big global events: the global financial crisis of 2008, the European debt crisis, the COVID-19 pandemic, and the Russia-Ukraine war. The study uses a Time-Varying Parameter Vector Autoregression (TVP VAR) framework along with the Barun&amp;iacute;k-K&amp;#345;ehl&amp;iacute;k frequency domain spillover measure to look at how connectedness changes over short-term (1&amp;ndash;5 days) and long-term (5&amp;ndash;Inf days) time periods. The results show that systemic connectedness changes a lot during crises. For example, the Total Connectedness Index (TCI) was 24&amp;ndash;25 percent during the GFC and EDC, 34 percent during COVID-19, and a huge jump to 60 percent during the Russia-Ukraine war. During the global financial crisis and the war between Russia and Ukraine, the US constantly emerged as the largest transmitter. During the European debt crisis, on the other hand, emerging markets like Turkey, South Africa, and Japan acted as net transmitters. During all crisis times, short-term spillovers are the most common. This shows how important high-frequency volatility transmission is. This study is different from others because it uses both time-varying and frequency domain views. This gives us a better idea of how crises change the way global finances are linked. The results are very important for policymakers and investors because they show how important it is to coordinate risk management, improve market safety, and make systemic stress testing better in a global financial world.</description>
	<pubDate>2025-11-14</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 13, Pages 45: Dynamic Volatility Spillovers Among G20 Economies During the Global Crisis Periods&amp;mdash;A TVP VAR Analysis</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/13/4/45">doi: 10.3390/econometrics13040045</a></p>
	<p>Authors:
		Himanshu Goel
		Parminder Bajaj
		Monika Agarwal
		Abdallah AlKhawaja
		Suzan Dsouza
		</p>
	<p>Previous research on financial contagion has mostly looked at volatility spillovers using static or fixed parameter models. These models don&rsquo;t always take into account how inter-market links change and depend on frequency during big crises. This study fills in that gap by looking at how changes in volatility in the G20 equity markets affected four big global events: the global financial crisis of 2008, the European debt crisis, the COVID-19 pandemic, and the Russia-Ukraine war. The study uses a Time-Varying Parameter Vector Autoregression (TVP VAR) framework along with the Barun&iacute;k-K&#345;ehl&iacute;k frequency domain spillover measure to look at how connectedness changes over short-term (1&ndash;5 days) and long-term (5&ndash;Inf days) time periods. The results show that systemic connectedness changes a lot during crises. For example, the Total Connectedness Index (TCI) was 24&ndash;25 percent during the GFC and EDC, 34 percent during COVID-19, and a huge jump to 60 percent during the Russia-Ukraine war. During the global financial crisis and the war between Russia and Ukraine, the US constantly emerged as the largest transmitter. During the European debt crisis, on the other hand, emerging markets like Turkey, South Africa, and Japan acted as net transmitters. During all crisis times, short-term spillovers are the most common. This shows how important high-frequency volatility transmission is. This study is different from others because it uses both time-varying and frequency domain views. This gives us a better idea of how crises change the way global finances are linked. The results are very important for policymakers and investors because they show how important it is to coordinate risk management, improve market safety, and make systemic stress testing better in a global financial world.</p>
	]]></content:encoded>

	<dc:title>Dynamic Volatility Spillovers Among G20 Economies During the Global Crisis Periods&amp;mdash;A TVP VAR Analysis</dc:title>
			<dc:creator>Himanshu Goel</dc:creator>
			<dc:creator>Parminder Bajaj</dc:creator>
			<dc:creator>Monika Agarwal</dc:creator>
			<dc:creator>Abdallah AlKhawaja</dc:creator>
			<dc:creator>Suzan Dsouza</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics13040045</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2025-11-14</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2025-11-14</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>45</prism:startingPage>
		<prism:doi>10.3390/econometrics13040045</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/13/4/45</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/13/4/44">

	<title>Econometrics, Vol. 13, Pages 44: Dual Effects of Education Expenditure on Life Expectancy: An Empirical Assessment of Crowding-Out and Complementarity</title>
	<link>https://www.mdpi.com/2225-1146/13/4/44</link>
	<description>This study investigates whether public education expenditure crowds out or complements health investment in influencing life expectancy across 158 countries from 1990 to 2023. Graphical analysis shows that in high-income countries, health expenditure consistently exceeds education spending, reflecting mature complementarity between the two sectors. In contrast, in low- and middle-income countries, education spending often surpasses health expenditure, suggesting potential short-term crowding-out risks where fiscal resources are limited. Using Fully Modified Ordinary Least Squares (FMOLS), Two-Stage Least Squares (2SLS), and bootstrap estimation, the results reveal a predominantly crowding-in relationship that varies by income level. Bootstrap estimates from the life expectancy Model indicate that the coefficient of education expenditure (eexp) is &amp;minus;0.003 for high-income countries (HICs), 0.005 for upper-middle-income countries (UMCs), 0.045 *** for lower-middle-income countries (LMCs), and &amp;minus;0.010 for low-income countries (LICs). Bootstrap estimates show that the effect of education expenditure on life expectancy is insignificant in high- and upper-middle-income countries, strongly positive in lower-middle-income countries, and negative but insignificant in low-income countries. The coefficient of government health expenditure (dgghe) is 0.007 ***, 0.007 ***, 0.017 ***, and 0.035 *** for HICs, UMCs, LMCs, and LICs, respectively. Government health expenditure exerts a consistently positive and highly significant effect across all groups, strongest in low- and lower-middle-income countries. Sobel&amp;rsquo;s z-statistics (9.62, 8.70, 7.68, and 3.07) confirm a significant indirect effect of education on life expectancy through health expenditure. Health expenditure and GDP per capita enhance life expectancy, while inequality and inflation reduce it. 
Overall, education and health investments are mutually reinforcing but depend on fiscal capacity and governance quality, necessitating coordinated fiscal frameworks for sustainable human development.</description>
	<pubDate>2025-11-14</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 13, Pages 44: Dual Effects of Education Expenditure on Life Expectancy: An Empirical Assessment of Crowding-Out and Complementarity</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/13/4/44">doi: 10.3390/econometrics13040044</a></p>
	<p>Authors:
		Jayadevan CM
		Nam Trung Hoang
		Subba Reddy Yarram
		</p>
	<p>This study investigates whether public education expenditure crowds out or complements health investment in influencing life expectancy across 158 countries from 1990 to 2023. Graphical analysis shows that in high-income countries, health expenditure consistently exceeds education spending, reflecting mature complementarity between the two sectors. In contrast, in low- and middle-income countries, education spending often surpasses health expenditure, suggesting potential short-term crowding-out risks where fiscal resources are limited. Using Fully Modified Ordinary Least Squares (FMOLS), Two-Stage Least Squares (2SLS), and bootstrap estimation, the results reveal a predominantly crowding-in relationship that varies by income level. Bootstrap estimates from the life expectancy Model indicate that the coefficient of education expenditure (eexp) is &minus;0.003 for high-income countries (HICs), 0.005 for upper-middle-income countries (UMCs), 0.045 *** for lower-middle-income countries (LMCs), and &minus;0.010 for low-income countries (LICs). Bootstrap estimates show that the effect of education expenditure on life expectancy is insignificant in high- and upper-middle-income countries, strongly positive in lower-middle-income countries, and negative but insignificant in low-income countries. The coefficient of government health expenditure (dgghe) is 0.007 ***, 0.007 ***, 0.017 ***, and 0.035 *** for HICs, UMCs, LMCs, and LICs, respectively. Government health expenditure exerts a consistently positive and highly significant effect across all groups, strongest in low- and lower-middle-income countries. Sobel&rsquo;s z-statistics (9.62, 8.70, 7.68, and 3.07) confirm a significant indirect effect of education on life expectancy through health expenditure. Health expenditure and GDP per capita enhance life expectancy, while inequality and inflation reduce it. 
Overall, education and health investments are mutually reinforcing but depend on fiscal capacity and governance quality, necessitating coordinated fiscal frameworks for sustainable human development.</p>
	]]></content:encoded>

	<dc:title>Dual Effects of Education Expenditure on Life Expectancy: An Empirical Assessment of Crowding-Out and Complementarity</dc:title>
			<dc:creator>Jayadevan CM</dc:creator>
			<dc:creator>Nam Trung Hoang</dc:creator>
			<dc:creator>Subba Reddy Yarram</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics13040044</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2025-11-14</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2025-11-14</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>44</prism:startingPage>
		<prism:doi>10.3390/econometrics13040044</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/13/4/44</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/13/4/43">

	<title>Econometrics, Vol. 13, Pages 43: Fractional Probit with Cross-Sectional Volatility: Bridging Heteroskedastic Probit and Fractional Response Models</title>
	<link>https://www.mdpi.com/2225-1146/13/4/43</link>
	<description>This paper introduces a new econometric framework for modeling fractional outcomes bounded between zero and one. We propose the Fractional Probit with Cross-Sectional Volatility (FPCV), which specifies the conditional mean through a probit link and allows the conditional variance to depend on observable heterogeneity. The model extends heteroskedastic probit methods to fractional responses and unifies them with existing approaches for proportions. Monte Carlo simulations demonstrate that the FPCV estimator achieves lower bias, more reliable inference, and superior predictive accuracy compared with standard alternatives. The framework is particularly suited to empirical settings where fractional outcomes display systematic variability across units, such as participation rates, market shares, health indices, financial ratios, and vote shares. By modeling both mean and variance, FPCV provides interpretable measures of volatility and offers a robust tool for empirical analysis and policy evaluation.</description>
	<pubDate>2025-11-03</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 13, Pages 43: Fractional Probit with Cross-Sectional Volatility: Bridging Heteroskedastic Probit and Fractional Response Models</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/13/4/43">doi: 10.3390/econometrics13040043</a></p>
	<p>Authors:
		Songsak Sriboonchitta
		Aree Wiboonpongse
		Jittaporn Sriboonjit
		Woraphon Yamaka
		</p>
	<p>This paper introduces a new econometric framework for modeling fractional outcomes bounded between zero and one. We propose the Fractional Probit with Cross-Sectional Volatility (FPCV), which specifies the conditional mean through a probit link and allows the conditional variance to depend on observable heterogeneity. The model extends heteroskedastic probit methods to fractional responses and unifies them with existing approaches for proportions. Monte Carlo simulations demonstrate that the FPCV estimator achieves lower bias, more reliable inference, and superior predictive accuracy compared with standard alternatives. The framework is particularly suited to empirical settings where fractional outcomes display systematic variability across units, such as participation rates, market shares, health indices, financial ratios, and vote shares. By modeling both mean and variance, FPCV provides interpretable measures of volatility and offers a robust tool for empirical analysis and policy evaluation.</p>
	]]></content:encoded>

	<dc:title>Fractional Probit with Cross-Sectional Volatility: Bridging Heteroskedastic Probit and Fractional Response Models</dc:title>
			<dc:creator>Songsak Sriboonchitta</dc:creator>
			<dc:creator>Aree Wiboonpongse</dc:creator>
			<dc:creator>Jittaporn Sriboonjit</dc:creator>
			<dc:creator>Woraphon Yamaka</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics13040043</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2025-11-03</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2025-11-03</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Communication</prism:section>
	<prism:startingPage>43</prism:startingPage>
		<prism:doi>10.3390/econometrics13040043</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/13/4/43</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/13/4/42">

	<title>Econometrics, Vol. 13, Pages 42: Counterfactual Duration Analysis</title>
	<link>https://www.mdpi.com/2225-1146/13/4/42</link>
	<description>This article introduces new counterfactual standardization techniques for comparing duration distributions subject to random censoring through counterfactual decompositions. The counterfactual distribution of one population relative to another is computed after estimating the conditional distribution, using either a semiparametric or a nonparametric specification. We consider both the semiparametric proportional hazard model and a fully nonparametric partition-based estimator. The finite-sample performance of the proposed methods is evaluated through Monte Carlo experiments. We also illustrate the methodology with an application to unemployment duration in Spain during the period between 2004 and 2007, focusing on gender differences. The results indicate that observable characteristics account for only a small portion of the observed gap.</description>
	<pubDate>2025-10-30</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 13, Pages 42: Counterfactual Duration Analysis</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/13/4/42">doi: 10.3390/econometrics13040042</a></p>
	<p>Authors:
		Miguel A. Delgado
		Andrés García-Suaza
		</p>
	<p>This article introduces new counterfactual standardization techniques for comparing duration distributions subject to random censoring through counterfactual decompositions. The counterfactual distribution of one population relative to another is computed after estimating the conditional distribution, using either a semiparametric or a nonparametric specification. We consider both the semiparametric proportional hazard model and a fully nonparametric partition-based estimator. The finite-sample performance of the proposed methods is evaluated through Monte Carlo experiments. We also illustrate the methodology with an application to unemployment duration in Spain during the period between 2004 and 2007, focusing on gender differences. The results indicate that observable characteristics account for only a small portion of the observed gap.</p>
	]]></content:encoded>

	<dc:title>Counterfactual Duration Analysis</dc:title>
			<dc:creator>Miguel A. Delgado</dc:creator>
			<dc:creator>Andrés García-Suaza</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics13040042</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2025-10-30</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2025-10-30</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>42</prism:startingPage>
		<prism:doi>10.3390/econometrics13040042</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/13/4/42</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/13/4/41">

	<title>Econometrics, Vol. 13, Pages 41: Consistency of the OLS Bootstrap for Independently but Not-Identically Distributed Data: A Permutation Perspective</title>
	<link>https://www.mdpi.com/2225-1146/13/4/41</link>
	<description>This paper introduces a new approach to proving bootstrap consistency based upon the distribution of permutation statistics, using it to derive results covering fundamentally not-identically distributed groups of data, in which average moments do not converge to anything, with moment conditions that are less demanding than earlier results for either identically distributed or not-identically distributed data.</description>
	<pubDate>2025-10-23</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 13, Pages 41: Consistency of the OLS Bootstrap for Independently but Not-Identically Distributed Data: A Permutation Perspective</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/13/4/41">doi: 10.3390/econometrics13040041</a></p>
	<p>Authors:
		Alwyn Young
		</p>
	<p>This paper introduces a new approach to proving bootstrap consistency based upon the distribution of permutation statistics, using it to derive results covering fundamentally not-identically distributed groups of data, in which average moments do not converge to anything, with moment conditions that are less demanding than earlier results for either identically distributed or not-identically distributed data.</p>
	]]></content:encoded>

	<dc:title>Consistency of the OLS Bootstrap for Independently but Not-Identically Distributed Data: A Permutation Perspective</dc:title>
			<dc:creator>Alwyn Young</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics13040041</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2025-10-23</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2025-10-23</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>41</prism:startingPage>
		<prism:doi>10.3390/econometrics13040041</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/13/4/41</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/13/4/40">

	<title>Econometrics, Vol. 13, Pages 40: VAR Models with an Index Structure: A Survey with New Results</title>
	<link>https://www.mdpi.com/2225-1146/13/4/40</link>
	<description>The main aim of this paper is to review recent advances in the multivariate autoregressive index model [MAI] and their applications to economic and financial time series. MAI has recently gained momentum because it can be seen as a link between two popular but distinct multivariate time series approaches: vector autoregressive modeling [VAR] and the dynamic factor model [DFM]. Indeed, on the one hand, MAI is a VAR model with a peculiar reduced-rank structure that can lead to a significant dimension reduction; on the other hand, it allows for the identification of common components and common shocks in a similar way as the DFM. Our focus is on recent developments of the MAI, which include extending the original model with individual autoregressive structures, stochastic volatility, time-varying parameters, high-dimensionality, and co-integration. In addition, some gaps in the literature are filled by providing new results on the representation theory underlying previous contributions, and a novel model is provided.</description>
	<pubDate>2025-10-22</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 13, Pages 40: VAR Models with an Index Structure: A Survey with New Results</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/13/4/40">doi: 10.3390/econometrics13040040</a></p>
	<p>Authors:
		Gianluca Cubadda
		</p>
	<p>The main aim of this paper is to review recent advances in the multivariate autoregressive index model [MAI] and their applications to economic and financial time series. MAI has recently gained momentum because it can be seen as a link between two popular but distinct multivariate time series approaches: vector autoregressive modeling [VAR] and the dynamic factor model [DFM]. Indeed, on the one hand, MAI is a VAR model with a peculiar reduced-rank structure that can lead to a significant dimension reduction; on the other hand, it allows for the identification of common components and common shocks in a similar way as the DFM. Our focus is on recent developments of the MAI, which include extending the original model with individual autoregressive structures, stochastic volatility, time-varying parameters, high-dimensionality, and co-integration. In addition, some gaps in the literature are filled by providing new results on the representation theory underlying previous contributions, and a novel model is provided.</p>
	]]></content:encoded>

	<dc:title>VAR Models with an Index Structure: A Survey with New Results</dc:title>
			<dc:creator>Gianluca Cubadda</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics13040040</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2025-10-22</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2025-10-22</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>40</prism:startingPage>
		<prism:doi>10.3390/econometrics13040040</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/13/4/40</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/13/4/39">

	<title>Econometrics, Vol. 13, Pages 39: Demonstrating That the Autoregressive Distributed Lag Bounds Test Can Detect a Long-Run Levels Relationship When the Dependent Variable Is I(0)</title>
	<link>https://www.mdpi.com/2225-1146/13/4/39</link>
	<description>The autoregressive distributed lag bounds t-test and F-test for a long-run relationship that allows level variables to be either I(1) or I(0) is widely used in the literature. However, a long-run levels relationship cannot be detected when the dependent variable is I(0), because both tests will always reject their null hypotheses. It has subsequently been argued that a third test determines whether the dependent variable is I(1), such that when all three tests reject their null hypotheses, a cointegrating equation with an I(1) dependent variable is identified. It is argued that all three tests rejecting their null hypotheses rules out the possibility that the dependent variable is I(0), implying that the three tests cannot detect an equilibrium when the dependent variable is I(0). Our first contribution is to demonstrate and explain that rejection of all three tests’ null hypotheses can also indicate an equilibrium when the dependent variable is I(0) and not only when it is I(1). Our second contribution is to produce previously unavailable critical values for the third test in the cases where an intercept or trend is restricted into the equilibrium.</description>
	<pubDate>2025-10-22</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 13, Pages 39: Demonstrating That the Autoregressive Distributed Lag Bounds Test Can Detect a Long-Run Levels Relationship When the Dependent Variable Is I(0)</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/13/4/39">doi: 10.3390/econometrics13040039</a></p>
	<p>Authors:
		Chris Stewart
		</p>
	<p>The autoregressive distributed lag bounds t-test and F-test for a long-run relationship that allows level variables to be either I(1) or I(0) is widely used in the literature. However, a long-run levels relationship cannot be detected when the dependent variable is I(0), because both tests will always reject their null hypotheses. It has subsequently been argued that a third test determines whether the dependent variable is I(1), such that when all three tests reject their null hypotheses, a cointegrating equation with an I(1) dependent variable is identified. It is argued that all three tests rejecting their null hypotheses rules out the possibility that the dependent variable is I(0), implying that the three tests cannot detect an equilibrium when the dependent variable is I(0). Our first contribution is to demonstrate and explain that rejection of all three tests’ null hypotheses can also indicate an equilibrium when the dependent variable is I(0) and not only when it is I(1). Our second contribution is to produce previously unavailable critical values for the third test in the cases where an intercept or trend is restricted into the equilibrium.</p>
	]]></content:encoded>

	<dc:title>Demonstrating That the Autoregressive Distributed Lag Bounds Test Can Detect a Long-Run Levels Relationship When the Dependent Variable Is I(0)</dc:title>
			<dc:creator>Chris Stewart</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics13040039</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2025-10-22</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2025-10-22</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>39</prism:startingPage>
		<prism:doi>10.3390/econometrics13040039</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/13/4/39</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/13/4/38">

	<title>Econometrics, Vol. 13, Pages 38: Vis Inertiae and Statistical Inference: A Review of Difference-in-Differences Methods Employed in Economics and Other Subjects</title>
	<link>https://www.mdpi.com/2225-1146/13/4/38</link>
	<description>Difference in Differences (DiD) is a useful statistical technique employed by researchers to estimate the effects of exogenous events on the outcome of some response variables in random samples of treated units (i.e., units exposed to the event) ideally drawn from an infinite population. The term “effect” should be understood as the discrepancy between the post-event realisation of the response and the hypothetical realisation of that same outcome for the same treated units in the absence of the event. This theoretical discrepancy is clearly unobservable. To circumvent the implicit missing variable problem, DiD methods utilise the realisations of the response variable observed in comparable random samples of untreated units. The latter are samples of units drawn from the same population, but they are not exposed to the event under investigation. They function as the control or comparison group and serve as proxies for the non-existent untreated realisations of the responses in treated units during post-treatment periods. In summary, the DiD model posits that, in the absence of intervention and under specific conditions, treated units would exhibit behaviours that are indistinguishable from those of control or untreated units during the post-treatment periods. For the purpose of estimation, the method employs a combination of before–after and treatment–control group comparisons. The event that affects the response variables is referred to as “treatment.” However, it could also be referred to as “causal factor” to emphasise that, in the DiD approach, the objective is not to estimate a mere statistical association among variables. This review introduces the DiD techniques for researchers in economics, public policy, health research, management, environmental analysis, and other fields. 
It commences with the rudimentary methods employed to estimate the so-called Average Treatment Effect upon Treated (ATET) in a two-period and two-group case and subsequently addresses numerous issues that arise in a multi-unit and multi-period context. A particular focus is placed on the statistical assumptions necessary for a precise delineation of the identification process of the cause–effect relationship in the multi-period case. These assumptions include the parallel trend hypothesis, the no-anticipation assumption, and the SUTVA assumption. In the multi-period case, both the homogeneous and heterogeneous scenarios are taken into consideration. The homogeneous scenario refers to the situation in which the treated units are initially treated in the same periods. In contrast, the heterogeneous scenario involves the treatment of treated units in different periods. A portion of the presentation will be allocated to the developments associated with the DiD techniques that can be employed in the context of data clustering or spatio-temporal dependence. The present review includes a concise exposition of some policy-oriented papers that incorporate applications of DiD. The areas of focus encompass income taxation, migration, regulation, and environmental management.</description>
	<pubDate>2025-09-30</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 13, Pages 38: Vis Inertiae and Statistical Inference: A Review of Difference-in-Differences Methods Employed in Economics and Other Subjects</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/13/4/38">doi: 10.3390/econometrics13040038</a></p>
	<p>Authors:
		Bruno Paolo Bosco
		Paolo Maranzano
		</p>
	<p>Difference in Differences (DiD) is a useful statistical technique employed by researchers to estimate the effects of exogenous events on the outcome of some response variables in random samples of treated units (i.e., units exposed to the event) ideally drawn from an infinite population. The term “effect” should be understood as the discrepancy between the post-event realisation of the response and the hypothetical realisation of that same outcome for the same treated units in the absence of the event. This theoretical discrepancy is clearly unobservable. To circumvent the implicit missing variable problem, DiD methods utilise the realisations of the response variable observed in comparable random samples of untreated units. The latter are samples of units drawn from the same population, but they are not exposed to the event under investigation. They function as the control or comparison group and serve as proxies for the non-existent untreated realisations of the responses in treated units during post-treatment periods. In summary, the DiD model posits that, in the absence of intervention and under specific conditions, treated units would exhibit behaviours that are indistinguishable from those of control or untreated units during the post-treatment periods. For the purpose of estimation, the method employs a combination of before–after and treatment–control group comparisons. The event that affects the response variables is referred to as “treatment.” However, it could also be referred to as “causal factor” to emphasise that, in the DiD approach, the objective is not to estimate a mere statistical association among variables. This review introduces the DiD techniques for researchers in economics, public policy, health research, management, environmental analysis, and other fields. 
It commences with the rudimentary methods employed to estimate the so-called Average Treatment Effect upon Treated (ATET) in a two-period and two-group case and subsequently addresses numerous issues that arise in a multi-unit and multi-period context. A particular focus is placed on the statistical assumptions necessary for a precise delineation of the identification process of the cause–effect relationship in the multi-period case. These assumptions include the parallel trend hypothesis, the no-anticipation assumption, and the SUTVA assumption. In the multi-period case, both the homogeneous and heterogeneous scenarios are taken into consideration. The homogeneous scenario refers to the situation in which the treated units are initially treated in the same periods. In contrast, the heterogeneous scenario involves the treatment of treated units in different periods. A portion of the presentation will be allocated to the developments associated with the DiD techniques that can be employed in the context of data clustering or spatio-temporal dependence. The present review includes a concise exposition of some policy-oriented papers that incorporate applications of DiD. The areas of focus encompass income taxation, migration, regulation, and environmental management.</p>
	]]></content:encoded>

	<dc:title>Vis Inertiae and Statistical Inference: A Review of Difference-in-Differences Methods Employed in Economics and Other Subjects</dc:title>
			<dc:creator>Bruno Paolo Bosco</dc:creator>
			<dc:creator>Paolo Maranzano</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics13040038</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2025-09-30</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2025-09-30</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Review</prism:section>
	<prism:startingPage>38</prism:startingPage>
		<prism:doi>10.3390/econometrics13040038</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/13/4/38</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/13/3/37">

	<title>Econometrics, Vol. 13, Pages 37: Re-Examining Confidence Intervals for Ratios of Parameters</title>
	<link>https://www.mdpi.com/2225-1146/13/3/37</link>
	<description>This paper considers the problem of constructing confidence intervals (CIs) for nonlinear functions of parameters, particularly ratios of parameters, a common issue in econometrics and statistics. Classical CIs (such as the Delta method and the Fieller method) often fail in small samples due to biased parameter estimators and skewed distributions. We extended the Delta method using the Edgeworth expansion to correct for skewness due to estimated parameters having non-normal and asymmetric distributions. The resulting bias-corrected confidence intervals are easy to compute and have a good coverage probability that converges to the nominal level at a rate of O(n^(−1/2)) where n is the sample size. We also propose bias-corrected estimators based on second-order Taylor expansions, aligning with the “almost unbiased ratio estimator”. We then correct the CIs according to the Delta method and the Edgeworth expansion. Thus, our new methods for constructing confidence intervals account for both the bias and the skewness of the distribution of the nonlinear functions of parameters. We conduct a simulation study to compare the confidence intervals of our new methods with the two classical methods. The methods evaluated include Fieller’s interval, Delta with and without the bias correction interval, and Edgeworth expansion with and without the bias correction interval. The results show that our new methods with bias correction generally have good performance in terms of controlling the coverage probabilities and average length intervals. They should be recommended for constructing confidence intervals for nonlinear functions of estimated parameters.</description>
	<pubDate>2025-09-20</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 13, Pages 37: Re-Examining Confidence Intervals for Ratios of Parameters</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/13/3/37">doi: 10.3390/econometrics13030037</a></p>
	<p>Authors:
		Zaka Ratsimalahelo
		</p>
	<p>This paper considers the problem of constructing confidence intervals (CIs) for nonlinear functions of parameters, particularly ratios of parameters, a common issue in econometrics and statistics. Classical CIs (such as the Delta method and the Fieller method) often fail in small samples due to biased parameter estimators and skewed distributions. We extended the Delta method using the Edgeworth expansion to correct for skewness due to estimated parameters having non-normal and asymmetric distributions. The resulting bias-corrected confidence intervals are easy to compute and have a good coverage probability that converges to the nominal level at a rate of O(n^(−1/2)) where n is the sample size. We also propose bias-corrected estimators based on second-order Taylor expansions, aligning with the “almost unbiased ratio estimator”. We then correct the CIs according to the Delta method and the Edgeworth expansion. Thus, our new methods for constructing confidence intervals account for both the bias and the skewness of the distribution of the nonlinear functions of parameters. We conduct a simulation study to compare the confidence intervals of our new methods with the two classical methods. The methods evaluated include Fieller’s interval, Delta with and without the bias correction interval, and Edgeworth expansion with and without the bias correction interval. The results show that our new methods with bias correction generally have good performance in terms of controlling the coverage probabilities and average length intervals. They should be recommended for constructing confidence intervals for nonlinear functions of estimated parameters.</p>
	]]></content:encoded>

	<dc:title>Re-Examining Confidence Intervals for Ratios of Parameters</dc:title>
			<dc:creator>Zaka Ratsimalahelo</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics13030037</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2025-09-20</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2025-09-20</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>37</prism:startingPage>
		<prism:doi>10.3390/econometrics13030037</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/13/3/37</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/13/3/36">

	<title>Econometrics, Vol. 13, Pages 36: Integration and Risk Transmission Dynamics Between Bitcoin, Currency Pairs, and Traditional Financial Assets in South Africa</title>
	<link>https://www.mdpi.com/2225-1146/13/3/36</link>
	<description>This study explores the new insights into the integration and dynamic asymmetric volatility risk spillovers between Bitcoin, currency pairs (USD/ZAR, GBP/ZAR and EUR/ZAR), and traditional financial assets (ALSI, Bond, and Gold) in South Africa using daily data spanning the period from 2010 to 2024 and employing Time-Varying Parameter Vector Autoregression (TVP-VAR) and wavelet coherence. The findings revealed strengthened integration between traditional financial assets and currency pairs, as well as weak integration with BTC/ZAR. Furthermore, BTC/ZAR and traditional financial assets were receivers of shocks, while the currency pairs were transmitters of spillovers. Gold emerged as an attractive investment during periods of inflation or currency devaluation. However, the assets have a total connectedness index of 28.37%, offering a reduced systemic risk. Distinct patterns were observed in the short, medium, and long term in time scales and frequency. There is a diversification benefit and potential hedging strategies due to gold’s negative influence on BTC/ZAR. Bitcoin’s high volatility and lack of regulatory oversight continue to be deterrents for institutional investors. This study lays a solid foundation for understanding the financial dynamics in South Africa, offering valuable insights for investors and policymakers interested in the intricate linkages between BTC/ZAR, currency pairs, and traditional financial assets, allowing for more targeted policy measures.</description>
	<pubDate>2025-09-19</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 13, Pages 36: Integration and Risk Transmission Dynamics Between Bitcoin, Currency Pairs, and Traditional Financial Assets in South Africa</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/13/3/36">doi: 10.3390/econometrics13030036</a></p>
	<p>Authors:
		Benjamin Mudiangombe Mudiangombe
		John Weirstrass Muteba Mwamba
		</p>
	<p>This study explores the new insights into the integration and dynamic asymmetric volatility risk spillovers between Bitcoin, currency pairs (USD/ZAR, GBP/ZAR and EUR/ZAR), and traditional financial assets (ALSI, Bond, and Gold) in South Africa using daily data spanning the period from 2010 to 2024 and employing Time-Varying Parameter Vector Autoregression (TVP-VAR) and wavelet coherence. The findings revealed strengthened integration between traditional financial assets and currency pairs, as well as weak integration with BTC/ZAR. Furthermore, BTC/ZAR and traditional financial assets were receivers of shocks, while the currency pairs were transmitters of spillovers. Gold emerged as an attractive investment during periods of inflation or currency devaluation. However, the assets have a total connectedness index of 28.37%, offering a reduced systemic risk. Distinct patterns were observed in the short, medium, and long term in time scales and frequency. There is a diversification benefit and potential hedging strategies due to gold’s negative influence on BTC/ZAR. Bitcoin’s high volatility and lack of regulatory oversight continue to be deterrents for institutional investors. This study lays a solid foundation for understanding the financial dynamics in South Africa, offering valuable insights for investors and policymakers interested in the intricate linkages between BTC/ZAR, currency pairs, and traditional financial assets, allowing for more targeted policy measures.</p>
	]]></content:encoded>

	<dc:title>Integration and Risk Transmission Dynamics Between Bitcoin, Currency Pairs, and Traditional Financial Assets in South Africa</dc:title>
			<dc:creator>Benjamin Mudiangombe Mudiangombe</dc:creator>
			<dc:creator>John Weirstrass Muteba Mwamba</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics13030036</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2025-09-19</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2025-09-19</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>36</prism:startingPage>
		<prism:doi>10.3390/econometrics13030036</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/13/3/36</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/13/3/35">

	<title>Econometrics, Vol. 13, Pages 35: Forecasting of GDP Growth in the South Caucasian Countries Using Hybrid Ensemble Models</title>
	<link>https://www.mdpi.com/2225-1146/13/3/35</link>
	<description>This study aimed to forecast the gross domestic product (GDP) of the South Caucasian nations (Armenia, Azerbaijan, and Georgia) by scrutinizing the accuracy of various econometric methodologies. This topic is noteworthy considering the significant economic development exhibited by these countries in the context of recovery post COVID-19. The seasonal autoregressive integrated moving average (SARIMA), exponential smoothing state space (ETS) model, neural network autoregressive (NNAR) model, and trigonometric exponential smoothing state space model with Box–Cox transformation, ARMA errors, and trend and seasonal components (TBATS), together with their feasible hybrid combinations, were employed. The empirical investigation utilized quarterly GDP data at market prices from 1Q-2010 to 2Q-2024. According to the results, the hybrid models significantly outperformed the corresponding single models, handling the linear and nonlinear components of the GDP time series more effectively. Rolling-window cross-validation showed that hybrid ETS-NNAR-TBATS for Armenia, hybrid ETS-NNAR-SARIMA for Azerbaijan, and hybrid ETS-SARIMA for Georgia were the best-performing models. The forecasts also suggest that Georgia is likely to record the strongest GDP growth over the projection horizon, followed by Armenia and Azerbaijan. These findings confirm that hybrid models constitute a reliable technique for forecasting GDP in the South Caucasian countries. This region is not only economically dynamic but also strategically important, with direct implications for policy and regional planning.</description>
	<pubDate>2025-09-10</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 13, Pages 35: Forecasting of GDP Growth in the South Caucasian Countries Using Hybrid Ensemble Models</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/13/3/35">doi: 10.3390/econometrics13030035</a></p>
	<p>Authors:
		Gaetano Perone
		Manuel A. Zambrano-Monserrate
		</p>
	<p>This study aimed to forecast the gross domestic product (GDP) of the South Caucasian nations (Armenia, Azerbaijan, and Georgia) by scrutinizing the accuracy of various econometric methodologies. This topic is noteworthy considering the significant economic development exhibited by these countries in the context of recovery post COVID-19. The seasonal autoregressive integrated moving average (SARIMA), exponential smoothing state space (ETS) model, neural network autoregressive (NNAR) model, and trigonometric exponential smoothing state space model with Box&amp;amp;ndash;Cox transformation, ARMA errors, and trend and seasonal components (TBATS), together with their feasible hybrid combinations, were employed. The empirical investigation utilized quarterly GDP data at market prices from 1Q-2010 to 2Q-2024. According to the results, the hybrid models significantly outperformed the corresponding single models, handling the linear and nonlinear components of the GDP time series more effectively. Rolling-window cross-validation showed that hybrid ETS-NNAR-TBATS for Armenia, hybrid ETS-NNAR-SARIMA for Azerbaijan, and hybrid ETS-SARIMA for Georgia were the best-performing models. The forecasts also suggest that Georgia is likely to record the strongest GDP growth over the projection horizon, followed by Armenia and Azerbaijan. These findings confirm that hybrid models constitute a reliable technique for forecasting GDP in the South Caucasian countries. This region is not only economically dynamic but also strategically important, with direct implications for policy and regional planning.</p>
	]]></content:encoded>

	<dc:title>Forecasting of GDP Growth in the South Caucasian Countries Using Hybrid Ensemble Models</dc:title>
			<dc:creator>Gaetano Perone</dc:creator>
			<dc:creator>Manuel A. Zambrano-Monserrate</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics13030035</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2025-09-10</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2025-09-10</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>35</prism:startingPage>
		<prism:doi>10.3390/econometrics13030035</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/13/3/35</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/13/3/34">

	<title>Econometrics, Vol. 13, Pages 34: Volatility Analysis of Returns of Financial Assets Using a Bayesian Time-Varying Realized GARCH-It&amp;ocirc; Model</title>
	<link>https://www.mdpi.com/2225-1146/13/3/34</link>
	<description>In a stage of more and more complex and high-frequency financial markets, the volatility analysis is a cornerstone of modern financial econometrics with practical applications in portfolio optimization, derivative pricing, and systematic risk assessment. This paper introduces a novel Bayesian Time-varying Generalized Autoregressive Conditional Heteroskedasticity (BtvGARCH-It&amp;ocirc;) model designed to improve the precision and flexibility of volatility modeling in financial markets. Original GARCH-It&amp;ocirc; models, while effective in capturing realized volatility and intraday patterns, rely on fixed or constant parameters; thus, it is limited to studying structural changes. Our proposed model addresses this restraint by integrating the continuous-time Ito process with a time-varying Bayesian inference to allow parameters to vary over time based on prior beliefs to quantify uncertainty and minimize overfitting, especially in small-sample or high-dimensional settings. Through simulation studies, using sample sizes of N = 100 and N = 200, we find that BtvGARCH-It&amp;ocirc; outperformed original GARCH-It&amp;ocirc; in-sample fit and out-of-sample forecast accuracy based on posterior estimates comparison with true parameter values and forecasting error metrics. For the empirical validation, this model is applied to analyze the volatility of S&amp;amp;P 500 and Bitcoin (BTC) using one-minute length data for S&amp;amp;P 500 (from 3 January 2023 to 31 December 2024) and BTC (from 1 January 2023 to 1 January 2025). This model has potential as a robust tool and a new direction in volatility modeling for financial risk management.</description>
	<pubDate>2025-09-09</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 13, Pages 34: Volatility Analysis of Returns of Financial Assets Using a Bayesian Time-Varying Realized GARCH-It&ocirc; Model</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/13/3/34">doi: 10.3390/econometrics13030034</a></p>
	<p>Authors:
		Pathairat Pastpipatkul
		Htwe Ko
		</p>
	<p>In a stage of more and more complex and high-frequency financial markets, the volatility analysis is a cornerstone of modern financial econometrics with practical applications in portfolio optimization, derivative pricing, and systematic risk assessment. This paper introduces a novel Bayesian Time-varying Generalized Autoregressive Conditional Heteroskedasticity (BtvGARCH-It&amp;amp;ocirc;) model designed to improve the precision and flexibility of volatility modeling in financial markets. Original GARCH-It&amp;amp;ocirc; models, while effective in capturing realized volatility and intraday patterns, rely on fixed or constant parameters; thus, it is limited to studying structural changes. Our proposed model addresses this restraint by integrating the continuous-time Ito process with a time-varying Bayesian inference to allow parameters to vary over time based on prior beliefs to quantify uncertainty and minimize overfitting, especially in small-sample or high-dimensional settings. Through simulation studies, using sample sizes of N = 100 and N = 200, we find that BtvGARCH-It&amp;amp;ocirc; outperformed original GARCH-It&amp;amp;ocirc; in-sample fit and out-of-sample forecast accuracy based on posterior estimates comparison with true parameter values and forecasting error metrics. For the empirical validation, this model is applied to analyze the volatility of S&amp;amp;amp;P 500 and Bitcoin (BTC) using one-minute length data for S&amp;amp;amp;P 500 (from 3 January 2023 to 31 December 2024) and BTC (from 1 January 2023 to 1 January 2025). This model has potential as a robust tool and a new direction in volatility modeling for financial risk management.</p>
	]]></content:encoded>

	<dc:title>Volatility Analysis of Returns of Financial Assets Using a Bayesian Time-Varying Realized GARCH-It&amp;ocirc; Model</dc:title>
			<dc:creator>Pathairat Pastpipatkul</dc:creator>
			<dc:creator>Htwe Ko</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics13030034</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2025-09-09</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2025-09-09</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>34</prism:startingPage>
		<prism:doi>10.3390/econometrics13030034</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/13/3/34</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/13/3/33">

	<title>Econometrics, Vol. 13, Pages 33: Modelling and Forecasting Financial Volatility with Realized GARCH Model: A Comparative Study of Skew-t Distributions Using GRG and MCMC Methods</title>
	<link>https://www.mdpi.com/2225-1146/13/3/33</link>
	<description>Financial time-series data often exhibit statistically significant skewness and heavy tails, and numerous flexible distributions have been proposed to model them. In the context of the Log-linear Realized GARCH model with Skew-t (ST) distributions, our objective is to explore how the choice of prior distributions in the Adaptive Random Walk Metropolis method and initial parameter values in the Generalized Reduced Gradient (GRG) Solver method affect ST parameter and log-likelihood estimates. An empirical study was conducted using the FTSE 100 index to evaluate model performance. We provide a comprehensive step-by-step tutorial demonstrating how to perform estimation and sensitivity analysis using data tables in Microsoft Excel. Among seven ST distributions&amp;mdash;namely, the asymmetric, epsilon, exponentiated half-logistic, Hansen, Jones&amp;ndash;Faddy, Mittnik&amp;ndash;Paolella, and Rosco&amp;ndash;Jones&amp;ndash;Pewsey distributions&amp;mdash;Hansen&amp;rsquo;s ST distribution is found to be superior. This study also applied the GRG method to estimate new approaches, including Realized Real-Time GARCH, Realized ASHARV, and GARCH@CARR models. An empirical study showed that the GARCH@CARR model with the feedback effect provides the best goodness of fit. Out-of-sample forecasting evaluations further confirm the predictive dominance of models incorporating real-time information, particularly Realized Real-Time GARCH for volatility forecasting and Realized ASHARV for 1% VaR estimation. The findings offer actionable insights for portfolio managers and risk analysts, particularly in improving volatility forecasts and tail-risk assessments during market crises, thereby enhancing risk-adjusted returns and regulatory compliance. 
Although the GRG method is sensitive to initial values, its presence in the spreadsheet method can be a powerful and promising tool in working with probability density functions that have explicit forms and are unimodal, high-dimensional, and complex, without the need for programming experience.</description>
	<pubDate>2025-09-04</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 13, Pages 33: Modelling and Forecasting Financial Volatility with Realized GARCH Model: A Comparative Study of Skew-t Distributions Using GRG and MCMC Methods</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/13/3/33">doi: 10.3390/econometrics13030033</a></p>
	<p>Authors:
		Didit Budi Nugroho
		Adi Setiawan
		Takayuki Morimoto
		</p>
	<p>Financial time-series data often exhibit statistically significant skewness and heavy tails, and numerous flexible distributions have been proposed to model them. In the context of the Log-linear Realized GARCH model with Skew-t (ST) distributions, our objective is to explore how the choice of prior distributions in the Adaptive Random Walk Metropolis method and initial parameter values in the Generalized Reduced Gradient (GRG) Solver method affect ST parameter and log-likelihood estimates. An empirical study was conducted using the FTSE 100 index to evaluate model performance. We provide a comprehensive step-by-step tutorial demonstrating how to perform estimation and sensitivity analysis using data tables in Microsoft Excel. Among seven ST distributions&amp;amp;mdash;namely, the asymmetric, epsilon, exponentiated half-logistic, Hansen, Jones&amp;amp;ndash;Faddy, Mittnik&amp;amp;ndash;Paolella, and Rosco&amp;amp;ndash;Jones&amp;amp;ndash;Pewsey distributions&amp;amp;mdash;Hansen&amp;amp;rsquo;s ST distribution is found to be superior. This study also applied the GRG method to estimate new approaches, including Realized Real-Time GARCH, Realized ASHARV, and GARCH@CARR models. An empirical study showed that the GARCH@CARR model with the feedback effect provides the best goodness of fit. Out-of-sample forecasting evaluations further confirm the predictive dominance of models incorporating real-time information, particularly Realized Real-Time GARCH for volatility forecasting and Realized ASHARV for 1% VaR estimation. The findings offer actionable insights for portfolio managers and risk analysts, particularly in improving volatility forecasts and tail-risk assessments during market crises, thereby enhancing risk-adjusted returns and regulatory compliance. 
Although the GRG method is sensitive to initial values, its presence in the spreadsheet method can be a powerful and promising tool in working with probability density functions that have explicit forms and are unimodal, high-dimensional, and complex, without the need for programming experience.</p>
	]]></content:encoded>

	<dc:title>Modelling and Forecasting Financial Volatility with Realized GARCH Model: A Comparative Study of Skew-t Distributions Using GRG and MCMC Methods</dc:title>
			<dc:creator>Didit Budi Nugroho</dc:creator>
			<dc:creator>Adi Setiawan</dc:creator>
			<dc:creator>Takayuki Morimoto</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics13030033</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2025-09-04</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2025-09-04</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>33</prism:startingPage>
		<prism:doi>10.3390/econometrics13030033</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/13/3/33</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/13/3/32">

	<title>Econometrics, Vol. 13, Pages 32: Comparisons Between Frequency Distributions Based on Gini&amp;rsquo;s Approach: Principal Component Analysis Addressed to Time Series</title>
	<link>https://www.mdpi.com/2225-1146/13/3/32</link>
	<description>In this paper, time series of length T are seen as frequency distributions. Each distribution is defined with respect to a statistical variable having T observed values. A methodological system based on Gini&amp;rsquo;s approach is put forward, so the statistical model through which time series are handled is a frequency distribution studied inside a linear system. In addition to the starting frequency distributions that are observed, other frequency distributions are treated. Thus, marginal distributions based on the notion of proportionality are introduced together with joint distributions. Both distributions are statistical models. A fundamental invariance property related to marginal distributions is made explicit in this research work, so one can focus on collections of marginal frequency distributions, identifying multiple frequency distributions. For this reason, the latter is studied via a tensor. As frequency distributions are practical realizations of nonparametric probability distributions over R, one passes from frequency distributions to discrete random variables. In this paper, a mathematical model that generates time series is put forward. It is a stochastic process based on subjective previsions of random variables. A subdivision of the exchangeability of variables of a statistical nature is shown, so a reinterpretation of principal component analysis that is based on the notion of proportionality also characterizes this research work.</description>
	<pubDate>2025-08-13</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 13, Pages 32: Comparisons Between Frequency Distributions Based on Gini&rsquo;s Approach: Principal Component Analysis Addressed to Time Series</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/13/3/32">doi: 10.3390/econometrics13030032</a></p>
	<p>Authors:
		Pierpaolo Angelini
		</p>
	<p>In this paper, time series of length T are seen as frequency distributions. Each distribution is defined with respect to a statistical variable having T observed values. A methodological system based on Gini&amp;amp;rsquo;s approach is put forward, so the statistical model through which time series are handled is a frequency distribution studied inside a linear system. In addition to the starting frequency distributions that are observed, other frequency distributions are treated. Thus, marginal distributions based on the notion of proportionality are introduced together with joint distributions. Both distributions are statistical models. A fundamental invariance property related to marginal distributions is made explicit in this research work, so one can focus on collections of marginal frequency distributions, identifying multiple frequency distributions. For this reason, the latter is studied via a tensor. As frequency distributions are practical realizations of nonparametric probability distributions over R, one passes from frequency distributions to discrete random variables. In this paper, a mathematical model that generates time series is put forward. It is a stochastic process based on subjective previsions of random variables. A subdivision of the exchangeability of variables of a statistical nature is shown, so a reinterpretation of principal component analysis that is based on the notion of proportionality also characterizes this research work.</p>
	]]></content:encoded>

	<dc:title>Comparisons Between Frequency Distributions Based on Gini&amp;rsquo;s Approach: Principal Component Analysis Addressed to Time Series</dc:title>
			<dc:creator>Pierpaolo Angelini</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics13030032</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2025-08-13</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2025-08-13</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>32</prism:startingPage>
		<prism:doi>10.3390/econometrics13030032</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/13/3/32</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/13/3/31">

	<title>Econometrics, Vol. 13, Pages 31: A Statistical Characterization of Median-Based Inequality Measures</title>
	<link>https://www.mdpi.com/2225-1146/13/3/31</link>
	<description>For income distributions divided into middle, lower, and higher regions based on scalar median cut-offs, this paper establishes the asymptotic distribution properties&amp;mdash;including explicit empirically applicable variance formulas and hence standard errors&amp;mdash;of sample estimates of the proportion of the population within the group, their share of total income, and the groups&amp;rsquo; mean incomes. It then applies these results for relative mean income ratios, various polarization measures, and decile-mean income ratios. Since the derived formulas are not distribution-free, the study advises using a density estimation technique proposed by Comte and Genon-Catalot. A shrinking middle-income group with declining relative incomes and marked upper-tail polarization among men&amp;rsquo;s incomes are all found to be highly statistically significant.</description>
	<pubDate>2025-08-09</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 13, Pages 31: A Statistical Characterization of Median-Based Inequality Measures</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/13/3/31">doi: 10.3390/econometrics13030031</a></p>
	<p>Authors:
		Charles M. Beach
		Russell Davidson
		</p>
	<p>For income distributions divided into middle, lower, and higher regions based on scalar median cut-offs, this paper establishes the asymptotic distribution properties&amp;amp;mdash;including explicit empirically applicable variance formulas and hence standard errors&amp;amp;mdash;of sample estimates of the proportion of the population within the group, their share of total income, and the groups&amp;amp;rsquo; mean incomes. It then applies these results for relative mean income ratios, various polarization measures, and decile-mean income ratios. Since the derived formulas are not distribution-free, the study advises using a density estimation technique proposed by Comte and Genon-Catalot. A shrinking middle-income group with declining relative incomes and marked upper-tail polarization among men&amp;amp;rsquo;s incomes are all found to be highly statistically significant.</p>
	]]></content:encoded>

	<dc:title>A Statistical Characterization of Median-Based Inequality Measures</dc:title>
			<dc:creator>Charles M. Beach</dc:creator>
			<dc:creator>Russell Davidson</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics13030031</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2025-08-09</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2025-08-09</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>31</prism:startingPage>
		<prism:doi>10.3390/econometrics13030031</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/13/3/31</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/13/3/30">

	<title>Econometrics, Vol. 13, Pages 30: Simple Approximations and Interpretation of Pareto Index and Gini Coefficient Using Mean Absolute Deviations and Quantile Functions</title>
	<link>https://www.mdpi.com/2225-1146/13/3/30</link>
	<description>The Pareto distribution has been widely used to model income distribution and inequality. The tail index and the Gini index are typically computed by iteration using Maximum Likelihood and are usually interpreted in terms of the Lorenz curve. We derive an alternative method by considering a truncated Pareto distribution and deriving a simple closed-form approximation for the tail index and the Gini coefficient in terms of the mean absolute deviation and weighted quartile differences. The obtained expressions can be used for any Pareto distribution, even without a finite mean or variance. These expressions are resistant to outliers and have a simple geometric and &amp;ldquo;economic&amp;rdquo; interpretation in terms of the quantile function and quartiles. Extensive simulations demonstrate that the proposed approximate values for the tail index and the Gini coefficient are within a few percent relative error of the exact values, even for a moderate number of data points. Our paper offers practical and computationally simple methods to analyze a class of models with Pareto distributions. The proposed methodology can be extended to many other distributions used in econometrics and related fields.</description>
	<pubDate>2025-08-08</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 13, Pages 30: Simple Approximations and Interpretation of Pareto Index and Gini Coefficient Using Mean Absolute Deviations and Quantile Functions</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/13/3/30">doi: 10.3390/econometrics13030030</a></p>
	<p>Authors:
		Eugene Pinsky
		Qifu Wen
		</p>
	<p>The Pareto distribution has been widely used to model income distribution and inequality. The tail index and the Gini index are typically computed by iteration using Maximum Likelihood and are usually interpreted in terms of the Lorenz curve. We derive an alternative method by considering a truncated Pareto distribution and deriving a simple closed-form approximation for the tail index and the Gini coefficient in terms of the mean absolute deviation and weighted quartile differences. The obtained expressions can be used for any Pareto distribution, even without a finite mean or variance. These expressions are resistant to outliers and have a simple geometric and &amp;amp;ldquo;economic&amp;amp;rdquo; interpretation in terms of the quantile function and quartiles. Extensive simulations demonstrate that the proposed approximate values for the tail index and the Gini coefficient are within a few percent relative error of the exact values, even for a moderate number of data points. Our paper offers practical and computationally simple methods to analyze a class of models with Pareto distributions. The proposed methodology can be extended to many other distributions used in econometrics and related fields.</p>
	]]></content:encoded>

	<dc:title>Simple Approximations and Interpretation of Pareto Index and Gini Coefficient Using Mean Absolute Deviations and Quantile Functions</dc:title>
			<dc:creator>Eugene Pinsky</dc:creator>
			<dc:creator>Qifu Wen</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics13030030</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2025-08-08</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2025-08-08</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>30</prism:startingPage>
		<prism:doi>10.3390/econometrics13030030</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/13/3/30</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/13/3/29">

	<title>Econometrics, Vol. 13, Pages 29: Beyond GDP: COVID-19&amp;rsquo;s Effects on Macroeconomic Efficiency and Productivity Dynamics in OECD Countries</title>
	<link>https://www.mdpi.com/2225-1146/13/3/29</link>
	<description>The COVID-19 pandemic triggered unprecedented economic disruptions, raising critical questions about the resilience and adaptability of macroeconomic productivity across countries. This study examines the impact of COVID-19 on macroeconomic efficiency and productivity dynamics in 37 OECD countries using quarterly data from 2018Q1 to 2024Q4. By employing a Slack-Based Measure Data Envelopment Analysis (SBM-DEA) and the Malmquist Productivity Index (MPI), we decompose total factor productivity (TFP) into efficiency change (EC) and technological change (TC) across three periods: pre-pandemic, during-pandemic, and post-pandemic. Our framework incorporates both desirable (GDP) and undesirable outputs (inflation, unemployment, housing price inflation, and interest rate distortions), offering a multidimensional view of macroeconomic efficiency. Results show broad but uneven productivity gains, with technological progress proving more resilient than efficiency during the pandemic. Post-COVID recovery trajectories diverged, reflecting differences in structural adaptability and innovation capacity. Regression analysis reveals that stringent lockdowns in 2020 were associated with lower productivity in 2023&amp;ndash;2024, while more adaptive policies in 2021 supported long-term technological gains. These findings highlight the importance of aligning crisis response with forward-looking economic strategies and demonstrate the value of DEA-based methods for evaluating macroeconomic performance beyond GDP.</description>
	<pubDate>2025-08-04</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 13, Pages 29: Beyond GDP: COVID-19&rsquo;s Effects on Macroeconomic Efficiency and Productivity Dynamics in OECD Countries</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/13/3/29">doi: 10.3390/econometrics13030029</a></p>
	<p>Authors:
		Ümit Sağlam
		</p>
	<p>The COVID-19 pandemic triggered unprecedented economic disruptions, raising critical questions about the resilience and adaptability of macroeconomic productivity across countries. This study examines the impact of COVID-19 on macroeconomic efficiency and productivity dynamics in 37 OECD countries using quarterly data from 2018Q1 to 2024Q4. By employing a Slack-Based Measure Data Envelopment Analysis (SBM-DEA) and the Malmquist Productivity Index (MPI), we decompose total factor productivity (TFP) into efficiency change (EC) and technological change (TC) across three periods: pre-pandemic, during-pandemic, and post-pandemic. Our framework incorporates both desirable (GDP) and undesirable outputs (inflation, unemployment, housing price inflation, and interest rate distortions), offering a multidimensional view of macroeconomic efficiency. Results show broad but uneven productivity gains, with technological progress proving more resilient than efficiency during the pandemic. Post-COVID recovery trajectories diverged, reflecting differences in structural adaptability and innovation capacity. Regression analysis reveals that stringent lockdowns in 2020 were associated with lower productivity in 2023&amp;amp;ndash;2024, while more adaptive policies in 2021 supported long-term technological gains. These findings highlight the importance of aligning crisis response with forward-looking economic strategies and demonstrate the value of DEA-based methods for evaluating macroeconomic performance beyond GDP.</p>
	]]></content:encoded>

	<dc:title>Beyond GDP: COVID-19&amp;rsquo;s Effects on Macroeconomic Efficiency and Productivity Dynamics in OECD Countries</dc:title>
			<dc:creator>Ümit Sağlam</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics13030029</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2025-08-04</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2025-08-04</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>29</prism:startingPage>
		<prism:doi>10.3390/econometrics13030029</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/13/3/29</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/13/3/28">

	<title>Econometrics, Vol. 13, Pages 28: Analyzing the Impact of Carbon Mitigation on the Eurozone&amp;rsquo;s Trade Dynamics with the US and China</title>
	<link>https://www.mdpi.com/2225-1146/13/3/28</link>
	<description>This study focusses on the transmission of carbon pricing mechanisms in shaping trade dynamics between the Eurozone and key partners: the USA and China. Using Bayesian variable selection methods and a Time-Varying Structural Vector Autoregressions (TV-SVAR) model, the research identifies the key variables impacting EU carbon emissions over time. The results reveal that manufactured products from the US have a diminishing positive impact on EU carbon emissions, suggesting potential exemption from future regulations. In contrast, manufactured goods from the US and petroleum products from China are expected to increase emissions, indicating a need for stricter trade policies. These findings provide strategic insights for policymakers aiming to balance trade and environmental objectives.</description>
	<pubDate>2025-07-29</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 13, Pages 28: Analyzing the Impact of Carbon Mitigation on the Eurozone&rsquo;s Trade Dynamics with the US and China</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/13/3/28">doi: 10.3390/econometrics13030028</a></p>
	<p>Authors:
		Pathairat Pastpipatkul
		Terdthiti Chitkasame
		</p>
	<p>This study focusses on the transmission of carbon pricing mechanisms in shaping trade dynamics between the Eurozone and key partners: the USA and China. Using Bayesian variable selection methods and a Time-Varying Structural Vector Autoregressions (TV-SVAR) model, the research identifies the key variables impacting EU carbon emissions over time. The results reveal that manufactured products from the US have a diminishing positive impact on EU carbon emissions, suggesting potential exemption from future regulations. In contrast, manufactured goods from the US and petroleum products from China are expected to increase emissions, indicating a need for stricter trade policies. These findings provide strategic insights for policymakers aiming to balance trade and environmental objectives.</p>
	]]></content:encoded>

	<dc:title>Analyzing the Impact of Carbon Mitigation on the Eurozone&amp;rsquo;s Trade Dynamics with the US and China</dc:title>
			<dc:creator>Pathairat Pastpipatkul</dc:creator>
			<dc:creator>Terdthiti Chitkasame</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics13030028</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2025-07-29</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2025-07-29</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>28</prism:startingPage>
		<prism:doi>10.3390/econometrics13030028</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/13/3/28</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/13/3/27">

	<title>Econometrics, Vol. 13, Pages 27: Pseudo-Panel Decomposition of the Blinder&amp;ndash;Oaxaca Gender Wage Gap</title>
	<link>https://www.mdpi.com/2225-1146/13/3/27</link>
	<description>This article introduces a novel approach to decomposing the Blinder&amp;ndash;Oaxaca gender wage gap using pseudo-panel data. In many developing countries, panel data are not available; however, understanding the evolution of the gender wage gap over time requires tracking individuals longitudinally. When individuals change across time periods, estimators tend to be inconsistent and inefficient. To address this issue, and building upon the traditional Blinder&amp;ndash;Oaxaca methodology, we propose an alternative procedure that follows cohorts over time rather than individuals. This approach enables the estimation of both the explained and unexplained components&amp;mdash;&amp;ldquo;endowment effect&amp;rdquo; and &amp;ldquo;remuneration effect&amp;rdquo;&amp;mdash;of the wage gap, along with their respective standard errors, even in the absence of true panel data. We apply this methodology to the case of Colombia, finding a gender wage gap of approximately 15% in favor of male cohorts. This gap comprises a &amp;minus;5.6% explained component and a 20% unexplained component without controls. When we control by informality, size of the firm and sector, the gap comprises a &amp;minus;3.5% explained component and an 18.7% unexplained component.</description>
	<pubDate>2025-07-19</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 13, Pages 27: Pseudo-Panel Decomposition of the Blinder&amp;ndash;Oaxaca Gender Wage Gap</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/13/3/27">doi: 10.3390/econometrics13030027</a></p>
	<p>Authors:
		Jhon James Mora
		Diana Yaneth Herrera
		</p>
	<p>This article introduces a novel approach to decomposing the Blinder&amp;ndash;Oaxaca gender wage gap using pseudo-panel data. In many developing countries, panel data are not available; however, understanding the evolution of the gender wage gap over time requires tracking individuals longitudinally. When individuals change across time periods, estimators tend to be inconsistent and inefficient. To address this issue, and building upon the traditional Blinder&amp;ndash;Oaxaca methodology, we propose an alternative procedure that follows cohorts over time rather than individuals. This approach enables the estimation of both the explained and unexplained components&amp;mdash;&amp;ldquo;endowment effect&amp;rdquo; and &amp;ldquo;remuneration effect&amp;rdquo;&amp;mdash;of the wage gap, along with their respective standard errors, even in the absence of true panel data. We apply this methodology to the case of Colombia, finding a gender wage gap of approximately 15% in favor of male cohorts. This gap comprises a &amp;minus;5.6% explained component and a 20% unexplained component without controls. When we control by informality, size of the firm and sector, the gap comprises a &amp;minus;3.5% explained component and an 18.7% unexplained component.</p>
	]]></content:encoded>

	<dc:title>Pseudo-Panel Decomposition of the Blinder&amp;ndash;Oaxaca Gender Wage Gap</dc:title>
			<dc:creator>Jhon James Mora</dc:creator>
			<dc:creator>Diana Yaneth Herrera</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics13030027</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2025-07-19</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2025-07-19</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>27</prism:startingPage>
		<prism:doi>10.3390/econometrics13030027</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/13/3/27</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/13/3/26">

	<title>Econometrics, Vol. 13, Pages 26: Daily Emissions of CO2 in the World: A Fractional Integration Approach</title>
	<link>https://www.mdpi.com/2225-1146/13/3/26</link>
	<description>In this article, daily CO2 emissions for the years 2019&amp;ndash;2022 are examined using fractional integration for Brazil, China, EU-27 (and the UK), India, and the USA. According to the findings, all series exhibit long memory mean-reversion tendencies, with orders of integration ranging between 0.22 in the case of India (with white noise errors) and 0.70 for Brazil (under autocorrelated disturbances). Nevertheless, the differencing parameter estimates are all considerably below 1, which supports the theory of mean reversion and transient shocks. These results suggest the need for a greater intensification of green policies complemented with economic structural reforms to achieve the zero-emissions target by 2050.</description>
	<pubDate>2025-07-17</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 13, Pages 26: Daily Emissions of CO2 in the World: A Fractional Integration Approach</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/13/3/26">doi: 10.3390/econometrics13030026</a></p>
	<p>Authors:
		Luis Alberiko Gil-Alana
		Carlos Poza
		</p>
	<p>In this article, daily CO2 emissions for the years 2019&amp;ndash;2022 are examined using fractional integration for Brazil, China, EU-27 (and the UK), India, and the USA. According to the findings, all series exhibit long memory mean-reversion tendencies, with orders of integration ranging between 0.22 in the case of India (with white noise errors) and 0.70 for Brazil (under autocorrelated disturbances). Nevertheless, the differencing parameter estimates are all considerably below 1, which supports the theory of mean reversion and transient shocks. These results suggest the need for a greater intensification of green policies complemented with economic structural reforms to achieve the zero-emissions target by 2050.</p>
	]]></content:encoded>

	<dc:title>Daily Emissions of CO2 in the World: A Fractional Integration Approach</dc:title>
			<dc:creator>Luis Alberiko Gil-Alana</dc:creator>
			<dc:creator>Carlos Poza</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics13030026</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2025-07-17</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2025-07-17</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>26</prism:startingPage>
		<prism:doi>10.3390/econometrics13030026</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/13/3/26</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/13/3/25">

	<title>Econometrics, Vol. 13, Pages 25: The Long-Run Impact of Changes in Prescription Drug Sales on Mortality and Hospital Utilization in Belgium, 1998&amp;ndash;2019</title>
	<link>https://www.mdpi.com/2225-1146/13/3/25</link>
	<description>Objectives: We investigate the long-run impact of changes in prescription drug sales on mortality and hospital utilization in Belgium during the first two decades of the 21st century. Methods: We analyze the correlation across diseases between changes in the drugs used to treat the disease and changes in mortality or hospital utilization from that disease. The measure of the change in prescription drug sales we use is the long-run (1998&amp;ndash;2018 or 2000&amp;ndash;2019) change in the fraction of post-1999 drugs sold. A post-1999 drug is a drug that was not sold during 1989&amp;ndash;1999. Results: The 1998&amp;ndash;2018 increase in the fraction of post-1999 drugs sold is estimated to have reduced the number of years of life lost before ages 85, 75, and 65 in 2018 by about 438 thousand (31%), 225 thousand (31%), and 114 thousand (32%), respectively. The 1995&amp;ndash;2014 increase in the fraction of post-1999 drugs sold is estimated to have reduced the number of hospital days in 2019 by 2.66 million (20%). Conclusions: Even if we ignore the reduction in hospital utilization attributable to changes in pharmaceutical consumption, a conservative estimate of the 2018 cost per life-year before age 85 gained is EUR 6824. We estimate that previous changes in pharmaceutical consumption reduced 2019 expenditure on inpatient curative and rehabilitative care by EUR 3.55 billion, which is higher than the 2018 expenditure on drugs that were authorized during the period 1998&amp;ndash;2018: EUR 2.99 billion.</description>
	<pubDate>2025-06-23</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 13, Pages 25: The Long-Run Impact of Changes in Prescription Drug Sales on Mortality and Hospital Utilization in Belgium, 1998&amp;ndash;2019</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/13/3/25">doi: 10.3390/econometrics13030025</a></p>
	<p>Authors:
		Frank R. Lichtenberg
		</p>
	<p>Objectives: We investigate the long-run impact of changes in prescription drug sales on mortality and hospital utilization in Belgium during the first two decades of the 21st century. Methods: We analyze the correlation across diseases between changes in the drugs used to treat the disease and changes in mortality or hospital utilization from that disease. The measure of the change in prescription drug sales we use is the long-run (1998&amp;ndash;2018 or 2000&amp;ndash;2019) change in the fraction of post-1999 drugs sold. A post-1999 drug is a drug that was not sold during 1989&amp;ndash;1999. Results: The 1998&amp;ndash;2018 increase in the fraction of post-1999 drugs sold is estimated to have reduced the number of years of life lost before ages 85, 75, and 65 in 2018 by about 438 thousand (31%), 225 thousand (31%), and 114 thousand (32%), respectively. The 1995&amp;ndash;2014 increase in the fraction of post-1999 drugs sold is estimated to have reduced the number of hospital days in 2019 by 2.66 million (20%). Conclusions: Even if we ignore the reduction in hospital utilization attributable to changes in pharmaceutical consumption, a conservative estimate of the 2018 cost per life-year before age 85 gained is EUR 6824. We estimate that previous changes in pharmaceutical consumption reduced 2019 expenditure on inpatient curative and rehabilitative care by EUR 3.55 billion, which is higher than the 2018 expenditure on drugs that were authorized during the period 1998&amp;ndash;2018: EUR 2.99 billion.</p>
	]]></content:encoded>

	<dc:title>The Long-Run Impact of Changes in Prescription Drug Sales on Mortality and Hospital Utilization in Belgium, 1998&amp;ndash;2019</dc:title>
			<dc:creator>Frank R. Lichtenberg</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics13030025</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2025-06-23</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2025-06-23</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>25</prism:startingPage>
		<prism:doi>10.3390/econometrics13030025</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/13/3/25</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/13/3/24">

	<title>Econometrics, Vol. 13, Pages 24: The Effect of Macroeconomic Announcements on U.S. Treasury Markets: An Autometric General-to-Specific Analysis of the Greenspan Era</title>
	<link>https://www.mdpi.com/2225-1146/13/3/24</link>
	<description>This research studies the impact of macroeconomic announcement surprises on daily U.S. Treasury excess returns during the heart of Alan Greenspan&amp;rsquo;s tenure as Federal Reserve Chair, addressing the possible limitations of standard static regression (SSR) models, which may suffer from omitted variable bias, parameter instability, and poor mis-specification diagnostics. To complement the SSR framework, an automated general-to-specific (Gets) modeling approach, enhanced with modern indicator saturation methods for robustness, is applied to improve empirical model discovery and mitigate potential biases. By progressively reducing an initially broad set of candidate variables, the Gets methodology steers the model toward congruence, dispenses unstable parameters, and seeks to limit information loss while seeking model congruence and precision. The findings, herein, suggest that U.S. Treasury market responses to macroeconomic news shocks exhibited stability for a core set of announcements that reliably influenced excess returns. In contrast to computationally costless standard static models, the automated Gets-based approach enhances parameter precision and provides a more adaptive structure for identifying relevant predictors. These results demonstrate the potential value of incorporating interpretable automated model selection techniques alongside traditional SSR and Markov switching approaches to improve empirical insights into macroeconomic announcement effects on financial markets.</description>
	<pubDate>2025-06-21</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 13, Pages 24: The Effect of Macroeconomic Announcements on U.S. Treasury Markets: An Autometric General-to-Specific Analysis of the Greenspan Era</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/13/3/24">doi: 10.3390/econometrics13030024</a></p>
	<p>Authors:
		James J. Forest
		</p>
	<p>This research studies the impact of macroeconomic announcement surprises on daily U.S. Treasury excess returns during the heart of Alan Greenspan&amp;rsquo;s tenure as Federal Reserve Chair, addressing the possible limitations of standard static regression (SSR) models, which may suffer from omitted variable bias, parameter instability, and poor mis-specification diagnostics. To complement the SSR framework, an automated general-to-specific (Gets) modeling approach, enhanced with modern indicator saturation methods for robustness, is applied to improve empirical model discovery and mitigate potential biases. By progressively reducing an initially broad set of candidate variables, the Gets methodology steers the model toward congruence, dispenses unstable parameters, and seeks to limit information loss while seeking model congruence and precision. The findings, herein, suggest that U.S. Treasury market responses to macroeconomic news shocks exhibited stability for a core set of announcements that reliably influenced excess returns. In contrast to computationally costless standard static models, the automated Gets-based approach enhances parameter precision and provides a more adaptive structure for identifying relevant predictors. These results demonstrate the potential value of incorporating interpretable automated model selection techniques alongside traditional SSR and Markov switching approaches to improve empirical insights into macroeconomic announcement effects on financial markets.</p>
	]]></content:encoded>

	<dc:title>The Effect of Macroeconomic Announcements on U.S. Treasury Markets: An Autometric General-to-Specific Analysis of the Greenspan Era</dc:title>
			<dc:creator>James J. Forest</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics13030024</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2025-06-21</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2025-06-21</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>24</prism:startingPage>
		<prism:doi>10.3390/econometrics13030024</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/13/3/24</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/13/2/23">

	<title>Econometrics, Vol. 13, Pages 23: Leveraging Success: The Hidden Peak in Debt and Firm Performance</title>
	<link>https://www.mdpi.com/2225-1146/13/2/23</link>
	<description>This study investigates the relationship between capital structure and financial performance in South African firms, focusing on the potential non-linear, inverse U-shaped effect of leverage on profitability. Drawing on data from 1548 firm-year observations covering 183 publicly listed South African companies between 2013 and 2022, the analysis employs both Fixed Effects (FE) and System Generalized Method of Moments (System-GMM) models to address endogeneity and capture dynamic adjustments. The findings indicate that moderate levels of debt enhance profitability, but excessive leverage leads to diminishing returns, confirming an inverse U-shaped relationship. System-GMM results further reveal the persistence of past profitability and validate the dynamic nature of capital structure decisions. Larger firms appear more capable of sustaining higher leverage without adverse effects, while smaller firms benefit from maintaining lower debt levels. The study concludes that strategic debt management, tailored to firm size and economic context, is critical for optimizing financial performance in emerging markets like South Africa. The study identifies the optimal leverage ratio for South African firms and shows how firm size moderates the relationship between debt and profitability, offering tailored insights for firms of different sizes. These insights offer valuable guidance for managers, investors, and policymakers aiming to strengthen financial stability and efficiency through informed capital structure choices.</description>
	<pubDate>2025-06-10</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 13, Pages 23: Leveraging Success: The Hidden Peak in Debt and Firm Performance</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/13/2/23">doi: 10.3390/econometrics13020023</a></p>
	<p>Authors:
		Suzan Dsouza
		Krishnamoorthy Kathavarayan
		Franklin Mathias
		Dharmesh Bhatia
		Abdallah AlKhawaja
		</p>
	<p>This study investigates the relationship between capital structure and financial performance in South African firms, focusing on the potential non-linear, inverse U-shaped effect of leverage on profitability. Drawing on data from 1548 firm-year observations covering 183 publicly listed South African companies between 2013 and 2022, the analysis employs both Fixed Effects (FE) and System Generalized Method of Moments (System-GMM) models to address endogeneity and capture dynamic adjustments. The findings indicate that moderate levels of debt enhance profitability, but excessive leverage leads to diminishing returns, confirming an inverse U-shaped relationship. System-GMM results further reveal the persistence of past profitability and validate the dynamic nature of capital structure decisions. Larger firms appear more capable of sustaining higher leverage without adverse effects, while smaller firms benefit from maintaining lower debt levels. The study concludes that strategic debt management, tailored to firm size and economic context, is critical for optimizing financial performance in emerging markets like South Africa. The study identifies the optimal leverage ratio for South African firms and shows how firm size moderates the relationship between debt and profitability, offering tailored insights for firms of different sizes. These insights offer valuable guidance for managers, investors, and policymakers aiming to strengthen financial stability and efficiency through informed capital structure choices.</p>
	]]></content:encoded>

	<dc:title>Leveraging Success: The Hidden Peak in Debt and Firm Performance</dc:title>
			<dc:creator>Suzan Dsouza</dc:creator>
			<dc:creator>Krishnamoorthy Kathavarayan</dc:creator>
			<dc:creator>Franklin Mathias</dc:creator>
			<dc:creator>Dharmesh Bhatia</dc:creator>
			<dc:creator>Abdallah AlKhawaja</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics13020023</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2025-06-10</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2025-06-10</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>23</prism:startingPage>
		<prism:doi>10.3390/econometrics13020023</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/13/2/23</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/13/2/22">

	<title>Econometrics, Vol. 13, Pages 22: Dependent and Independent Time Series Errors Under Elliptically Countered Models</title>
	<link>https://www.mdpi.com/2225-1146/13/2/22</link>
	<description>We explore the impact of time series behavior on model errors when working under an elliptically contoured distribution. By adopting a time series approach aligned with the realistic dependence between errors under such distributions, this perspective shifts the focus from increasingly complex and challenging correlation analyses to volatility modeling that utilizes a novel likelihood framework based on dependent probabilistic samples. With the introduction of a modified Bayesian Information Criterion, which incorporates a ranking of degrees of evidence of significant differences between the compared models, the critical issue of model selection is reinforced, clarifying the relationships among the most common information criteria and revealing limited relevance among the models based on independent probabilistic samples, when tested on a well-established database. Our approach challenges the traditional hierarchical models commonly used in time series analysis, which assume independent errors. The application of rigorous differentiation criteria under this novel perspective on likelihood, based on dependent probabilistic samples, provides a new viewpoint on likelihood that arises naturally in the context of finance, adding a novel result. We provide new results for criterion selection, evidence invariance, and transitions between volatility models and heuristic methods to calibrate nested or non-nested models via convergence properties in a distribution.</description>
	<pubDate>2025-05-21</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 13, Pages 22: Dependent and Independent Time Series Errors Under Elliptically Countered Models</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/13/2/22">doi: 10.3390/econometrics13020022</a></p>
	<p>Authors:
		Fredy O. Pérez-Ramirez
		Francisco J. Caro-Lopera
		José A. Díaz-García
		Graciela González Farías
		</p>
	<p>We explore the impact of time series behavior on model errors when working under an elliptically contoured distribution. By adopting a time series approach aligned with the realistic dependence between errors under such distributions, this perspective shifts the focus from increasingly complex and challenging correlation analyses to volatility modeling that utilizes a novel likelihood framework based on dependent probabilistic samples. With the introduction of a modified Bayesian Information Criterion, which incorporates a ranking of degrees of evidence of significant differences between the compared models, the critical issue of model selection is reinforced, clarifying the relationships among the most common information criteria and revealing limited relevance among the models based on independent probabilistic samples, when tested on a well-established database. Our approach challenges the traditional hierarchical models commonly used in time series analysis, which assume independent errors. The application of rigorous differentiation criteria under this novel perspective on likelihood, based on dependent probabilistic samples, provides a new viewpoint on likelihood that arises naturally in the context of finance, adding a novel result. We provide new results for criterion selection, evidence invariance, and transitions between volatility models and heuristic methods to calibrate nested or non-nested models via convergence properties in a distribution.</p>
	]]></content:encoded>

	<dc:title>Dependent and Independent Time Series Errors Under Elliptically Countered Models</dc:title>
			<dc:creator>Fredy O. Pérez-Ramirez</dc:creator>
			<dc:creator>Francisco J. Caro-Lopera</dc:creator>
			<dc:creator>José A. Díaz-García</dc:creator>
			<dc:creator>Graciela González Farías</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics13020022</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2025-05-21</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2025-05-21</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>22</prism:startingPage>
		<prism:doi>10.3390/econometrics13020022</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/13/2/22</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/13/2/21">

	<title>Econometrics, Vol. 13, Pages 21: Decomposing the Household Herding Behavior in Stock Investment: The Case of China</title>
	<link>https://www.mdpi.com/2225-1146/13/2/21</link>
	<description>Financial studies on the herding effect have been very popular for decades, as detecting herding behavior helps to explain price deviations and market inefficiencies. However, studying the herding effect as a single influencing factor is believed to be insufficient to explain the changes in investment behavior, as the herding effect itself may be caused by other influencing factors. In other words, the issue must be studied alongside other factors. In this study, we adopt the quantile regression model to comprehensively understand the herding effect&amp;rsquo;s influence on household investment in China, and the empirical results indicate that herding behavior leads to different investment outcomes for households in different scenarios. In this analysis, we consider a variety of household characteristics, such as income level and risk tolerance, to provide a nuanced understanding of investment behavior. Additionally, in this study, we explore the interaction between herding behavior and macroeconomic variables. Nevertheless, the results suggest that, if herding behavior can be reduced by the head of the household, profitability can be increased, or at the very least, losses can be reduced.</description>
	<pubDate>2025-05-12</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 13, Pages 21: Decomposing the Household Herding Behavior in Stock Investment: The Case of China</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/13/2/21">doi: 10.3390/econometrics13020021</a></p>
	<p>Authors:
		Yung-Ching Tseng
		I.-Fan Hsiao
		Guo-Chen Wang
		</p>
	<p>Financial studies on the herding effect have been very popular for decades, as detecting herding behavior helps to explain price deviations and market inefficiencies. However, studying the herding effect as a single influencing factor is believed to be insufficient to explain the changes in investment behavior, as the herding effect itself may be caused by other influencing factors. In other words, the issue must be studied alongside other factors. In this study, we adopt the quantile regression model to comprehensively understand the herding effect&amp;rsquo;s influence on household investment in China, and the empirical results indicate that herding behavior leads to different investment outcomes for households in different scenarios. In this analysis, we consider a variety of household characteristics, such as income level and risk tolerance, to provide a nuanced understanding of investment behavior. Additionally, in this study, we explore the interaction between herding behavior and macroeconomic variables. Nevertheless, the results suggest that, if herding behavior can be reduced by the head of the household, profitability can be increased, or at the very least, losses can be reduced.</p>
	]]></content:encoded>

	<dc:title>Decomposing the Household Herding Behavior in Stock Investment: The Case of China</dc:title>
			<dc:creator>Yung-Ching Tseng</dc:creator>
			<dc:creator>I.-Fan Hsiao</dc:creator>
			<dc:creator>Guo-Chen Wang</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics13020021</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2025-05-12</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2025-05-12</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>21</prism:startingPage>
		<prism:doi>10.3390/econometrics13020021</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/13/2/21</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/13/2/20">

	<title>Econometrics, Vol. 13, Pages 20: Government Subsidies and Industrial Productivity in South Africa: A Focus on the Channels</title>
	<link>https://www.mdpi.com/2225-1146/13/2/20</link>
	<description>This article estimates the impact of government subsidies on productivity growth in South Africa, joining the ongoing debate among economists regarding the effectiveness of subsidies as a driver of industrial productivity. While some argue that subsidies address market failures, facilitate R&amp;amp;D, and improve efficiency, others criticise the attendant dependence, which reduces the incentive for industries to operate efficiently. This article contributes by examining the specific channels&amp;mdash;efficiency and technical changes&amp;mdash;through which subsidies affect productivity in South Africa. The analysis is based on a panel dataset comprising 64 three-digit industries observed between 1993 and 2023. Estimation is performed through an endogeneity robust panel stochastic frontier model, which treats subsidies as both an inefficiency driver and a technology variable. An additional estimation approach is proposed integrating the true fixed effects with a control function in a bid to account for both unobserved heterogeneity and idiosyncratic endogeneity. The results show that subsidies are detrimental to productivity, particularly through stifling technological progress. This result supports the view that subsidies reduce the incentive for beneficiaries to innovate. This evidence calls for a reevaluation and a possible restructuring of subsidy programmes in South Africa in a bid to mitigate their adverse effects on industrial productivity.</description>
	<pubDate>2025-05-01</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 13, Pages 20: Government Subsidies and Industrial Productivity in South Africa: A Focus on the Channels</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/13/2/20">doi: 10.3390/econometrics13020020</a></p>
	<p>Authors:
		Brian Tavonga Mazorodze
		</p>
	<p>This article estimates the impact of government subsidies on productivity growth in South Africa, joining the ongoing debate among economists regarding the effectiveness of subsidies as a driver of industrial productivity. While some argue that subsidies address market failures, facilitate R&amp;amp;D, and improve efficiency, others criticise the attendant dependence, which reduces the incentive for industries to operate efficiently. This article contributes by examining the specific channels&amp;mdash;efficiency and technical changes&amp;mdash;through which subsidies affect productivity in South Africa. The analysis is based on a panel dataset comprising 64 three-digit industries observed between 1993 and 2023. Estimation is performed through an endogeneity robust panel stochastic frontier model, which treats subsidies as both an inefficiency driver and a technology variable. An additional estimation approach is proposed integrating the true fixed effects with a control function in a bid to account for both unobserved heterogeneity and idiosyncratic endogeneity. The results show that subsidies are detrimental to productivity, particularly through stifling technological progress. This result supports the view that subsidies reduce the incentive for beneficiaries to innovate. This evidence calls for a reevaluation and a possible restructuring of subsidy programmes in South Africa in a bid to mitigate their adverse effects on industrial productivity.</p>
	]]></content:encoded>

	<dc:title>Government Subsidies and Industrial Productivity in South Africa: A Focus on the Channels</dc:title>
			<dc:creator>Brian Tavonga Mazorodze</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics13020020</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2025-05-01</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2025-05-01</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>20</prism:startingPage>
		<prism:doi>10.3390/econometrics13020020</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/13/2/20</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/13/2/19">

	<title>Econometrics, Vol. 13, Pages 19: Generalized Recentered Influence Function Regressions</title>
	<link>https://www.mdpi.com/2225-1146/13/2/19</link>
	<description>This paper suggests a generalization of covariate shifts to study distributional impacts on inequality and distributional measures. It builds on the recentered influence function (RIF) regression method, originally designed for location shifts in covariates, and extends it to general policy interventions, such as location&amp;ndash;scale or asymmetric interventions. Numerical simulations for the Gini, Theil, and Atkinson indexes demonstrate strong performance across a myriad of cases and distributional measures. An empirical application examining changes in Mincerian equations is presented to illustrate the method.</description>
	<pubDate>2025-04-18</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 13, Pages 19: Generalized Recentered Influence Function Regressions</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/13/2/19">doi: 10.3390/econometrics13020019</a></p>
	<p>Authors:
		Javier Alejo
		Antonio Galvao
		Julián Martínez-Iriarte
		Gabriel Montes-Rojas
		</p>
	<p>This paper suggests a generalization of covariate shifts to study distributional impacts on inequality and distributional measures. It builds on the recentered influence function (RIF) regression method, originally designed for location shifts in covariates, and extends it to general policy interventions, such as location&ndash;scale or asymmetric interventions. Numerical simulations for the Gini, Theil, and Atkinson indexes demonstrate strong performance across a myriad of cases and distributional measures. An empirical application examining changes in Mincerian equations is presented to illustrate the method.</p>
	]]></content:encoded>

	<dc:title>Generalized Recentered Influence Function Regressions</dc:title>
			<dc:creator>Javier Alejo</dc:creator>
			<dc:creator>Antonio Galvao</dc:creator>
			<dc:creator>Julián Martínez-Iriarte</dc:creator>
			<dc:creator>Gabriel Montes-Rojas</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics13020019</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2025-04-18</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2025-04-18</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>19</prism:startingPage>
		<prism:doi>10.3390/econometrics13020019</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/13/2/19</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/13/2/18">

	<title>Econometrics, Vol. 13, Pages 18: Is VIX a Contrarian Indicator? On the Positivity of the Conditional Sharpe Ratio &amp;dagger;</title>
	<link>https://www.mdpi.com/2225-1146/13/2/18</link>
	<description>The notion of compensation for systematic risk is well ingrained in finance and constitutes the basis for numerous empirical tests. The concept an increase in systematic risk is accompanied by an increase in the required risk premium has strong intuitive content: The more risk there is to be borne, the greater the compensation therefor. In recognizing previous research on the ex ante and ex post reward to risk, the thrust of this paper is to augment those previous tests of expected and realized returns by providing several distinct empirical tests of the proposition the market rewards the undertaking of systematic equity risk, the latter as measured by the VIX volatility index. Thus, in this paper&amp;rsquo;s empirical section, we use several empirical approaches to answer the question, Using realized returns, is an increase in systematic risk VIX accompanied by an increase in the equity risk premium? While the empirical results are not always statistically significant, our answer is in the affirmative.</description>
	<pubDate>2025-04-14</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 13, Pages 18: Is VIX a Contrarian Indicator? On the Positivity of the Conditional Sharpe Ratio &dagger;</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/13/2/18">doi: 10.3390/econometrics13020018</a></p>
	<p>Authors:
		Ehud I. Ronn
		Liying Xu
		</p>
	<p>The notion of compensation for systematic risk is well ingrained in finance and constitutes the basis for numerous empirical tests. The concept an increase in systematic risk is accompanied by an increase in the required risk premium has strong intuitive content: The more risk there is to be borne, the greater the compensation therefor. In recognizing previous research on the ex ante and ex post reward to risk, the thrust of this paper is to augment those previous tests of expected and realized returns by providing several distinct empirical tests of the proposition the market rewards the undertaking of systematic equity risk, the latter as measured by the VIX volatility index. Thus, in this paper&rsquo;s empirical section, we use several empirical approaches to answer the question, Using realized returns, is an increase in systematic risk VIX accompanied by an increase in the equity risk premium? While the empirical results are not always statistically significant, our answer is in the affirmative.</p>
	]]></content:encoded>

	<dc:title>Is VIX a Contrarian Indicator? On the Positivity of the Conditional Sharpe Ratio &amp;dagger;</dc:title>
			<dc:creator>Ehud I. Ronn</dc:creator>
			<dc:creator>Liying Xu</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics13020018</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2025-04-14</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2025-04-14</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>18</prism:startingPage>
		<prism:doi>10.3390/econometrics13020018</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/13/2/18</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/13/2/17">

	<title>Econometrics, Vol. 13, Pages 17: Forecasting Asset Returns Using Nelson&amp;ndash;Siegel Factors Estimated from the US Yield Curve</title>
	<link>https://www.mdpi.com/2225-1146/13/2/17</link>
	<description>This paper explores the hypothesis that the returns of asset classes can be predicted using common, systematic risk factors represented by the level, slope, and curvature of the US interest rate term structure. These are extracted using the Nelson&amp;ndash;Siegel model, which effectively captures the three dimensions of the yield curve. To forecast the factors, we applied autoregressive (AR) and vector autoregressive (VAR) models. Using their forecasts, we predict the returns of government and corporate bonds, equities, REITs, and commodity futures. Our predictions were compared against two benchmarks: the historical mean, and an AR(1) model based on past returns. We employed the Diebold&amp;ndash;Mariano test and the Model Confidence Set procedure to assess the comparative forecast accuracy. We found that Nelson&amp;ndash;Siegel factors had significant predictive power for one-month-ahead returns of bonds, equities, and REITs, but not for commodity futures. However, for 6-month and 12-month-ahead forecasts, neither the AR(1) nor VAR(1) models based on Nelson&amp;ndash;Siegel factors outperformed the benchmarks. These results suggest that the Nelson&amp;ndash;Siegel factors affect the aggregate stochastic discount factor for pricing all assets traded in the US economy.</description>
	<pubDate>2025-04-11</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 13, Pages 17: Forecasting Asset Returns Using Nelson&ndash;Siegel Factors Estimated from the US Yield Curve</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/13/2/17">doi: 10.3390/econometrics13020017</a></p>
	<p>Authors:
		Massimo Guidolin
		Serena Ionta
		</p>
	<p>This paper explores the hypothesis that the returns of asset classes can be predicted using common, systematic risk factors represented by the level, slope, and curvature of the US interest rate term structure. These are extracted using the Nelson&ndash;Siegel model, which effectively captures the three dimensions of the yield curve. To forecast the factors, we applied autoregressive (AR) and vector autoregressive (VAR) models. Using their forecasts, we predict the returns of government and corporate bonds, equities, REITs, and commodity futures. Our predictions were compared against two benchmarks: the historical mean, and an AR(1) model based on past returns. We employed the Diebold&ndash;Mariano test and the Model Confidence Set procedure to assess the comparative forecast accuracy. We found that Nelson&ndash;Siegel factors had significant predictive power for one-month-ahead returns of bonds, equities, and REITs, but not for commodity futures. However, for 6-month and 12-month-ahead forecasts, neither the AR(1) nor VAR(1) models based on Nelson&ndash;Siegel factors outperformed the benchmarks. These results suggest that the Nelson&ndash;Siegel factors affect the aggregate stochastic discount factor for pricing all assets traded in the US economy.</p>
	]]></content:encoded>

	<dc:title>Forecasting Asset Returns Using Nelson&amp;ndash;Siegel Factors Estimated from the US Yield Curve</dc:title>
			<dc:creator>Massimo Guidolin</dc:creator>
			<dc:creator>Serena Ionta</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics13020017</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2025-04-11</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2025-04-11</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>17</prism:startingPage>
		<prism:doi>10.3390/econometrics13020017</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/13/2/17</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/13/2/16">

	<title>Econometrics, Vol. 13, Pages 16: A Meta-Analysis of Determinants of Success and Failure of Economic Sanctions</title>
	<link>https://www.mdpi.com/2225-1146/13/2/16</link>
	<description>Political scientists and economists often assert that they understand how economic sanctions function as a foreign policy tool and claim to have backed their theories with compelling statistical evidence. The research puzzle that this article addresses is the observation that despite almost four decades of empirical research on economic sanctions, there is still no consensus on the direction and magnitude of the key variables that theoretically determine the success of economic sanctions. To address part of this research puzzle, we conducted a meta-analysis of 37 studies published between 1985 and 2018, focusing on three key determinants of sanction success: trade linkage, prior relations, and duration. Our analysis examines the factors contributing to the variation in findings reported by these primary studies. By constructing up to 27 moderator variables that capture the contexts in which researchers derive their estimates, we found that the differences across studies are primarily influenced by the data used, the variables controlled for in estimation methods, publication quality, and author characteristics. Our results reveal highly significant effects, indicating that sanctions are more likely to succeed when there is strong pre-sanction trade, when sanctions are implemented swiftly, and when they involve countries with better pre-sanction relationships. In our robustness checks, we consistently confirmed these core findings across different estimation techniques.</description>
	<pubDate>2025-04-09</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 13, Pages 16: A Meta-Analysis of Determinants of Success and Failure of Economic Sanctions</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/13/2/16">doi: 10.3390/econometrics13020016</a></p>
	<p>Authors:
		Binyam Afewerk Demena
		Peter A. G. van Bergeijk
		</p>
	<p>Political scientists and economists often assert that they understand how economic sanctions function as a foreign policy tool and claim to have backed their theories with compelling statistical evidence. The research puzzle that this article addresses is the observation that despite almost four decades of empirical research on economic sanctions, there is still no consensus on the direction and magnitude of the key variables that theoretically determine the success of economic sanctions. To address part of this research puzzle, we conducted a meta-analysis of 37 studies published between 1985 and 2018, focusing on three key determinants of sanction success: trade linkage, prior relations, and duration. Our analysis examines the factors contributing to the variation in findings reported by these primary studies. By constructing up to 27 moderator variables that capture the contexts in which researchers derive their estimates, we found that the differences across studies are primarily influenced by the data used, the variables controlled for in estimation methods, publication quality, and author characteristics. Our results reveal highly significant effects, indicating that sanctions are more likely to succeed when there is strong pre-sanction trade, when sanctions are implemented swiftly, and when they involve countries with better pre-sanction relationships. In our robustness checks, we consistently confirmed these core findings across different estimation techniques.</p>
	]]></content:encoded>

	<dc:title>A Meta-Analysis of Determinants of Success and Failure of Economic Sanctions</dc:title>
			<dc:creator>Binyam Afewerk Demena</dc:creator>
			<dc:creator>Peter A. G. van Bergeijk</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics13020016</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2025-04-09</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2025-04-09</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>16</prism:startingPage>
		<prism:doi>10.3390/econometrics13020016</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/13/2/16</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/13/2/15">

	<title>Econometrics, Vol. 13, Pages 15: Inference of Impulse Responses via Bayesian Graphical Structural VAR Models</title>
	<link>https://www.mdpi.com/2225-1146/13/2/15</link>
	<description>Impulse response functions (IRFs) are crucial for analyzing the dynamic interactions of macroeconomic variables in vector autoregressive (VAR) models. However, traditional IRF estimation methods often have limitations with assumptions on variable ordering and restrictive identification constraints. This paper applies the Bayesian graphical structural vector autoregressive (BGSVAR) model, which integrates structural learning to capture both temporal and contemporaneous dependencies for more accurate impulse response estimation. The BGSVAR framework provides a more efficient and interpretable method for estimating IRFs, which can enhance both forecasting performance and structural inferences in economic modelling. Through extensive simulations across various data-generating processes, we evaluate BGSVAR&amp;rsquo;s effectiveness in modelling dynamic interactions among US macroeconomic variables. Our results demonstrate that BGSVAR outperforms traditional methods, such as LASSO and Bayesian VAR (BVAR), by delivering more precise impulse response estimates and better capturing the structural dynamics of VAR-based models.</description>
	<pubDate>2025-04-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 13, Pages 15: Inference of Impulse Responses via Bayesian Graphical Structural VAR Models</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/13/2/15">doi: 10.3390/econometrics13020015</a></p>
	<p>Authors:
		Daniel Felix Ahelegbey
		</p>
	<p>Impulse response functions (IRFs) are crucial for analyzing the dynamic interactions of macroeconomic variables in vector autoregressive (VAR) models. However, traditional IRF estimation methods often have limitations with assumptions on variable ordering and restrictive identification constraints. This paper applies the Bayesian graphical structural vector autoregressive (BGSVAR) model, which integrates structural learning to capture both temporal and contemporaneous dependencies for more accurate impulse response estimation. The BGSVAR framework provides a more efficient and interpretable method for estimating IRFs, which can enhance both forecasting performance and structural inferences in economic modelling. Through extensive simulations across various data-generating processes, we evaluate BGSVAR&rsquo;s effectiveness in modelling dynamic interactions among US macroeconomic variables. Our results demonstrate that BGSVAR outperforms traditional methods, such as LASSO and Bayesian VAR (BVAR), by delivering more precise impulse response estimates and better capturing the structural dynamics of VAR-based models.</p>
	]]></content:encoded>

	<dc:title>Inference of Impulse Responses via Bayesian Graphical Structural VAR Models</dc:title>
			<dc:creator>Daniel Felix Ahelegbey</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics13020015</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2025-04-02</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2025-04-02</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>15</prism:startingPage>
		<prism:doi>10.3390/econometrics13020015</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/13/2/15</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/13/2/14">

	<title>Econometrics, Vol. 13, Pages 14: Modeling and Forecasting Time-Series Data with Multiple Seasonal Periods Using Periodograms</title>
	<link>https://www.mdpi.com/2225-1146/13/2/14</link>
	<description>Applications of high-frequency data, including energy management, economics, and finance, frequently require time-series forecasting characterized by complex seasonality. Recognizing prevailing seasonal trends continues to be difficult, given that the majority of solutions depend on basic decomposition techniques. This study introduces a new approach employing periodograms from spectral density analysis to identify predominant seasonal periods. When analyzing hourly electricity consumption data from Brazil, we identified three significant seasonal patterns: sub-daily (6 h), half-daily (12 h), and daily (24 h). We assessed the predictive efficacy of the BATS, TBATS, and STL + ETS models using these seasonal periods. We performed data analysis and model fitting in R 4.4.1 and used accuracy metrics like MAE, MAPE, and others to compare the models. The STL + ETS model exhibited an enhanced performance, surpassing both BATS and TBATS in energy forecasting. These findings improve our understanding of multiple seasonal patterns, assist us in selecting dominating periods, provide new practical forecasting approaches for time-series analysis, and inform professionals seeking superior forecasting solutions in various fields.</description>
	<pubDate>2025-03-28</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 13, Pages 14: Modeling and Forecasting Time-Series Data with Multiple Seasonal Periods Using Periodograms</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/13/2/14">doi: 10.3390/econometrics13020014</a></p>
	<p>Authors:
		Solomon Buke Chudo
		Gyorgy Terdik
		</p>
	<p>Applications of high-frequency data, including energy management, economics, and finance, frequently require time-series forecasting characterized by complex seasonality. Recognizing prevailing seasonal trends continues to be difficult, given that the majority of solutions depend on basic decomposition techniques. This study introduces a new approach employing periodograms from spectral density analysis to identify predominant seasonal periods. When analyzing hourly electricity consumption data from Brazil, we identified three significant seasonal patterns: sub-daily (6 h), half-daily (12 h), and daily (24 h). We assessed the predictive efficacy of the BATS, TBATS, and STL + ETS models using these seasonal periods. We performed data analysis and model fitting in R 4.4.1 and used accuracy metrics like MAE, MAPE, and others to compare the models. The STL + ETS model exhibited an enhanced performance, surpassing both BATS and TBATS in energy forecasting. These findings improve our understanding of multiple seasonal patterns, assist us in selecting dominating periods, provide new practical forecasting approaches for time-series analysis, and inform professionals seeking superior forecasting solutions in various fields.</p>
	]]></content:encoded>

	<dc:title>Modeling and Forecasting Time-Series Data with Multiple Seasonal Periods Using Periodograms</dc:title>
			<dc:creator>Solomon Buke Chudo</dc:creator>
			<dc:creator>Gyorgy Terdik</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics13020014</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2025-03-28</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2025-03-28</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>14</prism:startingPage>
		<prism:doi>10.3390/econometrics13020014</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/13/2/14</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/13/2/13">

	<title>Econometrics, Vol. 13, Pages 13: Explosive Episodes and Time-Varying Volatility: A New MARMA&amp;ndash;GARCH Model Applied to Cryptocurrencies</title>
	<link>https://www.mdpi.com/2225-1146/13/2/13</link>
	<description>Financial assets often exhibit explosive price surges followed by abrupt collapses, alongside persistent volatility clustering. Motivated by these features, we introduce a mixed causal&amp;ndash;noncausal invertible&amp;ndash;noninvertible autoregressive moving average generalized autoregressive conditional heteroskedasticity (MARMA&amp;ndash;GARCH) model. Unlike standard ARMA processes, our model admits roots inside the unit disk, capturing bubble-like episodes and speculative feedback, while the GARCH component explains time-varying volatility. We propose two estimation approaches: (i) Whittle-based frequency-domain methods, which are asymptotically equivalent to Gaussian likelihood under stationarity and finite variance, and (ii) time-domain maximum likelihood, which proves to be more robust to heavy tails and skewness&amp;mdash;common in financial returns. To identify causal vs. noncausal structures, we develop a higher-order diagnostics procedure using spectral densities and residual-based tests. Simulation results reveal that overlooking noncausality biases GARCH parameters, downplaying short-run volatility reactions to news (&amp;alpha;) while overstating volatility persistence (&amp;beta;). Our empirical application to Bitcoin and Ethereum enhances these insights: we find significant noncausal dynamics in the mean, paired with pronounced GARCH effects in the variance. Imposing a purely causal ARMA specification leads to systematically misspecified volatility estimates, potentially underestimating market risks. Our results emphasize the importance of relaxing the usual causality and invertibility assumption for assets prone to extreme price movements, ultimately improving risk metrics and expanding our understanding of financial market dynamics.</description>
	<pubDate>2025-03-24</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 13, Pages 13: Explosive Episodes and Time-Varying Volatility: A New MARMA&ndash;GARCH Model Applied to Cryptocurrencies</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/13/2/13">doi: 10.3390/econometrics13020013</a></p>
	<p>Authors:
		Alain Hecq
		Daniel Velasquez-Gaviria
		</p>
	<p>Financial assets often exhibit explosive price surges followed by abrupt collapses, alongside persistent volatility clustering. Motivated by these features, we introduce a mixed causal&ndash;noncausal invertible&ndash;noninvertible autoregressive moving average generalized autoregressive conditional heteroskedasticity (MARMA&ndash;GARCH) model. Unlike standard ARMA processes, our model admits roots inside the unit disk, capturing bubble-like episodes and speculative feedback, while the GARCH component explains time-varying volatility. We propose two estimation approaches: (i) Whittle-based frequency-domain methods, which are asymptotically equivalent to Gaussian likelihood under stationarity and finite variance, and (ii) time-domain maximum likelihood, which proves to be more robust to heavy tails and skewness&mdash;common in financial returns. To identify causal vs. noncausal structures, we develop a higher-order diagnostics procedure using spectral densities and residual-based tests. Simulation results reveal that overlooking noncausality biases GARCH parameters, downplaying short-run volatility reactions to news (&alpha;) while overstating volatility persistence (&beta;). Our empirical application to Bitcoin and Ethereum enhances these insights: we find significant noncausal dynamics in the mean, paired with pronounced GARCH effects in the variance. Imposing a purely causal ARMA specification leads to systematically misspecified volatility estimates, potentially underestimating market risks. Our results emphasize the importance of relaxing the usual causality and invertibility assumption for assets prone to extreme price movements, ultimately improving risk metrics and expanding our understanding of financial market dynamics.</p>
	]]></content:encoded>

	<dc:title>Explosive Episodes and Time-Varying Volatility: A New MARMA&amp;ndash;GARCH Model Applied to Cryptocurrencies</dc:title>
			<dc:creator>Alain Hecq</dc:creator>
			<dc:creator>Daniel Velasquez-Gaviria</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics13020013</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2025-03-24</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2025-03-24</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>13</prism:startingPage>
		<prism:doi>10.3390/econometrics13020013</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/13/2/13</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/13/1/12">

	<title>Econometrics, Vol. 13, Pages 12: Dynamic Interaction Between Microfinance and Household Well-Being: Evidence from the Microcredit Progressive Model for Sustainable Development</title>
	<link>https://www.mdpi.com/2225-1146/13/1/12</link>
	<description>Microfinance aims to promote financial inclusion among underprivileged individuals, particularly through progressive microcredit, which enables borrowers to access increasing loan amounts over time. This study examines the conditions under which progressive microcredit positively impacts both small business performance and household well-being, considering borrower characteristics and business activity conditions. Using a dataset of 278 households across 110 administrative sectors in Tunisia from 2012 to 2020, this study employs two-stage least squares (2SLS) and three-stage least squares (3SLS) econometric techniques to estimate simultaneous equation models. The findings reveal that the cumulative amount of progressive microcredit received is mainly determined by project capital, suggesting that businesses with higher capital requirements tend to secure larger loans over successive cycles. Household well-being is significantly influenced by progressive microcredit, household income, net business benefit, rate of development index, and homeownership. Meanwhile, business profitability is driven by project capital and total fixed assets, highlighting the long-term impact of microcredit. The results highlight the critical role of microfinance in enabling small-scale entrepreneurs to expand their businesses while simultaneously improving household financial security. By promoting sustainable income generation, progressive microcredit serves as a key instrument in poverty alleviation and economic stability. This study underscores the necessity for microfinance institutions (MFIs) to tailor their lending strategies, ensuring optimal loan progression that balances business expansion with financial sustainability. Additionally, policymakers should refine microcredit frameworks to enhance accessibility and long-term economic benefits for low-income borrowers. 
Overall, these insights contribute to the broader discourse on financial inclusion and sustainable development, emphasizing that progressive microcredit not only facilitates entrepreneurship, but also serves as a driver of socioeconomic mobility.</description>
	<pubDate>2025-03-06</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 13, Pages 12: Dynamic Interaction Between Microfinance and Household Well-Being: Evidence from the Microcredit Progressive Model for Sustainable Development</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/13/1/12">doi: 10.3390/econometrics13010012</a></p>
	<p>Authors:
		Ahmad Alqatan
		Najoua Talbi
		Hasan Behbehani
		Samira Ben Belgacem
		Muhammad Arslan
		Wafaa Sbeiti
		</p>
	<p>Microfinance aims to promote financial inclusion among underprivileged individuals, particularly through progressive microcredit, which enables borrowers to access increasing loan amounts over time. This study examines the conditions under which progressive microcredit positively impacts both small business performance and household well-being, considering borrower characteristics and business activity conditions. Using a dataset of 278 households across 110 administrative sectors in Tunisia from 2012 to 2020, this study employs two-stage least squares (2SLS) and three-stage least squares (3SLS) econometric techniques to estimate simultaneous equation models. The findings reveal that the cumulative amount of progressive microcredit received is mainly determined by project capital, suggesting that businesses with higher capital requirements tend to secure larger loans over successive cycles. Household well-being is significantly influenced by progressive microcredit, household income, net business benefit, rate of development index, and homeownership. Meanwhile, business profitability is driven by project capital and total fixed assets, highlighting the long-term impact of microcredit. The results highlight the critical role of microfinance in enabling small-scale entrepreneurs to expand their businesses while simultaneously improving household financial security. By promoting sustainable income generation, progressive microcredit serves as a key instrument in poverty alleviation and economic stability. This study underscores the necessity for microfinance institutions (MFIs) to tailor their lending strategies, ensuring optimal loan progression that balances business expansion with financial sustainability. Additionally, policymakers should refine microcredit frameworks to enhance accessibility and long-term economic benefits for low-income borrowers. 
Overall, these insights contribute to the broader discourse on financial inclusion and sustainable development, emphasizing that progressive microcredit not only facilitates entrepreneurship, but also serves as a driver of socioeconomic mobility.</p>
	]]></content:encoded>

	<dc:title>Dynamic Interaction Between Microfinance and Household Well-Being: Evidence from the Microcredit Progressive Model for Sustainable Development</dc:title>
			<dc:creator>Ahmad Alqatan</dc:creator>
			<dc:creator>Najoua Talbi</dc:creator>
			<dc:creator>Hasan Behbehani</dc:creator>
			<dc:creator>Samira Ben Belgacem</dc:creator>
			<dc:creator>Muhammad Arslan</dc:creator>
			<dc:creator>Wafaa Sbeiti</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics13010012</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2025-03-06</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2025-03-06</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>12</prism:startingPage>
		<prism:doi>10.3390/econometrics13010012</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/13/1/12</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/13/1/11">

	<title>Econometrics, Vol. 13, Pages 11: Real Option Valuation of an Emerging Renewable Technology Design in Wave Energy Conversion</title>
	<link>https://www.mdpi.com/2225-1146/13/1/11</link>
	<description>The untapped potential of wave energy offers another alternative to diversifying renewable energy sources and addressing climate change by reducing CO2 emissions. However, development costs to mature the technology remain significant hurdles to adoption at scale and the technology often must compete against other marine energy renewables such as offshore wind. Here, we conduct a real option valuation that includes the uncertain market price of wholesale electricity and managerial flexibility expressed in determining future optimal decisions. We demonstrate the probability that the project&amp;amp;rsquo;s embedded compound real option value can turn a negative net present value wave energy project to a positive expected value. This change in investment decision uses decision tree analysis, where real options are developed as decision nodes, and models the uncertainty as a risk-neutral stochastic process using chance nodes. We also show how our results are analogous to a financial out-of-the-money call option. Our results highlight the distribution of outcomes and the benefit of a staged long-term investment in wave energy systems to better understand and manage project risk, recognizing that these probabilistic results are subject to the ongoing evolution of wholesale electricity prices and the stochastic process models used here to capture their future dynamics. Lastly, we show that the near-term optimal decision is to continue to fund ongoing development of a reference architecture to a higher technology readiness level to maintain the long-term option to deploy such a renewable energy system through private investment or private&amp;amp;ndash;public partnerships.</description>
	<pubDate>2025-03-04</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 13, Pages 11: Real Option Valuation of an Emerging Renewable Technology Design in Wave Energy Conversion</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/13/1/11">doi: 10.3390/econometrics13010011</a></p>
	<p>Authors:
		James A. DiLellio
		John C. Butler
		Igor Rizaev
		Wanan Sheng
		George Aggidis
		</p>
	<p>The untapped potential of wave energy offers another alternative to diversifying renewable energy sources and addressing climate change by reducing CO2 emissions. However, development costs to mature the technology remain significant hurdles to adoption at scale and the technology often must compete against other marine energy renewables such as offshore wind. Here, we conduct a real option valuation that includes the uncertain market price of wholesale electricity and managerial flexibility expressed in determining future optimal decisions. We demonstrate the probability that the project&amp;amp;rsquo;s embedded compound real option value can turn a negative net present value wave energy project to a positive expected value. This change in investment decision uses decision tree analysis, where real options are developed as decision nodes, and models the uncertainty as a risk-neutral stochastic process using chance nodes. We also show how our results are analogous to a financial out-of-the-money call option. Our results highlight the distribution of outcomes and the benefit of a staged long-term investment in wave energy systems to better understand and manage project risk, recognizing that these probabilistic results are subject to the ongoing evolution of wholesale electricity prices and the stochastic process models used here to capture their future dynamics. Lastly, we show that the near-term optimal decision is to continue to fund ongoing development of a reference architecture to a higher technology readiness level to maintain the long-term option to deploy such a renewable energy system through private investment or private&amp;amp;ndash;public partnerships.</p>
	]]></content:encoded>

	<dc:title>Real Option Valuation of an Emerging Renewable Technology Design in Wave Energy Conversion</dc:title>
			<dc:creator>James A. DiLellio</dc:creator>
			<dc:creator>John C. Butler</dc:creator>
			<dc:creator>Igor Rizaev</dc:creator>
			<dc:creator>Wanan Sheng</dc:creator>
			<dc:creator>George Aggidis</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics13010011</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2025-03-04</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2025-03-04</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>11</prism:startingPage>
		<prism:doi>10.3390/econometrics13010011</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/13/1/11</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/13/1/10">

	<title>Econometrics, Vol. 13, Pages 10: A Study of Economic and Social Preferences in Energy-Saving Behavior Using a Structural Equation Modeling Approach: The Case of Romania</title>
	<link>https://www.mdpi.com/2225-1146/13/1/10</link>
	<description>Examining the energy consumer behavioral model is critical for national governments and academia. This endeavor seeks to uncover effective solutions amid the energy crisis and climate change challenges. This article delves into legislative developments within the energy sector, European Commission recommendations for reducing energy consumption, and existing constraints impacting individual consumers. By scrutinizing the relevant literature, we aimed to identify and analyze factors that can enhance individual benefits derived from energy savings. Then, a comprehensive set of variables was formulated to model the final consumers&amp;amp;rsquo; behavior. Data collection involved administering questionnaires to individual consumers, consumer associations, and energy micro-enterprises in Romania. The gathered data were meticulously analyzed using the Smart-Pls 4 statistical software. Building upon insights from specialized literature, this paper pinpoints the behavioral determinants influencing the reduction in energy consumption. These determinants serve as independent variables shaping the voluntary adoption of measures in lifestyle and behavior among various types of energy users. This study&amp;amp;rsquo;s findings validate the assumptions presented in this article, highlighting that a reduction in energy consumption is a direct and intrinsic outcome achieved by cumulatively addressing several factors. These factors encompass investments in the energy sector, budget allocation for energy consumption expenditure, adherence to social behavior norms, access to pertinent information about the consequences of the energy crisis, and individual responsibility. Notably, the perception of energy-saving opportunities emerges as a mediator between the independent variables and energy savings with a significant effect. 
This aspect, developed for the first time in this article, draws inspiration from the prospect theory introduced by Kahneman and Tversky.</description>
	<pubDate>2025-02-24</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 13, Pages 10: A Study of Economic and Social Preferences in Energy-Saving Behavior Using a Structural Equation Modeling Approach: The Case of Romania</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/13/1/10">doi: 10.3390/econometrics13010010</a></p>
	<p>Authors:
		Cristian Busu
		Mihail Busu
		Stelian Grasu
		Ilona Skačkauskienė
		Luis Miguel Fonseca
		</p>
	<p>Examining the energy consumer behavioral model is critical for national governments and academia. This endeavor seeks to uncover effective solutions amid the energy crisis and climate change challenges. This article delves into legislative developments within the energy sector, European Commission recommendations for reducing energy consumption, and existing constraints impacting individual consumers. By scrutinizing the relevant literature, we aimed to identify and analyze factors that can enhance individual benefits derived from energy savings. Then, a comprehensive set of variables was formulated to model the final consumers&amp;amp;rsquo; behavior. Data collection involved administering questionnaires to individual consumers, consumer associations, and energy micro-enterprises in Romania. The gathered data were meticulously analyzed using the Smart-Pls 4 statistical software. Building upon insights from specialized literature, this paper pinpoints the behavioral determinants influencing the reduction in energy consumption. These determinants serve as independent variables shaping the voluntary adoption of measures in lifestyle and behavior among various types of energy users. This study&amp;amp;rsquo;s findings validate the assumptions presented in this article, highlighting that a reduction in energy consumption is a direct and intrinsic outcome achieved by cumulatively addressing several factors. These factors encompass investments in the energy sector, budget allocation for energy consumption expenditure, adherence to social behavior norms, access to pertinent information about the consequences of the energy crisis, and individual responsibility. Notably, the perception of energy-saving opportunities emerges as a mediator between the independent variables and energy savings with a significant effect. This aspect, developed for the first time in this article, draws inspiration from the prospect theory introduced by Kahneman and Tversky.</p>
	]]></content:encoded>

	<dc:title>A Study of Economic and Social Preferences in Energy-Saving Behavior Using a Structural Equation Modeling Approach: The Case of Romania</dc:title>
			<dc:creator>Cristian Busu</dc:creator>
			<dc:creator>Mihail Busu</dc:creator>
			<dc:creator>Stelian Grasu</dc:creator>
			<dc:creator>Ilona Skačkauskienė</dc:creator>
			<dc:creator>Luis Miguel Fonseca</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics13010010</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2025-02-24</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2025-02-24</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>10</prism:startingPage>
		<prism:doi>10.3390/econometrics13010010</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/13/1/10</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/13/1/9">

	<title>Econometrics, Vol. 13, Pages 9: Investigating Some Issues Relating to Regime Matching</title>
	<link>https://www.mdpi.com/2225-1146/13/1/9</link>
	<description>Markov switching models are a common tool used in many disciplines as well as in Economics, and estimation methods are available in many software packages. Estimated models are commonly used for allocating observations to regimes. This allocation is usually done using a rule based on the estimated smoothed probabilities, such as, in the two regime case, when it exceeds the threshold of 0.5. The accuracy of the regime matching is often measured by the concordance index. Can regime matching be improved by using other rules? By replicating a number of published two- and three-regime studies and the use of simulation methods, it demonstrates that other rules can improve on the performance of the rule based on the threshold of 0.5. Using simulated models we extend the analysis of a single series to investigate, and demonstrate the efficacy of Markov switching models identifying a common factor in multiple time series.</description>
	<pubDate>2025-02-21</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 13, Pages 9: Investigating Some Issues Relating to Regime Matching</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/13/1/9">doi: 10.3390/econometrics13010009</a></p>
	<p>Authors:
		Anthony D. Hall
		Adrian R. Pagan
		</p>
	<p>Markov switching models are a common tool used in many disciplines as well as in Economics, and estimation methods are available in many software packages. Estimated models are commonly used for allocating observations to regimes. This allocation is usually done using a rule based on the estimated smoothed probabilities, such as, in the two regime case, when it exceeds the threshold of 0.5. The accuracy of the regime matching is often measured by the concordance index. Can regime matching be improved by using other rules? By replicating a number of published two- and three-regime studies and the use of simulation methods, it demonstrates that other rules can improve on the performance of the rule based on the threshold of 0.5. Using simulated models we extend the analysis of a single series to investigate, and demonstrate the efficacy of Markov switching models identifying a common factor in multiple time series.</p>
	]]></content:encoded>

	<dc:title>Investigating Some Issues Relating to Regime Matching</dc:title>
			<dc:creator>Anthony D. Hall</dc:creator>
			<dc:creator>Adrian R. Pagan</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics13010009</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2025-02-21</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2025-02-21</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>9</prism:startingPage>
		<prism:doi>10.3390/econometrics13010009</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/13/1/9</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/13/1/8">

	<title>Econometrics, Vol. 13, Pages 8: Comparative Analysis of VAR and SVAR Models in Assessing Oil Price Shocks and Exchange Rate Transmission to Consumer Prices in South Africa</title>
	<link>https://www.mdpi.com/2225-1146/13/1/8</link>
	<description>This study compared standard VAR, SVAR with short-run restrictions, and SVAR with long-run restrictions to investigate the effects of oil price shocks and the foreign exchange rate (ZAR/USD) on consumer prices in South Africa after the 2008 financial crisis. The standard VAR model revealed that consumer prices responded positively to oil price shocks in the short term, whereas the foreign exchange rate (ZAR/USD) revealed a fluctuating currency over time. That is, the South African rand (ZAR) initially appreciated against the US dollar (USD) in response to oil price shocks (periods 1:7), followed by a depreciation in periods 8:12. Imposing short-run restrictions on the SVAR model revealed that the foreign exchange rate (ZAR/USD) reacted to oil price shocks in a manner similar to the VAR model, with ZAR appreciating during the initial periods (1:7) and subsequently depreciating in the later periods (8:12). Consumer prices responded positively to oil price shocks, causing consumer prices to increase in the short run, which is consistent with the VAR findings. However, imposing long-run restrictions on our SVAR model yielded results that contrasted with those obtained under short-run restrictions and the standard VAR model. That is, oil price shocks had long-lasting effects on the foreign exchange rate, resulting in the depreciation of ZAR relative to USD over time. Additionally, oil price shocks reduced consumer prices, resulting in a deflationary effect in the long run. This study concluded that South Africa&amp;amp;rsquo;s position as a net oil importer with a floating exchange rate renders the country vulnerable to short-term external shocks. Nonetheless, in the long term, the results indicated that the economy tends to adapt to oil price shocks over time.</description>
	<pubDate>2025-02-20</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 13, Pages 8: Comparative Analysis of VAR and SVAR Models in Assessing Oil Price Shocks and Exchange Rate Transmission to Consumer Prices in South Africa</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/13/1/8">doi: 10.3390/econometrics13010008</a></p>
	<p>Authors:
		Luyanda Majenge
		Sakhile Mpungose
		Simiso Msomi
		</p>
	<p>This study compared standard VAR, SVAR with short-run restrictions, and SVAR with long-run restrictions to investigate the effects of oil price shocks and the foreign exchange rate (ZAR/USD) on consumer prices in South Africa after the 2008 financial crisis. The standard VAR model revealed that consumer prices responded positively to oil price shocks in the short term, whereas the foreign exchange rate (ZAR/USD) revealed a fluctuating currency over time. That is, the South African rand (ZAR) initially appreciated against the US dollar (USD) in response to oil price shocks (periods 1:7), followed by a depreciation in periods 8:12. Imposing short-run restrictions on the SVAR model revealed that the foreign exchange rate (ZAR/USD) reacted to oil price shocks in a manner similar to the VAR model, with ZAR appreciating during the initial periods (1:7) and subsequently depreciating in the later periods (8:12). Consumer prices responded positively to oil price shocks, causing consumer prices to increase in the short run, which is consistent with the VAR findings. However, imposing long-run restrictions on our SVAR model yielded results that contrasted with those obtained under short-run restrictions and the standard VAR model. That is, oil price shocks had long-lasting effects on the foreign exchange rate, resulting in the depreciation of ZAR relative to USD over time. Additionally, oil price shocks reduced consumer prices, resulting in a deflationary effect in the long run. This study concluded that South Africa&amp;amp;rsquo;s position as a net oil importer with a floating exchange rate renders the country vulnerable to short-term external shocks. Nonetheless, in the long term, the results indicated that the economy tends to adapt to oil price shocks over time.</p>
	]]></content:encoded>

	<dc:title>Comparative Analysis of VAR and SVAR Models in Assessing Oil Price Shocks and Exchange Rate Transmission to Consumer Prices in South Africa</dc:title>
			<dc:creator>Luyanda Majenge</dc:creator>
			<dc:creator>Sakhile Mpungose</dc:creator>
			<dc:creator>Simiso Msomi</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics13010008</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2025-02-20</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2025-02-20</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>8</prism:startingPage>
		<prism:doi>10.3390/econometrics13010008</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/13/1/8</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/13/1/7">

	<title>Econometrics, Vol. 13, Pages 7: Conditional &amp;beta;-Convergence in APEC Economies, 1960&amp;ndash;2020: Empirical Evidence from the Pooled Mean Group Estimator</title>
	<link>https://www.mdpi.com/2225-1146/13/1/7</link>
	<description>The aim of this research is to analyze the impact of conditional variables&amp;amp;mdash;physical capital, population, and Total Factor Productivity (TFP)&amp;amp;mdash;on the economic convergence of the member economies of the Asia-Pacific Economic Cooperation (APEC) Forum over the period 1960&amp;amp;ndash;2020. This study employs a causal and correlational methodological approach, utilizing the pooled mean group (PMG) estimator within a non-experimental design framework for quantitative analysis. This methodology facilitates the estimation of conditional &amp;amp;beta;-convergence, ensuring the statistical significance of estimates even in heterogeneous data panels with variables of integration order I(0) and I(1). The results indicate that physical capital, population growth, and TFP have significantly influenced the growth rates of APEC economies, contributing to economic convergence within the region during the 1960&amp;amp;ndash;2020 period. This study offers significant contributions by analyzing the 21 APEC economies over a 60-year period, utilizing a PMG model to estimate conditional &amp;amp;beta;-convergence, and conducting comprehensive evaluations of short- and long-term trends. Consequently, the research recommends implementing policies that prioritize innovation, strengthen capital, create employment opportunities, and enhance productivity to reduce inequalities and foster sustainable growth across APEC economies.</description>
	<pubDate>2025-02-18</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 13, Pages 7: Conditional &amp;beta;-Convergence in APEC Economies, 1960&amp;ndash;2020: Empirical Evidence from the Pooled Mean Group Estimator</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/13/1/7">doi: 10.3390/econometrics13010007</a></p>
	<p>Authors:
		César Lenin Navarro-Chávez
		Julio César Morán-Figueroa
		Francisco Javier Ayvar-Campos
		</p>
	<p>The aim of this research is to analyze the impact of conditional variables&amp;amp;mdash;physical capital, population, and Total Factor Productivity (TFP)&amp;amp;mdash;on the economic convergence of the member economies of the Asia-Pacific Economic Cooperation (APEC) Forum over the period 1960&amp;amp;ndash;2020. This study employs a causal and correlational methodological approach, utilizing the pooled mean group (PMG) estimator within a non-experimental design framework for quantitative analysis. This methodology facilitates the estimation of conditional &amp;amp;beta;-convergence, ensuring the statistical significance of estimates even in heterogeneous data panels with variables of integration order I(0) and I(1). The results indicate that physical capital, population growth, and TFP have significantly influenced the growth rates of APEC economies, contributing to economic convergence within the region during the 1960&amp;amp;ndash;2020 period. This study offers significant contributions by analyzing the 21 APEC economies over a 60-year period, utilizing a PMG model to estimate conditional &amp;amp;beta;-convergence, and conducting comprehensive evaluations of short- and long-term trends. Consequently, the research recommends implementing policies that prioritize innovation, strengthen capital, create employment opportunities, and enhance productivity to reduce inequalities and foster sustainable growth across APEC economies.</p>
	]]></content:encoded>

	<dc:title>Conditional β-Convergence in APEC Economies, 1960–2020: Empirical Evidence from the Pooled Mean Group Estimator</dc:title>
			<dc:creator>César Lenin Navarro-Chávez</dc:creator>
			<dc:creator>Julio César Morán-Figueroa</dc:creator>
			<dc:creator>Francisco Javier Ayvar-Campos</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics13010007</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2025-02-18</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2025-02-18</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>7</prism:startingPage>
		<prism:doi>10.3390/econometrics13010007</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/13/1/7</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/13/1/6">

	<title>Econometrics, Vol. 13, Pages 6: Data-Based Parametrization for Affine GARCH Models Across Multiple Time Scales&amp;mdash;Roughness Implications</title>
	<link>https://www.mdpi.com/2225-1146/13/1/6</link>
	<description>This paper revisits the topic of time-scale parameterizations of the Heston&amp;amp;ndash;Nandi GARCH (1,1) model to create a new, theoretically valid setting compatible with real financial data. We first estimate parameters using three US market indices and six frequencies to let data reveal the correct, data-implied, time-scale parameterizations. We compared the data-implied parametrization to two popular candidates in the literature, demonstrating structurally different continuous-time limits, i.e., the data favor fractional Brownian motion (fBM)&amp;amp;mdash;instead of the standard Brownian motion (BM)-based parametrization. We then propose a theoretically flexible time-scale parameterization compatible with this fBM behavior. In this context, a fractional derivative analysis of our empirically based parametrization is performed, confirming an anomalous diffusion in the continuous-time limit. Such a finding is yet another endorsement of the recent and popular stylized fact known as rough volatility.</description>
	<pubDate>2025-02-12</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 13, Pages 6: Data-Based Parametrization for Affine GARCH Models Across Multiple Time Scales&amp;mdash;Roughness Implications</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/13/1/6">doi: 10.3390/econometrics13010006</a></p>
	<p>Authors:
		Marcos Escobar-Anel
		Sebastian Ferrando
		Fuyu Li
		Ke Xu
		</p>
	<p>This paper revisits the topic of time-scale parameterizations of the Heston&amp;amp;ndash;Nandi GARCH (1,1) model to create a new, theoretically valid setting compatible with real financial data. We first estimate parameters using three US market indices and six frequencies to let data reveal the correct, data-implied, time-scale parameterizations. We compared the data-implied parametrization to two popular candidates in the literature, demonstrating structurally different continuous-time limits, i.e., the data favor fractional Brownian motion (fBM)&amp;amp;mdash;instead of the standard Brownian motion (BM)-based parametrization. We then propose a theoretically flexible time-scale parameterization compatible with this fBM behavior. In this context, a fractional derivative analysis of our empirically based parametrization is performed, confirming an anomalous diffusion in the continuous-time limit. Such a finding is yet another endorsement of the recent and popular stylized fact known as rough volatility.</p>
	]]></content:encoded>

	<dc:title>Data-Based Parametrization for Affine GARCH Models Across Multiple Time Scales—Roughness Implications</dc:title>
			<dc:creator>Marcos Escobar-Anel</dc:creator>
			<dc:creator>Sebastian Ferrando</dc:creator>
			<dc:creator>Fuyu Li</dc:creator>
			<dc:creator>Ke Xu</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics13010006</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2025-02-12</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2025-02-12</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>6</prism:startingPage>
		<prism:doi>10.3390/econometrics13010006</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/13/1/6</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/13/1/5">

	<title>Econometrics, Vol. 13, Pages 5: Application of Fuzzy Discount Factors in Behavioural Decision-Making for Financial Market Modelling</title>
	<link>https://www.mdpi.com/2225-1146/13/1/5</link>
	<description>This paper presents an innovative approach to financial market modelling by integrating fuzzy discount factors into the decision-making process, thereby reflecting the complexities of human behaviour. Traditional financial models often fail to account for market dynamics&amp;amp;rsquo; psychological factors. The proposed method utilizes fuzzy logic to encapsulate the uncertainty and subjective judgment inherent in financial decisions. By representing financial variables as fuzzy numbers, the model better simulates the way humans assess information and make decisions under uncertainty. The incorporation of fuzzy discount factors marks a significant shift from deterministic to a more realistic representation of financial markets, suitable for practical application. This methodology offers a nuanced investment strategy that balances theoretical rigour with real-world applicability, appealing to a broad spectrum of investors. The aim of the following paper is to introduce an alternative to price modelling with the use of fuzzy return rates, which results in some errors in the mathematical model. The solution has the form of introducing fuzzy discount factors (FDFs) that retain the advantages of the fuzzy approach (e.g., encompassing subjectivity and imprecision) while preserving the shape of the fuzzy number modelling a price.</description>
	<pubDate>2025-01-26</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 13, Pages 5: Application of Fuzzy Discount Factors in Behavioural Decision-Making for Financial Market Modelling</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/13/1/5">doi: 10.3390/econometrics13010005</a></p>
	<p>Authors:
		Joanna Siwek
		Patryk Żywica
		</p>
	<p>This paper presents an innovative approach to financial market modelling by integrating fuzzy discount factors into the decision-making process, thereby reflecting the complexities of human behaviour. Traditional financial models often fail to account for market dynamics&amp;amp;rsquo; psychological factors. The proposed method utilizes fuzzy logic to encapsulate the uncertainty and subjective judgment inherent in financial decisions. By representing financial variables as fuzzy numbers, the model better simulates the way humans assess information and make decisions under uncertainty. The incorporation of fuzzy discount factors marks a significant shift from deterministic to a more realistic representation of financial markets, suitable for practical application. This methodology offers a nuanced investment strategy that balances theoretical rigour with real-world applicability, appealing to a broad spectrum of investors. The aim of the following paper is to introduce an alternative to price modelling with the use of fuzzy return rates, which results in some errors in the mathematical model. The solution has the form of introducing fuzzy discount factors (FDFs) that retain the advantages of the fuzzy approach (e.g., encompassing subjectivity and imprecision) while preserving the shape of the fuzzy number modelling a price.</p>
	]]></content:encoded>

	<dc:title>Application of Fuzzy Discount Factors in Behavioural Decision-Making for Financial Market Modelling</dc:title>
			<dc:creator>Joanna Siwek</dc:creator>
			<dc:creator>Patryk Żywica</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics13010005</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2025-01-26</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2025-01-26</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>5</prism:startingPage>
		<prism:doi>10.3390/econometrics13010005</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/13/1/5</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/13/1/4">

	<title>Econometrics, Vol. 13, Pages 4: An Economic Theory with a Formal-Econometric Test of Its Empirical Relevance</title>
	<link>https://www.mdpi.com/2225-1146/13/1/4</link>
	<description>The paper contains five parts&amp;mdash;a theory about entrepreneurial choice under uncertainty, a formal econometric structure for a test, the test, an appraisal of the test, and a description of the data generating process. Here, an entrepreneur is an individual who manages a firm that produces one commodity with labor, an intermediate good, and capital. He pays dividends to shareholders, invests in bonds and capital, and has an n-period planning horizon. Conditioned on the values of current-period prices, the entrepreneur aims to maximize the expected value of a utility function that varies with the dividends he pays each period and with his firm&amp;rsquo;s balance sheet variables at the end of the planning horizon. The test comprises a family of trials of theorems that I derive from the axioms of the theory part of the formal econometric structure. In the test, the theorems are appraised for their empirical relevance in an empirical context, where each one of a random sample of four hundred entrepreneurs has chosen the first-period part of his optimal n-period expenditure plan. My formal econometric arguments demonstrate that the theorems pass all the trials. At the end, I show that my formal econometric results imply that the theory is empirically relevant.</description>
	<pubDate>2025-01-16</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 13, Pages 4: An Economic Theory with a Formal-Econometric Test of Its Empirical Relevance</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/13/1/4">doi: 10.3390/econometrics13010004</a></p>
	<p>Authors:
		Bernt Petter Stigum
		</p>
	<p>The paper contains five parts&amp;mdash;a theory about entrepreneurial choice under uncertainty, a formal econometric structure for a test, the test, an appraisal of the test, and a description of the data generating process. Here, an entrepreneur is an individual who manages a firm that produces one commodity with labor, an intermediate good, and capital. He pays dividends to shareholders, invests in bonds and capital, and has an n-period planning horizon. Conditioned on the values of current-period prices, the entrepreneur aims to maximize the expected value of a utility function that varies with the dividends he pays each period and with his firm&amp;rsquo;s balance sheet variables at the end of the planning horizon. The test comprises a family of trials of theorems that I derive from the axioms of the theory part of the formal econometric structure. In the test, the theorems are appraised for their empirical relevance in an empirical context, where each one of a random sample of four hundred entrepreneurs has chosen the first-period part of his optimal n-period expenditure plan. My formal econometric arguments demonstrate that the theorems pass all the trials. At the end, I show that my formal econometric results imply that the theory is empirically relevant.</p>
	]]></content:encoded>

	<dc:title>An Economic Theory with a Formal-Econometric Test of Its Empirical Relevance</dc:title>
			<dc:creator>Bernt Petter Stigum</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics13010004</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2025-01-16</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2025-01-16</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>4</prism:startingPage>
		<prism:doi>10.3390/econometrics13010004</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/13/1/4</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/13/1/3">

	<title>Econometrics, Vol. 13, Pages 3: Optimal Time Series Forecasting Through the GARMA Model</title>
	<link>https://www.mdpi.com/2225-1146/13/1/3</link>
	<description>This paper examines the use of machine learning methods in modeling and forecasting time series with long memory through GARMA. By employing rigorous model selection criteria through simulation study, we find that the hybrid GARMA-LSTM model outperforms traditional approaches in forecasting long-memory time series. This characteristic is confirmed using popular datasets such as sunspot data and Australian beer production data. This approach provides a robust framework for accurate and reliable forecasting in long-memory time series. Additionally, we compare the GARMA-LSTM model with other implemented models, such as GARMA, TBATS, ARIMA, and ANN, highlighting its ability to address both long-memory and non-linear dynamics. Finally, we discuss the representativeness of the datasets selected and the adaptability of the proposed hybrid model to various time series scenarios.</description>
	<pubDate>2025-01-08</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 13, Pages 3: Optimal Time Series Forecasting Through the GARMA Model</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/13/1/3">doi: 10.3390/econometrics13010003</a></p>
	<p>Authors:
		Adel Hassan A. Gadhi
		Shelton Peiris
		David E. Allen
		Richard Hunt
		</p>
	<p>This paper examines the use of machine learning methods in modeling and forecasting time series with long memory through GARMA. By employing rigorous model selection criteria through simulation study, we find that the hybrid GARMA-LSTM model outperforms traditional approaches in forecasting long-memory time series. This characteristic is confirmed using popular datasets such as sunspot data and Australian beer production data. This approach provides a robust framework for accurate and reliable forecasting in long-memory time series. Additionally, we compare the GARMA-LSTM model with other implemented models, such as GARMA, TBATS, ARIMA, and ANN, highlighting its ability to address both long-memory and non-linear dynamics. Finally, we discuss the representativeness of the datasets selected and the adaptability of the proposed hybrid model to various time series scenarios.</p>
	]]></content:encoded>

	<dc:title>Optimal Time Series Forecasting Through the GARMA Model</dc:title>
			<dc:creator>Adel Hassan A. Gadhi</dc:creator>
			<dc:creator>Shelton Peiris</dc:creator>
			<dc:creator>David E. Allen</dc:creator>
			<dc:creator>Richard Hunt</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics13010003</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2025-01-08</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2025-01-08</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>3</prism:startingPage>
		<prism:doi>10.3390/econometrics13010003</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/13/1/3</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/13/1/2">

	<title>Econometrics, Vol. 13, Pages 2: Forecasting Half-Hourly Electricity Prices Using a Mixed-Frequency Structural VAR Framework</title>
	<link>https://www.mdpi.com/2225-1146/13/1/2</link>
	<description>Electricity price forecasting has been a topic of significant interest since the deregulation of electricity markets worldwide. The New Zealand electricity market is run primarily on renewable fuels, and so weather metrics have a significant impact on electricity price and volatility. In this paper, we employ a mixed-frequency vector autoregression (MF-VAR) framework where we propose a VAR specification to the reverse unrestricted mixed-data sampling (RU-MIDAS) model, called RU-MIDAS-VAR, to provide point forecasts of half-hourly electricity prices using several weather variables and electricity demand. A key focus of this study is the use of variational Bayes as an estimation technique and its comparison with other well-known Bayesian estimation methods. We separate forecasts for peak and off-peak periods in a day since we are primarily concerned with forecasts for peak periods. Our forecasts, which include peak and off-peak data, show that weather variables and demand as regressors can replicate some key characteristics of electricity prices. We also find the MF-VAR and RU-MIDAS-VAR models achieve similar forecast results. Using the LASSO, adaptive LASSO, and random subspace regression as dimension-reduction and variable selection methods helps to improve forecasts where random subspace methods perform well for large parameter sets while the LASSO significantly improves our forecasting results in all scenarios.</description>
	<pubDate>2025-01-08</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 13, Pages 2: Forecasting Half-Hourly Electricity Prices Using a Mixed-Frequency Structural VAR Framework</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/13/1/2">doi: 10.3390/econometrics13010002</a></p>
	<p>Authors:
		Gaurav Kapoor
		Nuttanan Wichitaksorn
		Mengheng Li
		Wenjun Zhang
		</p>
	<p>Electricity price forecasting has been a topic of significant interest since the deregulation of electricity markets worldwide. The New Zealand electricity market is run primarily on renewable fuels, and so weather metrics have a significant impact on electricity price and volatility. In this paper, we employ a mixed-frequency vector autoregression (MF-VAR) framework where we propose a VAR specification to the reverse unrestricted mixed-data sampling (RU-MIDAS) model, called RU-MIDAS-VAR, to provide point forecasts of half-hourly electricity prices using several weather variables and electricity demand. A key focus of this study is the use of variational Bayes as an estimation technique and its comparison with other well-known Bayesian estimation methods. We separate forecasts for peak and off-peak periods in a day since we are primarily concerned with forecasts for peak periods. Our forecasts, which include peak and off-peak data, show that weather variables and demand as regressors can replicate some key characteristics of electricity prices. We also find the MF-VAR and RU-MIDAS-VAR models achieve similar forecast results. Using the LASSO, adaptive LASSO, and random subspace regression as dimension-reduction and variable selection methods helps to improve forecasts where random subspace methods perform well for large parameter sets while the LASSO significantly improves our forecasting results in all scenarios.</p>
	]]></content:encoded>

	<dc:title>Forecasting Half-Hourly Electricity Prices Using a Mixed-Frequency Structural VAR Framework</dc:title>
			<dc:creator>Gaurav Kapoor</dc:creator>
			<dc:creator>Nuttanan Wichitaksorn</dc:creator>
			<dc:creator>Mengheng Li</dc:creator>
			<dc:creator>Wenjun Zhang</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics13010002</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2025-01-08</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2025-01-08</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>2</prism:startingPage>
		<prism:doi>10.3390/econometrics13010002</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/13/1/2</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/13/1/1">

	<title>Econometrics, Vol. 13, Pages 1: Relationship Between Coefficients in Parametric Survival Models for Exponentially Distributed Survival Time&amp;mdash;Registered Unemployment in Poland</title>
	<link>https://www.mdpi.com/2225-1146/13/1/1</link>
	<description>Survival analysis is a popular research tool in medicine and demography. It has been used for many years to study the duration of socio-economic phenomena. The aim of this article is to evaluate the relationship between the coefficients of the proportional hazards model (PH) and the accelerated failure time model (AFT), assuming an exponential distribution of survival time. The coefficients of the PH and AFT exponential models have the same magnitude but have opposite signs. It follows that there is a symmetric relation between the coefficients. In the case of exponential PH and AFT models, there is a relation of equality between the parameters describing the quality and fit of the model, as well as between the standard errors of the parameters of both models. In this case also, we can talk about a symmetric relation. The exponential PH model is valid if the exponential AFT model is valid. The study showed that the intensity of starting work was higher in the case of men, people with work experience, people with higher education and young people. The job search time was longer for women, people with no work experience, and people aged 60+, but shorter for people with higher education.</description>
	<pubDate>2025-01-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 13, Pages 1: Relationship Between Coefficients in Parametric Survival Models for Exponentially Distributed Survival Time&amp;mdash;Registered Unemployment in Poland</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/13/1/1">doi: 10.3390/econometrics13010001</a></p>
	<p>Authors:
		Beata Bieszk-Stolorz
		</p>
	<p>Survival analysis is a popular research tool in medicine and demography. It has been used for many years to study the duration of socio-economic phenomena. The aim of this article is to evaluate the relationship between the coefficients of the proportional hazards model (PH) and the accelerated failure time model (AFT), assuming an exponential distribution of survival time. The coefficients of the PH and AFT exponential models have the same magnitude but have opposite signs. It follows that there is a symmetric relation between the coefficients. In the case of exponential PH and AFT models, there is a relation of equality between the parameters describing the quality and fit of the model, as well as between the standard errors of the parameters of both models. In this case also, we can talk about a symmetric relation. The exponential PH model is valid if the exponential AFT model is valid. The study showed that the intensity of starting work was higher in the case of men, people with work experience, people with higher education and young people. The job search time was longer for women, people with no work experience, and people aged 60+, but shorter for people with higher education.</p>
	]]></content:encoded>

	<dc:title>Relationship Between Coefficients in Parametric Survival Models for Exponentially Distributed Survival Time&amp;mdash;Registered Unemployment in Poland</dc:title>
			<dc:creator>Beata Bieszk-Stolorz</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics13010001</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2025-01-02</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2025-01-02</prism:publicationDate>
	<prism:volume>13</prism:volume>
	<prism:number>1</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>1</prism:startingPage>
		<prism:doi>10.3390/econometrics13010001</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/13/1/1</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/12/4/39">

	<title>Econometrics, Vol. 12, Pages 39: Dynamic Factor Models and Fractional Integration&amp;mdash;With an Application to US Real Economic Activity</title>
	<link>https://www.mdpi.com/2225-1146/12/4/39</link>
	<description>This paper makes a twofold contribution. First, it develops the dynamic factor model by allowing for fractional integration instead of imposing the classical dichotomy between I(0) stationary and I(1) non-stationary series. This more general setup provides valuable information on the degree of persistence and mean-reverting properties of the series. Second, the proposed framework is used to analyse five annual US Real Economic Activity series (Employees, Energy, Industrial Production, Manufacturing, Personal Income) over the period from 1967 to 2019 in order to shed light on their degree of persistence and cyclical behaviour. The results indicate that economic activity in the US is highly persistent and is also characterised by cycles with a periodicity of 6 years and 8 months.</description>
	<pubDate>2024-12-19</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 12, Pages 39: Dynamic Factor Models and Fractional Integration&amp;mdash;With an Application to US Real Economic Activity</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/12/4/39">doi: 10.3390/econometrics12040039</a></p>
	<p>Authors:
		Guglielmo Maria Caporale
		Luis Alberiko Gil-Alana
		Pedro Jose Piqueras Martinez
		</p>
	<p>This paper makes a twofold contribution. First, it develops the dynamic factor model by allowing for fractional integration instead of imposing the classical dichotomy between I(0) stationary and I(1) non-stationary series. This more general setup provides valuable information on the degree of persistence and mean-reverting properties of the series. Second, the proposed framework is used to analyse five annual US Real Economic Activity series (Employees, Energy, Industrial Production, Manufacturing, Personal Income) over the period from 1967 to 2019 in order to shed light on their degree of persistence and cyclical behaviour. The results indicate that economic activity in the US is highly persistent and is also characterised by cycles with a periodicity of 6 years and 8 months.</p>
	]]></content:encoded>

	<dc:title>Dynamic Factor Models and Fractional Integration&amp;mdash;With an Application to US Real Economic Activity</dc:title>
			<dc:creator>Guglielmo Maria Caporale</dc:creator>
			<dc:creator>Luis Alberiko Gil-Alana</dc:creator>
			<dc:creator>Pedro Jose Piqueras Martinez</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics12040039</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2024-12-19</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2024-12-19</prism:publicationDate>
	<prism:volume>12</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>39</prism:startingPage>
		<prism:doi>10.3390/econometrics12040039</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/12/4/39</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/12/4/38">

	<title>Econometrics, Vol. 12, Pages 38: Financial Uncertainty and Gold Market Volatility: Evidence from a Generalized Autoregressive Conditional Heteroskedasticity Variant of the Mixed-Data Sampling (GARCH-MIDAS) Approach with Variable Selection</title>
	<link>https://www.mdpi.com/2225-1146/12/4/38</link>
	<description>We analyze the predictive effect of monthly global, regional, and country-level financial uncertainties on daily gold market volatility using univariate and multivariate GARCH-MIDAS models, with the latter characterized by variable selection. Based on data over the period of July 1992 to May 2020, we highlight the role of the global financial uncertainty factor in accurately forecasting gold price volatility relative to the benchmark GARCH-MIDAS-realized volatility model, with a dominant role of European financial uncertainties, and 36 out of the 42 regional financial market uncertainties. The forecasting performance of the global financial uncertainty factor is as good as an index of global economic conditions, with results based on a combination of these two models depicting evidence of complementary information. Moreover, the GARCH-MIDAS model with global financial uncertainty cannot be outperformed by the multivariate version of the GARCH-MIDAS framework, estimated using the adaptive LASSO, involving the top five developed and developing countries each, chosen based on their ability to explain the movements of overall global financial uncertainty. Our results imply that as financial uncertainties can improve the accuracy of the forecasts of gold returns volatility, it would help investors to design optimal portfolios to counteract financial risks. Also, as gold returns volatility reflects financial uncertainty, accurate forecasts of it would provide information about the future path of economic activity, and assist policy authorities in preventing possible economic slowdowns.</description>
	<pubDate>2024-12-12</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 12, Pages 38: Financial Uncertainty and Gold Market Volatility: Evidence from a Generalized Autoregressive Conditional Heteroskedasticity Variant of the Mixed-Data Sampling (GARCH-MIDAS) Approach with Variable Selection</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/12/4/38">doi: 10.3390/econometrics12040038</a></p>
	<p>Authors:
		O-Chia Chuang
		Rangan Gupta
		Christian Pierdzioch
		Buliao Shu
		</p>
	<p>We analyze the predictive effect of monthly global, regional, and country-level financial uncertainties on daily gold market volatility using univariate and multivariate GARCH-MIDAS models, with the latter characterized by variable selection. Based on data over the period of July 1992 to May 2020, we highlight the role of the global financial uncertainty factor in accurately forecasting gold price volatility relative to the benchmark GARCH-MIDAS-realized volatility model, with a dominant role of European financial uncertainties, and 36 out of the 42 regional financial market uncertainties. The forecasting performance of the global financial uncertainty factor is as good as an index of global economic conditions, with results based on a combination of these two models depicting evidence of complementary information. Moreover, the GARCH-MIDAS model with global financial uncertainty cannot be outperformed by the multivariate version of the GARCH-MIDAS framework, estimated using the adaptive LASSO, involving the top five developed and developing countries each, chosen based on their ability to explain the movements of overall global financial uncertainty. Our results imply that as financial uncertainties can improve the accuracy of the forecasts of gold returns volatility, it would help investors to design optimal portfolios to counteract financial risks. Also, as gold returns volatility reflects financial uncertainty, accurate forecasts of it would provide information about the future path of economic activity, and assist policy authorities in preventing possible economic slowdowns.</p>
	]]></content:encoded>

	<dc:title>Financial Uncertainty and Gold Market Volatility: Evidence from a Generalized Autoregressive Conditional Heteroskedasticity Variant of the Mixed-Data Sampling (GARCH-MIDAS) Approach with Variable Selection</dc:title>
			<dc:creator>O-Chia Chuang</dc:creator>
			<dc:creator>Rangan Gupta</dc:creator>
			<dc:creator>Christian Pierdzioch</dc:creator>
			<dc:creator>Buliao Shu</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics12040038</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2024-12-12</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2024-12-12</prism:publicationDate>
	<prism:volume>12</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>38</prism:startingPage>
		<prism:doi>10.3390/econometrics12040038</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/12/4/38</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/12/4/37">

	<title>Econometrics, Vol. 12, Pages 37: How Financial Stress Can Impact Fiscal and Monetary Policies: Threshold VAR Analysis for Brazilian Economy</title>
	<link>https://www.mdpi.com/2225-1146/12/4/37</link>
	<description>This study examines economic policy responses in Brazil during periods of financial stress, with a particular emphasis on the dynamics of both the impulse and rule components of fiscal policy. We offer novel empirical evidence on policy responses under both low and high stress conditions, utilizing monthly data that span the past two decades. To this end, we construct a Financial Stress Index (FSI) and integrate it into a threshold-VAR framework. Additionally, we employ five distinct methodologies to decompose fiscal policy into its impulse and rule components. Our analysis yields two main findings. First, fiscal policy exhibits procyclical behavior in its impulse component and countercyclical behavior in its rule component across both regimes. Second, while monetary policy is countercyclical during high stress conditions, its impact remains largely statistically non-significant. These results suggest that policymakers should exercise caution when timing the implementation of expansionary fiscal policies, carefully considering the phase of the business cycle. Moreover, our findings carry significant implications for the ongoing discourse on fiscal stimulus and debt stabilization strategies, particularly in the context of financial stress.</description>
	<pubDate>2024-12-05</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 12, Pages 37: How Financial Stress Can Impact Fiscal and Monetary Policies: Threshold VAR Analysis for Brazilian Economy</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/12/4/37">doi: 10.3390/econometrics12040037</a></p>
	<p>Authors:
		Roberta Moreira Wichmann
		Werley Cordeiro
		João F. Caldeira
		</p>
	<p>This study examines economic policy responses in Brazil during periods of financial stress, with a particular emphasis on the dynamics of both the impulse and rule components of fiscal policy. We offer novel empirical evidence on policy responses under both low and high stress conditions, utilizing monthly data that span the past two decades. To this end, we construct a Financial Stress Index (FSI) and integrate it into a threshold-VAR framework. Additionally, we employ five distinct methodologies to decompose fiscal policy into its impulse and rule components. Our analysis yields two main findings. First, fiscal policy exhibits procyclical behavior in its impulse component and countercyclical behavior in its rule component across both regimes. Second, while monetary policy is countercyclical during high stress conditions, its impact remains largely statistically non-significant. These results suggest that policymakers should exercise caution when timing the implementation of expansionary fiscal policies, carefully considering the phase of the business cycle. Moreover, our findings carry significant implications for the ongoing discourse on fiscal stimulus and debt stabilization strategies, particularly in the context of financial stress.</p>
	]]></content:encoded>

	<dc:title>How Financial Stress Can Impact Fiscal and Monetary Policies: Threshold VAR Analysis for Brazilian Economy</dc:title>
			<dc:creator>Roberta Moreira Wichmann</dc:creator>
			<dc:creator>Werley Cordeiro</dc:creator>
			<dc:creator>João F. Caldeira</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics12040037</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2024-12-05</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2024-12-05</prism:publicationDate>
	<prism:volume>12</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>37</prism:startingPage>
		<prism:doi>10.3390/econometrics12040037</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/12/4/37</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/12/4/36">

	<title>Econometrics, Vol. 12, Pages 36: Instrument Selection in Panel Data Models with Endogeneity: A Bayesian Approach</title>
	<link>https://www.mdpi.com/2225-1146/12/4/36</link>
	<description>This paper proposes the use of Bayesian inference techniques to search for and obtain valid instruments in dynamic panel data models where endogenous variables may exist. The use of Principal Component Analysis (PCA) allows for obtaining a reduced number of instruments in comparison to the high number of instruments commonly used in the literature, and Monte Carlo Markov Chain (MCMC) methods enable efficient exploration of the instrument space, deriving accurate point estimates of the elements of interest. The proposed methodology is illustrated in a simulated case and in an empirical application, where the partial effect of a series of determinants on the attraction of international bank flows is quantified. The results highlight the importance of promoting and developing the private sector in these economies, as well as the importance of maintaining good levels of creditworthiness.</description>
	<pubDate>2024-12-02</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 12, Pages 36: Instrument Selection in Panel Data Models with Endogeneity: A Bayesian Approach</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/12/4/36">doi: 10.3390/econometrics12040036</a></p>
	<p>Authors:
		Álvaro Herce
		Manuel Salvador
		</p>
	<p>This paper proposes the use of Bayesian inference techniques to search for and obtain valid instruments in dynamic panel data models where endogenous variables may exist. The use of Principal Component Analysis (PCA) allows for obtaining a reduced number of instruments in comparison to the high number of instruments commonly used in the literature, and Monte Carlo Markov Chain (MCMC) methods enable efficient exploration of the instrument space, deriving accurate point estimates of the elements of interest. The proposed methodology is illustrated in a simulated case and in an empirical application, where the partial effect of a series of determinants on the attraction of international bank flows is quantified. The results highlight the importance of promoting and developing the private sector in these economies, as well as the importance of maintaining good levels of creditworthiness.</p>
	]]></content:encoded>

	<dc:title>Instrument Selection in Panel Data Models with Endogeneity: A Bayesian Approach</dc:title>
			<dc:creator>Álvaro Herce</dc:creator>
			<dc:creator>Manuel Salvador</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics12040036</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2024-12-02</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2024-12-02</prism:publicationDate>
	<prism:volume>12</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>36</prism:startingPage>
		<prism:doi>10.3390/econometrics12040036</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/12/4/36</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/12/4/35">

	<title>Econometrics, Vol. 12, Pages 35: Bayesian Inference for Long Memory Stochastic Volatility Models</title>
	<link>https://www.mdpi.com/2225-1146/12/4/35</link>
	<description>We explore the application of integrated nested Laplace approximations for the Bayesian estimation of stochastic volatility models characterized by long memory. The logarithmic variance persistence in these models is represented by a Fractional Gaussian Noise process, which we approximate as a linear combination of independent first-order autoregressive processes, lending itself to a Gaussian Markov Random Field representation. Our results from Monte Carlo experiments indicate that this approach exhibits small sample properties akin to those of Markov Chain Monte Carlo estimators. Additionally, it offers the advantages of reduced computational complexity and the mitigation of posterior convergence issues. We employ this methodology to estimate volatility dependency patterns for both the S&amp;P500 index and major cryptocurrencies. We thoroughly assess the in-sample fit and extend our analysis to the construction of out-of-sample forecasts. Furthermore, we propose multi-factor extensions and apply this method to estimate volatility measurements from high-frequency data, underscoring its exceptional computational efficiency. Our simulation results demonstrate that the INLA methodology achieves comparable accuracy to traditional MCMC methods for estimating latent parameters and volatilities in LMSV models. The proposed model extensions show strong in-sample fit and out-of-sample forecast performance, highlighting the versatility of the INLA approach. This method is particularly advantageous in high-frequency contexts, where the computational demands of traditional posterior simulations are often prohibitive.</description>
	<pubDate>2024-11-27</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 12, Pages 35: Bayesian Inference for Long Memory Stochastic Volatility Models</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/12/4/35">doi: 10.3390/econometrics12040035</a></p>
	<p>Authors:
		Pedro Chaim
		Márcio Poletti Laurini
		</p>
	<p>We explore the application of integrated nested Laplace approximations for the Bayesian estimation of stochastic volatility models characterized by long memory. The logarithmic variance persistence in these models is represented by a Fractional Gaussian Noise process, which we approximate as a linear combination of independent first-order autoregressive processes, lending itself to a Gaussian Markov Random Field representation. Our results from Monte Carlo experiments indicate that this approach exhibits small sample properties akin to those of Markov Chain Monte Carlo estimators. Additionally, it offers the advantages of reduced computational complexity and the mitigation of posterior convergence issues. We employ this methodology to estimate volatility dependency patterns for both the S&amp;P500 index and major cryptocurrencies. We thoroughly assess the in-sample fit and extend our analysis to the construction of out-of-sample forecasts. Furthermore, we propose multi-factor extensions and apply this method to estimate volatility measurements from high-frequency data, underscoring its exceptional computational efficiency. Our simulation results demonstrate that the INLA methodology achieves comparable accuracy to traditional MCMC methods for estimating latent parameters and volatilities in LMSV models. The proposed model extensions show strong in-sample fit and out-of-sample forecast performance, highlighting the versatility of the INLA approach. This method is particularly advantageous in high-frequency contexts, where the computational demands of traditional posterior simulations are often prohibitive.</p>
	]]></content:encoded>

	<dc:title>Bayesian Inference for Long Memory Stochastic Volatility Models</dc:title>
			<dc:creator>Pedro Chaim</dc:creator>
			<dc:creator>Márcio Poletti Laurini</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics12040035</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2024-11-27</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2024-11-27</prism:publicationDate>
	<prism:volume>12</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>35</prism:startingPage>
		<prism:doi>10.3390/econometrics12040035</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/12/4/35</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/12/4/34">

	<title>Econometrics, Vol. 12, Pages 34: Forecasting Wind&amp;ndash;Photovoltaic Energy Production and Income with Traditional and ML Techniques</title>
	<link>https://www.mdpi.com/2225-1146/12/4/34</link>
	<description>Hybrid production plants harness diverse climatic sources for electricity generation, playing a crucial role in the transition to renewable energies. This study aims to forecast the profitability of a combined wind&amp;amp;ndash;photovoltaic energy system. Here, we develop a model that integrates predicted spot prices and electricity output forecasts, incorporating relevant climatic variables to enhance accuracy. The jointly modeled climatic variables and the spot price constitute one of the innovative aspects of this work. Regarding practical application, we considered a hypothetical wind&amp;amp;ndash;photovoltaic plant located in Italy and used the relevant climate series to determine the quantity of energy produced. We forecast the quantity of energy as well as income through machine learning techniques and more traditional statistical and econometric models. We evaluate the results by splitting the dataset into estimation windows and test windows, and using a backtesting technique. In particular, we found evidence that ML regression techniques outperform results obtained with traditional econometric models. Regarding the models used to achieve this goal, the objective is not to propose original models but to verify the effectiveness of the most recent machine learning models for this important application, and to compare them with more classic linear regression techniques.</description>
	<pubDate>2024-11-12</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 12, Pages 34: Forecasting Wind&amp;ndash;Photovoltaic Energy Production and Income with Traditional and ML Techniques</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/12/4/34">doi: 10.3390/econometrics12040034</a></p>
	<p>Authors:
		Giovanni Masala
		Amelie Schischke
		</p>
	<p>Hybrid production plants harness diverse climatic sources for electricity generation, playing a crucial role in the transition to renewable energies. This study aims to forecast the profitability of a combined wind&amp;amp;ndash;photovoltaic energy system. Here, we develop a model that integrates predicted spot prices and electricity output forecasts, incorporating relevant climatic variables to enhance accuracy. The jointly modeled climatic variables and the spot price constitute one of the innovative aspects of this work. Regarding practical application, we considered a hypothetical wind&amp;amp;ndash;photovoltaic plant located in Italy and used the relevant climate series to determine the quantity of energy produced. We forecast the quantity of energy as well as income through machine learning techniques and more traditional statistical and econometric models. We evaluate the results by splitting the dataset into estimation windows and test windows, and using a backtesting technique. In particular, we found evidence that ML regression techniques outperform results obtained with traditional econometric models. Regarding the models used to achieve this goal, the objective is not to propose original models but to verify the effectiveness of the most recent machine learning models for this important application, and to compare them with more classic linear regression techniques.</p>
	]]></content:encoded>

	<dc:title>Forecasting Wind&amp;ndash;Photovoltaic Energy Production and Income with Traditional and ML Techniques</dc:title>
			<dc:creator>Giovanni Masala</dc:creator>
			<dc:creator>Amelie Schischke</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics12040034</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2024-11-12</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2024-11-12</prism:publicationDate>
	<prism:volume>12</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>34</prism:startingPage>
		<prism:doi>10.3390/econometrics12040034</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/12/4/34</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/12/4/33">

	<title>Econometrics, Vol. 12, Pages 33: Likert Scale Variables in Personal Finance Research: The Neutral Category Problem</title>
	<link>https://www.mdpi.com/2225-1146/12/4/33</link>
	<description>Personal finance research often utilizes Likert-type items and Likert scales as dependent variables, frequently employing standard probit and ordered probit models. If inappropriately modeled, the &amp;amp;ldquo;neutral&amp;amp;rdquo; category of discrete dependent variables can bias estimates of the remaining categories. Through the utilization of hierarchical models, this paper demonstrates a methodology that accounts for the econometric issues of the neutral category. We then analyze the technique through an empirical exercise relevant to personal finance research using data from the National Financial Capability Study. We demonstrate that ignoring the &amp;amp;ldquo;neutral&amp;amp;rdquo; category bias can lead to incorrect inferences, hindering the progression of personal finance research. Our findings underscore the importance of refining statistical modeling techniques when dealing with Likert-type data. By accounting for the neutral category, we can enhance the reliability of personal finance research outcomes, fostering improved decision-relevant insights.</description>
	<pubDate>2024-11-06</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 12, Pages 33: Likert Scale Variables in Personal Finance Research: The Neutral Category Problem</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/12/4/33">doi: 10.3390/econometrics12040033</a></p>
	<p>Authors:
		Blain Pearson
		Donald Lacombe
		Nasima Khatun
		</p>
	<p>Personal finance research often utilizes Likert-type items and Likert scales as dependent variables, frequently employing standard probit and ordered probit models. If inappropriately modeled, the &amp;amp;ldquo;neutral&amp;amp;rdquo; category of discrete dependent variables can bias estimates of the remaining categories. Through the utilization of hierarchical models, this paper demonstrates a methodology that accounts for the econometric issues of the neutral category. We then analyze the technique through an empirical exercise relevant to personal finance research using data from the National Financial Capability Study. We demonstrate that ignoring the &amp;amp;ldquo;neutral&amp;amp;rdquo; category bias can lead to incorrect inferences, hindering the progression of personal finance research. Our findings underscore the importance of refining statistical modeling techniques when dealing with Likert-type data. By accounting for the neutral category, we can enhance the reliability of personal finance research outcomes, fostering improved decision-relevant insights.</p>
	]]></content:encoded>

	<dc:title>Likert Scale Variables in Personal Finance Research: The Neutral Category Problem</dc:title>
			<dc:creator>Blain Pearson</dc:creator>
			<dc:creator>Donald Lacombe</dc:creator>
			<dc:creator>Nasima Khatun</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics12040033</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2024-11-06</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2024-11-06</prism:publicationDate>
	<prism:volume>12</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>33</prism:startingPage>
		<prism:doi>10.3390/econometrics12040033</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/12/4/33</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/12/4/32">

	<title>Econometrics, Vol. 12, Pages 32: Enhancing Efficiency: Halton Draws in the Generalized True Random Effects Model</title>
	<link>https://www.mdpi.com/2225-1146/12/4/32</link>
	<description>This paper measures the impact of the number of Halton draws in excess of &amp;amp;lceil;n&amp;amp;rceil; on technical efficiency in the generalized true random effects (four-component) stochastic frontier model estimated by simulated maximum likelihood. A substantial set of Monte Carlo simulations demonstrates that increasing the number of Halton draws to &amp;amp;lceil;n3/4&amp;amp;rceil; (&amp;amp;lceil;n2/3&amp;amp;rceil;) decreases the mean squared error of the total technical efficiency estimates by 6.1 (4.9) percent. Furthermore, increasing the number of Halton draws either improves or has no detrimental impact on correlation, mean squared error, relative bias, and upward bias for persistent, transient, and total technical efficiency. An energy sector application is included, to demonstrate how these issues can arise in practice, and how increasing Halton draws can improve parameter and efficiency estimates in empirical work.</description>
	<pubDate>2024-11-06</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 12, Pages 32: Enhancing Efficiency: Halton Draws in the Generalized True Random Effects Model</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/12/4/32">doi: 10.3390/econometrics12040032</a></p>
	<p>Authors:
		David H. Bernstein
		</p>
	<p>This paper measures the impact of the number of Halton draws in excess of &amp;amp;lceil;n&amp;amp;rceil; on technical efficiency in the generalized true random effects (four-component) stochastic frontier model estimated by simulated maximum likelihood. A substantial set of Monte Carlo simulations demonstrates that increasing the number of Halton draws to &amp;amp;lceil;n3/4&amp;amp;rceil; (&amp;amp;lceil;n2/3&amp;amp;rceil;) decreases the mean squared error of the total technical efficiency estimates by 6.1 (4.9) percent. Furthermore, increasing the number of Halton draws either improves or has no detrimental impact on correlation, mean squared error, relative bias, and upward bias for persistent, transient, and total technical efficiency. An energy sector application is included, to demonstrate how these issues can arise in practice, and how increasing Halton draws can improve parameter and efficiency estimates in empirical work.</p>
	]]></content:encoded>

	<dc:title>Enhancing Efficiency: Halton Draws in the Generalized True Random Effects Model</dc:title>
			<dc:creator>David H. Bernstein</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics12040032</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2024-11-06</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2024-11-06</prism:publicationDate>
	<prism:volume>12</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>32</prism:startingPage>
		<prism:doi>10.3390/econometrics12040032</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/12/4/32</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/12/4/31">

	<title>Econometrics, Vol. 12, Pages 31: Exploring the Role of Global Value Chain Position in Economic Models for Bankruptcy Forecasting</title>
	<link>https://www.mdpi.com/2225-1146/12/4/31</link>
	<description>This study addresses a significant gap in the literature by comparing the effectiveness of traditional statistical methods with artificial intelligence (AI) techniques in predicting bankruptcy among small and medium-sized enterprises (SMEs). Traditional bankruptcy prediction models often fail to account for the unique characteristics of SMEs, such as their vulnerability due to lean structures and reliance on short-term credit. This research utilizes a comprehensive database of 7104 Belgian SMEs to evaluate these models. Belgium was selected due to its unique regulatory and economic environment, which presents specific challenges and opportunities for bankruptcy prediction in SMEs. Our findings reveal that AI techniques significantly outperform traditional statistical methods in predicting bankruptcy, demonstrating superior predictive accuracy. Furthermore, our analysis highlights that a firm&amp;amp;rsquo;s position within the Global Value Chain (GVC) impacts prediction accuracy. Specifically, firms operating upstream in the production process show lower prediction performance, suggesting that bankruptcy risk may propagate upward along the value chain. This effect was measured by analyzing the firm&amp;amp;rsquo;s GVC position as a variable in the prediction models, with upstream firms exhibiting greater vulnerability to the financial distress of downstream partners. These insights are valuable for practitioners, emphasizing the need to consider specific performance factors based on the firm&amp;amp;rsquo;s position within the GVC when assessing bankruptcy risk. By integrating both AI techniques and GVC positioning into bankruptcy prediction models, this study provides a more nuanced understanding of bankruptcy risks for SMEs and offers practical guidance for managing and mitigating these risks.</description>
	<pubDate>2024-11-05</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 12, Pages 31: Exploring the Role of Global Value Chain Position in Economic Models for Bankruptcy Forecasting</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/12/4/31">doi: 10.3390/econometrics12040031</a></p>
	<p>Authors:
		Mélanie Croquet
		Loredana Cultrera
		Dimitri Laroutis
		Laetitia Pozniak
		Guillaume Vermeylen
		</p>
	<p>This study addresses a significant gap in the literature by comparing the effectiveness of traditional statistical methods with artificial intelligence (AI) techniques in predicting bankruptcy among small and medium-sized enterprises (SMEs). Traditional bankruptcy prediction models often fail to account for the unique characteristics of SMEs, such as their vulnerability due to lean structures and reliance on short-term credit. This research utilizes a comprehensive database of 7104 Belgian SMEs to evaluate these models. Belgium was selected due to its unique regulatory and economic environment, which presents specific challenges and opportunities for bankruptcy prediction in SMEs. Our findings reveal that AI techniques significantly outperform traditional statistical methods in predicting bankruptcy, demonstrating superior predictive accuracy. Furthermore, our analysis highlights that a firm&amp;amp;rsquo;s position within the Global Value Chain (GVC) impacts prediction accuracy. Specifically, firms operating upstream in the production process show lower prediction performance, suggesting that bankruptcy risk may propagate upward along the value chain. This effect was measured by analyzing the firm&amp;amp;rsquo;s GVC position as a variable in the prediction models, with upstream firms exhibiting greater vulnerability to the financial distress of downstream partners. These insights are valuable for practitioners, emphasizing the need to consider specific performance factors based on the firm&amp;amp;rsquo;s position within the GVC when assessing bankruptcy risk. By integrating both AI techniques and GVC positioning into bankruptcy prediction models, this study provides a more nuanced understanding of bankruptcy risks for SMEs and offers practical guidance for managing and mitigating these risks.</p>
	]]></content:encoded>

	<dc:title>Exploring the Role of Global Value Chain Position in Economic Models for Bankruptcy Forecasting</dc:title>
			<dc:creator>Mélanie Croquet</dc:creator>
			<dc:creator>Loredana Cultrera</dc:creator>
			<dc:creator>Dimitri Laroutis</dc:creator>
			<dc:creator>Laetitia Pozniak</dc:creator>
			<dc:creator>Guillaume Vermeylen</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics12040031</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2024-11-05</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2024-11-05</prism:publicationDate>
	<prism:volume>12</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>31</prism:startingPage>
		<prism:doi>10.3390/econometrics12040031</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/12/4/31</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/12/4/30">

	<title>Econometrics, Vol. 12, Pages 30: Impact of Areal Factors on Students&amp;rsquo; Travel Mode Choices: A Bayesian Spatial Analysis</title>
	<link>https://www.mdpi.com/2225-1146/12/4/30</link>
	<description>A preliminary analysis of the 2018/2019 Austin Travel Survey indicated that most off-campus students in Travis County, TX, tend to use cars rather than more sustainable transportation modes, significantly contributing to traffic congestion and environmental impact. This study aims to analyze the impacts of areal factors, including environmental and transportation factors, on students&amp;amp;rsquo; choices of travel mode in order to promote more sustainable transport behaviors. Additionally, we investigate the presence of spatial correlation and unobserved heterogeneity in travel data and their effects on students&amp;amp;rsquo; travel mode choices. We have proposed two Bayesian models&amp;amp;mdash;a basic model and a spatial model&amp;amp;mdash;with structured and unstructured random-effect terms to perform the analysis. The results indicate that the inclusion of spatial random effects considerably improves model performance, suggesting that students&amp;amp;rsquo; choices of mode are likely influenced by areal factors often &amp;amp;lsquo;unobserved&amp;amp;rsquo; in many individual travel mode choice surveys. Furthermore, we found that the average slope, sidewalk density, and bus-stop density significantly affect students&amp;amp;rsquo; travel mode choices. These findings provide insights into promoting sustainable transport systems by addressing environmental and infrastructural factors in an effort to reduce car dependency among students, thereby supporting sustainable urban development.</description>
	<pubDate>2024-10-26</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 12, Pages 30: Impact of Areal Factors on Students&amp;rsquo; Travel Mode Choices: A Bayesian Spatial Analysis</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/12/4/30">doi: 10.3390/econometrics12040030</a></p>
	<p>Authors:
		Amin Azimian
		Alireza Azimian
		</p>
	<p>A preliminary analysis of the 2018/2019 Austin Travel Survey indicated that most off-campus students in Travis County, TX, tend to use cars rather than more sustainable transportation modes, significantly contributing to traffic congestion and environmental impact. This study aims to analyze the impacts of areal factors, including environmental and transportation factors, on students&amp;amp;rsquo; choices of travel mode in order to promote more sustainable transport behaviors. Additionally, we investigate the presence of spatial correlation and unobserved heterogeneity in travel data and their effects on students&amp;amp;rsquo; travel mode choices. We have proposed two Bayesian models&amp;amp;mdash;a basic model and a spatial model&amp;amp;mdash;with structured and unstructured random-effect terms to perform the analysis. The results indicate that the inclusion of spatial random effects considerably improves model performance, suggesting that students&amp;amp;rsquo; choices of mode are likely influenced by areal factors often &amp;amp;lsquo;unobserved&amp;amp;rsquo; in many individual travel mode choice surveys. Furthermore, we found that the average slope, sidewalk density, and bus-stop density significantly affect students&amp;amp;rsquo; travel mode choices. These findings provide insights into promoting sustainable transport systems by addressing environmental and infrastructural factors in an effort to reduce car dependency among students, thereby supporting sustainable urban development.</p>
	]]></content:encoded>

	<dc:title>Impact of Areal Factors on Students&amp;rsquo; Travel Mode Choices: A Bayesian Spatial Analysis</dc:title>
			<dc:creator>Amin Azimian</dc:creator>
			<dc:creator>Alireza Azimian</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics12040030</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2024-10-26</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2024-10-26</prism:publicationDate>
	<prism:volume>12</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>30</prism:startingPage>
		<prism:doi>10.3390/econometrics12040030</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/12/4/30</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/12/4/29">

	<title>Econometrics, Vol. 12, Pages 29: Econometric Analysis of the Sustainability and Development of an Alternative Strategy to Gross Value Added in Kazakhstan&amp;rsquo;s Agricultural Sector</title>
	<link>https://www.mdpi.com/2225-1146/12/4/29</link>
	<description>Based on the systematization of relevant problems in the agricultural sector of Kazakhstan and other countries, the purpose of the research is to aid in the development and implementation of a methodology for the econometric analysis of sustainability, the classification of economic growth, and an alternative strategy for gross value added depending on time phases with time lags of 0, 1, and 2 years, and on the gross fixed capital formation in the agricultural sector of Kazakhstan. The research has used a variety of quantitative techniques, including the logistic growth difference equation, applied statistics, econometric models, operations research, nonlinear mathematical programming models, economic modeling simulations, and sustainability analysis. In the work on three criteria: equilibrium, balanced and optimal growth, we have defined the main trends of growth of Gross added value of agriculture, hunting and forestry. The first, depending on the time phases, the second, depending on the Gross fixed capital formation transactions for equilibrium growth, for the growth of an alternative strategy, for the endogenous growth rate and the growth of exogenous flows. And we also received a classification of the trend of Productive, Moderate and Critical growth for the agricultural industry depending on the correlated linkaged industry of the national economy of Kazakhstan. The results of this work can be used in data analytics and artificial intelligence, digital transformation and technology in agriculture, as well as in the areas of sustainability and environmental impact.</description>
	<pubDate>2024-10-17</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 12, Pages 29: Econometric Analysis of the Sustainability and Development of an Alternative Strategy to Gross Value Added in Kazakhstan&amp;rsquo;s Agricultural Sector</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/12/4/29">doi: 10.3390/econometrics12040029</a></p>
	<p>Authors:
		Azat Tleubayev
		Seyit Kerimkhulle
		Manatzhan Tleuzhanova
		Aigul Uchkampirova
		Zhanat Bulakbay
		Raikhan Mugauina
		Zhumagul Tazhibayeva
		Alibek Adalbek
		Yerassyl Iskakov
		Daniyar Toleubay
		</p>
	<p>Based on the systematization of relevant problems in the agricultural sector of Kazakhstan and other countries, the purpose of the research is to aid in the development and implementation of a methodology for the econometric analysis of sustainability, the classification of economic growth, and an alternative strategy for gross value added depending on time phases with time lags of 0, 1, and 2 years, and on the gross fixed capital formation in the agricultural sector of Kazakhstan. The research has used a variety of quantitative techniques, including the logistic growth difference equation, applied statistics, econometric models, operations research, nonlinear mathematical programming models, economic modeling simulations, and sustainability analysis. In the work on three criteria: equilibrium, balanced and optimal growth, we have defined the main trends of growth of Gross added value of agriculture, hunting and forestry. The first, depending on the time phases, the second, depending on the Gross fixed capital formation transactions for equilibrium growth, for the growth of an alternative strategy, for the endogenous growth rate and the growth of exogenous flows. And we also received a classification of the trend of Productive, Moderate and Critical growth for the agricultural industry depending on the correlated linkaged industry of the national economy of Kazakhstan. The results of this work can be used in data analytics and artificial intelligence, digital transformation and technology in agriculture, as well as in the areas of sustainability and environmental impact.</p>
	]]></content:encoded>

	<dc:title>Econometric Analysis of the Sustainability and Development of an Alternative Strategy to Gross Value Added in Kazakhstan&amp;rsquo;s Agricultural Sector</dc:title>
			<dc:creator>Azat Tleubayev</dc:creator>
			<dc:creator>Seyit Kerimkhulle</dc:creator>
			<dc:creator>Manatzhan Tleuzhanova</dc:creator>
			<dc:creator>Aigul Uchkampirova</dc:creator>
			<dc:creator>Zhanat Bulakbay</dc:creator>
			<dc:creator>Raikhan Mugauina</dc:creator>
			<dc:creator>Zhumagul Tazhibayeva</dc:creator>
			<dc:creator>Alibek Adalbek</dc:creator>
			<dc:creator>Yerassyl Iskakov</dc:creator>
			<dc:creator>Daniyar Toleubay</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics12040029</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2024-10-17</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2024-10-17</prism:publicationDate>
	<prism:volume>12</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>29</prism:startingPage>
		<prism:doi>10.3390/econometrics12040029</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/12/4/29</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/12/4/28">

	<title>Econometrics, Vol. 12, Pages 28: Long-Term Care in Germany in the Context of the Demographic Transition&amp;mdash;An Outlook for the Expenses of Long-Term Care Insurance through 2050</title>
	<link>https://www.mdpi.com/2225-1146/12/4/28</link>
	<description>Demographic aging results in a growing number of older people in need of care in many regions all over the world. Germany has witnessed steady population aging for decades, prompting policymakers and other stakeholders to discuss how to fulfill the rapidly growing demand for care workers and finance the rising costs of long-term care. Informed decisions on this matter to ensure the sustainability of the statutory long-term care insurance system require reliable knowledge of the associated future costs. These need to be simulated based on well-designed forecast models that holistically include the complexity of the forecast problem, namely the demographic transition, epidemiological trends, concrete demand for and supply of specific care services, and the respective costs. Care risks heavily depend on demographics, both in absolute terms and according to severity. The number of persons in need of care, disaggregated by severity of disability, in turn, is the main driver of the remuneration that is paid by long-term care insurance. Therefore, detailed forecasts of the population and care rates are important ingredients for forecasts of long-term care insurance expenditures. We present a novel approach based on a stochastic demographic cohort-component approach that includes trends in age- and sex-specific care rates and the demand for specific care services, given changing preferences over the life course. The model is executed for Germany until the year 2050 as a case study.</description>
	<pubDate>2024-10-09</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 12, Pages 28: Long-Term Care in Germany in the Context of the Demographic Transition&amp;mdash;An Outlook for the Expenses of Long-Term Care Insurance through 2050</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/12/4/28">doi: 10.3390/econometrics12040028</a></p>
	<p>Authors:
		Patrizio Vanella
		Christina Benita Wilke
		Moritz Heß
		</p>
	<p>Demographic aging results in a growing number of older people in need of care in many regions all over the world. Germany has witnessed steady population aging for decades, prompting policymakers and other stakeholders to discuss how to fulfill the rapidly growing demand for care workers and finance the rising costs of long-term care. Informed decisions on this matter to ensure the sustainability of the statutory long-term care insurance system require reliable knowledge of the associated future costs. These need to be simulated based on well-designed forecast models that holistically include the complexity of the forecast problem, namely the demographic transition, epidemiological trends, concrete demand for and supply of specific care services, and the respective costs. Care risks heavily depend on demographics, both in absolute terms and according to severity. The number of persons in need of care, disaggregated by severity of disability, in turn, is the main driver of the remuneration that is paid by long-term care insurance. Therefore, detailed forecasts of the population and care rates are important ingredients for forecasts of long-term care insurance expenditures. We present a novel approach based on a stochastic demographic cohort-component approach that includes trends in age- and sex-specific care rates and the demand for specific care services, given changing preferences over the life course. The model is executed for Germany until the year 2050 as a case study.</p>
	]]></content:encoded>

	<dc:title>Long-Term Care in Germany in the Context of the Demographic Transition&amp;mdash;An Outlook for the Expenses of Long-Term Care Insurance through 2050</dc:title>
			<dc:creator>Patrizio Vanella</dc:creator>
			<dc:creator>Christina Benita Wilke</dc:creator>
			<dc:creator>Moritz Heß</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics12040028</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2024-10-09</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2024-10-09</prism:publicationDate>
	<prism:volume>12</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>28</prism:startingPage>
		<prism:doi>10.3390/econometrics12040028</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/12/4/28</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/12/4/27">

	<title>Econometrics, Vol. 12, Pages 27: Estimating the Effects of Credit Constraints on Productivity of Peruvian Agriculture</title>
	<link>https://www.mdpi.com/2225-1146/12/4/27</link>
	<description>This paper proposes an estimator for the endogenous switching regression models with fixed effects. The decision to switch from one regime to the other may depend on unobserved factors, which would cause the state, such as being credit constrained, to be endogenous. Our estimator allows for this endogenous selection and for conditional heteroscedasticity in the outcome equation. Applying our estimator to a dataset on the productivity in agriculture substantially changes the conclusions compared to earlier analysis of the same dataset. Intuitively, the reason that our estimate of the impact of switching between states is smaller than previously estimated is that we captured the selection issue: switching between being credit constrained and credit unconstrained may be endogenous to farm production. In particular, we find that being credit constrained has the substantial effect of reducing yield by 11%, but not the previously estimated very dramatic effect of reducing yield by 26%.</description>
	<pubDate>2024-09-26</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 12, Pages 27: Estimating the Effects of Credit Constraints on Productivity of Peruvian Agriculture</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/12/4/27">doi: 10.3390/econometrics12040027</a></p>
	<p>Authors:
		Tiemen Woutersen
		Katherine Hauck
		Shahidur R. Khandker
		</p>
	<p>This paper proposes an estimator for the endogenous switching regression models with fixed effects. The decision to switch from one regime to the other may depend on unobserved factors, which would cause the state, such as being credit constrained, to be endogenous. Our estimator allows for this endogenous selection and for conditional heteroscedasticity in the outcome equation. Applying our estimator to a dataset on the productivity in agriculture substantially changes the conclusions compared to earlier analysis of the same dataset. Intuitively, the reason that our estimate of the impact of switching between states is smaller than previously estimated is that we captured the selection issue: switching between being credit constrained and credit unconstrained may be endogenous to farm production. In particular, we find that being credit constrained has the substantial effect of reducing yield by 11%, but not the previously estimated very dramatic effect of reducing yield by 26%.</p>
	]]></content:encoded>

	<dc:title>Estimating the Effects of Credit Constraints on Productivity of Peruvian Agriculture</dc:title>
			<dc:creator>Tiemen Woutersen</dc:creator>
			<dc:creator>Katherine Hauck</dc:creator>
			<dc:creator>Shahidur R. Khandker</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics12040027</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2024-09-26</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2024-09-26</prism:publicationDate>
	<prism:volume>12</prism:volume>
	<prism:number>4</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>27</prism:startingPage>
		<prism:doi>10.3390/econometrics12040027</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/12/4/27</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/12/3/26">

	<title>Econometrics, Vol. 12, Pages 26: Estimating Treatment Effects Using Observational Data and Experimental Data with Non-Overlapping Support</title>
	<link>https://www.mdpi.com/2225-1146/12/3/26</link>
	<description>When estimating treatment effects, the gold standard is to conduct a randomized experiment and then contrast outcomes associated with the treatment group and the control group. However, in many cases, randomized experiments are either conducted with a much smaller scale compared to the size of the target population or accompanied with certain ethical issues and thus hard to implement. Therefore, researchers usually rely on observational data to study causal connections. The downside is that the unconfoundedness assumption, which is the key to validating the use of observational data, is untestable and almost always violated. Hence, any conclusion drawn from observational data should be further analyzed with great care. Given the richness of observational data and usefulness of experimental data, researchers hope to develop credible methods to combine the strength of the two. In this paper, we consider a setting where the observational data contain the outcome of interest as well as a surrogate outcome, while the experimental data contain only the surrogate outcome. We propose an easy-to-implement estimator to estimate the average treatment effect of interest using both the observational data and the experimental data.</description>
	<pubDate>2024-09-20</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 12, Pages 26: Estimating Treatment Effects Using Observational Data and Experimental Data with Non-Overlapping Support</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/12/3/26">doi: 10.3390/econometrics12030026</a></p>
	<p>Authors:
		Kevin Han
		Han Wu
		Linjia Wu
		Yu Shi
		Canyao Liu
		</p>
	<p>When estimating treatment effects, the gold standard is to conduct a randomized experiment and then contrast outcomes associated with the treatment group and the control group. However, in many cases, randomized experiments are either conducted with a much smaller scale compared to the size of the target population or accompanied with certain ethical issues and thus hard to implement. Therefore, researchers usually rely on observational data to study causal connections. The downside is that the unconfoundedness assumption, which is the key to validating the use of observational data, is untestable and almost always violated. Hence, any conclusion drawn from observational data should be further analyzed with great care. Given the richness of observational data and usefulness of experimental data, researchers hope to develop credible methods to combine the strength of the two. In this paper, we consider a setting where the observational data contain the outcome of interest as well as a surrogate outcome, while the experimental data contain only the surrogate outcome. We propose an easy-to-implement estimator to estimate the average treatment effect of interest using both the observational data and the experimental data.</p>
	]]></content:encoded>

	<dc:title>Estimating Treatment Effects Using Observational Data and Experimental Data with Non-Overlapping Support</dc:title>
			<dc:creator>Kevin Han</dc:creator>
			<dc:creator>Han Wu</dc:creator>
			<dc:creator>Linjia Wu</dc:creator>
			<dc:creator>Yu Shi</dc:creator>
			<dc:creator>Canyao Liu</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics12030026</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2024-09-20</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2024-09-20</prism:publicationDate>
	<prism:volume>12</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>26</prism:startingPage>
		<prism:doi>10.3390/econometrics12030026</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/12/3/26</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/12/3/25">

	<title>Econometrics, Vol. 12, Pages 25: Score-Driven Interactions for &amp;ldquo;Disease X&amp;rdquo; Using COVID and Non-COVID Mortality</title>
	<link>https://www.mdpi.com/2225-1146/12/3/25</link>
	<description>The COVID-19 (coronavirus disease of 2019) pandemic is over; however, the probability of such a pandemic is about 2% in any year. There are international negotiations among almost 200 countries at the World Health Organization (WHO) concerning a global plan to deal with the next pandemic on the scale of COVID-19, known as &amp;ldquo;Disease X&amp;rdquo;. We develop a nonlinear panel quasi-vector autoregressive (PQVAR) model for the multivariate t-distribution with dynamic unobserved effects, which can be used for out-of-sample forecasts of causes of death counts in the United States (US) when a new global pandemic starts. We use panel data from the Centers for Disease Control and Prevention (CDC) for the cross section of all states of the United States (US) from March 2020 to September 2022 regarding all death counts of (i) COVID-19 deaths, (ii) deaths that medically may be related to COVID-19, and (iii) the remaining causes of death. We compare the t-PQVAR model with its special cases, the PVAR moving average (PVARMA), and PVAR. The t-PQVAR model provides robust evidence on dynamic interactions among (i), (ii), and (iii). The t-PQVAR model may be used for out-of-sample forecasting purposes at the outbreak of a future &amp;ldquo;Disease X&amp;rdquo; pandemic.</description>
	<pubDate>2024-09-04</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 12, Pages 25: Score-Driven Interactions for &amp;ldquo;Disease X&amp;rdquo; Using COVID and Non-COVID Mortality</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/12/3/25">doi: 10.3390/econometrics12030025</a></p>
	<p>Authors:
		Szabolcs Blazsek
		William M. Dos Santos
		Andreco S. Edwards
		</p>
	<p>The COVID-19 (coronavirus disease of 2019) pandemic is over; however, the probability of such a pandemic is about 2% in any year. There are international negotiations among almost 200 countries at the World Health Organization (WHO) concerning a global plan to deal with the next pandemic on the scale of COVID-19, known as &amp;ldquo;Disease X&amp;rdquo;. We develop a nonlinear panel quasi-vector autoregressive (PQVAR) model for the multivariate t-distribution with dynamic unobserved effects, which can be used for out-of-sample forecasts of causes of death counts in the United States (US) when a new global pandemic starts. We use panel data from the Centers for Disease Control and Prevention (CDC) for the cross section of all states of the United States (US) from March 2020 to September 2022 regarding all death counts of (i) COVID-19 deaths, (ii) deaths that medically may be related to COVID-19, and (iii) the remaining causes of death. We compare the t-PQVAR model with its special cases, the PVAR moving average (PVARMA), and PVAR. The t-PQVAR model provides robust evidence on dynamic interactions among (i), (ii), and (iii). The t-PQVAR model may be used for out-of-sample forecasting purposes at the outbreak of a future &amp;ldquo;Disease X&amp;rdquo; pandemic.</p>
	]]></content:encoded>

	<dc:title>Score-Driven Interactions for &amp;ldquo;Disease X&amp;rdquo; Using COVID and Non-COVID Mortality</dc:title>
			<dc:creator>Szabolcs Blazsek</dc:creator>
			<dc:creator>William M. Dos Santos</dc:creator>
			<dc:creator>Andreco S. Edwards</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics12030025</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2024-09-04</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2024-09-04</prism:publicationDate>
	<prism:volume>12</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>25</prism:startingPage>
		<prism:doi>10.3390/econometrics12030025</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/12/3/25</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/12/3/24">

	<title>Econometrics, Vol. 12, Pages 24: Signs of Fluctuations in Energy Prices and Energy Stock-Market Volatility in Brazil and in the US</title>
	<link>https://www.mdpi.com/2225-1146/12/3/24</link>
	<description>Volatility reflects the degree of variation in a time series, and a measurement of the stock performance in the energy sector can help one understand the pattern of fluctuations within this industry, as well as the factors that influence it. One of these factors could be the COVID-19 pandemic, which led to extreme volatility within the stock market in several economic sectors. It is essential to understand this regime of volatility so that robust financial strategies can be adopted to handle it. This study used stock data from the Yahoo! Finance API and data from the energy-price database from the US Energy Information Administration to conduct a comparative analysis of the volatility in the energy sector in Brazil and in the United States, as well as of the energy prices in California. The volatility in these time series were modeled using GARCH. The stock volatility regimes, both before and after COVID-19, were identified with a Markov switching model; the spillover index between the energy markets in the USA and in Brazil was evaluated with the Diebold&amp;ndash;Yilmaz index; and the causality between the energy stock price and the energy prices was measured with the Granger causality test. The findings of this study show that (i) the volatility regime introduced by COVID-19 is still prevalent in Brazil and in the USA, (ii) the changes in the energy market in the US affect the Brazilian market significantly more than the reverse, and (iii) there is a causality relationship between the energy stock markets and the energy prices in California. These results may assist in the achievement of effective regulation and economic planning, while also supporting better market interventions. Also, acknowledging the persistent COVID-19-induced volatility can help with developing strategies for future crisis resilience.</description>
	<pubDate>2024-08-23</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 12, Pages 24: Signs of Fluctuations in Energy Prices and Energy Stock-Market Volatility in Brazil and in the US</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/12/3/24">doi: 10.3390/econometrics12030024</a></p>
	<p>Authors:
		Gabriel Arquelau Pimenta Rodrigues
		André Luiz Marques Serrano
		Gabriela Mayumi Saiki
		Matheus Noschang de Oliveira
		Guilherme Fay Vergara
		Pedro Augusto Giacomelli Fernandes
		Vinícius Pereira Gonçalves
		Clóvis Neumann
		</p>
	<p>Volatility reflects the degree of variation in a time series, and a measurement of the stock performance in the energy sector can help one understand the pattern of fluctuations within this industry, as well as the factors that influence it. One of these factors could be the COVID-19 pandemic, which led to extreme volatility within the stock market in several economic sectors. It is essential to understand this regime of volatility so that robust financial strategies can be adopted to handle it. This study used stock data from the Yahoo! Finance API and data from the energy-price database from the US Energy Information Administration to conduct a comparative analysis of the volatility in the energy sector in Brazil and in the United States, as well as of the energy prices in California. The volatility in these time series were modeled using GARCH. The stock volatility regimes, both before and after COVID-19, were identified with a Markov switching model; the spillover index between the energy markets in the USA and in Brazil was evaluated with the Diebold&amp;ndash;Yilmaz index; and the causality between the energy stock price and the energy prices was measured with the Granger causality test. The findings of this study show that (i) the volatility regime introduced by COVID-19 is still prevalent in Brazil and in the USA, (ii) the changes in the energy market in the US affect the Brazilian market significantly more than the reverse, and (iii) there is a causality relationship between the energy stock markets and the energy prices in California. These results may assist in the achievement of effective regulation and economic planning, while also supporting better market interventions. Also, acknowledging the persistent COVID-19-induced volatility can help with developing strategies for future crisis resilience.</p>
	]]></content:encoded>

	<dc:title>Signs of Fluctuations in Energy Prices and Energy Stock-Market Volatility in Brazil and in the US</dc:title>
			<dc:creator>Gabriel Arquelau Pimenta Rodrigues</dc:creator>
			<dc:creator>André Luiz Marques Serrano</dc:creator>
			<dc:creator>Gabriela Mayumi Saiki</dc:creator>
			<dc:creator>Matheus Noschang de Oliveira</dc:creator>
			<dc:creator>Guilherme Fay Vergara</dc:creator>
			<dc:creator>Pedro Augusto Giacomelli Fernandes</dc:creator>
			<dc:creator>Vinícius Pereira Gonçalves</dc:creator>
			<dc:creator>Clóvis Neumann</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics12030024</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2024-08-23</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2024-08-23</prism:publicationDate>
	<prism:volume>12</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>24</prism:startingPage>
		<prism:doi>10.3390/econometrics12030024</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/12/3/24</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/12/3/23">

	<title>Econometrics, Vol. 12, Pages 23: Transient and Persistent Technical Efficiencies in Rice Farming: A Generalized True Random-Effects Model Approach</title>
	<link>https://www.mdpi.com/2225-1146/12/3/23</link>
	<description>This study estimates transient and persistent technical efficiencies (TEs) using a generalized true random-effects (GTRE) model. We estimate the GTRE model using maximum likelihood and Bayesian estimation methods, then compare it to three simpler models nested within it to evaluate the robustness of our estimates. We use a panel data set of 945 observations collected from 344 rice farming households in Vietnam&amp;rsquo;s Mekong River Delta. The results indicate that the GTRE model is more appropriate than the restricted models for understanding heterogeneity and inefficiency in rice production. The mean estimate of overall technical efficiency is 0.71 on average, with transient rather than persistent inefficiency being the dominant component. This suggests that rice farmers could increase output substantially and would benefit from policies that pay more attention to addressing short-term inefficiency issues.</description>
	<pubDate>2024-08-12</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 12, Pages 23: Transient and Persistent Technical Efficiencies in Rice Farming: A Generalized True Random-Effects Model Approach</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/12/3/23">doi: 10.3390/econometrics12030023</a></p>
	<p>Authors:
		Phuc Trong Ho
		Michael Burton
		Atakelty Hailu
		Chunbo Ma
		</p>
	<p>This study estimates transient and persistent technical efficiencies (TEs) using a generalized true random-effects (GTRE) model. We estimate the GTRE model using maximum likelihood and Bayesian estimation methods, then compare it to three simpler models nested within it to evaluate the robustness of our estimates. We use a panel data set of 945 observations collected from 344 rice farming households in Vietnam&amp;rsquo;s Mekong River Delta. The results indicate that the GTRE model is more appropriate than the restricted models for understanding heterogeneity and inefficiency in rice production. The mean estimate of overall technical efficiency is 0.71 on average, with transient rather than persistent inefficiency being the dominant component. This suggests that rice farmers could increase output substantially and would benefit from policies that pay more attention to addressing short-term inefficiency issues.</p>
	]]></content:encoded>

	<dc:title>Transient and Persistent Technical Efficiencies in Rice Farming: A Generalized True Random-Effects Model Approach</dc:title>
			<dc:creator>Phuc Trong Ho</dc:creator>
			<dc:creator>Michael Burton</dc:creator>
			<dc:creator>Atakelty Hailu</dc:creator>
			<dc:creator>Chunbo Ma</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics12030023</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2024-08-12</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2024-08-12</prism:publicationDate>
	<prism:volume>12</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>23</prism:startingPage>
		<prism:doi>10.3390/econometrics12030023</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/12/3/23</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/12/3/22">

	<title>Econometrics, Vol. 12, Pages 22: Is It Sufficient to Select the Optimal Class Number Based Only on Information Criteria in Fixed- and Random-Parameter Latent Class Discrete Choice Modeling Approaches?</title>
	<link>https://www.mdpi.com/2225-1146/12/3/22</link>
	<description>Heterogeneity in preferences can be addressed through various discrete choice modeling approaches. The random-parameter latent class (RLC) approach offers a desirable alternative for analysts due to its advantageous properties of separating classes with different preferences and capturing the remaining heterogeneity within classes by including random parameters. For latent class specifications, however, more empirical evidence on the optimal number of classes to consider is needed in order to develop a more objective set of criteria. To investigate this question, we tested cases with different class numbers (for both fixed- and random-parameter latent class modeling) by analyzing data from a discrete choice experiment conducted in 2021 (examined preferences regarding COVID-19 vaccines). We compared models using commonly used indicators such as the Bayesian information criterion, and we took into account, among others, a seemingly simple but often overlooked indicator such as the ratio of significant parameter estimates. Based on our results, it is not sufficient to decide on the optimal number of classes in the latent class modeling based on only information criteria. 
We considered aspects such as the ratio of significant parameter estimates (it may be interesting to examine this both between and within specifications to find out which model type and class number has the most balanced ratio); the validity of the coefficients obtained (focusing on whether the conclusions are consistent with our theoretical model); whether including random parameters is justified (finding a balance between the complexity of the model and its information content, i.e., to examine when (and to what extent) the introduction of within-class heterogeneity is relevant); and the distributions of MRS calculations (since they often function as a direct measure of preferences, it is necessary to test how consistent the distributions of specifications with different class numbers are (if they are highly, i.e., relatively stable in explaining consumer preferences, it is probably worth putting more emphasis on the aspects mentioned above when choosing a model)). The results of this research raise further questions that should be addressed by further model testing in the future.</description>
	<pubDate>2024-08-08</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 12, Pages 22: Is It Sufficient to Select the Optimal Class Number Based Only on Information Criteria in Fixed- and Random-Parameter Latent Class Discrete Choice Modeling Approaches?</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/12/3/22">doi: 10.3390/econometrics12030022</a></p>
	<p>Authors:
		Péter Czine
		Péter Balogh
		Zsanett Blága
		Zoltán Szabó
		Réka Szekeres
		Stephane Hess
		Béla Juhász
		</p>
	<p>Heterogeneity in preferences can be addressed through various discrete choice modeling approaches. The random-parameter latent class (RLC) approach offers a desirable alternative for analysts due to its advantageous properties of separating classes with different preferences and capturing the remaining heterogeneity within classes by including random parameters. For latent class specifications, however, more empirical evidence on the optimal number of classes to consider is needed in order to develop a more objective set of criteria. To investigate this question, we tested cases with different class numbers (for both fixed- and random-parameter latent class modeling) by analyzing data from a discrete choice experiment conducted in 2021 (examined preferences regarding COVID-19 vaccines). We compared models using commonly used indicators such as the Bayesian information criterion, and we took into account, among others, a seemingly simple but often overlooked indicator such as the ratio of significant parameter estimates. Based on our results, it is not sufficient to decide on the optimal number of classes in the latent class modeling based on only information criteria. 
We considered aspects such as the ratio of significant parameter estimates (it may be interesting to examine this both between and within specifications to find out which model type and class number has the most balanced ratio); the validity of the coefficients obtained (focusing on whether the conclusions are consistent with our theoretical model); whether including random parameters is justified (finding a balance between the complexity of the model and its information content, i.e., to examine when (and to what extent) the introduction of within-class heterogeneity is relevant); and the distributions of MRS calculations (since they often function as a direct measure of preferences, it is necessary to test how consistent the distributions of specifications with different class numbers are (if they are highly, i.e., relatively stable in explaining consumer preferences, it is probably worth putting more emphasis on the aspects mentioned above when choosing a model)). The results of this research raise further questions that should be addressed by further model testing in the future.</p>
	]]></content:encoded>

	<dc:title>Is It Sufficient to Select the Optimal Class Number Based Only on Information Criteria in Fixed- and Random-Parameter Latent Class Discrete Choice Modeling Approaches?</dc:title>
			<dc:creator>Péter Czine</dc:creator>
			<dc:creator>Péter Balogh</dc:creator>
			<dc:creator>Zsanett Blága</dc:creator>
			<dc:creator>Zoltán Szabó</dc:creator>
			<dc:creator>Réka Szekeres</dc:creator>
			<dc:creator>Stephane Hess</dc:creator>
			<dc:creator>Béla Juhász</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics12030022</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2024-08-08</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2024-08-08</prism:publicationDate>
	<prism:volume>12</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>22</prism:startingPage>
		<prism:doi>10.3390/econometrics12030022</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/12/3/22</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/12/3/21">

	<title>Econometrics, Vol. 12, Pages 21: Instrumental Variable Method for Regularized Estimation in Generalized Linear Measurement Error Models</title>
	<link>https://www.mdpi.com/2225-1146/12/3/21</link>
	<description>Regularized regression methods have attracted much attention in the literature, mainly due to its application in high-dimensional variable selection problems. Most existing regularization methods assume that the predictors are directly observed and precisely measured. It is well known that in a low-dimensional regression model if some covariates are measured with error, then the naive estimators that ignore the measurement error are biased and inconsistent. However, the impact of measurement error in regularized estimation procedures is not clear. For example, it is known that the ordinary least squares estimate of the regression coefficient in a linear model is attenuated towards zero and, on the other hand, the variance of the observed surrogate predictor is inflated. Therefore, it is unclear how the interaction of these two factors affects the selection outcome. To correct for the measurement error effects, some researchers assume that the measurement error covariance matrix is known or can be estimated using external data. In this paper, we propose the regularized instrumental variable method for generalized linear measurement error models. We show that the proposed approach yields a consistent variable selection procedure and root-n consistent parameter estimators. Extensive finite sample simulation studies show that the proposed method performs satisfactorily in both linear and generalized linear models. A real data example is provided to further demonstrate the usage of the method.</description>
	<pubDate>2024-07-12</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 12, Pages 21: Instrumental Variable Method for Regularized Estimation in Generalized Linear Measurement Error Models</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/12/3/21">doi: 10.3390/econometrics12030021</a></p>
	<p>Authors:
		Lin Xue
		Liqun Wang
		</p>
	<p>Regularized regression methods have attracted much attention in the literature, mainly due to its application in high-dimensional variable selection problems. Most existing regularization methods assume that the predictors are directly observed and precisely measured. It is well known that in a low-dimensional regression model if some covariates are measured with error, then the naive estimators that ignore the measurement error are biased and inconsistent. However, the impact of measurement error in regularized estimation procedures is not clear. For example, it is known that the ordinary least squares estimate of the regression coefficient in a linear model is attenuated towards zero and, on the other hand, the variance of the observed surrogate predictor is inflated. Therefore, it is unclear how the interaction of these two factors affects the selection outcome. To correct for the measurement error effects, some researchers assume that the measurement error covariance matrix is known or can be estimated using external data. In this paper, we propose the regularized instrumental variable method for generalized linear measurement error models. We show that the proposed approach yields a consistent variable selection procedure and root-n consistent parameter estimators. Extensive finite sample simulation studies show that the proposed method performs satisfactorily in both linear and generalized linear models. A real data example is provided to further demonstrate the usage of the method.</p>
	]]></content:encoded>

	<dc:title>Instrumental Variable Method for Regularized Estimation in Generalized Linear Measurement Error Models</dc:title>
			<dc:creator>Lin Xue</dc:creator>
			<dc:creator>Liqun Wang</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics12030021</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2024-07-12</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2024-07-12</prism:publicationDate>
	<prism:volume>12</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>21</prism:startingPage>
		<prism:doi>10.3390/econometrics12030021</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/12/3/21</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/12/3/20">

	<title>Econometrics, Vol. 12, Pages 20: Comparing Estimation Methods for the Power–Pareto Distribution</title>
	<link>https://www.mdpi.com/2225-1146/12/3/20</link>
	<description>Non-negative distributions are important tools in various fields. Given the importance of achieving a good fit, the literature offers hundreds of different models, from the very simple to the highly flexible. In this paper, we consider the power–Pareto model, which is defined by its quantile function. This distribution has three parameters, allowing the model to take different shapes, including symmetrical and left- and right-skewed. We provide different distributional characteristics and discuss parameter estimation. In addition to the already-known Maximum Likelihood and Least Squares of the logarithm of the order statistics estimation methods, we propose several additional methods. A simulation study and an application to two datasets are conducted to illustrate the performance of the estimation methods.</description>
	<pubDate>2024-07-11</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 12, Pages 20: Comparing Estimation Methods for the Power–Pareto Distribution</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/12/3/20">doi: 10.3390/econometrics12030020</a></p>
	<p>Authors:
		Frederico Caeiro
		Mina Norouzirad
		</p>
	<p>Non-negative distributions are important tools in various fields. Given the importance of achieving a good fit, the literature offers hundreds of different models, from the very simple to the highly flexible. In this paper, we consider the power–Pareto model, which is defined by its quantile function. This distribution has three parameters, allowing the model to take different shapes, including symmetrical and left- and right-skewed. We provide different distributional characteristics and discuss parameter estimation. In addition to the already-known Maximum Likelihood and Least Squares of the logarithm of the order statistics estimation methods, we propose several additional methods. A simulation study and an application to two datasets are conducted to illustrate the performance of the estimation methods.</p>
	]]></content:encoded>

	<dc:title>Comparing Estimation Methods for the Power–Pareto Distribution</dc:title>
			<dc:creator>Frederico Caeiro</dc:creator>
			<dc:creator>Mina Norouzirad</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics12030020</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2024-07-11</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2024-07-11</prism:publicationDate>
	<prism:volume>12</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>20</prism:startingPage>
		<prism:doi>10.3390/econometrics12030020</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/12/3/20</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/12/3/19">

	<title>Econometrics, Vol. 12, Pages 19: Stochastic Debt Sustainability Analysis in Romania in the Context of the War in Ukraine</title>
	<link>https://www.mdpi.com/2225-1146/12/3/19</link>
	<description>Public debt is determined by borrowings undertaken by a government to finance its short- or long-term financial needs and to ensure that macroeconomic objectives are met within budgetary constraints. In Romania, public debt has been on an upward trajectory, a trend that has been further exacerbated in recent years by the COVID-19 pandemic. Additionally, a significant non-economic event influencing Romania&amp;amp;rsquo;s public debt is the war in Ukraine. To analyze this, a stochastic debt sustainability analysis was conducted, incorporating the unique characteristics of Romania&amp;amp;rsquo;s emerging market into the research methodology. The projections focused on achieving satisfactory results by following two lines of research. The first direction involved developing four scenarios to assess the risks presented by macroeconomic shocks. Particular emphasis was placed on an unusual negative shock, specifically the war in Ukraine, with forecasts indicating that the debt-to-GDP ratio could reach 102% by 2026. However, if policymakers implement discretionary measures, this level could be contained below 88%. The second direction of research aimed to establish the maximum safe limit of public debt for Romania, which was determined to be 70%. This threshold would allow the emerging economy to manage a reasonable level of risk without requiring excessive fiscal efforts to maintain long-term stability.</description>
	<pubDate>2024-07-05</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 12, Pages 19: Stochastic Debt Sustainability Analysis in Romania in the Context of the War in Ukraine</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/12/3/19">doi: 10.3390/econometrics12030019</a></p>
	<p>Authors:
		Gabriela Dobrotă
		Alina Daniela Voda
		</p>
	<p>Public debt is determined by borrowings undertaken by a government to finance its short- or long-term financial needs and to ensure that macroeconomic objectives are met within budgetary constraints. In Romania, public debt has been on an upward trajectory, a trend that has been further exacerbated in recent years by the COVID-19 pandemic. Additionally, a significant non-economic event influencing Romania&amp;amp;rsquo;s public debt is the war in Ukraine. To analyze this, a stochastic debt sustainability analysis was conducted, incorporating the unique characteristics of Romania&amp;amp;rsquo;s emerging market into the research methodology. The projections focused on achieving satisfactory results by following two lines of research. The first direction involved developing four scenarios to assess the risks presented by macroeconomic shocks. Particular emphasis was placed on an unusual negative shock, specifically the war in Ukraine, with forecasts indicating that the debt-to-GDP ratio could reach 102% by 2026. However, if policymakers implement discretionary measures, this level could be contained below 88%. The second direction of research aimed to establish the maximum safe limit of public debt for Romania, which was determined to be 70%. This threshold would allow the emerging economy to manage a reasonable level of risk without requiring excessive fiscal efforts to maintain long-term stability.</p>
	]]></content:encoded>

	<dc:title>Stochastic Debt Sustainability Analysis in Romania in the Context of the War in Ukraine</dc:title>
			<dc:creator>Gabriela Dobrotă</dc:creator>
			<dc:creator>Alina Daniela Voda</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics12030019</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2024-07-05</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2024-07-05</prism:publicationDate>
	<prism:volume>12</prism:volume>
	<prism:number>3</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>19</prism:startingPage>
		<prism:doi>10.3390/econometrics12030019</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/12/3/19</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/12/2/18">

	<title>Econometrics, Vol. 12, Pages 18: Investigation of Equilibrium in Oligopoly Markets with the Help of Tripled Fixed Points in Banach Spaces</title>
	<link>https://www.mdpi.com/2225-1146/12/2/18</link>
	<description>In the study we explore an oligopoly market for equilibrium and stability based on statistical data with the help of response functions rather than payoff maximization. To achieve this, we extend the concept of coupled fixed points to triple fixed points. We propose a new model that leads to generalized triple fixed points. We present a possible application of the generalized tripled fixed point model to the study of market equilibrium in an oligopolistic market dominated by three major competitors. The task of maximizing the payout functions of the three players is modified by the concept of generalized tripled fixed points of response functions. The presented model for generalized tripled fixed points of response functions is equivalent to Cournot payoff maximization, provided that the market price function and the three players&amp;amp;rsquo; cost functions are differentiable. Furthermore, we demonstrate that the contractive condition corresponds to the second-order constraints in payoff maximization. Moreover, the model under consideration is stable in the sense that it ensures the stability of the consecutive production process, as opposed to the payoff maximization model with which the market equilibrium may not be stable. A possible gap in the applications of the classical technique for maximization of the payoff functions is that the price function in the market may not be known, and any approximation of it may lead to the solution of a task different from the one generated by the market. We use empirical data from Bulgaria&amp;amp;rsquo;s beer market to illustrate the created model. The statistical data gives fair information on how the players react without knowing the price function, their cost function, or their aims towards a specific market. We present two models based on the real data and their approximations, respectively. 
The two models, although different, show similar behavior in terms of time and the stability of the market equilibrium. Thus, the notion of response functions and tripled fixed points seems to present a justified way of modeling market processes in oligopoly markets when searching whether the market has reached equilibrium and if this equilibrium is unique and stable in time.</description>
	<pubDate>2024-06-17</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 12, Pages 18: Investigation of Equilibrium in Oligopoly Markets with the Help of Tripled Fixed Points in Banach Spaces</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/12/2/18">doi: 10.3390/econometrics12020018</a></p>
	<p>Authors:
		Atanas Ilchev
		Vanya Ivanova
		Hristina Kulina
		Polina Yaneva
		Boyan Zlatanov
		</p>
	<p>In the study we explore an oligopoly market for equilibrium and stability based on statistical data with the help of response functions rather than payoff maximization. To achieve this, we extend the concept of coupled fixed points to triple fixed points. We propose a new model that leads to generalized triple fixed points. We present a possible application of the generalized tripled fixed point model to the study of market equilibrium in an oligopolistic market dominated by three major competitors. The task of maximizing the payout functions of the three players is modified by the concept of generalized tripled fixed points of response functions. The presented model for generalized tripled fixed points of response functions is equivalent to Cournot payoff maximization, provided that the market price function and the three players&amp;amp;rsquo; cost functions are differentiable. Furthermore, we demonstrate that the contractive condition corresponds to the second-order constraints in payoff maximization. Moreover, the model under consideration is stable in the sense that it ensures the stability of the consecutive production process, as opposed to the payoff maximization model with which the market equilibrium may not be stable. A possible gap in the applications of the classical technique for maximization of the payoff functions is that the price function in the market may not be known, and any approximation of it may lead to the solution of a task different from the one generated by the market. We use empirical data from Bulgaria&amp;amp;rsquo;s beer market to illustrate the created model. The statistical data gives fair information on how the players react without knowing the price function, their cost function, or their aims towards a specific market. We present two models based on the real data and their approximations, respectively. The two models, although different, show similar behavior in terms of time and the stability of the market equilibrium. 
Thus, the notion of response functions and tripled fixed points seems to present a justified way of modeling market processes in oligopoly markets when searching whether the market has reached equilibrium and if this equilibrium is unique and stable in time.</p>
	]]></content:encoded>

	<dc:title>Investigation of Equilibrium in Oligopoly Markets with the Help of Tripled Fixed Points in Banach Spaces</dc:title>
			<dc:creator>Atanas Ilchev</dc:creator>
			<dc:creator>Vanya Ivanova</dc:creator>
			<dc:creator>Hristina Kulina</dc:creator>
			<dc:creator>Polina Yaneva</dc:creator>
			<dc:creator>Boyan Zlatanov</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics12020018</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2024-06-17</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2024-06-17</prism:publicationDate>
	<prism:volume>12</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>18</prism:startingPage>
		<prism:doi>10.3390/econometrics12020018</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/12/2/18</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/12/2/17">

	<title>Econometrics, Vol. 12, Pages 17: Modeling the Economic Impact of the COVID-19 Pandemic Using Dynamic Panel Models and Seemingly Unrelated Regressions</title>
	<link>https://www.mdpi.com/2225-1146/12/2/17</link>
	<description>The importance of assessing and estimating the impact of the COVID-19 pandemic on financial markets and economic activity has attracted the interest of researchers and practitioners in recent years. The proposed study aims to explore the pandemic&amp;amp;rsquo;s impact on the economic activity of six Euro area economies. A class of dynamic panel data models and their corresponding Seemingly Unrelated Regression (SUR) models are developed and applied to model the economic activity of six Eurozone countries. This class of models allows for common and country-specific covariates to affect the real growth, as well as for cross-sectional dependence in the error processes. Estimation and inference for this class of panel models are based on both Bayesian and classical techniques. Our findings reveal that significant heterogeneity exists among the different economies with respect to the explanatory/predictive factors. The impact of the COVID-19 pandemic varied across the Euro area economies under study. Nonetheless, the outbreak of the COVID-19 pandemic profoundly affected real economic activity across all regions and countries. As an exogenous shock of such magnitude, it caused a sharp increase in overall uncertainty that spread quickly across all sectors of the global economy.</description>
	<pubDate>2024-06-14</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 12, Pages 17: Modeling the Economic Impact of the COVID-19 Pandemic Using Dynamic Panel Models and Seemingly Unrelated Regressions</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/12/2/17">doi: 10.3390/econometrics12020017</a></p>
	<p>Authors:
		Ioannis D. Vrontos
		John Galakis
		Ekaterini Panopoulou
		Spyridon D. Vrontos
		</p>
	<p>The importance of assessing and estimating the impact of the COVID-19 pandemic on financial markets and economic activity has attracted the interest of researchers and practitioners in recent years. The proposed study aims to explore the pandemic&amp;amp;rsquo;s impact on the economic activity of six Euro area economies. A class of dynamic panel data models and their corresponding Seemingly Unrelated Regression (SUR) models are developed and applied to model the economic activity of six Eurozone countries. This class of models allows for common and country-specific covariates to affect the real growth, as well as for cross-sectional dependence in the error processes. Estimation and inference for this class of panel models are based on both Bayesian and classical techniques. Our findings reveal that significant heterogeneity exists among the different economies with respect to the explanatory/predictive factors. The impact of the COVID-19 pandemic varied across the Euro area economies under study. Nonetheless, the outbreak of the COVID-19 pandemic profoundly affected real economic activity across all regions and countries. As an exogenous shock of such magnitude, it caused a sharp increase in overall uncertainty that spread quickly across all sectors of the global economy.</p>
	]]></content:encoded>

	<dc:title>Modeling the Economic Impact of the COVID-19 Pandemic Using Dynamic Panel Models and Seemingly Unrelated Regressions</dc:title>
			<dc:creator>Ioannis D. Vrontos</dc:creator>
			<dc:creator>John Galakis</dc:creator>
			<dc:creator>Ekaterini Panopoulou</dc:creator>
			<dc:creator>Spyridon D. Vrontos</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics12020017</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2024-06-14</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2024-06-14</prism:publicationDate>
	<prism:volume>12</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>17</prism:startingPage>
		<prism:doi>10.3390/econometrics12020017</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/12/2/17</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
        <item rdf:about="https://www.mdpi.com/2225-1146/12/2/16">

	<title>Econometrics, Vol. 12, Pages 16: Predicting the Direction of NEPSE Index Movement with News Headlines Using Machine Learning</title>
	<link>https://www.mdpi.com/2225-1146/12/2/16</link>
	<description>Predicting stock market movement direction is a challenging task due to its fuzzy, chaotic, volatile, nonlinear, and complex nature. However, with advancements in artificial intelligence, abundant data availability, and improved computational capabilities, creating robust models capable of accurately predicting stock market movement is now feasible. This study aims to construct a predictive model using news headlines to predict stock market movement direction. It conducts a comparative analysis of five supervised classification machine learning algorithms&amp;amp;mdash;logistic regression (LR), support vector machine (SVM), random forest (RF), extreme gradient boosting (XGBoost), and artificial neural network (ANN)&amp;amp;mdash;to predict the next day&amp;amp;rsquo;s movement direction of the close price of the Nepal Stock Exchange (NEPSE) index. Sentiment scores from news headlines are computed using the Valence Aware Dictionary for Sentiment Reasoning (VADER) and TextBlob sentiment analyzer. The models&amp;amp;rsquo; performance is evaluated based on sensitivity, specificity, accuracy, and the area under the receiver operating characteristic (ROC) curve (AUC). Experimental results reveal that all five models perform equally well when using sentiment scores from the TextBlob analyzer. Similarly, all models exhibit almost identical performance when using sentiment scores from the VADER analyzer, except for minor variations in AUC in SVM vs. LR and SVM vs. ANN. Moreover, models perform relatively better when using sentiment scores from the TextBlob analyzer compared to the VADER analyzer. These findings are further validated through statistical tests.</description>
	<pubDate>2024-06-11</pubDate>

	<content:encoded><![CDATA[
	<p><b>Econometrics, Vol. 12, Pages 16: Predicting the Direction of NEPSE Index Movement with News Headlines Using Machine Learning</b></p>
	<p>Econometrics <a href="https://www.mdpi.com/2225-1146/12/2/16">doi: 10.3390/econometrics12020016</a></p>
	<p>Authors:
		Keshab Raj Dahal
		Ankrit Gupta
		Nawa Raj Pokhrel
		</p>
	<p>Predicting stock market movement direction is a challenging task due to its fuzzy, chaotic, volatile, nonlinear, and complex nature. However, with advancements in artificial intelligence, abundant data availability, and improved computational capabilities, creating robust models capable of accurately predicting stock market movement is now feasible. This study aims to construct a predictive model using news headlines to predict stock market movement direction. It conducts a comparative analysis of five supervised classification machine learning algorithms&amp;amp;mdash;logistic regression (LR), support vector machine (SVM), random forest (RF), extreme gradient boosting (XGBoost), and artificial neural network (ANN)&amp;amp;mdash;to predict the next day&amp;amp;rsquo;s movement direction of the close price of the Nepal Stock Exchange (NEPSE) index. Sentiment scores from news headlines are computed using the Valence Aware Dictionary for Sentiment Reasoning (VADER) and TextBlob sentiment analyzer. The models&amp;amp;rsquo; performance is evaluated based on sensitivity, specificity, accuracy, and the area under the receiver operating characteristic (ROC) curve (AUC). Experimental results reveal that all five models perform equally well when using sentiment scores from the TextBlob analyzer. Similarly, all models exhibit almost identical performance when using sentiment scores from the VADER analyzer, except for minor variations in AUC in SVM vs. LR and SVM vs. ANN. Moreover, models perform relatively better when using sentiment scores from the TextBlob analyzer compared to the VADER analyzer. These findings are further validated through statistical tests.</p>
	]]></content:encoded>

	<dc:title>Predicting the Direction of NEPSE Index Movement with News Headlines Using Machine Learning</dc:title>
			<dc:creator>Keshab Raj Dahal</dc:creator>
			<dc:creator>Ankrit Gupta</dc:creator>
			<dc:creator>Nawa Raj Pokhrel</dc:creator>
		<dc:identifier>doi: 10.3390/econometrics12020016</dc:identifier>
	<dc:source>Econometrics</dc:source>
	<dc:date>2024-06-11</dc:date>

	<prism:publicationName>Econometrics</prism:publicationName>
	<prism:publicationDate>2024-06-11</prism:publicationDate>
	<prism:volume>12</prism:volume>
	<prism:number>2</prism:number>
	<prism:section>Article</prism:section>
	<prism:startingPage>16</prism:startingPage>
		<prism:doi>10.3390/econometrics12020016</prism:doi>
	<prism:url>https://www.mdpi.com/2225-1146/12/2/16</prism:url>
	
	<cc:license rdf:resource="https://creativecommons.org/licenses/by/4.0/"/>
</item>
    
<cc:License rdf:about="https://creativecommons.org/licenses/by/4.0/">
	<cc:permits rdf:resource="https://creativecommons.org/ns#Reproduction" />
	<cc:permits rdf:resource="https://creativecommons.org/ns#Distribution" />
	<cc:permits rdf:resource="https://creativecommons.org/ns#DerivativeWorks" />
</cc:License>

</rdf:RDF>
