%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% QMLE estimation of the excess (with respect to the risk-free rate) return process:
% The conditional mean is driven by the dividend yield (to evaluate predictability)
% and the error terms are given by a GARCH(1,1) process
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
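% The model being estimated (restating the recursions coded in garchloglik below):
% r(t) = alpha + beta*dp(t-1) + eps(t),   eps(t)|F(t-1) ~ (0, h(t))
% h(t) = mu + delta*h(t-1) + phi*eps(t-1)^2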
% Clean the environment
close all;
clear all;
% Upload the data
data = readtable('predictability.xls');
ret_m = log(1+data.retdny); % Continuously-compounded market returns
ret_f = log(1+data.tbill); % Continuously-compounded riskless returns
dp = data.dyny; % The dividend yield
r = ret_m - ret_f; % Excess continuously-compounded returns (risk premia)
T = length(r);
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Let us first estimate the conditional mean by least squares:
% This is useful for two reasons:
% (1) It is helpful to have a benchmark.
% (2) You need starting values for the QMLE procedure. It therefore makes
% sense to use the OLS estimates as starting values.
% We will also compute OLS standard errors: again, to have a benchmark.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
X = [ones(T-1,1),dp(1:end-1)];
beta = inv(X'*X)*(X'*r(2:end));
OLSres = r(2:end) - X*beta;
sigma2 = (OLSres'*OLSres)/(length(OLSres)-2);
Var_beta = sigma2*inv(X'*X);
OLStstat = beta./sqrt(diag(Var_beta));
OLStstat = [OLStstat;0;0;0]; % Pad with zeros: the 3 GARCH parameters have no OLS counterpart
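% As a cross-check (assuming the Statistics and Machine Learning Toolbox is
% available), fitlm should reproduce beta and the OLS t-statistics:
% mdl = fitlm(dp(1:end-1), r(2:end));
% disp(mdl.Coefficients)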
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Always plot the data (when you can):
% Do we need a time-varying conditional mean?
% Do we need a time-varying conditional variance?
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
plot(r(2:end));
hold on;
plot(beta(1)+beta(2)*dp(1:end-1));
hold off;
title('Excess continuously-compounded returns and their conditional mean')
ylabel('returns and conditional expected returns')
xlabel('time')
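% A quick look at volatility clustering (my addition, motivating the GARCH
% errors): if the conditional variance is time-varying, the squared OLS
% residuals should be positively autocorrelated.
e2 = OLSres.^2;
rho = corrcoef(e2(1:end-1),e2(2:end));
fprintf('First-order autocorrelation of squared OLS residuals: %4.3f\n',rho(1,2));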
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Let us now maximize the log-likelihood and find the parameter estimates
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
parameter_names = ["alpha";"beta";"mu";"delta";"phi"];
% Initial conditions (I am just choosing reasonable values, but you can of course change them and see what happens)
theta_initial = [beta(1),beta(2),0,0.5,0.5];
options = optimset('Display','none','TolFun',1e-20,'TolX',1e-20);
theta = fminsearch(@(theta) garchloglik(theta,r,dp),theta_initial,options);
theta = theta(:); % Make sure theta is a column vector for the t-statistics below
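% A minimal sanity check on the estimates (my addition): fminsearch is
% unconstrained, so verify that the GARCH parameters are admissible.
if theta(3) <= 0 || theta(4) < 0 || theta(5) < 0
    warning('The estimated GARCH parameters violate the positivity constraints.');
end
if theta(4)+theta(5) >= 1
    warning('delta+phi >= 1: the estimated variance process is not covariance stationary.');
end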
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Let us now do inference: we need the asymptotic variance/covariance
% We will compute it in its sandwich format.
% Recall: QMLE requires both the gradient and the hessian
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
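% In formulas: Avar(theta_hat) = (1/T)*A^(-1)*B*A^(-1), where
% A = -plim (1/T)*sum_t hessian_t   (minus the average hessian) and
% B =  plim (1/T)*sum_t gradient_t*gradient_t'   (the outer product of gradients).
% Under correct specification A = B (the information matrix equality), and the
% sandwich collapses to the usual MLE variance (1/T)*B^(-1), computed below.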
sum4 = zeros(5,5); % Accumulates the outer products of the gradients (Omega_0)
sum5 = zeros(5,5); % Accumulates the hessians
h = var(r)*ones(T-1,1); % We are initializing the conditional variance at the unconditional level
grad1_h = zeros(T-1,1); % We are initializing the first derivatives at zero
grad2_h = zeros(T-1,1);
grad3_h = zeros(T-1,1);
hes12_h = zeros(T-1,1); % We are initializing the second derivatives at zero
hes22_h = zeros(T-1,1);
hes23_h = zeros(T-1,1);
for t = 3:T-1 % The recursions need r(t-1) and dp(t-2), so the loop starts at t=3
h(t) = theta(3)+theta(4)*h(t-1)+theta(5)*(r(t-1)-(theta(1)+theta(2)*dp(t-2)))^2;
% Compute the gradient: this is the first piece (2 rows) having to do with the conditional mean
grad1 = [(r(t) - (theta(1)+theta(2)*dp(t-1)))/h(t); (r(t) - (theta(1)+theta(2)*dp(t-1)))*dp(t-1)/h(t)]; % First piece
% Compute the gradient: this is the second piece (3 rows) having to do with the conditional variance
% First, we have to have a recursion over the first derivatives of h with respect to the parameters
grad1_h(t) = 1 + theta(4)*grad1_h(t-1);
grad2_h(t) = h(t-1) + theta(4)*grad2_h(t-1);
grad3_h(t) = (r(t-1)-(theta(1)+theta(2)*dp(t-2)))*(r(t-1)-(theta(1)+theta(2)*dp(t-2))) + theta(4)*grad3_h(t-1);
grad_h = [grad1_h(t); grad2_h(t); grad3_h(t)];
grad = -(1/2)*1/h(t)+(1/2)*(r(t)-(theta(1)+theta(2)*dp(t-1)))*(r(t)-(theta(1)+theta(2)*dp(t-1)))/(h(t)^2); % dl(t)/dh(t): derivative of the period-t log-likelihood with respect to h(t)
grad2 = grad.*grad_h; % Second piece
gradient = [grad1;grad2]; % Full gradient first piece on top of the second piece
sum4 = sum4+gradient*gradient'; % Loop to compute Omega_0
% Compute the hessian:
% First, we need to have a recursion over some of the second derivatives of h with respect to the parameters
v = (1/2)*(1/(h(t)^2)) - ((r(t)-(theta(1)+theta(2)*dp(t-1)))^2)/(h(t)^3); % d2l(t)/dh(t)^2: second derivative with respect to h(t)
hes12_h(t) = grad1_h(t-1) + theta(4)*hes12_h(t-1);
hes22_h(t) = 2*grad2_h(t-1) + theta(4)*hes22_h(t-1);
hes23_h(t) = grad3_h(t-1) + theta(4)*hes23_h(t-1);
hessian = [-1/h(t), -dp(t-1)/h(t), 0, 0, 0;
-dp(t-1)/h(t), -dp(t-1)^2/h(t),0,0,0;
0, 0, v*(grad1_h(t))^2, v*(grad1_h(t))*(grad2_h(t)) + grad*hes12_h(t), v*(grad1_h(t))*(grad3_h(t));
0, 0, v*(grad2_h(t))*(grad1_h(t))+grad*hes12_h(t), v*(grad2_h(t))^2 + grad*hes22_h(t), v*(grad2_h(t))*(grad3_h(t))+grad*hes23_h(t);
0, 0, v*(grad3_h(t))*(grad1_h(t)), v*(grad3_h(t))*(grad2_h(t))+grad*hes23_h(t),v*(grad3_h(t))^2];
sum5 = sum5+hessian;
end
asy_varMLE = (1/T)*inv((1/T)*sum4); % Asymptotic variance under correct specification
t_statsMLE = theta./sqrt(diag(asy_varMLE));
asy_varQMLE = (1/T)*inv(-(1/T)*sum5)*((1/T)*sum4)*inv(-(1/T)*sum5); % QMLE asymptotic variance
t_statsQMLE = theta./sqrt(diag(asy_varQMLE));
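% A quick specification diagnostic (my addition): under correct
% specification the MLE and QMLE standard errors should be close; large
% discrepancies suggest the normality assumption is misspecified.
se_ratio = sqrt(diag(asy_varQMLE))./sqrt(diag(asy_varMLE));
disp('Ratio of QMLE to MLE standard errors:');
disp(se_ratio');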
outcome = table(parameter_names,theta,t_statsMLE,t_statsQMLE,OLStstat, ...
'VariableNames',{'parameters','Estimates_QMLE','t_statistics_MLE','t_statistics_QMLE','t_statistics_OLS'});
disp(outcome);
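% An illustrative summary of the estimated variance dynamics (my addition):
% deviations of h(t) from the unconditional level decay at rate delta+phi,
% so the half-life of a volatility shock is log(0.5)/log(delta+phi) periods
% (meaningful only when 0 < delta+phi < 1).
persistence = theta(4)+theta(5);
if persistence > 0 && persistence < 1
    fprintf('Volatility persistence (delta+phi): %4.3f, shock half-life: %4.1f periods\n', ...
        persistence,log(0.5)/log(persistence));
end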
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% This is the function which delivers the log-likelihood given an assumption
% of normality on the error terms
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function y=garchloglik(theta,r,dp)
h1 = var(r); % Initial value of the conditional variance: we use the unconditional
% empirical variance from the data
T = length(r);
alpha = theta(1);
beta = theta(2);
mu = theta(3);
delta = theta(4);
phi = theta(5);
sum1=-T/2*log(2*pi);
sum2=-0.5*log(h1);
sum3= -(r(2)-(alpha+beta*dp(1)))*(r(2)-(alpha+beta*dp(1)))/(2*h1);
h = h1; % Start the variance recursion at the unconditional level
for t = 3:T % The recursion needs r(t-1) and dp(t-2), so it starts at t=3
h = mu+delta*h+phi*(r(t-1)-(alpha+beta*dp(t-2)))*(r(t-1)-(alpha+beta*dp(t-2)));
sum2=sum2-0.5*log(h);
stand_res=(r(t)-(alpha+beta*dp(t-1)))*(r(t)-(alpha+beta*dp(t-1)))/(2*h);
sum3=sum3-stand_res;
end
y = -(1/T)*(sum1+sum2+sum3); % Note that I changed the sign of the likelihood. We are maximizing, not minimizing.
% The function garchloglik is however fed into a
% fminsearch function, which minimizes.
end
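
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% A hypothetical helper (my addition, not part of the original script) to
% cross-check analytical derivatives: it computes the gradient of the average
% log-likelihood by central finite differences.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
function g = numgrad(theta,r,dp)
k = length(theta);
g = zeros(k,1);
step = 1e-6;
for i = 1:k
thp = theta; thp(i) = thp(i)+step;
thm = theta; thm(i) = thm(i)-step;
% garchloglik returns MINUS the average log-likelihood, hence the sign flip
g(i) = -(garchloglik(thp,r,dp)-garchloglik(thm,r,dp))/(2*step);
end
end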