# WEIBULL MODEL ANALYSIS #
#------
# Data: n = 30 observed lifetimes.
n <- 30
x <- c(23, 261, 87, 7, 120, 14, 62, 47, 225, 71, 246, 21, 42, 20, 5,
  12, 120, 11, 3, 14, 71, 11, 14, 11, 16, 90, 1, 16, 52, 95)
#######################################################
# Newton-Raphson algorithm to maximize a function f.
#
# f:   function being maximized (used only for progress printing)
# D1f: first derivative of f
# D2f: second derivative of f
# x0:  starting point
# p:   desired relative precision for the stopping rule
#
# Returns cbind(vgorro, Varv): the located mode and the asymptotic
# variance -1/D2f evaluated at the mode.
nr <- function(f, D1f, D2f, x0, p) {
  i <- 0
  xt <- x0
  valor <- 0
  xt1 <- xt - D1f(xt) / D2f(xt)
  # Iterate until the relative change of the iterate falls below p.
  while (abs((xt - xt1) / xt) > p) {
    i <- i + 1
    xt <- xt1
    xt1 <- xt - D1f(xt) / D2f(xt)
    valor <- f(xt1)
    print(cbind(i, xt1, valor))
  }
  vgorro <- xt1
  # Asymptotic variance: negative inverse of the second derivative
  # at the mode (observed information).
  Varv <- -1 / D2f(vgorro)
  cbind(vgorro, Varv)
}
#---------------------------------
# Asymptotic normal approximation
# Unnormalized posterior of v = log(theta) for the Weibull model
# (scale parameter integrated out analytically).
# Uses the globals n (sample size) and x (data).
fv <- function(v) {
  (exp((n - 1) * v) * prod(x)^exp(v)) / sum(x^exp(v))^n
}
# First derivative of g(v) = log fv(v).
# Uses the globals n and x.
gprima <- function(v) {
  (n - 1) + exp(v) * sum(log(x)) -
    (n * exp(v) * sum(x^exp(v) * log(x))) / sum(x^exp(v))
}
# Second derivative of g(v) = log fv(v).
# Uses the globals n and x.
gbiprima <- function(v) {
  exp(v) * sum(log(x)) -
    (n * exp(v) * sum(x^exp(v) * log(x))) / sum(x^exp(v)) -
    (n * exp(2 * v) * sum(x^exp(v) * log(x)^2)) / sum(x^exp(v)) +
    (n * exp(2 * v) * sum(x^exp(v) * log(x))^2) / sum(x^exp(v))^2
}
# Maximize g via Newton-Raphson: ana = c(mode, asymptotic variance).
ana <- nr(fv, gprima, gbiprima, 0, 0.0000001)
vgorro <- ana[1]
vgorro
# > [1] -0.1781658
Varv <- ana[2]
Varv
# > [1] 0.02015616
#-----------------------------------------------
# Normalization constant for p(theta|x) via the
# Laplace approximation
Sigvg <- n * Varv
Is <- (2 * pi / n)^0.5 * Sigvg^0.5 * fv(vgorro)
Is
# > [1] 1.277015e-054
#---------------------------------------
# E(theta|x) via the Laplace approximation
# (standard form): theta = exp(v) at the posterior mode of v.
Eths <- exp(vgorro)
Eths
# > [1] 0.8368037
#---------------------------------------
# E(theta|x) via the Laplace approximation
# (exponential form)
# Numerator integrand exp(v) * fv(v) for E(exp(v)|x).
# Uses the globals n and x.
fvp <- function(v) {
  (exp(v) * exp((n - 1) * v) * prod(x)^exp(v)) / sum(x^exp(v))^n
}
# First derivative of log fvp(v); equals gprima(v) + 1.
# Uses the globals n and x.
gpp <- function(v) {
  1 + (n - 1) + exp(v) * sum(log(x)) -
    (n * exp(v) * sum(x^exp(v) * log(x))) / sum(x^exp(v))
}
# Second derivative of log fvp(v); identical to gbiprima(v) because
# the extra exp(v) factor contributes a linear term in v.
# Uses the globals n and x.
gbpp <- function(v) {
  exp(v) * sum(log(x)) -
    (n * exp(v) * sum(x^exp(v) * log(x))) / sum(x^exp(v)) -
    (n * exp(2 * v) * sum(x^exp(v) * log(x)^2)) / sum(x^exp(v)) +
    (n * exp(2 * v) * sum(x^exp(v) * log(x))^2) / sum(x^exp(v))^2
}
# Mode and variance of the exponential-form integrand, then the
# Laplace ratio estimate E(theta|x) = Sthe / S1.
le <- nr(fvp, gpp, gbpp, 1, 0.000001)
vgp <- le[1]
vgp
# > [1] -0.1583074
Sigvgp <- n * le[2]
Sigvgp
# > [1] 0.5870183
Sthe <- exp(vgp) * (2 * pi / n)^0.5 * Sigvgp^0.5 * fv(vgp)
Sthe
# > [1] 1.063443e-054
S1 <- Is
Ethe <- Sthe / S1
Ethe
# > [1] 0.8327572
#--------------------------------------------------------
# Predictive density p^(y|x) via the Laplace approximation
# (standard form)
# Normalization constant obtained analytically.
# Uses the globals n, x and vgorro (posterior mode of v).
py <- function(y) {
  w <- exp(vgorro)
  n * ((w * y^(w - 1) * sum(x^w)^n) / (y^w + sum(x^w))^(n + 1))
}
# Plot f tabulated at np points on [a, b] in a new graphics device.
# nombre is used as the plot title.
grafica <- function(f, a, b, np, nombre) {
  # dev.new() is portable; win.graph() exists only on Windows.
  dev.new()
  x <- seq(a, b, length = np)
  tabula <- x
  for (i in seq_len(np)) {
    tabula[i] <- f(x[i])
  }
  plot(x, tabula, xlab = "y", ylab = "p(y|x)", type = "l")
  title(nombre)
}
grafica(py, 0, 500, 100, "Densidad predictiva: Laplace")
################################################
# Trapezoidal rule
# Approximates the integral of f over [a, b] with N subintervals.
trapezoidal <- function(f, a, b, N) {
  sumaf <- 0
  k <- N - 1
  # seq_len(k) is empty when N == 1 (the legacy 1:k would wrongly
  # iterate over c(1, 0) in that case).
  for (i in seq_len(k)) {
    sumaf <- sumaf + f(a + (i * (b - a)) / N)
  }
  ((b - a) / N) * ((f(a) + f(b)) / 2 + sumaf)
}
#-----------------------------------------------
# Normalization constant for p(theta|x) via the
# trapezoidal rule (N = 1000), integrating over
# the mode +/- 5 posterior standard deviations.
N <- 1000
a <- vgorro - 5 * sqrt(Varv)
b <- vgorro + 5 * sqrt(Varv)
Cnpth <- trapezoidal(fv, a, b, N)
Cnpth
# > [1] 1.283785e-054
#--------------------------------------------
# Normalization constant for p^(y|x) via the
# trapezoidal rule (N = 1000)
# Integrand after the change of variable y = u/(1 - u), which maps
# (0, 1) onto (0, Inf); (1 - u)^(-2) is the Jacobian of the map.
# Uses the globals n, x and vgorro.
fu <- function(u) {
  w <- exp(vgorro)
  y <- u / (1 - u)
  ((w * y^(w - 1) * sum(x^w)^n) / (y^w + sum(x^w))^(n + 1)) * (1 - u)^(-2)
}
Cnpy <- trapezoidal(fu, 0.00001, 0.99999, N)
Cnpy
# > [1] 0.03333357
#################################################
# Iterative Naylor-Smith method based on the
# Gauss-Hermite rule.
# 3-point Gauss-Hermite estimate of the normalization constant of
# the density fv, iteratively re-centering the rule on the current
# estimated mean (mu) and variance (sig2) until the constant
# stabilizes to relative precision p.
#
# fv:  unnormalized density
# mu0: starting mean
# s20: starting variance
# p:   relative precision for the stopping rule
ghc3 <- function(fv, mu0, s20, p) {
  # 3-point Gauss-Hermite nodes and weights.
  r1 <- -1.2247448714
  r2 <- 0
  r3 <- -r1
  w1 <- 0.29540898
  w2 <- 1.1816359
  w3 <- w1
  # Initialize from the supplied starting values. (Bug fix: the
  # original ignored mu0/s20 and read globals mu/sig2 instead; the
  # call sites pass the same values, so behavior is as intended.)
  mu <- mu0
  sig2 <- s20
  i <- 0
  cn <- -1
  cnm1 <- 1
  while (abs((cn - cnm1) / cn) > p) {
    i <- i + 1
    cnm1 <- cn
    # Adjusted weights u_j = sqrt(2*sig2) * w_j * exp(r_j^2).
    u1 <- sqrt(2 * sig2) * w1 * exp(r1 * r1)
    u2 <- sqrt(2 * sig2) * w2 * exp(r2 * r2)
    u3 <- sqrt(2 * sig2) * w3 * exp(r3 * r3)
    # Evaluation points eta_j = mu + sqrt(2*sig2) * r_j.
    eta1 <- mu + sqrt(2 * sig2) * r1
    eta2 <- mu + sqrt(2 * sig2) * r2
    eta3 <- mu + sqrt(2 * sig2) * r3
    # Normalization constant, mean and second moment of fv.
    cn <- u1 * fv(eta1) + u2 * fv(eta2) + u3 * fv(eta3)
    muh <- (u1 * eta1 * fv(eta1) + u2 * eta2 * fv(eta2) +
      u3 * eta3 * fv(eta3)) / cn
    e2h <- (u1 * eta1 * eta1 * fv(eta1) + u2 * eta2 * eta2 * fv(eta2) +
      u3 * eta3 * eta3 * fv(eta3)) / cn
    sig2h <- e2h - muh * muh
    # Re-center the rule for the next iteration.
    mu <- muh
    sig2 <- sig2h
    print(cbind(i, cn, mu, sig2))
  }
  cn
}
# 5-point Gauss-Hermite version of the Naylor-Smith iteration for
# the normalization constant of the density fv.
#
# fv:  unnormalized density
# mu0: starting mean
# s20: starting variance
# p:   relative precision for the stopping rule
ghc5 <- function(fv, mu0, s20, p) {
  # 5-point Gauss-Hermite nodes and weights.
  r1 <- -2.0201828705
  r2 <- -0.9585724646
  r3 <- 0
  r4 <- -r2
  r5 <- -r1
  w1 <- 0.019953242
  w2 <- 0.39361932
  w3 <- 0.94530872
  w4 <- w2
  w5 <- w1
  # Initialize from the supplied starting values. (Bug fix: the
  # original ignored mu0/s20 and read globals mu/sig2 instead; the
  # call sites pass the same values, so behavior is as intended.)
  mu <- mu0
  sig2 <- s20
  i <- 0
  cn <- -1
  cnm1 <- 1
  while (abs((cn - cnm1) / cn) > p) {
    i <- i + 1
    cnm1 <- cn
    # Adjusted weights u_j = sqrt(2*sig2) * w_j * exp(r_j^2).
    u1 <- sqrt(2 * sig2) * w1 * exp(r1 * r1)
    u2 <- sqrt(2 * sig2) * w2 * exp(r2 * r2)
    u3 <- sqrt(2 * sig2) * w3 * exp(r3 * r3)
    u4 <- sqrt(2 * sig2) * w4 * exp(r4 * r4)
    u5 <- sqrt(2 * sig2) * w5 * exp(r5 * r5)
    # Evaluation points eta_j = mu + sqrt(2*sig2) * r_j.
    eta1 <- mu + sqrt(2 * sig2) * r1
    eta2 <- mu + sqrt(2 * sig2) * r2
    eta3 <- mu + sqrt(2 * sig2) * r3
    eta4 <- mu + sqrt(2 * sig2) * r4
    eta5 <- mu + sqrt(2 * sig2) * r5
    # Normalization constant, mean and second moment of fv.
    cn <- u1 * fv(eta1) + u2 * fv(eta2) + u3 * fv(eta3) +
      u4 * fv(eta4) + u5 * fv(eta5)
    muh <- (u1 * eta1 * fv(eta1) + u2 * eta2 * fv(eta2) +
      u3 * eta3 * fv(eta3) + u4 * eta4 * fv(eta4) +
      u5 * eta5 * fv(eta5)) / cn
    e2h <- (u1 * eta1 * eta1 * fv(eta1) + u2 * eta2 * eta2 * fv(eta2) +
      u3 * eta3 * eta3 * fv(eta3) + u4 * eta4 * eta4 * fv(eta4) +
      u5 * eta5 * eta5 * fv(eta5)) / cn
    sig2h <- e2h - muh * muh
    # Re-center the rule for the next iteration.
    mu <- muh
    sig2 <- sig2h
    print(cbind(i, cn, mu, sig2))
  }
  cn
}
#-----------------------------------------------
# Normalization constant for p(theta|x) via the
# Gauss-Hermite rule (N = 3), started from the
# posterior mode and asymptotic variance.
mu <- vgorro
sig2 <- Varv
fv <- function(v) {
  (exp((n - 1) * v) * prod(x)^exp(v)) / sum(x^exp(v))^n
}
Cnpthgh <- ghc3(fv, mu, sig2, 0.0000001)
Cnpthgh
# > [1] 1.278927e-054
#--------------------------------------------
# Normalization constant for p^(y|x) via the
# Gauss-Hermite rule (N = 5)
mu <- 3.5
sig2 <- 2
# Predictive integrand on the log scale yr = log(y); the exp(yr)
# Jacobian is absorbed into the numerator.
# Uses the globals n, x and vgorro.
pyr <- function(yr) {
  wh <- exp(vgorro)
  (wh * exp(yr * wh) * sum(x^wh)^n) / (exp(yr * wh) + sum(x^wh))^(n + 1)
}
Cnpygh <- ghc5(pyr, mu, sig2, 0.0000001)
Cnpygh
# > [1] 0.03253937
###################################################################
# Monte Carlo (importance sampling)
# Estimates E[u(V)] where V has density f, using a normal
# importance distribution with mean m and variance var.
#
# u:   function whose expected value is required
# f:   density with respect to which the expectation is taken
# N:   size of the simulated sample
# m:   mean of the normal importance density
# var: variance of the normal importance density
importancia <- function(u, f, N, m, var) {
  s <- var^0.5
  muestra <- numeric(N)
  for (i in seq_len(N)) {
    aleatorio <- rnorm(1, m, s)
    # Importance-weighted draw: target density over proposal density.
    muestra[i] <- (u(aleatorio) * f(aleatorio)) / dnorm(aleatorio, m, s)
  }
  mean(muestra)
}
#----------------------------------------
# E(w|x) via Monte Carlo
# theta = exp(v); pv is the normalized posterior of v (normalized
# by the trapezoidal constant Cnpth).
u <- function(v) {
  exp(v)
}
pv <- function(v) {
  (1 / Cnpth) * exp((n - 1) * v + exp(v) * sum(log(x)) - n * log(sum(x^exp(v))))
}
N <- 5000
Ethmc <- importancia(u, pv, N, vgorro, Varv)
Ethmc
# > [1] 0.8321348
###################################################################
# Metropolis-Hastings algorithm (independence sampler)
#
# f:   target density to sample from
# N:   sample size (N chains are updated in parallel)
# R:   number of iterations
# m:   mean of the normal candidate distribution
# var: variance of the normal candidate distribution
#
# Returns a list with mv (final sample of size N) and me (an R x 3
# matrix of per-iteration means of v, v^2 and exp(v)).
metro <- function(f, N, R, m, var) {
  sal <- list(mv = 0, me = 0)
  v0 <- rnorm(N, m, sqrt(var))
  vme <- numeric(R)
  v2me <- numeric(R)
  thme <- numeric(R)
  # Metropolis iterations
  for (i in seq_len(R)) {
    v1 <- rnorm(N, m, sqrt(var))
    # Ratios target/candidate for the proposal and the current state.
    w1 <- f(v1) / dnorm(v1, m, sqrt(var))
    w0 <- f(v0) / dnorm(v0, m, sqrt(var))
    alfa <- w1 / w0
    u <- runif(N, 0, 1)
    # Accept the proposal where u < alfa, keep the state otherwise.
    aux <- ifelse(u < alfa, v1, v0)
    v0 <- aux
    vme[i] <- mean(v0)
    v2me[i] <- mean(v0 * v0)
    thme[i] <- mean(exp(v0))
    print(i)
  }
  sal$mv <- v0
  sal$me <- cbind(vme, v2me, thme)
  sal
}
# Vectorized (in v) unnormalized posterior of v, used as the
# Metropolis target. Uses the globals n and x.
fvm <- function(v) {
  k <- length(v)
  c <- numeric(k)
  # Accumulate sum_i x[i]^exp(v) elementwise over the data.
  for (i in seq_len(n)) {
    c <- c + x[i]^exp(v)
  }
  exp((n - 1) * v + exp(v) * sum(log(x)) - n * log(c))
}
# Run the sampler: 1000 parallel chains for 1100 iterations,
# candidates centered at the posterior mode.
N <- 1000
R <- 1100
mm <- metro(fvm, N, R, vgorro, Varv)
#------------
# Diagnostics
# Discard the first R/11 iterations as burn-in and plot ergodic
# (cumulative) means of v, v^2 and theta.
ITC <- trunc(R / 11)
mcal <- mm$me
mgra <- mcal[(ITC + 1):R, 1:3]
merg <- cbind(cumsum(mgra[, 1]), cumsum(mgra[, 2]), cumsum(mgra[, 3])) / (1:(R - ITC))
# dev.new() is portable; win.graph() exists only on Windows.
dev.new()
par(mfrow = c(2, 2))
plot(merg[, 1], xlab = "Iteracion", ylab = "v", type = "l")
title("Promedios ergodicos: v")
plot(merg[, 2], xlab = "Iteracion", ylab = "v^2", type = "l")
title("Promedios ergodicos: v^2")
plot(merg[, 3], xlab = "Iteracion", ylab = "theta", type = "l")
title("Promedios ergodicos: theta")
#------------------------------------------------
# Histogram and estimated posterior density of theta
# dev.new() is portable; win.graph() exists only on Windows.
dev.new()
mtheta <- exp(mm$mv)
hist(mtheta, xlab = "theta", ylab = "p(theta|x)", nclass = 20, probability = TRUE)
denth <- density(mtheta, width = 0.2)
lines(denth, lty = 1)
title("Distribucion final de theta")
#-------------------------------------------
# Sample of lambda via conditional sampling
# Given a sample w of theta values, draw one lambda per element
# from its conditional Gamma posterior with shape n and rate
# sum(x^w[i]). Uses the globals n and x.
genlam <- function(w) {
  r <- length(w)
  b <- numeric(r)
  lambda <- numeric(r)
  for (i in seq_len(r)) {
    a <- n
    b[i] <- sum(x^w[i])
    # rgamma(1, a) is Gamma(shape = a, rate = 1); dividing by b[i]
    # rescales to rate b[i].
    lambda[i] <- rgamma(1, a) / b[i]
  }
  lambda
}
mlamb <- genlam(mtheta)
#-------------------------------------------
# Posterior means and variances of theta and lambda
c(mean(mtheta), var(mtheta))
# > [1] 0.83497586 0.01428637
c(mean(mlamb), var(mlamb))
# > [1] 0.0402371634 0.0004886204
#--------------------------------------------------
# Predictive density p(y|x) via Metropolis-Hastings
# Weibull density with shape th and scale-type parameter l:
# f(x) = l * th * x^(th - 1) * exp(-l * x^th).
weibull <- function(x, th, l) {
  l * th * x^(th - 1) * exp(-l * x^th)
}
# Monte Carlo estimate of the predictive density at y: average of
# the Weibull density over the posterior sample (mtheta, mlamb)
# of size N (all read from globals).
predic <- function(y) {
  suma <- 0
  for (i in seq_len(N)) {
    suma <- suma + weibull(y, mtheta[i], mlamb[i])
  }
  suma / N
}
#--------
# Plot overlay
# Add f, tabulated at np points on [a, b], to the current plot
# using line type ltype.
grafsobre <- function(f, a, b, np, ltype) {
  x <- seq(a, b, length = np)
  tabula <- x
  for (i in seq_len(np)) {
    tabula[i] <- f(x[i])
  }
  lines(x, tabula, lty = ltype)
}
grafica(predic, 0, 500, 100, "Densidad predictiva: Monte Carlo")
#----------------------------------------
# Compare the Laplace approximation with
# the Metropolis-Hastings one
grafica(py, 0, 500, 100, "Densidad predictiva")
grafsobre(predic, 0, 500, 100, 2)
# legend placed interactively via locator(1)
legend(locator(1),
  legend = c("Aproximacion de Laplace", "Aproximacion de Monte Carlo"),
  lty = c(1, 2), cex = 0.65, bty = "n")