
>

> sink("gev.txt")

> cat("

+ model {

+ # Likelihood

+ for (i in 1:n) {

+ y[i] ~ dgev(mu, sigma,eta)

+ }

+ # Prior

+ # dnorm(mean, precision)

+ mu ~ dnorm(986,0.0003)

+ sigma ~ dnorm(294,0.0004)

+ eta ~ dnorm(0.29,48)

+ yp3<-mu+((sigma/eta)*(pow(-log(1-(1/3)),-eta)-1))

+ yp6<-mu+((sigma/eta)*(pow(-log(1-(1/6)),-eta)-1))

+ yp9<-mu+((sigma/eta)*(pow(-log(1-(1/9)),-eta)-1))

+ yp18<-mu+((sigma/eta)*(pow(-log(1-(1/18)),-eta)-1))

+ yp24<-mu+((sigma/eta)*(pow(-log(1-(1/24)),-eta)-1))

+ }

+ ",fill=TRUE)

> sink()
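
The yp3, ..., yp24 nodes defined in the model above are GEV return levels, y_T = mu + (sigma/eta)*((-log(1 - 1/T))^(-eta) - 1), for return periods T = 3, 6, 9, 18 and 24. As a quick check outside OpenBUGS, the same quantity can be evaluated in plain R; this is a sketch only, and the helper name gev_return_level is not part of the session:

gev_return_level <- function(period, mu, sigma, eta) {
  # GEV return level for return period 'period', matching the yp* nodes in gev.txt
  mu + (sigma / eta) * ((-log(1 - 1 / period))^(-eta) - 1)
}
# Evaluated at the posterior means reported further down (mu about 994.6,
# sigma about 317.9, eta about 0.297) this gives values close to, though not
# equal to, the posterior means of yp3, ..., yp24 (plug-in vs. posterior mean).
gev_return_level(c(3, 6, 9, 18, 24), mu = 994.6, sigma = 317.9, eta = 0.297)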

> dados_bug<- list(y=max,n=length(max))

> dados_bug

$y
[1] 1946 1372 1049 731 807 945 899 1222 807 937 661 1963 811 1131 1171

[16] 1302 1589 1245 3328 1124 1020 1121 1021 849 670 745 1752 1560 922

$n

[1] 29

> inits <- function(){ list(mu=1200, sigma =400,eta=0.02)}

> params <- c("mu","sigma","eta")

>

> nc = 3 # Number of chains

> ni = 120000 # Chain length (number of iterations)

> nb = 30000 # Number of iterations discarded as burn-in

> nt = 10 # Thinning interval

>

> # Start the sampler

>

>

> gev.inf = bugs(data = dados_bug, inits = inits,

+ parameters =c(params,"yp3","yp6","yp9","yp18","yp24"),

+ model = "gev.txt",

+ n.thin = nt, n.chains = nc,

+ n.burnin = nb, n.iter = ni, codaPkg=FALSE, debug=T)

Error in bugs.run(n.burnin, OpenBUGS.pgm, debug = debug, WINE = WINE, :

Look at the log file in C:/Users/Usuario/AppData/Local/Temp/RtmpM7EFY4 and

try again with 'debug=TRUE' to figure out what went wrong within OpenBUGS.
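
OpenBUGS trapped on the first set of starting values, so the sampler is simply rerun below with different initial values. A minimal sketch of automating that retry with base R's tryCatch, assuming the same bugs() call (not part of the original session):

retry_inits <- function() list(mu = 2200, sigma = 600, eta = 0.2)  # second attempt
gev.inf <- tryCatch(
  bugs(data = dados_bug, inits = inits,
       parameters = c(params, "yp3", "yp6", "yp9", "yp18", "yp24"),
       model = "gev.txt", n.thin = nt, n.chains = nc,
       n.burnin = nb, n.iter = ni),
  error = function(e)
    bugs(data = dados_bug, inits = retry_inits,
         parameters = c(params, "yp3", "yp6", "yp9", "yp18", "yp24"),
         model = "gev.txt", n.thin = nt, n.chains = nc,
         n.burnin = nb, n.iter = ni))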

> inits <- function(){ list(mu=2200, sigma =600,eta=0.2)}

> params <- c("mu","sigma","eta")

>

> nc = 3 # Number of chains

> ni = 120000 # Chain length (number of iterations)

> nb = 30000 # Number of iterations discarded as burn-in

> nt = 10 # Thinning interval

>

> # Start the sampler

>

> gev.inf = bugs(data = dados_bug, inits = inits,

+ parameters =c(params,"yp3","yp6","yp9","yp18","yp24"),

+ model = "gev.txt",

+ n.thin = nt, n.chains = nc,

+ n.burnin = nb, n.iter = ni, codaPkg=FALSE, debug=T)

>

>

>

> post_g.inf<-as.mcmc(gev.inf$sims.matrix[,]) # save the output as an mcmc chain

>

> geweke.diag(post_g.inf)

Fraction in 1st window = 0.1

Fraction in 2nd window = 0.5

mu sigma eta yp3 yp6 yp9 yp18 yp24

-1.3864 -1.3547 -0.4727 -1.6021 -1.6206 -1.5851 -1.4836 -1.4350

deviance

-0.5801

> raftery.diag(post_g.inf)

Quantile (q) = 0.025

Accuracy (r) = +/- 0.005

Probability (s) = 0.95


Burn-in Total Lower bound Dependence

(M) (N) (Nmin) factor (I)

mu 2 3778 3746 1.01

sigma 2 3756 3746 1.00

eta 2 3761 3746 1.00

yp3 1 3881 3746 1.04

yp6 1 3799 3746 1.01

yp9 1 3800 3746 1.01

yp18 2 3787 3746 1.01

yp24 2 3765 3746 1.01

deviance 1 5347 3746 1.43

> heidel.diag(post_g.inf)

Stationarity start p-value

test iteration

mu passed 1 0.763

sigma passed 1 0.595

eta passed 1 0.916

yp3 passed 1 0.832

yp6 passed 1 0.708

yp9 passed 1 0.664

yp18 passed 1 0.633

yp24 passed 1 0.639

deviance passed 1 0.977

Halfwidth Mean Halfwidth

test

mu passed 994.573 0.198793

sigma passed 317.860 0.173431

eta passed 0.297 0.000423


yp3 passed 1324.206 0.356752

yp6 passed 1703.281 0.617436

yp9 passed 1954.944 0.848401

yp18 passed 2458.803 1.440030

yp24 passed 2701.734 1.767420

deviance passed 427.376 0.008122

>

> HPD.inf=HPDinterval(post_g.inf)

> HPD.inf

lower upper

mu 8.752e+02 1083.0000

sigma 2.193e+02 410.4000

eta 7.961e-02 0.5204

yp3 1.112e+03 1513.0000

yp6 1.381e+03 2037.0000

yp9 1.546e+03 2412.0000

yp18 1.801e+03 3215.0000

yp24 1.909e+03 3636.0000

deviance 4.248e+02 431.4000

attr(,"Probability")

[1] 0.95

>

> resumo1=print(gev.inf,dig=3) # save the mcmc chain summary to 3 digits

Inference for Bugs model at "gev.txt",

Current: 3 chains, each with 120000 iterations (first 30000 discarded), n.thin = 10

Cumulative: n.sims = 270000 iterations saved

mean sd 2.5% 25% 50% 75% 97.5% Rhat

mu 994.573 52.702 894.000 966.100 995.000 1018.000 1113.000 1.411

sigma 317.860 45.978 232.200 287.500 318.400 341.000 437.700 1.290

eta 0.297 0.113 0.083 0.219 0.294 0.372 0.524 1.004

yp3 1324.206 94.578 1145.000 1275.000 1319.000 1368.000 1571.000 1.376


yp6 1703.281 162.279 1422.000 1599.000 1689.000 1787.000 2102.000 1.247

yp9 1954.944 222.247 1588.000 1803.000 1929.000 2075.000 2485.000 1.178

yp18 2458.803 377.971 1884.000 2190.000 2401.000 2665.000 3358.000 1.098

yp24 2701.734 468.560 2010.000 2368.000 2625.000 2953.000 3827.000 1.077

deviance 427.376 2.153 425.100 426.100 426.900 427.900 433.200 1.121

n.eff

mu 9

sigma 11

eta 650

yp3 9

yp6 12

yp9 16

yp18 25

yp24 31

deviance 35

For each parameter, n.eff is a crude measure of effective sample size,

and Rhat is the potential scale reduction factor (at convergence, Rhat=1).

DIC info (using the rule, pD = Dbar-Dhat)

pD = 1.327 and DIC = 428.700

DIC is an estimate of expected predictive error (lower deviance is better).

>

> #resumo1

> #Preditiva

> ypred1<-resumo1$mean[4:8] # save the posterior means of the predicted y in object "ypred1"

>

>

> VP1 = as.numeric(ypred1);VP1

[1] 1324.206 1703.281 1954.944 2458.803 2701.734

>
>

> maxf<-dados$Max[30:53]

>

> obs1_1<-max(maxf[1:3])

> obs1_1

[1] 1896

> obs1_2<-max(maxf[1:6])

> obs1_2

[1] 1896

> obs1_3<-max(maxf[1:9])

> obs1_3

[1] 1896

> obs1_4<-max(maxf[1:18])

> obs1_4

[1] 2239

> obs1_5<-max(maxf[1:24])

> obs1_5

[1] 2239

>

> obs<-c(obs1_1,obs1_2,obs1_3,obs1_4,obs1_5)

> obs

[1] 1896 1896 1896 2239 2239

>

> EpGev1= abs((obs-VP1)/obs)

> #round(EpGEVT*100,2)

> ep.inf.1=round(mean(EpGev1)*100,2) ;ep.inf.1

[1] 14.78
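
EpGev1 above is the absolute percentage error between each observed block maximum and the corresponding predicted return level, and ep.inf.1 is its mean (a MAPE of 14.78% for the GEV fit). A sketch of the same calculation as a reusable helper; the name mape is not part of the session:

mape <- function(obs, pred) mean(abs((obs - pred) / obs)) * 100
round(mape(c(1896, 1896, 1896, 2239, 2239),
           c(1324.206, 1703.281, 1954.944, 2458.803, 2701.734)), 2)  # 14.78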

>
> sink("gumbel.txt")

> cat("

+ model {

+ # Likelihood

+ for (i in 1:n) {

+ y[i] ~ dgumbel(mu, sigma)

+ }

+
+ # Prior

+ # dnorm(mean, precision)

+ mu ~ dnorm(1046,0.0002)

+ sigma ~ dnorm(363,0.0003)

+ yp3 <- mu - (sigma)*log(-log(1-(1/3)))

+ yp6 <- mu - (sigma)*log(-log(1-(1/6)))

+ yp9 <- mu - (sigma)*log(-log(1-(1/9)))

+ yp18 <- mu - (sigma)*log(-log(1-(1/18)))

+ yp24 <- mu - (sigma)*log(-log(1-(1/24)))

+ }

+ ",fill=TRUE)

> sink()
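
In the Gumbel model the yp* nodes are the return levels y_T = mu - sigma*log(-log(1 - 1/T)). Because this expression is linear in mu and sigma, its posterior mean equals the formula evaluated at the posterior means of mu and sigma. A plain R sketch; the helper name gumbel_return_level is not part of the session:

gumbel_return_level <- function(period, mu, sigma) {
  # Gumbel return level for return period 'period', matching the yp* nodes in gumbel.txt
  mu - sigma * log(-log(1 - 1 / period))
}
# With the posterior means reported below (mu about 1017.9, sigma about 348.9)
# this reproduces the posterior means of yp3, ..., yp24 up to rounding.
gumbel_return_level(c(3, 6, 9, 18, 24), mu = 1017.908, sigma = 348.930)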

>

>

> dados_bug<- list(y=max,n=length(max))

> dados_bug

$y

[1] 1946 1372 1049 731 807 945 899 1222 807 937 661 1963 811 1131 1171

[16] 1302 1589 1245 3328 1124 1020 1121 1021 849 670 745 1752 1560 922

$n

[1] 29

>

> inits <- function(){list(mu=850, sigma=300)}

> params <- c("mu","sigma")

>

> nc = 2 # Number of chains

> ni = 60000 # Chain length (number of iterations)

> nb = 15000 # Number of iterations discarded as burn-in

> nt = 10 # Thinning interval

>

> # Start the sampler

>

>

> gumbel.inf = bugs(data = dados_bug, inits = inits,

+ parameters =c(params,"yp3","yp6","yp9","yp18","yp24"),

+ model = "gumbel.txt",

+ n.thin = nt, n.chains = nc,

+ n.burnin = nb, n.iter = ni, codaPkg=FALSE, debug=T)

>

>

>

> post_g.inf<-as.mcmc(gumbel.inf$sims.matrix[,]) # save the output as an mcmc chain

>

>

> geweke.diag(post_g.inf)

Fraction in 1st window = 0.1

Fraction in 2nd window = 0.5

mu sigma yp3 yp6 yp9 yp18 yp24 deviance

-0.02092 -0.70037 -0.39895 -0.54273 -0.57871 -0.61704 -0.63011 -2.48916

> raftery.diag(post_g.inf)

Quantile (q) = 0.025

Accuracy (r) = +/- 0.005

Probability (s) = 0.95


Burn-in Total Lower bound Dependence

(M) (N) (Nmin) factor (I)

mu 1 3754 3746 1.000

sigma 2 3783 3746 1.010

yp3 2 3785 3746 1.010

yp6 2 3735 3746 0.997

yp9 2 3780 3746 1.010

yp18 2 3740 3746 0.998

yp24 1 3760 3746 1.000

deviance 2 9911 3746 2.650

> heidel.diag(post_g.inf)

Stationarity start p-value

test iteration

mu passed 1 0.215

sigma passed 1 0.885

yp3 passed 1 0.558

yp6 passed 1 0.828

yp9 passed 1 0.890

yp18 passed 1 0.934

yp24 passed 1 0.941

deviance passed 1 0.145

Halfwidth Mean Halfwidth

test

mu passed 1018 0.30951

sigma passed 349 0.26206

yp3 passed 1333 0.42332

yp6 passed 1612 0.58861

yp9 passed 1764 0.68917


yp18 passed 2017 0.86672

yp24 passed 2119 0.93948

deviance passed 431 0.00858

>

> HPD.inf=HPDinterval(post_g.inf)

> HPD.inf

lower upper

mu 924.2 1110.0

sigma 270.9 426.7

yp3 1207.0 1460.0

yp6 1437.0 1790.0

yp9 1563.0 1976.0

yp18 1752.0 2269.0

yp24 1834.0 2394.0

deviance 429.9 434.0

attr(,"Probability")

[1] 0.95

>

> resumo1=print(gumbel.inf,dig=3) # save the mcmc chain summary to 3 digits

Inference for Bugs model at "gumbel.txt",

Current: 2 chains, each with 60000 iterations (first 15000 discarded), n.thin = 10

Cumulative: n.sims = 90000 iterations saved

mean sd 2.5% 25% 50% 75% 97.5% Rhat n.eff

mu 1017.908 47.535 925.4 985.7 1018.0 1050.0 1112.0 1.003 780

sigma 348.930 40.111 275.5 320.8 347.4 375.3 432.2 1.001 6900

yp3 1332.893 65.210 1211.0 1287.0 1331.0 1376.0 1464.0 1.003 910

yp6 1611.782 90.584 1442.0 1548.0 1609.0 1672.0 1797.0 1.002 1300

yp9 1764.238 105.999 1566.0 1690.0 1761.0 1834.0 1981.0 1.002 1500

yp18 2016.519 132.662 1769.0 1924.0 2012.0 2104.0 2288.0 1.002 1800

yp24 2119.428 143.797 1852.0 2019.0 2115.0 2214.0 2414.0 1.002 1900

deviance 431.407 1.314 430.0 430.4 431.1 432.0 434.8 1.001 3500
For each parameter, n.eff is a crude measure of effective sample size,

and Rhat is the potential scale reduction factor (at convergence, Rhat=1).

DIC info (using the rule, pD = Dbar-Dhat)

pD = 1.006 and DIC = 432.400

DIC is an estimate of expected predictive error (lower deviance is better).

>

> #resumo1

> #Preditiva

> ypred1<-resumo1$mean[3:7] # save the posterior means of the predicted y in object "ypred1"

>

>

> VP1 = as.numeric(ypred1);VP1

[1] 1332.893 1611.782 1764.238 2016.519 2119.428

>

>

> maxf<-dados$Max[30:53]

>

> obs1_1<-max(maxf[1:3])

> obs1_1

[1] 1896

> obs1_2<-max(maxf[1:6])

> obs1_2

[1] 1896

> obs1_3<-max(maxf[1:9])

> obs1_3

[1] 1896

> obs1_4<-max(maxf[1:18])

> obs1_4

[1] 2239
> obs1_5<-max(maxf[1:24])

> obs1_5

[1] 2239

>

> obs<-c(obs1_1,obs1_2,obs1_3,obs1_4,obs1_5)

> obs

[1] 1896 1896 1896 2239 2239

>

> EpG1= abs((obs-VP1)/obs)

> #round(EpGEVT*100,2)

> ep.inf.1=round(mean(EpG1)*100,2) ;ep.inf.1

[1] 13.38
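
To put the two fits side by side, the fit and prediction measures already reported in this session can be collected in a small data frame (a sketch only; the values are the DICs from the two print() summaries and the two MAPEs stored in ep.inf.1):

comparison <- data.frame(
  model = c("GEV", "Gumbel"),
  DIC   = c(428.7, 432.4),   # from print(gev.inf) and print(gumbel.inf)
  MAPE  = c(14.78, 13.38)    # ep.inf.1 for each model
)
comparison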
