In a fixed power prior model, the setup is as follows. Suppose that each event follows a Bernoulli distribution with probability p_i; what I want is to raise the Bernoulli likelihood to the power w in JAGS:
mod1 <- function(){
  for(i in 1:N) {
    y[i] ~ dbern(p[i])^w  # <------ this is what I want, but it is not valid JAGS syntax
    logit(p[i]) <- a[group[i]] + t[group[i]]*d[i]
  }
  for (j in 1:3) {
    a[j] ~ dnorm(mu.a, pow(sd.a, -2))
    t[j] ~ dnorm(mu.t, pow(sd.t, -2))
  }
  mu.a ~ dnorm(0, 0.01)
  mu.t ~ dnorm(0, 0.01)
  sd.a ~ dunif(0, 10)
  sd.t ~ dunif(0, 10)
}
However, JAGS does not accept this syntax: you cannot exponentiate a distribution on the right-hand side of a ~ statement.
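For context, the target is the usual power prior, where the historical-data likelihood is raised to a fixed power $w$; on the log scale this just multiplies each observation's log-likelihood contribution by $w$:

$$\pi(\theta \mid D_0, w) \;\propto\; L(\theta \mid D_0)^{w}\,\pi_0(\theta), \qquad \log L_w(\theta) = w \sum_{i=1}^{N}\big[y_i \log p_i + (1-y_i)\log(1-p_i)\big].$$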
As an alternative, I wrote the following Stan code:
mod1 <- '
data {
  int<lower=1> N;
  real w;          // fixed power applied to the whole likelihood
  int y[N];
  vector[N] d;
  int group[N];
}
parameters {
  vector[3] a;
  vector<lower=0>[3] t;
  real mua;
  real mut;
  real sigmaa;     // note: unconstrained despite the uniform(0,10) prior below
  real sigmat;
}
model {
  real p[N];
  // priors
  mua ~ normal(0, 10);
  mut ~ normal(0, 10);
  sigmaa ~ uniform(0, 10);
  sigmat ~ uniform(0, 10);
  for (j in 1:3) {
    a[j] ~ normal(mua, sigmaa);
    t[j] ~ normal(mut, sigmat);
  }
  // weighted likelihood: each log-likelihood term is scaled by w
  for (i in 1:N) {
    p[i] = inv_logit(a[group[i]] + t[group[i]]*d[i]);
    target += bernoulli_lpmf(y[i] | p[i]) * w;
  }
}
'
which works, but I'd like to use JAGS (R2jags) for this project. Is there a JAGS equivalent of the above Stan code? Thanks!
Here is the sample data if you want to test the code:
y <- rbinom(160, 1, 0.3)
d <- runif(160, 0, 41)
group <- sample(1:3, 160, replace = TRUE)
w <- 0.5  # a single fixed power; the Stan model above declares w as a scalar
N <- 160
mydata <- list(y = y, d = d, group = group, w = w, N = N)
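For what it's worth, a minimal rstan call to fit the model above with this data could look like this (iteration counts chosen arbitrarily):

library(rstan)
fit <- stan(model_code = mod1, data = mydata, chains = 2, iter = 5000)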
I would have thought that the ones or zeros trick would work here. With the zeros trick, you observe a zero from a Poisson whose mean is the negative log-likelihood plus a constant C chosen so that the mean stays non-negative; since the Poisson density at zero is exp(-phi[i]), each pseudo-observation contributes exp(LL[i] - C), which is proportional to exp(LL[i]). Here's an example with the zeros trick:
mod1j <- function(){
  C <- 0  # C must keep phi[i] >= 0; the weighted Bernoulli log-likelihood is always <= 0, so 0 suffices
  for(i in 1:N) {
    zeros[i] ~ dpois(phi[i])
    phi[i] <- -LL[i] + C
    LL[i] <- (log(p[i])*y[i] + log(1 - p[i])*(1 - y[i]))*w[i]  # weighted log-likelihood contribution
    logit(p[i]) <- a[group[i]] + t[group[i]]*d[i]
  }
  ll <- sum(LL[])  # total weighted log-likelihood, for monitoring
  for (j in 1:3) {
    a[j] ~ dnorm(mu.a, pow(sd.a, -2))
    t[j] ~ dnorm(mu.t, pow(sd.t, -2))
  }
  mu.a ~ dnorm(0, 0.01)
  mu.t ~ dnorm(0, 0.01)
  sd.a ~ dunif(0, 10)
  sd.t ~ dunif(0, 10)
}
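For completeness, the ones trick version is nearly identical: observe a 1 from a Bernoulli whose probability is exp(LL[i]), which stays in [0, 1] here because the weighted Bernoulli log-likelihood is never positive. A sketch (mod1j_ones is just an illustrative name):

mod1j_ones <- function(){
  for(i in 1:N) {
    ones[i] ~ dbern(q[i])
    q[i] <- exp(LL[i])  # <= 1 since LL[i] <= 0 for a weighted Bernoulli log-likelihood
    LL[i] <- (log(p[i])*y[i] + log(1 - p[i])*(1 - y[i]))*w[i]
    logit(p[i]) <- a[group[i]] + t[group[i]]*d[i]
  }
  for (j in 1:3) {
    a[j] ~ dnorm(mu.a, pow(sd.a, -2))
    t[j] ~ dnorm(mu.t, pow(sd.t, -2))
  }
  mu.a ~ dnorm(0, 0.01)
  mu.t ~ dnorm(0, 0.01)
  sd.a ~ dunif(0, 10)
  sd.t ~ dunif(0, 10)
}
# pass ones = rep(1, N) instead of zeros in the data list

Here is the Stan model again, modified to take a vector of per-observation weights, for comparison: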
mod1 <- '
data {
  int<lower=1> N;
  real w[N];       // per-observation weights
  int y[N];
  real d[N];
  int group[N];
}
parameters {
  vector[3] a;
  vector[3] t;     // <lower=0> dropped to match the JAGS model
  real mua;
  real mut;
  real sigmaa;     // unconstrained despite the uniform(0,10) prior; see the warnings below
  real sigmat;
}
model {
  real p[N];
  // priors
  mua ~ normal(0, 10);
  mut ~ normal(0, 10);
  sigmaa ~ uniform(0, 10);
  sigmat ~ uniform(0, 10);
  for (j in 1:3) {
    a[j] ~ normal(mua, sigmaa);
    t[j] ~ normal(mut, sigmat);
  }
  // weighted likelihood
  for (i in 1:N) {
    p[i] = inv_logit(a[group[i]] + t[group[i]]*d[i]);
    target += bernoulli_lpmf(y[i] | p[i]) * w[i];
  }
}
'
y <- rbinom(160, 1, 0.3)
d <- runif(160, 0, 41)
group <- sample(1:3, 160, replace = TRUE)
w <- runif(160, .5, 2)  # unequal weights this time
N <- 160
mydata <- list(y = y, d = c(scale(d)), group = group, w = w, N = N,
               zeros = rep(0, N))  # zeros is only needed by the JAGS model; rstan ignores unused list elements
library(rstan)
smod <- stan(model_code = mod1, data = mydata, chains = 2,
             iter = 25000, warmup = 10000)
#> Warning: There were 8607 divergent transitions after warmup. See
#> https://mc-stan.org/misc/warnings.html#divergent-transitions-after-warmup
#> to find out why this is a problem and how to eliminate them.
#> Warning: There were 11 transitions after warmup that exceeded the maximum treedepth. Increase max_treedepth above 10. See
#> https://mc-stan.org/misc/warnings.html#maximum-treedepth-exceeded
#> Warning: Examine the pairs() plot to diagnose sampling problems
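Part of what is going on here is that sigmaa and sigmat are declared without bounds even though they get uniform(0,10) priors, so Stan keeps proposing values outside the support; with only three groups, the hierarchical scales are also poorly identified. If you wanted to pursue the Stan route, declaring the scales as real<lower=0, upper=10> and raising adapt_delta is the usual first step, e.g. (not run here; the warnings above come from the unmodified call):

smod <- stan(model_code = mod1, data = mydata, chains = 2, iter = 25000,
             warmup = 10000, control = list(adapt_delta = 0.99, max_treedepth = 15))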
library(R2jags)
jmod <- jags(mydata, inits = NULL, model.file = mod1j,
             parameters.to.save = c("a", "t", "ll", "mu.a", "mu.t", "sd.a", "sd.t"),
             n.chains = 2, n.iter = 50000, n.burnin = 10000, n.thin = 4)
summary(smod)$summary
#> mean se_mean sd 2.5% 97.5%
#> a[1] -0.9485418 0.004391921 0.2616977 -1.45203977 -0.42740087
#> a[2] -0.9229228 0.004768607 0.2498671 -1.39857013 -0.40886121
#> a[3] -1.3801613 0.006318897 0.2987109 -2.02100962 -0.85652760
#> t[1] -0.3540932 0.004473947 0.2395895 -0.82275651 0.12607274
#> t[2] -0.3760353 0.004234657 0.2339069 -0.84264376 0.08154950
#> t[3] -0.4288344 0.004414164 0.2519173 -0.96762765 0.03517404
#> mua -1.0691230 0.017084370 0.9030125 -2.82320530 0.82897044
#> mut -0.3732147 0.014114388 0.7314059 -1.58495700 1.11586195
#> sigmaa 1.0581223 0.036266564 1.3500882 0.04529007 5.25790545
#> sigmat 0.7294629 0.042495890 1.2622050 0.01482299 4.49297131
#> lp__ -115.2883229 0.201683196 5.3385894 -125.86118296 -104.86900546
jmod
#> mu.vect sd.vect 2.5% 97.5%
#> a[1] -0.952 0.260 -1.454 -0.434
#> a[2] -0.925 0.250 -1.404 -0.428
#> a[3] -1.371 0.301 -1.999 -0.849
#> t[1] -0.348 0.237 -0.817 0.119
#> t[2] -0.367 0.230 -0.829 0.085
#> t[3] -0.423 0.251 -0.966 0.043
#> mu.a -1.076 1.010 -3.108 0.868
#> mu.t -0.383 0.780 -1.744 1.001
#> sd.a 1.045 1.405 0.032 5.523
#> sd.t 0.707 1.211 0.011 4.630
#> ll -118.541 1.504 -122.250 -116.472
#> deviance 237.082 3.008 232.945 244.501
Created on 2023-03-28 with reprex v2.0.2
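If you want to line the two sets of estimates up directly (this was not run as part of the reprex above), something like the following should work with the objects created there:

pars <- c("a[1]", "a[2]", "a[3]", "t[1]", "t[2]", "t[3]")
round(cbind(stan = summary(smod)$summary[pars, "mean"],
            jags = jmod$BUGSoutput$summary[pars, "mean"]), 3)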
The models look pretty comparable. Note that I removed the <lower=0> constraint on t so that the JAGS and Stan models are otherwise equivalent, and I generated unequal weights this time.