Constants

seed <- 2023        # seed passed to the MAP prior fits below
prob_arm1 <- 0.333  # assumed probability of allocation to arm 1, used in the blinded analyses

library(bssd)

Generating priors

Small historical dataset

df <- read.csv("small_historic.csv")

map_small <- stan_map_patient_data(
  y = df$y, trial = df$trial, time = df$time,
  a_alpha = 1 / 3, a_beta = 1 / 3,
  b_alpha = 5, b_beta = 5,
  refresh = 0, seed = seed
)
map_small
## Prob(Event): Mean 0.168 (HPDI 0.079, 0.253)
## MAP Prob(Event): Mean 0.168 (HPDI 0.038, 0.299)
map_small$fit
## Inference for Stan model: map_prior_patient_data.
## 4 chains, each with iter=2000; warmup=1000; thin=1; 
## post-warmup draws per chain=1000, total post-warmup draws=4000.
## 
##                 mean se_mean    sd   2.5%    25%    50%    75%  97.5% n_eff Rhat
## a               0.17    0.00  0.04   0.09   0.14   0.17   0.20   0.26  1448    1
## b               0.51    0.00  0.15   0.24   0.41   0.51   0.62   0.79  2155    1
## pi_h[1]         0.15    0.00  0.06   0.05   0.11   0.14   0.19   0.28  2058    1
## pi_h[2]         0.20    0.00  0.06   0.11   0.16   0.20   0.24   0.33  2239    1
## pi_h[3]         0.15    0.00  0.05   0.06   0.11   0.15   0.18   0.27  2284    1
## pi_h[4]         0.15    0.00  0.05   0.06   0.12   0.15   0.19   0.26  2266    1
## pi_h[5]         0.17    0.00  0.06   0.08   0.13   0.17   0.21   0.30  2345    1
## pi_star         0.17    0.00  0.07   0.05   0.12   0.16   0.21   0.32  2162    1
## pi_star_alpha   8.62    0.08  3.31   3.18   6.15   8.28  10.71  15.98  1528    1
## pi_star_beta   42.81    0.26 12.35  20.12  33.84  42.63  51.40  66.81  2269    1
## avg             0.17    0.00  0.04   0.09   0.14   0.17   0.20   0.26  1448    1
## ess            51.43    0.31 14.51  24.49  40.97  51.46  61.73  78.65  2155    1
## lp__          -52.57    0.08  2.37 -58.23 -53.83 -52.17 -50.85 -49.18   965    1
## 
## Samples were drawn using NUTS(diag_e) at Wed Nov 15 12:39:23 2023.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at 
## convergence, Rhat=1).
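
A note on the summary above: the ess row appears to equal pi_star_alpha + pi_star_beta, i.e. an effective prior sample size implied by the beta approximation of the MAP prior. This reading is an observation from the output, not documented behaviour, but a quick arithmetic check against the posterior means is consistent with it:

# pi_star_alpha + pi_star_beta from the posterior means above:
8.62 + 42.81  # = 51.43, matching the 'ess' mean reported by map_small$fit
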
mix_small <- fit_mixture(map_small)
mix_small
## EM for Beta Mixture Model
## Log-Likelihood = 5124.374
## 
## Univariate beta mixture
## Mixture Components:
##   comp1     comp2    
## w  0.579896  0.420104
## a  6.820920  3.890079
## b 29.176607 24.198249
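
For a visual check of what this prior looks like, the fitted two-component mixture can be plotted as a density; a minimal base-R sketch using the (rounded) weights and shape parameters printed above:

# Density of the fitted beta mixture approximation to the MAP prior
# (weights and shape parameters copied from the mix_small output).
mix_dens <- function(x) {
  0.580 * dbeta(x, 6.821, 29.177) + 0.420 * dbeta(x, 3.890, 24.198)
}
curve(mix_dens, from = 0, to = 0.6, xlab = "Prob(Event)", ylab = "Prior density")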

Large historical dataset

df <- read.csv("large_historic.csv")

map_large <- stan_map_patient_data(
  y = df$y, trial = df$trial, time = df$time,
  a_alpha = 1 / 3, a_beta = 1 / 3,
  b_alpha = 5, b_beta = 5,
  refresh = 0, seed = seed
)
map_large
## Prob(Event): Mean 0.132 (HPDI 0.117, 0.147)
## MAP Prob(Event): Mean 0.132 (HPDI 0.099, 0.163)
map_large$fit
## Inference for Stan model: map_prior_patient_data.
## 4 chains, each with iter=2000; warmup=1000; thin=1; 
## post-warmup draws per chain=1000, total post-warmup draws=4000.
## 
##                  mean se_mean     sd    2.5%     25%     50%     75%   97.5% n_eff Rhat
## a                0.13    0.00   0.01    0.12    0.13    0.13    0.14    0.15  1486    1
## b                0.55    0.00   0.14    0.28    0.45    0.55    0.64    0.81  3601    1
## pi_h[1]          0.13    0.00   0.01    0.11    0.13    0.13    0.14    0.16  3422    1
## pi_h[2]          0.13    0.00   0.01    0.10    0.12    0.13    0.13    0.15  3225    1
## pi_h[3]          0.13    0.00   0.01    0.10    0.12    0.13    0.13    0.15  3977    1
## pi_h[4]          0.14    0.00   0.01    0.11    0.13    0.14    0.14    0.16  3515    1
## pi_h[5]          0.13    0.00   0.01    0.10    0.12    0.12    0.13    0.15  3762    1
## pi_h[6]          0.14    0.00   0.01    0.11    0.13    0.14    0.15    0.17  2903    1
## pi_h[7]          0.14    0.00   0.01    0.11    0.13    0.14    0.15    0.16  3927    1
## pi_h[8]          0.12    0.00   0.01    0.10    0.12    0.12    0.13    0.15  3640    1
## pi_h[9]          0.13    0.00   0.01    0.11    0.12    0.13    0.14    0.15  3121    1
## pi_h[10]         0.14    0.00   0.01    0.11    0.13    0.14    0.15    0.17  3031    1
## pi_star          0.13    0.00   0.02    0.10    0.12    0.13    0.14    0.17  3005    1
## pi_star_alpha   72.25    0.32  18.34   36.69   59.84   71.99   84.64  109.30  3334    1
## pi_star_beta   475.40    1.97 117.98  241.88  393.29  476.76  556.90  703.40  3604    1
## avg              0.13    0.00   0.01    0.12    0.13    0.13    0.14    0.15  1486    1
## ess            547.66    2.26 135.77  278.93  453.45  549.21  641.56  811.34  3601    1
## lp__          -592.31    0.07   2.86 -599.19 -593.98 -591.87 -590.25 -587.99  1587    1
## 
## Samples were drawn using NUTS(diag_e) at Wed Nov 15 12:39:32 2023.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at 
## convergence, Rhat=1).
mix_large <- fit_mixture(map_large)
mix_large
## EM for Beta Mixture Model
## Log-Likelihood = 10647.76
## 
## Univariate beta mixture
## Mixture Components:
##   comp1    
## w   1.00000
## a  52.44163
## b 344.85923
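
Here the EM fit collapses to a single beta component, so the implied prior mean and an informal prior "sample size" can be read off directly (parameters copied from the output above):

# Implied prior mean and alpha + beta (an informal measure of prior
# informativeness) for the single-component fit.
a <- 52.44163; b <- 344.85923
a / (a + b)  # ~0.132, in line with the MAP Prob(Event) mean reported above
a + b        # ~397.3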

Analyses of ongoing trials

The prior on the active arm (arm 2) is constant throughout:

w2 <- as.array(c(1))
pi2_alpha <- as.array(c(0.3))
pi2_beta <- as.array(c(0.3))
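
Arm 2 therefore gets a single-component Beta(0.3, 0.3) prior with weight 1, which is weakly informative (alpha + beta = 0.6, less weight than a single prior observation). Its shape can be inspected with base R:

# Density of the weakly informative Beta(0.3, 0.3) prior on Prob(Event) in arm 2.
curve(dbeta(x, 0.3, 0.3), from = 0.01, to = 0.99,
      xlab = "Prob(Event)", ylab = "Prior density")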

Small current dataset

df <- read.csv("small_current.csv")

Using the small historical dataset prior

w1 <- mix_small["w",]         # component weights of the fitted mixture prior for arm 1
pi1_alpha <- mix_small["a",]  # first (alpha) shape parameter of each beta component
pi1_beta <- mix_small["b",]   # second (beta) shape parameter of each beta component

Blinded analysis

fit <- stan_bssd(y = df$y, time = df$time, prob_arm1 = prob_arm1,
                 w1 = w1, pi1_alpha = pi1_alpha, pi1_beta = pi1_beta,
                 w2 = w2, pi2_alpha = pi2_alpha, pi2_beta = pi2_beta)
## 
## SAMPLING FOR MODEL 'bssd' NOW (CHAIN 1).
## Chain 1: 
## Chain 1: Gradient evaluation took 2.9e-05 seconds
## Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 0.29 seconds.
## Chain 1: Adjust your expectations accordingly!
## Chain 1: 
## Chain 1: 
## Chain 1: Iteration:    1 / 2000 [  0%]  (Warmup)
## Chain 1: Iteration:  200 / 2000 [ 10%]  (Warmup)
## Chain 1: Iteration:  400 / 2000 [ 20%]  (Warmup)
## Chain 1: Iteration:  600 / 2000 [ 30%]  (Warmup)
## Chain 1: Iteration:  800 / 2000 [ 40%]  (Warmup)
## Chain 1: Iteration: 1000 / 2000 [ 50%]  (Warmup)
## Chain 1: Iteration: 1001 / 2000 [ 50%]  (Sampling)
## Chain 1: Iteration: 1200 / 2000 [ 60%]  (Sampling)
## Chain 1: Iteration: 1400 / 2000 [ 70%]  (Sampling)
## Chain 1: Iteration: 1600 / 2000 [ 80%]  (Sampling)
## Chain 1: Iteration: 1800 / 2000 [ 90%]  (Sampling)
## Chain 1: Iteration: 2000 / 2000 [100%]  (Sampling)
## Chain 1: 
## Chain 1:  Elapsed Time: 0.185 seconds (Warm-up)
## Chain 1:                0.32 seconds (Sampling)
## Chain 1:                0.505 seconds (Total)
## Chain 1: 
## 
## SAMPLING FOR MODEL 'bssd' NOW (CHAIN 2).
## Chain 2: 
## Chain 2: Gradient evaluation took 2.8e-05 seconds
## Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 0.28 seconds.
## Chain 2: Adjust your expectations accordingly!
## Chain 2: 
## Chain 2: 
## Chain 2: Iteration:    1 / 2000 [  0%]  (Warmup)
## Chain 2: Iteration:  200 / 2000 [ 10%]  (Warmup)
## Chain 2: Iteration:  400 / 2000 [ 20%]  (Warmup)
## Chain 2: Iteration:  600 / 2000 [ 30%]  (Warmup)
## Chain 2: Iteration:  800 / 2000 [ 40%]  (Warmup)
## Chain 2: Iteration: 1000 / 2000 [ 50%]  (Warmup)
## Chain 2: Iteration: 1001 / 2000 [ 50%]  (Sampling)
## Chain 2: Iteration: 1200 / 2000 [ 60%]  (Sampling)
## Chain 2: Iteration: 1400 / 2000 [ 70%]  (Sampling)
## Chain 2: Iteration: 1600 / 2000 [ 80%]  (Sampling)
## Chain 2: Iteration: 1800 / 2000 [ 90%]  (Sampling)
## Chain 2: Iteration: 2000 / 2000 [100%]  (Sampling)
## Chain 2: 
## Chain 2:  Elapsed Time: 0.176 seconds (Warm-up)
## Chain 2:                0.162 seconds (Sampling)
## Chain 2:                0.338 seconds (Total)
## Chain 2: 
## 
## SAMPLING FOR MODEL 'bssd' NOW (CHAIN 3).
## Chain 3: 
## Chain 3: Gradient evaluation took 2.6e-05 seconds
## Chain 3: 1000 transitions using 10 leapfrog steps per transition would take 0.26 seconds.
## Chain 3: Adjust your expectations accordingly!
## Chain 3: 
## Chain 3: 
## Chain 3: Iteration:    1 / 2000 [  0%]  (Warmup)
## Chain 3: Iteration:  200 / 2000 [ 10%]  (Warmup)
## Chain 3: Iteration:  400 / 2000 [ 20%]  (Warmup)
## Chain 3: Iteration:  600 / 2000 [ 30%]  (Warmup)
## Chain 3: Iteration:  800 / 2000 [ 40%]  (Warmup)
## Chain 3: Iteration: 1000 / 2000 [ 50%]  (Warmup)
## Chain 3: Iteration: 1001 / 2000 [ 50%]  (Sampling)
## Chain 3: Iteration: 1200 / 2000 [ 60%]  (Sampling)
## Chain 3: Iteration: 1400 / 2000 [ 70%]  (Sampling)
## Chain 3: Iteration: 1600 / 2000 [ 80%]  (Sampling)
## Chain 3: Iteration: 1800 / 2000 [ 90%]  (Sampling)
## Chain 3: Iteration: 2000 / 2000 [100%]  (Sampling)
## Chain 3: 
## Chain 3:  Elapsed Time: 0.159 seconds (Warm-up)
## Chain 3:                0.153 seconds (Sampling)
## Chain 3:                0.312 seconds (Total)
## Chain 3: 
## 
## SAMPLING FOR MODEL 'bssd' NOW (CHAIN 4).
## Chain 4: 
## Chain 4: Gradient evaluation took 2.5e-05 seconds
## Chain 4: 1000 transitions using 10 leapfrog steps per transition would take 0.25 seconds.
## Chain 4: Adjust your expectations accordingly!
## Chain 4: 
## Chain 4: 
## Chain 4: Iteration:    1 / 2000 [  0%]  (Warmup)
## Chain 4: Iteration:  200 / 2000 [ 10%]  (Warmup)
## Chain 4: Iteration:  400 / 2000 [ 20%]  (Warmup)
## Chain 4: Iteration:  600 / 2000 [ 30%]  (Warmup)
## Chain 4: Iteration:  800 / 2000 [ 40%]  (Warmup)
## Chain 4: Iteration: 1000 / 2000 [ 50%]  (Warmup)
## Chain 4: Iteration: 1001 / 2000 [ 50%]  (Sampling)
## Chain 4: Iteration: 1200 / 2000 [ 60%]  (Sampling)
## Chain 4: Iteration: 1400 / 2000 [ 70%]  (Sampling)
## Chain 4: Iteration: 1600 / 2000 [ 80%]  (Sampling)
## Chain 4: Iteration: 1800 / 2000 [ 90%]  (Sampling)
## Chain 4: Iteration: 2000 / 2000 [100%]  (Sampling)
## Chain 4: 
## Chain 4:  Elapsed Time: 0.153 seconds (Warm-up)
## Chain 4:                0.167 seconds (Sampling)
## Chain 4:                0.32 seconds (Total)
## Chain 4:
fit
## Prob(Event) in arm 1: 0.175 
## Prob(Event) in arm 2: 0.203 
## Prob(Event rate in arm 2 > arm 1): 0.59625
fit$fit
## Inference for Stan model: bssd.
## 4 chains, each with iter=2000; warmup=1000; thin=1; 
## post-warmup draws per chain=1000, total post-warmup draws=4000.
## 
##         mean se_mean   sd   2.5%    25%    50%    75%  97.5% n_eff Rhat
## mu[1]  -1.75    0.02 0.51  -2.94  -2.04  -1.68  -1.38  -0.90  1090    1
## mu[2]  -1.57    0.02 0.53  -2.72  -1.79  -1.49  -1.24  -0.84  1061    1
## pi1     0.17    0.00 0.07   0.05   0.12   0.17   0.22   0.33  1233    1
## pi2     0.20    0.00 0.07   0.06   0.15   0.20   0.25   0.35  1726    1
## lp__  -50.35    0.04 1.13 -53.39 -50.78 -50.00 -49.56 -49.25   886    1
## 
## Samples were drawn using NUTS(diag_e) at Wed Nov 15 12:39:33 2023.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at 
## convergence, Rhat=1).
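
The reported Prob(Event rate in arm 2 > arm 1) can be reproduced from the posterior draws; a minimal sketch, assuming fit$fit behaves like an ordinary rstan stanfit object (as its summary above suggests):

# Recompute Prob(pi2 > pi1) from the posterior draws of the blinded fit.
draws <- rstan::extract(fit$fit, pars = c("pi1", "pi2"))
mean(draws$pi2 > draws$pi1)  # should be close to the 0.59625 reported above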

Unblinded analysis

fit <- stan_bssd(y = df$y, time = df$time, tmt = df$tmt,
                 w1 = w1, pi1_alpha = pi1_alpha, pi1_beta = pi1_beta,
                 w2 = w2, pi2_alpha = pi2_alpha, pi2_beta = pi2_beta)
## 
## SAMPLING FOR MODEL 'bssd_unblinded' NOW (CHAIN 1).
## Chain 1: 
## Chain 1: Gradient evaluation took 1.2e-05 seconds
## Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 0.12 seconds.
## Chain 1: Adjust your expectations accordingly!
## Chain 1: 
## Chain 1: 
## Chain 1: Iteration:    1 / 2000 [  0%]  (Warmup)
## Chain 1: Iteration:  200 / 2000 [ 10%]  (Warmup)
## Chain 1: Iteration:  400 / 2000 [ 20%]  (Warmup)
## Chain 1: Iteration:  600 / 2000 [ 30%]  (Warmup)
## Chain 1: Iteration:  800 / 2000 [ 40%]  (Warmup)
## Chain 1: Iteration: 1000 / 2000 [ 50%]  (Warmup)
## Chain 1: Iteration: 1001 / 2000 [ 50%]  (Sampling)
## Chain 1: Iteration: 1200 / 2000 [ 60%]  (Sampling)
## Chain 1: Iteration: 1400 / 2000 [ 70%]  (Sampling)
## Chain 1: Iteration: 1600 / 2000 [ 80%]  (Sampling)
## Chain 1: Iteration: 1800 / 2000 [ 90%]  (Sampling)
## Chain 1: Iteration: 2000 / 2000 [100%]  (Sampling)
## Chain 1: 
## Chain 1:  Elapsed Time: 0.045 seconds (Warm-up)
## Chain 1:                0.049 seconds (Sampling)
## Chain 1:                0.094 seconds (Total)
## Chain 1: 
## 
## SAMPLING FOR MODEL 'bssd_unblinded' NOW (CHAIN 2).
## Chain 2: 
## Chain 2: Gradient evaluation took 1e-05 seconds
## Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 0.1 seconds.
## Chain 2: Adjust your expectations accordingly!
## Chain 2: 
## Chain 2: 
## Chain 2: Iteration:    1 / 2000 [  0%]  (Warmup)
## Chain 2: Iteration:  200 / 2000 [ 10%]  (Warmup)
## Chain 2: Iteration:  400 / 2000 [ 20%]  (Warmup)
## Chain 2: Iteration:  600 / 2000 [ 30%]  (Warmup)
## Chain 2: Iteration:  800 / 2000 [ 40%]  (Warmup)
## Chain 2: Iteration: 1000 / 2000 [ 50%]  (Warmup)
## Chain 2: Iteration: 1001 / 2000 [ 50%]  (Sampling)
## Chain 2: Iteration: 1200 / 2000 [ 60%]  (Sampling)
## Chain 2: Iteration: 1400 / 2000 [ 70%]  (Sampling)
## Chain 2: Iteration: 1600 / 2000 [ 80%]  (Sampling)
## Chain 2: Iteration: 1800 / 2000 [ 90%]  (Sampling)
## Chain 2: Iteration: 2000 / 2000 [100%]  (Sampling)
## Chain 2: 
## Chain 2:  Elapsed Time: 0.046 seconds (Warm-up)
## Chain 2:                0.045 seconds (Sampling)
## Chain 2:                0.091 seconds (Total)
## Chain 2: 
## 
## SAMPLING FOR MODEL 'bssd_unblinded' NOW (CHAIN 3).
## Chain 3: 
## Chain 3: Gradient evaluation took 1.1e-05 seconds
## Chain 3: 1000 transitions using 10 leapfrog steps per transition would take 0.11 seconds.
## Chain 3: Adjust your expectations accordingly!
## Chain 3: 
## Chain 3: 
## Chain 3: Iteration:    1 / 2000 [  0%]  (Warmup)
## Chain 3: Iteration:  200 / 2000 [ 10%]  (Warmup)
## Chain 3: Iteration:  400 / 2000 [ 20%]  (Warmup)
## Chain 3: Iteration:  600 / 2000 [ 30%]  (Warmup)
## Chain 3: Iteration:  800 / 2000 [ 40%]  (Warmup)
## Chain 3: Iteration: 1000 / 2000 [ 50%]  (Warmup)
## Chain 3: Iteration: 1001 / 2000 [ 50%]  (Sampling)
## Chain 3: Iteration: 1200 / 2000 [ 60%]  (Sampling)
## Chain 3: Iteration: 1400 / 2000 [ 70%]  (Sampling)
## Chain 3: Iteration: 1600 / 2000 [ 80%]  (Sampling)
## Chain 3: Iteration: 1800 / 2000 [ 90%]  (Sampling)
## Chain 3: Iteration: 2000 / 2000 [100%]  (Sampling)
## Chain 3: 
## Chain 3:  Elapsed Time: 0.045 seconds (Warm-up)
## Chain 3:                0.04 seconds (Sampling)
## Chain 3:                0.085 seconds (Total)
## Chain 3: 
## 
## SAMPLING FOR MODEL 'bssd_unblinded' NOW (CHAIN 4).
## Chain 4: 
## Chain 4: Gradient evaluation took 9e-06 seconds
## Chain 4: 1000 transitions using 10 leapfrog steps per transition would take 0.09 seconds.
## Chain 4: Adjust your expectations accordingly!
## Chain 4: 
## Chain 4: 
## Chain 4: Iteration:    1 / 2000 [  0%]  (Warmup)
## Chain 4: Iteration:  200 / 2000 [ 10%]  (Warmup)
## Chain 4: Iteration:  400 / 2000 [ 20%]  (Warmup)
## Chain 4: Iteration:  600 / 2000 [ 30%]  (Warmup)
## Chain 4: Iteration:  800 / 2000 [ 40%]  (Warmup)
## Chain 4: Iteration: 1000 / 2000 [ 50%]  (Warmup)
## Chain 4: Iteration: 1001 / 2000 [ 50%]  (Sampling)
## Chain 4: Iteration: 1200 / 2000 [ 60%]  (Sampling)
## Chain 4: Iteration: 1400 / 2000 [ 70%]  (Sampling)
## Chain 4: Iteration: 1600 / 2000 [ 80%]  (Sampling)
## Chain 4: Iteration: 1800 / 2000 [ 90%]  (Sampling)
## Chain 4: Iteration: 2000 / 2000 [100%]  (Sampling)
## Chain 4: 
## Chain 4:  Elapsed Time: 0.044 seconds (Warm-up)
## Chain 4:                0.048 seconds (Sampling)
## Chain 4:                0.092 seconds (Total)
## Chain 4:
fit
## Prob(Event) in arm 1: 0.178 
## Prob(Event) in arm 2: 0.205 
## Prob(Event rate in arm 2 > arm 1): 0.6545
fit$fit
## Inference for Stan model: bssd_unblinded.
## 4 chains, each with iter=2000; warmup=1000; thin=1; 
## post-warmup draws per chain=1000, total post-warmup draws=4000.
## 
##         mean se_mean   sd   2.5%    25%    50%    75%  97.5% n_eff Rhat
## mu[1]  -1.67    0.01 0.31  -2.34  -1.87  -1.65  -1.45  -1.11  2944    1
## mu[2]  -1.50    0.01 0.29  -2.10  -1.70  -1.49  -1.30  -0.98  3152    1
## pi1     0.18    0.00 0.05   0.09   0.14   0.17   0.21   0.28  3273    1
## pi2     0.21    0.00 0.05   0.12   0.17   0.20   0.24   0.31  3297    1
## lp__  -50.23    0.02 0.98 -52.88 -50.64 -49.94 -49.52 -49.23  1703    1
## 
## Samples were drawn using NUTS(diag_e) at Wed Nov 15 12:39:34 2023.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at 
## convergence, Rhat=1).

Using the large historical dataset prior

w1 <- mix_large["w",]
pi1_alpha <- mix_large["a",]
pi1_beta <- mix_large["b",]

Blinded analysis

fit <- stan_bssd(y = df$y, time = df$time, prob_arm1 = prob_arm1,
                 w1 = w1, pi1_alpha = pi1_alpha, pi1_beta = pi1_beta,
                 w2 = w2, pi2_alpha = pi2_alpha, pi2_beta = pi2_beta)
## 
## SAMPLING FOR MODEL 'bssd' NOW (CHAIN 1).
## Chain 1: 
## Chain 1: Gradient evaluation took 2.8e-05 seconds
## Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 0.28 seconds.
## Chain 1: Adjust your expectations accordingly!
## Chain 1: 
## Chain 1: 
## Chain 1: Iteration:    1 / 2000 [  0%]  (Warmup)
## Chain 1: Iteration:  200 / 2000 [ 10%]  (Warmup)
## Chain 1: Iteration:  400 / 2000 [ 20%]  (Warmup)
## Chain 1: Iteration:  600 / 2000 [ 30%]  (Warmup)
## Chain 1: Iteration:  800 / 2000 [ 40%]  (Warmup)
## Chain 1: Iteration: 1000 / 2000 [ 50%]  (Warmup)
## Chain 1: Iteration: 1001 / 2000 [ 50%]  (Sampling)
## Chain 1: Iteration: 1200 / 2000 [ 60%]  (Sampling)
## Chain 1: Iteration: 1400 / 2000 [ 70%]  (Sampling)
## Chain 1: Iteration: 1600 / 2000 [ 80%]  (Sampling)
## Chain 1: Iteration: 1800 / 2000 [ 90%]  (Sampling)
## Chain 1: Iteration: 2000 / 2000 [100%]  (Sampling)
## Chain 1: 
## Chain 1:  Elapsed Time: 0.13 seconds (Warm-up)
## Chain 1:                0.14 seconds (Sampling)
## Chain 1:                0.27 seconds (Total)
## Chain 1: 
## 
## SAMPLING FOR MODEL 'bssd' NOW (CHAIN 2).
## Chain 2: 
## Chain 2: Gradient evaluation took 2.7e-05 seconds
## Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 0.27 seconds.
## Chain 2: Adjust your expectations accordingly!
## Chain 2: 
## Chain 2: 
## Chain 2: Iteration:    1 / 2000 [  0%]  (Warmup)
## Chain 2: Iteration:  200 / 2000 [ 10%]  (Warmup)
## Chain 2: Iteration:  400 / 2000 [ 20%]  (Warmup)
## Chain 2: Iteration:  600 / 2000 [ 30%]  (Warmup)
## Chain 2: Iteration:  800 / 2000 [ 40%]  (Warmup)
## Chain 2: Iteration: 1000 / 2000 [ 50%]  (Warmup)
## Chain 2: Iteration: 1001 / 2000 [ 50%]  (Sampling)
## Chain 2: Iteration: 1200 / 2000 [ 60%]  (Sampling)
## Chain 2: Iteration: 1400 / 2000 [ 70%]  (Sampling)
## Chain 2: Iteration: 1600 / 2000 [ 80%]  (Sampling)
## Chain 2: Iteration: 1800 / 2000 [ 90%]  (Sampling)
## Chain 2: Iteration: 2000 / 2000 [100%]  (Sampling)
## Chain 2: 
## Chain 2:  Elapsed Time: 0.132 seconds (Warm-up)
## Chain 2:                0.123 seconds (Sampling)
## Chain 2:                0.255 seconds (Total)
## Chain 2: 
## 
## SAMPLING FOR MODEL 'bssd' NOW (CHAIN 3).
## Chain 3: 
## Chain 3: Gradient evaluation took 2.6e-05 seconds
## Chain 3: 1000 transitions using 10 leapfrog steps per transition would take 0.26 seconds.
## Chain 3: Adjust your expectations accordingly!
## Chain 3: 
## Chain 3: 
## Chain 3: Iteration:    1 / 2000 [  0%]  (Warmup)
## Chain 3: Iteration:  200 / 2000 [ 10%]  (Warmup)
## Chain 3: Iteration:  400 / 2000 [ 20%]  (Warmup)
## Chain 3: Iteration:  600 / 2000 [ 30%]  (Warmup)
## Chain 3: Iteration:  800 / 2000 [ 40%]  (Warmup)
## Chain 3: Iteration: 1000 / 2000 [ 50%]  (Warmup)
## Chain 3: Iteration: 1001 / 2000 [ 50%]  (Sampling)
## Chain 3: Iteration: 1200 / 2000 [ 60%]  (Sampling)
## Chain 3: Iteration: 1400 / 2000 [ 70%]  (Sampling)
## Chain 3: Iteration: 1600 / 2000 [ 80%]  (Sampling)
## Chain 3: Iteration: 1800 / 2000 [ 90%]  (Sampling)
## Chain 3: Iteration: 2000 / 2000 [100%]  (Sampling)
## Chain 3: 
## Chain 3:  Elapsed Time: 0.127 seconds (Warm-up)
## Chain 3:                0.122 seconds (Sampling)
## Chain 3:                0.249 seconds (Total)
## Chain 3: 
## 
## SAMPLING FOR MODEL 'bssd' NOW (CHAIN 4).
## Chain 4: 
## Chain 4: Gradient evaluation took 2.7e-05 seconds
## Chain 4: 1000 transitions using 10 leapfrog steps per transition would take 0.27 seconds.
## Chain 4: Adjust your expectations accordingly!
## Chain 4: 
## Chain 4: 
## Chain 4: Iteration:    1 / 2000 [  0%]  (Warmup)
## Chain 4: Iteration:  200 / 2000 [ 10%]  (Warmup)
## Chain 4: Iteration:  400 / 2000 [ 20%]  (Warmup)
## Chain 4: Iteration:  600 / 2000 [ 30%]  (Warmup)
## Chain 4: Iteration:  800 / 2000 [ 40%]  (Warmup)
## Chain 4: Iteration: 1000 / 2000 [ 50%]  (Warmup)
## Chain 4: Iteration: 1001 / 2000 [ 50%]  (Sampling)
## Chain 4: Iteration: 1200 / 2000 [ 60%]  (Sampling)
## Chain 4: Iteration: 1400 / 2000 [ 70%]  (Sampling)
## Chain 4: Iteration: 1600 / 2000 [ 80%]  (Sampling)
## Chain 4: Iteration: 1800 / 2000 [ 90%]  (Sampling)
## Chain 4: Iteration: 2000 / 2000 [100%]  (Sampling)
## Chain 4: 
## Chain 4:  Elapsed Time: 0.126 seconds (Warm-up)
## Chain 4:                0.103 seconds (Sampling)
## Chain 4:                0.229 seconds (Total)
## Chain 4:
fit
## Prob(Event) in arm 1: 0.132 
## Prob(Event) in arm 2: 0.226 
## Prob(Event rate in arm 2 > arm 1): 0.923
fit$fit
## Inference for Stan model: bssd.
## 4 chains, each with iter=2000; warmup=1000; thin=1; 
## post-warmup draws per chain=1000, total post-warmup draws=4000.
## 
##         mean se_mean   sd   2.5%    25%    50%    75%  97.5% n_eff Rhat
## mu[1]  -1.96    0.00 0.14  -2.24  -2.05  -1.96  -1.87  -1.70  2691    1
## mu[2]  -1.40    0.01 0.33  -2.14  -1.60  -1.38  -1.17  -0.81  2646    1
## pi1     0.13    0.00 0.02   0.10   0.12   0.13   0.14   0.17  2974    1
## pi2     0.23    0.00 0.06   0.11   0.18   0.22   0.27   0.36  3067    1
## lp__  -49.01    0.02 1.01 -51.67 -49.41 -48.69 -48.29 -48.00  1744    1
## 
## Samples were drawn using NUTS(diag_e) at Wed Nov 15 12:39:35 2023.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at 
## convergence, Rhat=1).

Unblinded analysis

fit <- stan_bssd(y = df$y, time = df$time, tmt = df$tmt,
                 w1 = w1, pi1_alpha = pi1_alpha, pi1_beta = pi1_beta,
                 w2 = w2, pi2_alpha = pi2_alpha, pi2_beta = pi2_beta)
## 
## SAMPLING FOR MODEL 'bssd_unblinded' NOW (CHAIN 1).
## Chain 1: 
## Chain 1: Gradient evaluation took 1e-05 seconds
## Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 0.1 seconds.
## Chain 1: Adjust your expectations accordingly!
## Chain 1: 
## Chain 1: 
## Chain 1: Iteration:    1 / 2000 [  0%]  (Warmup)
## Chain 1: Iteration:  200 / 2000 [ 10%]  (Warmup)
## Chain 1: Iteration:  400 / 2000 [ 20%]  (Warmup)
## Chain 1: Iteration:  600 / 2000 [ 30%]  (Warmup)
## Chain 1: Iteration:  800 / 2000 [ 40%]  (Warmup)
## Chain 1: Iteration: 1000 / 2000 [ 50%]  (Warmup)
## Chain 1: Iteration: 1001 / 2000 [ 50%]  (Sampling)
## Chain 1: Iteration: 1200 / 2000 [ 60%]  (Sampling)
## Chain 1: Iteration: 1400 / 2000 [ 70%]  (Sampling)
## Chain 1: Iteration: 1600 / 2000 [ 80%]  (Sampling)
## Chain 1: Iteration: 1800 / 2000 [ 90%]  (Sampling)
## Chain 1: Iteration: 2000 / 2000 [100%]  (Sampling)
## Chain 1: 
## Chain 1:  Elapsed Time: 0.047 seconds (Warm-up)
## Chain 1:                0.047 seconds (Sampling)
## Chain 1:                0.094 seconds (Total)
## Chain 1: 
## 
## SAMPLING FOR MODEL 'bssd_unblinded' NOW (CHAIN 2).
## Chain 2: 
## Chain 2: Gradient evaluation took 9e-06 seconds
## Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 0.09 seconds.
## Chain 2: Adjust your expectations accordingly!
## Chain 2: 
## Chain 2: 
## Chain 2: Iteration:    1 / 2000 [  0%]  (Warmup)
## Chain 2: Iteration:  200 / 2000 [ 10%]  (Warmup)
## Chain 2: Iteration:  400 / 2000 [ 20%]  (Warmup)
## Chain 2: Iteration:  600 / 2000 [ 30%]  (Warmup)
## Chain 2: Iteration:  800 / 2000 [ 40%]  (Warmup)
## Chain 2: Iteration: 1000 / 2000 [ 50%]  (Warmup)
## Chain 2: Iteration: 1001 / 2000 [ 50%]  (Sampling)
## Chain 2: Iteration: 1200 / 2000 [ 60%]  (Sampling)
## Chain 2: Iteration: 1400 / 2000 [ 70%]  (Sampling)
## Chain 2: Iteration: 1600 / 2000 [ 80%]  (Sampling)
## Chain 2: Iteration: 1800 / 2000 [ 90%]  (Sampling)
## Chain 2: Iteration: 2000 / 2000 [100%]  (Sampling)
## Chain 2: 
## Chain 2:  Elapsed Time: 0.045 seconds (Warm-up)
## Chain 2:                0.045 seconds (Sampling)
## Chain 2:                0.09 seconds (Total)
## Chain 2: 
## 
## SAMPLING FOR MODEL 'bssd_unblinded' NOW (CHAIN 3).
## Chain 3: 
## Chain 3: Gradient evaluation took 9e-06 seconds
## Chain 3: 1000 transitions using 10 leapfrog steps per transition would take 0.09 seconds.
## Chain 3: Adjust your expectations accordingly!
## Chain 3: 
## Chain 3: 
## Chain 3: Iteration:    1 / 2000 [  0%]  (Warmup)
## Chain 3: Iteration:  200 / 2000 [ 10%]  (Warmup)
## Chain 3: Iteration:  400 / 2000 [ 20%]  (Warmup)
## Chain 3: Iteration:  600 / 2000 [ 30%]  (Warmup)
## Chain 3: Iteration:  800 / 2000 [ 40%]  (Warmup)
## Chain 3: Iteration: 1000 / 2000 [ 50%]  (Warmup)
## Chain 3: Iteration: 1001 / 2000 [ 50%]  (Sampling)
## Chain 3: Iteration: 1200 / 2000 [ 60%]  (Sampling)
## Chain 3: Iteration: 1400 / 2000 [ 70%]  (Sampling)
## Chain 3: Iteration: 1600 / 2000 [ 80%]  (Sampling)
## Chain 3: Iteration: 1800 / 2000 [ 90%]  (Sampling)
## Chain 3: Iteration: 2000 / 2000 [100%]  (Sampling)
## Chain 3: 
## Chain 3:  Elapsed Time: 0.045 seconds (Warm-up)
## Chain 3:                0.044 seconds (Sampling)
## Chain 3:                0.089 seconds (Total)
## Chain 3: 
## 
## SAMPLING FOR MODEL 'bssd_unblinded' NOW (CHAIN 4).
## Chain 4: 
## Chain 4: Gradient evaluation took 9e-06 seconds
## Chain 4: 1000 transitions using 10 leapfrog steps per transition would take 0.09 seconds.
## Chain 4: Adjust your expectations accordingly!
## Chain 4: 
## Chain 4: 
## Chain 4: Iteration:    1 / 2000 [  0%]  (Warmup)
## Chain 4: Iteration:  200 / 2000 [ 10%]  (Warmup)
## Chain 4: Iteration:  400 / 2000 [ 20%]  (Warmup)
## Chain 4: Iteration:  600 / 2000 [ 30%]  (Warmup)
## Chain 4: Iteration:  800 / 2000 [ 40%]  (Warmup)
## Chain 4: Iteration: 1000 / 2000 [ 50%]  (Warmup)
## Chain 4: Iteration: 1001 / 2000 [ 50%]  (Sampling)
## Chain 4: Iteration: 1200 / 2000 [ 60%]  (Sampling)
## Chain 4: Iteration: 1400 / 2000 [ 70%]  (Sampling)
## Chain 4: Iteration: 1600 / 2000 [ 80%]  (Sampling)
## Chain 4: Iteration: 1800 / 2000 [ 90%]  (Sampling)
## Chain 4: Iteration: 2000 / 2000 [100%]  (Sampling)
## Chain 4: 
## Chain 4:  Elapsed Time: 0.044 seconds (Warm-up)
## Chain 4:                0.041 seconds (Sampling)
## Chain 4:                0.085 seconds (Total)
## Chain 4:
fit
## Prob(Event) in arm 1: 0.136 
## Prob(Event) in arm 2: 0.204 
## Prob(Event rate in arm 2 > arm 1): 0.90175
fit$fit
## Inference for Stan model: bssd_unblinded.
## 4 chains, each with iter=2000; warmup=1000; thin=1; 
## post-warmup draws per chain=1000, total post-warmup draws=4000.
## 
##         mean se_mean   sd   2.5%    25%    50%    75%  97.5% n_eff Rhat
## mu[1]  -1.93    0.00 0.13  -2.19  -2.02  -1.93  -1.84  -1.67  3664    1
## mu[2]  -1.51    0.01 0.30  -2.13  -1.69  -1.50  -1.30  -0.96  2802    1
## pi1     0.14    0.00 0.02   0.11   0.12   0.14   0.15   0.17  3771    1
## pi2     0.20    0.00 0.05   0.11   0.17   0.20   0.24   0.32  2880    1
## lp__  -49.33    0.02 1.02 -52.09 -49.74 -49.01 -48.61 -48.32  1930    1
## 
## Samples were drawn using NUTS(diag_e) at Wed Nov 15 12:39:35 2023.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at 
## convergence, Rhat=1).

Large current dataset

df <- read.csv("large_current.csv")

Using the small historical dataset prior

w1 <- mix_small["w",]
pi1_alpha <- mix_small["a",]
pi1_beta <- mix_small["b",]

Blinded analysis

fit <- stan_bssd(y = df$y, time = df$time, prob_arm1 = prob_arm1,
                 w1 = w1, pi1_alpha = pi1_alpha, pi1_beta = pi1_beta,
                 w2 = w2, pi2_alpha = pi2_alpha, pi2_beta = pi2_beta)
## 
## SAMPLING FOR MODEL 'bssd' NOW (CHAIN 1).
## Chain 1: Rejecting initial value:
## Chain 1:   Gradient evaluated at the initial value is not finite.
## Chain 1:   Stan can't start sampling from this initial value.
## Chain 1: Rejecting initial value:
## Chain 1:   Gradient evaluated at the initial value is not finite.
## Chain 1:   Stan can't start sampling from this initial value.
## Chain 1: 
## Chain 1: Gradient evaluation took 0.000246 seconds
## Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 2.46 seconds.
## Chain 1: Adjust your expectations accordingly!
## Chain 1: 
## Chain 1: 
## Chain 1: Iteration:    1 / 2000 [  0%]  (Warmup)
## Chain 1: Iteration:  200 / 2000 [ 10%]  (Warmup)
## Chain 1: Iteration:  400 / 2000 [ 20%]  (Warmup)
## Chain 1: Iteration:  600 / 2000 [ 30%]  (Warmup)
## Chain 1: Iteration:  800 / 2000 [ 40%]  (Warmup)
## Chain 1: Iteration: 1000 / 2000 [ 50%]  (Warmup)
## Chain 1: Iteration: 1001 / 2000 [ 50%]  (Sampling)
## Chain 1: Iteration: 1200 / 2000 [ 60%]  (Sampling)
## Chain 1: Iteration: 1400 / 2000 [ 70%]  (Sampling)
## Chain 1: Iteration: 1600 / 2000 [ 80%]  (Sampling)
## Chain 1: Iteration: 1800 / 2000 [ 90%]  (Sampling)
## Chain 1: Iteration: 2000 / 2000 [100%]  (Sampling)
## Chain 1: 
## Chain 1:  Elapsed Time: 2.562 seconds (Warm-up)
## Chain 1:                2.735 seconds (Sampling)
## Chain 1:                5.297 seconds (Total)
## Chain 1: 
## 
## SAMPLING FOR MODEL 'bssd' NOW (CHAIN 2).
## Chain 2: 
## Chain 2: Gradient evaluation took 0.000255 seconds
## Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 2.55 seconds.
## Chain 2: Adjust your expectations accordingly!
## Chain 2: 
## Chain 2: 
## Chain 2: Iteration:    1 / 2000 [  0%]  (Warmup)
## Chain 2: Iteration:  200 / 2000 [ 10%]  (Warmup)
## Chain 2: Iteration:  400 / 2000 [ 20%]  (Warmup)
## Chain 2: Iteration:  600 / 2000 [ 30%]  (Warmup)
## Chain 2: Iteration:  800 / 2000 [ 40%]  (Warmup)
## Chain 2: Iteration: 1000 / 2000 [ 50%]  (Warmup)
## Chain 2: Iteration: 1001 / 2000 [ 50%]  (Sampling)
## Chain 2: Iteration: 1200 / 2000 [ 60%]  (Sampling)
## Chain 2: Iteration: 1400 / 2000 [ 70%]  (Sampling)
## Chain 2: Iteration: 1600 / 2000 [ 80%]  (Sampling)
## Chain 2: Iteration: 1800 / 2000 [ 90%]  (Sampling)
## Chain 2: Iteration: 2000 / 2000 [100%]  (Sampling)
## Chain 2: 
## Chain 2:  Elapsed Time: 2.497 seconds (Warm-up)
## Chain 2:                2.409 seconds (Sampling)
## Chain 2:                4.906 seconds (Total)
## Chain 2: 
## 
## SAMPLING FOR MODEL 'bssd' NOW (CHAIN 3).
## Chain 3: 
## Chain 3: Gradient evaluation took 0.000271 seconds
## Chain 3: 1000 transitions using 10 leapfrog steps per transition would take 2.71 seconds.
## Chain 3: Adjust your expectations accordingly!
## Chain 3: 
## Chain 3: 
## Chain 3: Iteration:    1 / 2000 [  0%]  (Warmup)
## Chain 3: Iteration:  200 / 2000 [ 10%]  (Warmup)
## Chain 3: Iteration:  400 / 2000 [ 20%]  (Warmup)
## Chain 3: Iteration:  600 / 2000 [ 30%]  (Warmup)
## Chain 3: Iteration:  800 / 2000 [ 40%]  (Warmup)
## Chain 3: Iteration: 1000 / 2000 [ 50%]  (Warmup)
## Chain 3: Iteration: 1001 / 2000 [ 50%]  (Sampling)
## Chain 3: Iteration: 1200 / 2000 [ 60%]  (Sampling)
## Chain 3: Iteration: 1400 / 2000 [ 70%]  (Sampling)
## Chain 3: Iteration: 1600 / 2000 [ 80%]  (Sampling)
## Chain 3: Iteration: 1800 / 2000 [ 90%]  (Sampling)
## Chain 3: Iteration: 2000 / 2000 [100%]  (Sampling)
## Chain 3: 
## Chain 3:  Elapsed Time: 2.582 seconds (Warm-up)
## Chain 3:                2.398 seconds (Sampling)
## Chain 3:                4.98 seconds (Total)
## Chain 3: 
## 
## SAMPLING FOR MODEL 'bssd' NOW (CHAIN 4).
## Chain 4: 
## Chain 4: Gradient evaluation took 0.000258 seconds
## Chain 4: 1000 transitions using 10 leapfrog steps per transition would take 2.58 seconds.
## Chain 4: Adjust your expectations accordingly!
## Chain 4: 
## Chain 4: 
## Chain 4: Iteration:    1 / 2000 [  0%]  (Warmup)
## Chain 4: Iteration:  200 / 2000 [ 10%]  (Warmup)
## Chain 4: Iteration:  400 / 2000 [ 20%]  (Warmup)
## Chain 4: Iteration:  600 / 2000 [ 30%]  (Warmup)
## Chain 4: Iteration:  800 / 2000 [ 40%]  (Warmup)
## Chain 4: Iteration: 1000 / 2000 [ 50%]  (Warmup)
## Chain 4: Iteration: 1001 / 2000 [ 50%]  (Sampling)
## Chain 4: Iteration: 1200 / 2000 [ 60%]  (Sampling)
## Chain 4: Iteration: 1400 / 2000 [ 70%]  (Sampling)
## Chain 4: Iteration: 1600 / 2000 [ 80%]  (Sampling)
## Chain 4: Iteration: 1800 / 2000 [ 90%]  (Sampling)
## Chain 4: Iteration: 2000 / 2000 [100%]  (Sampling)
## Chain 4: 
## Chain 4:  Elapsed Time: 2.524 seconds (Warm-up)
## Chain 4:                2.215 seconds (Sampling)
## Chain 4:                4.739 seconds (Total)
## Chain 4:
fit
## Prob(Event) in arm 1: 0.157 
## Prob(Event) in arm 2: 0.239 
## Prob(Event rate in arm 2 > arm 1): 0.77275
fit$fit
## Inference for Stan model: bssd.
## 4 chains, each with iter=2000; warmup=1000; thin=1; 
## post-warmup draws per chain=1000, total post-warmup draws=4000.
## 
##          mean se_mean   sd    2.5%     25%     50%     75%   97.5% n_eff Rhat
## mu[1]   -1.84    0.02 0.41   -2.49   -2.15   -1.92   -1.56   -0.95   522 1.01
## mu[2]   -1.32    0.01 0.23   -1.77   -1.49   -1.28   -1.14   -0.92   558 1.01
## pi1      0.16    0.00 0.06    0.08    0.11    0.14    0.19    0.32   495 1.01
## pi2      0.24    0.00 0.05    0.16    0.20    0.24    0.27    0.33   581 1.01
## lp__  -558.58    0.03 0.92 -561.00 -558.91 -558.34 -557.95 -557.64  1268 1.00
## 
## Samples were drawn using NUTS(diag_e) at Wed Nov 15 12:39:55 2023.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at 
## convergence, Rhat=1).

Unblinded analysis

fit <- stan_bssd(y = df$y, time = df$time, tmt = df$tmt,
                 w1 = w1, pi1_alpha = pi1_alpha, pi1_beta = pi1_beta,
                 w2 = w2, pi2_alpha = pi2_alpha, pi2_beta = pi2_beta)
## 
## SAMPLING FOR MODEL 'bssd_unblinded' NOW (CHAIN 1).
## Chain 1: Rejecting initial value:
## Chain 1:   Log probability evaluates to log(0), i.e. negative infinity.
## Chain 1:   Stan can't start sampling from this initial value.
## Chain 1: 
## Chain 1: Gradient evaluation took 8e-05 seconds
## Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 0.8 seconds.
## Chain 1: Adjust your expectations accordingly!
## Chain 1: 
## Chain 1: 
## Chain 1: Iteration:    1 / 2000 [  0%]  (Warmup)
## Chain 1: Iteration:  200 / 2000 [ 10%]  (Warmup)
## Chain 1: Iteration:  400 / 2000 [ 20%]  (Warmup)
## Chain 1: Iteration:  600 / 2000 [ 30%]  (Warmup)
## Chain 1: Iteration:  800 / 2000 [ 40%]  (Warmup)
## Chain 1: Iteration: 1000 / 2000 [ 50%]  (Warmup)
## Chain 1: Iteration: 1001 / 2000 [ 50%]  (Sampling)
## Chain 1: Iteration: 1200 / 2000 [ 60%]  (Sampling)
## Chain 1: Iteration: 1400 / 2000 [ 70%]  (Sampling)
## Chain 1: Iteration: 1600 / 2000 [ 80%]  (Sampling)
## Chain 1: Iteration: 1800 / 2000 [ 90%]  (Sampling)
## Chain 1: Iteration: 2000 / 2000 [100%]  (Sampling)
## Chain 1: 
## Chain 1:  Elapsed Time: 0.387 seconds (Warm-up)
## Chain 1:                0.402 seconds (Sampling)
## Chain 1:                0.789 seconds (Total)
## Chain 1: 
## 
## SAMPLING FOR MODEL 'bssd_unblinded' NOW (CHAIN 2).
## Chain 2: 
## Chain 2: Gradient evaluation took 7.9e-05 seconds
## Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 0.79 seconds.
## Chain 2: Adjust your expectations accordingly!
## Chain 2: 
## Chain 2: 
## Chain 2: Iteration:    1 / 2000 [  0%]  (Warmup)
## Chain 2: Iteration:  200 / 2000 [ 10%]  (Warmup)
## Chain 2: Iteration:  400 / 2000 [ 20%]  (Warmup)
## Chain 2: Iteration:  600 / 2000 [ 30%]  (Warmup)
## Chain 2: Iteration:  800 / 2000 [ 40%]  (Warmup)
## Chain 2: Iteration: 1000 / 2000 [ 50%]  (Warmup)
## Chain 2: Iteration: 1001 / 2000 [ 50%]  (Sampling)
## Chain 2: Iteration: 1200 / 2000 [ 60%]  (Sampling)
## Chain 2: Iteration: 1400 / 2000 [ 70%]  (Sampling)
## Chain 2: Iteration: 1600 / 2000 [ 80%]  (Sampling)
## Chain 2: Iteration: 1800 / 2000 [ 90%]  (Sampling)
## Chain 2: Iteration: 2000 / 2000 [100%]  (Sampling)
## Chain 2: 
## Chain 2:  Elapsed Time: 0.383 seconds (Warm-up)
## Chain 2:                0.349 seconds (Sampling)
## Chain 2:                0.732 seconds (Total)
## Chain 2: 
## 
## SAMPLING FOR MODEL 'bssd_unblinded' NOW (CHAIN 3).
## Chain 3: 
## Chain 3: Gradient evaluation took 8.7e-05 seconds
## Chain 3: 1000 transitions using 10 leapfrog steps per transition would take 0.87 seconds.
## Chain 3: Adjust your expectations accordingly!
## Chain 3: 
## Chain 3: 
## Chain 3: Iteration:    1 / 2000 [  0%]  (Warmup)
## Chain 3: Iteration:  200 / 2000 [ 10%]  (Warmup)
## Chain 3: Iteration:  400 / 2000 [ 20%]  (Warmup)
## Chain 3: Iteration:  600 / 2000 [ 30%]  (Warmup)
## Chain 3: Iteration:  800 / 2000 [ 40%]  (Warmup)
## Chain 3: Iteration: 1000 / 2000 [ 50%]  (Warmup)
## Chain 3: Iteration: 1001 / 2000 [ 50%]  (Sampling)
## Chain 3: Iteration: 1200 / 2000 [ 60%]  (Sampling)
## Chain 3: Iteration: 1400 / 2000 [ 70%]  (Sampling)
## Chain 3: Iteration: 1600 / 2000 [ 80%]  (Sampling)
## Chain 3: Iteration: 1800 / 2000 [ 90%]  (Sampling)
## Chain 3: Iteration: 2000 / 2000 [100%]  (Sampling)
## Chain 3: 
## Chain 3:  Elapsed Time: 0.387 seconds (Warm-up)
## Chain 3:                0.408 seconds (Sampling)
## Chain 3:                0.795 seconds (Total)
## Chain 3: 
## 
## SAMPLING FOR MODEL 'bssd_unblinded' NOW (CHAIN 4).
## Chain 4: 
## Chain 4: Gradient evaluation took 7.7e-05 seconds
## Chain 4: 1000 transitions using 10 leapfrog steps per transition would take 0.77 seconds.
## Chain 4: Adjust your expectations accordingly!
## Chain 4: 
## Chain 4: 
## Chain 4: Iteration:    1 / 2000 [  0%]  (Warmup)
## Chain 4: Iteration:  200 / 2000 [ 10%]  (Warmup)
## Chain 4: Iteration:  400 / 2000 [ 20%]  (Warmup)
## Chain 4: Iteration:  600 / 2000 [ 30%]  (Warmup)
## Chain 4: Iteration:  800 / 2000 [ 40%]  (Warmup)
## Chain 4: Iteration: 1000 / 2000 [ 50%]  (Warmup)
## Chain 4: Iteration: 1001 / 2000 [ 50%]  (Sampling)
## Chain 4: Iteration: 1200 / 2000 [ 60%]  (Sampling)
## Chain 4: Iteration: 1400 / 2000 [ 70%]  (Sampling)
## Chain 4: Iteration: 1600 / 2000 [ 80%]  (Sampling)
## Chain 4: Iteration: 1800 / 2000 [ 90%]  (Sampling)
## Chain 4: Iteration: 2000 / 2000 [100%]  (Sampling)
## Chain 4: 
## Chain 4:  Elapsed Time: 0.392 seconds (Warm-up)
## Chain 4:                0.37 seconds (Sampling)
## Chain 4:                0.762 seconds (Total)
## Chain 4:
fit
## Prob(Event) in arm 1: 0.148 
## Prob(Event) in arm 2: 0.229 
## Prob(Event rate in arm 2 > arm 1): 1
fit$fit
## Inference for Stan model: bssd_unblinded.
## 4 chains, each with iter=2000; warmup=1000; thin=1; 
## post-warmup draws per chain=1000, total post-warmup draws=4000.
## 
##          mean se_mean   sd    2.5%     25%     50%     75%   97.5% n_eff Rhat
## mu[1]   -1.83    0.00 0.07   -1.99   -1.88   -1.83   -1.79   -1.69  3643    1
## mu[2]   -1.35    0.00 0.05   -1.44   -1.38   -1.35   -1.31   -1.25  3502    1
## pi1      0.15    0.00 0.01    0.13    0.14    0.15    0.15    0.17  3661    1
## pi2      0.23    0.00 0.01    0.21    0.22    0.23    0.24    0.25  3476    1
## lp__  -545.34    0.02 0.94 -547.85 -545.70 -545.04 -544.68 -544.45  1951    1
## 
## Samples were drawn using NUTS(diag_e) at Wed Nov 15 12:39:58 2023.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at 
## convergence, Rhat=1).

Using the large historical dataset prior

w1 <- mix_large["w",]
pi1_alpha <- mix_large["a",]
pi1_beta <- mix_large["b",]

Blinded analysis

fit <- stan_bssd(y = df$y, time = df$time, prob_arm1 = prob_arm1,
                 w1 = w1, pi1_alpha = pi1_alpha, pi1_beta = pi1_beta,
                 w2 = w2, pi2_alpha = pi2_alpha, pi2_beta = pi2_beta)
## 
## SAMPLING FOR MODEL 'bssd' NOW (CHAIN 1).
## Chain 1: Rejecting initial value:
## Chain 1:   Gradient evaluated at the initial value is not finite.
## Chain 1:   Stan can't start sampling from this initial value.
## Chain 1: 
## Chain 1: Gradient evaluation took 0.000246 seconds
## Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 2.46 seconds.
## Chain 1: Adjust your expectations accordingly!
## Chain 1: 
## Chain 1: 
## Chain 1: Iteration:    1 / 2000 [  0%]  (Warmup)
## Chain 1: Iteration:  200 / 2000 [ 10%]  (Warmup)
## Chain 1: Iteration:  400 / 2000 [ 20%]  (Warmup)
## Chain 1: Iteration:  600 / 2000 [ 30%]  (Warmup)
## Chain 1: Iteration:  800 / 2000 [ 40%]  (Warmup)
## Chain 1: Iteration: 1000 / 2000 [ 50%]  (Warmup)
## Chain 1: Iteration: 1001 / 2000 [ 50%]  (Sampling)
## Chain 1: Iteration: 1200 / 2000 [ 60%]  (Sampling)
## Chain 1: Iteration: 1400 / 2000 [ 70%]  (Sampling)
## Chain 1: Iteration: 1600 / 2000 [ 80%]  (Sampling)
## Chain 1: Iteration: 1800 / 2000 [ 90%]  (Sampling)
## Chain 1: Iteration: 2000 / 2000 [100%]  (Sampling)
## Chain 1: 
## Chain 1:  Elapsed Time: 1.569 seconds (Warm-up)
## Chain 1:                1.781 seconds (Sampling)
## Chain 1:                3.35 seconds (Total)
## Chain 1: 
## 
## SAMPLING FOR MODEL 'bssd' NOW (CHAIN 2).
## Chain 2: Rejecting initial value:
## Chain 2:   Log probability evaluates to log(0), i.e. negative infinity.
## Chain 2:   Stan can't start sampling from this initial value.
## Chain 2: Rejecting initial value:
## Chain 2:   Gradient evaluated at the initial value is not finite.
## Chain 2:   Stan can't start sampling from this initial value.
## Chain 2: 
## Chain 2: Gradient evaluation took 0.000264 seconds
## Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 2.64 seconds.
## Chain 2: Adjust your expectations accordingly!
## Chain 2: 
## Chain 2: 
## Chain 2: Iteration:    1 / 2000 [  0%]  (Warmup)
## Chain 2: Iteration:  200 / 2000 [ 10%]  (Warmup)
## Chain 2: Iteration:  400 / 2000 [ 20%]  (Warmup)
## Chain 2: Iteration:  600 / 2000 [ 30%]  (Warmup)
## Chain 2: Iteration:  800 / 2000 [ 40%]  (Warmup)
## Chain 2: Iteration: 1000 / 2000 [ 50%]  (Warmup)
## Chain 2: Iteration: 1001 / 2000 [ 50%]  (Sampling)
## Chain 2: Iteration: 1200 / 2000 [ 60%]  (Sampling)
## Chain 2: Iteration: 1400 / 2000 [ 70%]  (Sampling)
## Chain 2: Iteration: 1600 / 2000 [ 80%]  (Sampling)
## Chain 2: Iteration: 1800 / 2000 [ 90%]  (Sampling)
## Chain 2: Iteration: 2000 / 2000 [100%]  (Sampling)
## Chain 2: 
## Chain 2:  Elapsed Time: 1.608 seconds (Warm-up)
## Chain 2:                1.499 seconds (Sampling)
## Chain 2:                3.107 seconds (Total)
## Chain 2: 
## 
## SAMPLING FOR MODEL 'bssd' NOW (CHAIN 3).
## Chain 3: Rejecting initial value:
## Chain 3:   Gradient evaluated at the initial value is not finite.
## Chain 3:   Stan can't start sampling from this initial value.
## Chain 3: Rejecting initial value:
## Chain 3:   Gradient evaluated at the initial value is not finite.
## Chain 3:   Stan can't start sampling from this initial value.
## Chain 3: Rejecting initial value:
## Chain 3:   Gradient evaluated at the initial value is not finite.
## Chain 3:   Stan can't start sampling from this initial value.
## Chain 3: 
## Chain 3: Gradient evaluation took 0.000254 seconds
## Chain 3: 1000 transitions using 10 leapfrog steps per transition would take 2.54 seconds.
## Chain 3: Adjust your expectations accordingly!
## Chain 3: 
## Chain 3: 
## Chain 3: Iteration:    1 / 2000 [  0%]  (Warmup)
## Chain 3: Iteration:  200 / 2000 [ 10%]  (Warmup)
## Chain 3: Iteration:  400 / 2000 [ 20%]  (Warmup)
## Chain 3: Iteration:  600 / 2000 [ 30%]  (Warmup)
## Chain 3: Iteration:  800 / 2000 [ 40%]  (Warmup)
## Chain 3: Iteration: 1000 / 2000 [ 50%]  (Warmup)
## Chain 3: Iteration: 1001 / 2000 [ 50%]  (Sampling)
## Chain 3: Iteration: 1200 / 2000 [ 60%]  (Sampling)
## Chain 3: Iteration: 1400 / 2000 [ 70%]  (Sampling)
## Chain 3: Iteration: 1600 / 2000 [ 80%]  (Sampling)
## Chain 3: Iteration: 1800 / 2000 [ 90%]  (Sampling)
## Chain 3: Iteration: 2000 / 2000 [100%]  (Sampling)
## Chain 3: 
## Chain 3:  Elapsed Time: 1.573 seconds (Warm-up)
## Chain 3:                1.535 seconds (Sampling)
## Chain 3:                3.108 seconds (Total)
## Chain 3: 
## 
## SAMPLING FOR MODEL 'bssd' NOW (CHAIN 4).
## Chain 4: 
## Chain 4: Gradient evaluation took 0.000255 seconds
## Chain 4: 1000 transitions using 10 leapfrog steps per transition would take 2.55 seconds.
## Chain 4: Adjust your expectations accordingly!
## Chain 4: 
## Chain 4: 
## Chain 4: Iteration:    1 / 2000 [  0%]  (Warmup)
## Chain 4: Iteration:  200 / 2000 [ 10%]  (Warmup)
## Chain 4: Iteration:  400 / 2000 [ 20%]  (Warmup)
## Chain 4: Iteration:  600 / 2000 [ 30%]  (Warmup)
## Chain 4: Iteration:  800 / 2000 [ 40%]  (Warmup)
## Chain 4: Iteration: 1000 / 2000 [ 50%]  (Warmup)
## Chain 4: Iteration: 1001 / 2000 [ 50%]  (Sampling)
## Chain 4: Iteration: 1200 / 2000 [ 60%]  (Sampling)
## Chain 4: Iteration: 1400 / 2000 [ 70%]  (Sampling)
## Chain 4: Iteration: 1600 / 2000 [ 80%]  (Sampling)
## Chain 4: Iteration: 1800 / 2000 [ 90%]  (Sampling)
## Chain 4: Iteration: 2000 / 2000 [100%]  (Sampling)
## Chain 4: 
## Chain 4:  Elapsed Time: 1.56 seconds (Warm-up)
## Chain 4:                1.527 seconds (Sampling)
## Chain 4:                3.087 seconds (Total)
## Chain 4:
fit
## Prob(Event) in arm 1: 0.127 
## Prob(Event) in arm 2: 0.255 
## Prob(Event rate in arm 2 > arm 1): 1
fit$fit
## Inference for Stan model: bssd.
## 4 chains, each with iter=2000; warmup=1000; thin=1; 
## post-warmup draws per chain=1000, total post-warmup draws=4000.
## 
##          mean se_mean   sd    2.5%     25%     50%     75%   97.5% n_eff Rhat
## mu[1]   -2.00    0.00 0.13   -2.26   -2.09   -2.00   -1.91   -1.74  1469    1
## mu[2]   -1.23    0.00 0.11   -1.43   -1.30   -1.23   -1.16   -1.02  1447    1
## pi1      0.13    0.00 0.02    0.10    0.12    0.13    0.14    0.16  1443    1
## pi2      0.25    0.00 0.02    0.21    0.24    0.25    0.27    0.30  1459    1
## lp__  -557.14    0.03 0.97 -559.77 -557.49 -556.87 -556.45 -556.20  1457    1
## 
## Samples were drawn using NUTS(diag_e) at Wed Nov 15 12:40:11 2023.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at 
## convergence, Rhat=1).

Unblinded analysis

fit <- stan_bssd(y = df$y, time = df$time, tmt = df$tmt,
                 w1 = w1, pi1_alpha = pi1_alpha, pi1_beta = pi1_beta,
                 w2 = w2, pi2_alpha = pi2_alpha, pi2_beta = pi2_beta)
## 
## SAMPLING FOR MODEL 'bssd_unblinded' NOW (CHAIN 1).
## Chain 1: Rejecting initial value:
## Chain 1:   Log probability evaluates to log(0), i.e. negative infinity.
## Chain 1:   Stan can't start sampling from this initial value.
## Chain 1: 
## Chain 1: Gradient evaluation took 8e-05 seconds
## Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 0.8 seconds.
## Chain 1: Adjust your expectations accordingly!
## Chain 1: 
## Chain 1: 
## Chain 1: Iteration:    1 / 2000 [  0%]  (Warmup)
## Chain 1: Iteration:  200 / 2000 [ 10%]  (Warmup)
## Chain 1: Iteration:  400 / 2000 [ 20%]  (Warmup)
## Chain 1: Iteration:  600 / 2000 [ 30%]  (Warmup)
## Chain 1: Iteration:  800 / 2000 [ 40%]  (Warmup)
## Chain 1: Iteration: 1000 / 2000 [ 50%]  (Warmup)
## Chain 1: Iteration: 1001 / 2000 [ 50%]  (Sampling)
## Chain 1: Iteration: 1200 / 2000 [ 60%]  (Sampling)
## Chain 1: Iteration: 1400 / 2000 [ 70%]  (Sampling)
## Chain 1: Iteration: 1600 / 2000 [ 80%]  (Sampling)
## Chain 1: Iteration: 1800 / 2000 [ 90%]  (Sampling)
## Chain 1: Iteration: 2000 / 2000 [100%]  (Sampling)
## Chain 1: 
## Chain 1:  Elapsed Time: 0.377 seconds (Warm-up)
## Chain 1:                0.421 seconds (Sampling)
## Chain 1:                0.798 seconds (Total)
## Chain 1: 
## 
## SAMPLING FOR MODEL 'bssd_unblinded' NOW (CHAIN 2).
## Chain 2: Rejecting initial value:
## Chain 2:   Log probability evaluates to log(0), i.e. negative infinity.
## Chain 2:   Stan can't start sampling from this initial value.
## Chain 2: 
## Chain 2: Gradient evaluation took 8e-05 seconds
## Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 0.8 seconds.
## Chain 2: Adjust your expectations accordingly!
## Chain 2: 
## Chain 2: 
## Chain 2: Iteration:    1 / 2000 [  0%]  (Warmup)
## Chain 2: Iteration:  200 / 2000 [ 10%]  (Warmup)
## Chain 2: Iteration:  400 / 2000 [ 20%]  (Warmup)
## Chain 2: Iteration:  600 / 2000 [ 30%]  (Warmup)
## Chain 2: Iteration:  800 / 2000 [ 40%]  (Warmup)
## Chain 2: Iteration: 1000 / 2000 [ 50%]  (Warmup)
## Chain 2: Iteration: 1001 / 2000 [ 50%]  (Sampling)
## Chain 2: Iteration: 1200 / 2000 [ 60%]  (Sampling)
## Chain 2: Iteration: 1400 / 2000 [ 70%]  (Sampling)
## Chain 2: Iteration: 1600 / 2000 [ 80%]  (Sampling)
## Chain 2: Iteration: 1800 / 2000 [ 90%]  (Sampling)
## Chain 2: Iteration: 2000 / 2000 [100%]  (Sampling)
## Chain 2: 
## Chain 2:  Elapsed Time: 0.38 seconds (Warm-up)
## Chain 2:                0.377 seconds (Sampling)
## Chain 2:                0.757 seconds (Total)
## Chain 2: 
## 
## SAMPLING FOR MODEL 'bssd_unblinded' NOW (CHAIN 3).
## Chain 3: 
## Chain 3: Gradient evaluation took 7.9e-05 seconds
## Chain 3: 1000 transitions using 10 leapfrog steps per transition would take 0.79 seconds.
## Chain 3: Adjust your expectations accordingly!
## Chain 3: 
## Chain 3: 
## Chain 3: Iteration:    1 / 2000 [  0%]  (Warmup)
## Chain 3: Iteration:  200 / 2000 [ 10%]  (Warmup)
## Chain 3: Iteration:  400 / 2000 [ 20%]  (Warmup)
## Chain 3: Iteration:  600 / 2000 [ 30%]  (Warmup)
## Chain 3: Iteration:  800 / 2000 [ 40%]  (Warmup)
## Chain 3: Iteration: 1000 / 2000 [ 50%]  (Warmup)
## Chain 3: Iteration: 1001 / 2000 [ 50%]  (Sampling)
## Chain 3: Iteration: 1200 / 2000 [ 60%]  (Sampling)
## Chain 3: Iteration: 1400 / 2000 [ 70%]  (Sampling)
## Chain 3: Iteration: 1600 / 2000 [ 80%]  (Sampling)
## Chain 3: Iteration: 1800 / 2000 [ 90%]  (Sampling)
## Chain 3: Iteration: 2000 / 2000 [100%]  (Sampling)
## Chain 3: 
## Chain 3:  Elapsed Time: 0.394 seconds (Warm-up)
## Chain 3:                0.374 seconds (Sampling)
## Chain 3:                0.768 seconds (Total)
## Chain 3: 
## 
## SAMPLING FOR MODEL 'bssd_unblinded' NOW (CHAIN 4).
## Chain 4: Rejecting initial value:
## Chain 4:   Log probability evaluates to log(0), i.e. negative infinity.
## Chain 4:   Stan can't start sampling from this initial value.
## Chain 4: 
## Chain 4: Gradient evaluation took 8e-05 seconds
## Chain 4: 1000 transitions using 10 leapfrog steps per transition would take 0.8 seconds.
## Chain 4: Adjust your expectations accordingly!
## Chain 4: 
## Chain 4: 
## Chain 4: Iteration:    1 / 2000 [  0%]  (Warmup)
## Chain 4: Iteration:  200 / 2000 [ 10%]  (Warmup)
## Chain 4: Iteration:  400 / 2000 [ 20%]  (Warmup)
## Chain 4: Iteration:  600 / 2000 [ 30%]  (Warmup)
## Chain 4: Iteration:  800 / 2000 [ 40%]  (Warmup)
## Chain 4: Iteration: 1000 / 2000 [ 50%]  (Warmup)
## Chain 4: Iteration: 1001 / 2000 [ 50%]  (Sampling)
## Chain 4: Iteration: 1200 / 2000 [ 60%]  (Sampling)
## Chain 4: Iteration: 1400 / 2000 [ 70%]  (Sampling)
## Chain 4: Iteration: 1600 / 2000 [ 80%]  (Sampling)
## Chain 4: Iteration: 1800 / 2000 [ 90%]  (Sampling)
## Chain 4: Iteration: 2000 / 2000 [100%]  (Sampling)
## Chain 4: 
## Chain 4:  Elapsed Time: 0.379 seconds (Warm-up)
## Chain 4:                0.398 seconds (Sampling)
## Chain 4:                0.777 seconds (Total)
## Chain 4:
fit
## Prob(Event) in arm 1: 0.143 
## Prob(Event) in arm 2: 0.229 
## Prob(Event rate in arm 2 > arm 1): 1
fit$fit
## Inference for Stan model: bssd_unblinded.
## 4 chains, each with iter=2000; warmup=1000; thin=1; 
## post-warmup draws per chain=1000, total post-warmup draws=4000.
## 
##          mean se_mean   sd    2.5%     25%     50%     75%   97.5% n_eff Rhat
## mu[1]   -1.87    0.00 0.07   -2.01   -1.92   -1.87   -1.82   -1.74  3590    1
## mu[2]   -1.35    0.00 0.05   -1.45   -1.38   -1.35   -1.31   -1.25  3022    1
## pi1      0.14    0.00 0.01    0.13    0.14    0.14    0.15    0.16  3666    1
## pi2      0.23    0.00 0.01    0.21    0.22    0.23    0.24    0.25  3023    1
## lp__  -544.37    0.02 0.98 -547.05 -544.72 -544.09 -543.69 -543.43  1651    1
## 
## Samples were drawn using NUTS(diag_e) at Wed Nov 15 12:40:14 2023.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at 
## convergence, Rhat=1).