
Setup:

# Set CRAN mirror
options(repos = c(CRAN = "https://cloud.r-project.org/"))

# Check and install required packages
packages <- c("dplyr", "kableExtra", "pander")
install.packages(setdiff(packages, rownames(installed.packages())))
invisible(lapply(packages, require, character.only = TRUE))
library(rctbayespower)

# Set number of cores and simulations:
n_cores <- max(1, parallel::detectCores() - 2) # guard against machines with few cores
n_sims <- 500
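
# Optional (not part of the original setup): fix the RNG seed for more
# reproducible results; depending on the parallel backend used by
# power_analysis(), additional worker-level seeding may be required
set.seed(42)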

The rctbayespower package uses two types of priors: estimation priors (for Bayesian model fitting) and design priors (for integrating effect size uncertainty). This vignette demonstrates their impact on power analysis through two focused examples.
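
Conceptually, the two kinds of priors enter at different points: an estimation prior is part of the brms model used to analyze each simulated trial, whereas a design prior describes uncertainty about the true effect that generates the trials. A minimal sketch of the distinction (the design-prior draw below is purely illustrative, not the package's API):

# Estimation prior: passed to the Bayesian analysis model via brms
est_prior <- brms::set_prior("student_t(3, 0, 2)", class = "b")

# Design prior (illustrative only): a distribution over plausible true
# effects from which effect sizes could be drawn when simulating trials
design_draws <- rnorm(1000, mean = 0.5, sd = 0.1)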

1 Example 1: Different Estimation Priors

Estimation priors affect how treatment effects are estimated during Bayesian model fitting.

An ANCOVA model with two arms illustrates the impact of different estimation priors on power analysis results. We compare an uninformative (locally flat) prior with an informative, sceptical prior.

Let’s first define the sample size for our analysis:

sample_size <- 100

1.1 Analysis of a Medium Effect (Cohen’s d = 0.5)

1.1.1 Analysis with Uninformative Priors

Uninformative, locally flat prior on the treatment effect (note that Stan parameterizes the normal by its standard deviation, so normal(0, 100) means a standard deviation of 100):

\[d \sim \mathcal{N}(0, 100^2)\]

# Create ANCOVA model with uninformative priors
uninformative_model <- build_model_ancova_cont_2arms(
  prior_treatment = brms::set_prior("normal(0, 100)", class = "b"),
  prior_covariate = brms::set_prior("normal(0, 100)", class = "b", coef = "covariate"),
  prior_intercept = brms::set_prior("normal(0, 100)", class = "Intercept"),
  prior_sigma = brms::set_prior("normal(0, 100)", class = "sigma", lb = 0),
  link_sigma = "identity"
)
Compiling the brms model ...
Model compilation done!
# Create design
uninformative_design <- build_design(
  model = uninformative_model,
  target_params = "b_arm2",
  n_interim_analyses = 0,
  thresholds_success = 0.3,
  thresholds_futility = 0.1,
  p_sig_success = 0.95,
  p_sig_futility = 0.5
)

# Create conditions
uninformative_conditions <- build_conditions(
  design = uninformative_design,
  condition_values = list(
    n_total = sample_size
  ),
  static_values = list(
    b_arm_treat = 0.5, # Medium effect
    b_covariate = 0 # No covariate effect
  )
)

# Run power analysis
uninformative_analysis <- rctbayespower::power_analysis(
  conditions = uninformative_conditions,
  n_sims = n_sims,
  n_cores = n_cores,
  verbose = FALSE
)
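
The design above encodes two decision rules. As we read them (the exact evaluation is internal to the package): success is declared when the posterior probability that the effect exceeds thresholds_success reaches p_sig_success, and futility when the probability of exceeding thresholds_futility drops below p_sig_futility. A toy illustration with hypothetical posterior draws:

# Hypothetical posterior draws for the treatment effect
draws <- rnorm(4000, mean = 0.5, sd = 0.2)

# Assumed success rule: P(d > 0.3) >= 0.95
mean(draws > 0.3) >= 0.95
# Assumed futility rule: P(d > 0.1) < 0.5
mean(draws > 0.1) < 0.5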
res_uninformative <- uninformative_analysis@summarized_results |>
  select(
    n_total,
    median,
    sd,
    prob_success,
    power_success,
    prob_futility,
    power_futility
  ) |>
  dplyr::mutate(Prior = "uninformative", .before = everything())

res_uninformative |> 
  kableExtra::kable(digits = 3, format = "html")
Prior          n_total  median  sd     prob_success  power_success  prob_futility  power_futility
uninformative  100      0.509   0.205  0.765         0.262          0.077          0.018
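
Two columns deserve a note. We read prob_success as the posterior probability of exceeding the success threshold, averaged over simulations, and power_success as the share of simulations in which that probability reached p_sig_success; this is our interpretation, not a documented definition. In toy form:

# Toy sketch: hypothetical per-simulation posterior probabilities
# that the effect exceeds the success threshold
sims <- data.frame(success_prob = c(0.99, 0.80, 0.97, 0.60))
mean(sims$success_prob)          # analogous to prob_success
mean(sims$success_prob >= 0.95)  # analogous to power_success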

1.1.2 Analysis with Informative Priors

Informative, sceptical prior on the treatment effect, a Student-t with 3 degrees of freedom, location 0, and scale 2 (matching the student_t(3, 0, 2) used in the code):

\[d \sim t_3(0, 2)\]
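
To see why this prior is sceptical, compare its density with the locally flat prior above: it concentrates mass near zero and therefore shrinks estimated effects towards zero. A quick base-R sketch (the scaled-t density is computed by hand):

# Densities of the two estimation priors on the treatment effect
x <- seq(-5, 5, length.out = 400)
flat <- dnorm(x, mean = 0, sd = 100)  # normal(0, 100)
sceptical <- dt(x / 2, df = 3) / 2    # student_t(3, 0, 2), scaled t density

plot(x, sceptical, type = "l", xlab = "treatment effect d", ylab = "density")
lines(x, flat, lty = 2)
legend("topright", legend = c("student_t(3, 0, 2)", "normal(0, 100)"), lty = 1:2)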

# Create ANCOVA model with informative/sceptical priors
informative_model <- build_model_ancova_cont_2arms(
  prior_treatment = brms::set_prior("student_t(3, 0, 2)", class = "b"),
  prior_covariate = brms::set_prior("student_t(3, 0, 2)", class = "b", coef = "covariate"),
  prior_intercept = brms::set_prior("student_t(3, 0, 2)", class = "Intercept"),
  prior_sigma = brms::set_prior("student_t(3, 0, 2)", class = "sigma", lb = 0),
  link_sigma = "identity"
)
Compiling the brms model ...
Model compilation done!
# Create design (same as uninformative)
informative_design <- build_design(
  model = informative_model,
  target_params = "b_arm2",
  n_interim_analyses = 0,
  thresholds_success = 0.3,
  thresholds_futility = 0.1,
  p_sig_success = 0.95,
  p_sig_futility = 0.5
)

# Create conditions
informative_conditions <- build_conditions(
  design = informative_design,
  condition_values = list(n_total = sample_size),
  static_values = list(
    b_arm_treat = 0.5, # Medium effect
    b_covariate = 0 # No covariate effect
  )
)

# Run power analysis
informative_analysis <- power_analysis(
  conditions = informative_conditions,
  n_sims = n_sims,
  n_cores = n_cores,
  verbose = FALSE
)
res_informative <- informative_analysis@summarized_results |>
  select(
    n_total,
    median,
    sd,
    prob_success,
    power_success,
    prob_futility,
    power_futility
  ) |>
  dplyr::mutate(Prior = "informative", .before = everything())

res_informative |> 
  kableExtra::kable(digits = 3, format = "html")
Prior        n_total  median  sd     prob_success  power_success  prob_futility  power_futility
informative  100      0.478   0.203  0.733         0.142          0.095          0.026

Comparing the results of the two analyses:

estimation_comparison <- 
  rbind(
    res_uninformative,
    res_informative
  ) 

estimation_comparison |>
  # digits = 3 rounds the numeric columns for readability
  kableExtra::kable(digits = 3)
Prior          n_total  median  sd     prob_success  power_success  prob_futility  power_futility
uninformative  100      0.509   0.205  0.765         0.262          0.077          0.018
informative    100      0.478   0.203  0.733         0.142          0.095          0.026

The sceptical prior shrinks the posterior towards zero (median 0.478 vs. 0.509), lowering the posterior probability of exceeding the success threshold and, with it, the power to declare success.

1.2 Alpha Error Rate

To evaluate the alpha error rate (false positive rate) for both prior configurations, we run the power analysis under the null hypothesis (effect size = 0) and examine how often success is incorrectly declared. This tells us how well our Bayesian decision framework controls the Type I error. Conveniently, the rctbayespower package lets us reuse the same models and design configurations; we only set the true effect size to zero and, to mirror a conventional null hypothesis test against zero, lower the success and futility thresholds to zero as well.

# Null-effect design for the uninformative model (thresholds set to zero)
alpha_uninformative_design <- build_design(
  model = uninformative_model,
  target_params = "b_arm2",
  n_interim_analyses = 0,
  thresholds_success = 0,
  thresholds_futility = 0,
  p_sig_success = 0.95,
  p_sig_futility = 0.5
)

alpha_uninformative_conditions <- build_conditions(
  design = alpha_uninformative_design,
  condition_values = list(n_total = sample_size),
  static_values = list(
    b_arm_treat = 0, # Null hypothesis: no effect
    b_covariate = 0 # No covariate effect
  )
)

alpha_uninformative <- power_analysis(
  conditions = alpha_uninformative_conditions,
  n_sims = n_sims,
  n_cores = n_cores,
  verbose = FALSE
)
alpha_informative_design <- build_design(
  model = informative_model,
  target_params = "b_arm2",
  n_interim_analyses = 0,
  thresholds_success = 0,
  thresholds_futility = 0,
  p_sig_success = 0.95,
  p_sig_futility = 0.5
)

alpha_informative_conditions <- build_conditions(
  design = alpha_informative_design,
  condition_values = list(n_total = sample_size),
  static_values = list(
    b_arm_treat = 0, # Null hypothesis: no effect
    b_covariate = 0 # No covariate effect
  )
)

alpha_informative <- power_analysis(
  conditions = alpha_informative_conditions,
  n_sims = n_sims,
  n_cores = n_cores,
  verbose = TRUE
)

=== Power Analysis ===
Conditions:
# A tibble: 1 × 2
  id_cond n_total
    <int>   <dbl>
1       1     100

Conditions to test: 1
Simulations per condition: 500
Total simulations: 500 
Functions exported from package namespace

Combined results dimensions: 500 18
Column names: parameter, threshold_success, threshold_futility, success_prob, futility_prob, power_success, power_futility, median, mad, mean, sd, rhat, ess_bulk, ess_tail, id_sim, id_cond, converged, error

Total analysis time: 1.46 minutes
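
The verbose log also shows that each simulation records MCMC diagnostics (rhat, ess_bulk, ess_tail) and a converged flag. It is worth checking these before trusting the power estimates; a minimal sketch, assuming the per-simulation results are available as a data frame with the columns listed above (extracting them from the result object is not shown here):

# Sketch: summarize convergence across simulations, given a data frame
# `sims` with one row per simulation and the columns from the log above
check_convergence <- function(sims) {
  c(
    prop_converged = mean(sims$converged),
    max_rhat = max(sims$rhat, na.rm = TRUE),
    min_ess_bulk = min(sims$ess_bulk, na.rm = TRUE)
  )
}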

Comparing the alpha error rates:

res_alpha <-
  rbind(
    alpha_uninformative@summarized_results |>
      select(n_total, prob_success, power_success) |>
      dplyr::mutate(Prior = "uninformative", .before = everything()),
    alpha_informative@summarized_results |>
      select(n_total, prob_success, power_success) |>
      dplyr::mutate(Prior = "informative", .before = everything())
  ) |>
  dplyr::rename(
    alpha = power_success,
    "p(d > 0)" = prob_success
  )

res_alpha |> 
  kableExtra::kable(digits = 3, format = "html")
Prior          n_total  p(d > 0)  alpha
uninformative  100      0.501     0.052
informative    100      0.525     0.050
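
With n_sims = 500, these alpha estimates carry non-trivial Monte Carlo error. An exact binomial confidence interval makes the precision explicit (a sketch for the uninformative result, where 0.052 × 500 = 26 false positives):

# Exact 95% CI for a false-positive rate estimated from 26/500 null simulations
binom.test(x = 26, n = 500)$conf.int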

In this run the sceptical prior yields a marginally lower alpha (0.050 vs. 0.052), but the difference is well within the Monte Carlo error of 500 simulations. Both prior configurations keep the false-positive rate close to the 5% implied by p_sig_success = 0.95; with a locally flat prior, this Bayesian decision rule behaves essentially like a one-sided frequentist test at alpha = 0.05.

2 Example 2: Design Priors

Coming soon …


pander::pander(sessionInfo())

R version 4.5.1 (2025-06-13 ucrt)

Platform: x86_64-w64-mingw32/x64

locale: LC_COLLATE=German_Germany.utf8, LC_CTYPE=German_Germany.utf8, LC_MONETARY=German_Germany.utf8, LC_NUMERIC=C and LC_TIME=German_Germany.utf8

attached base packages: stats, graphics, grDevices, utils, datasets, methods and base

other attached packages: rctbayespower(v.0.0.0.9000), pander(v.0.6.6), kableExtra(v.1.4.0) and dplyr(v.1.1.4)

loaded via a namespace (and not attached): gtable(v.0.3.6), tensorA(v.0.36.2.1), QuickJSR(v.1.8.1), xfun(v.0.53), ggplot2(v.4.0.0), htmlwidgets(v.1.6.4), processx(v.3.8.6), inline(v.0.3.21), lattice(v.0.22-7), callr(v.3.7.6), ps(v.1.9.1), vctrs(v.0.6.5), tools(v.4.5.1), generics(v.0.1.4), stats4(v.4.5.1), parallel(v.4.5.1), tibble(v.3.3.0), pkgconfig(v.2.0.3), brms(v.2.23.0), Matrix(v.1.7-4), data.table(v.1.17.8), checkmate(v.2.3.3), RColorBrewer(v.1.1-3), S7(v.0.2.0), distributional(v.0.5.0), RcppParallel(v.5.1.11-1), lifecycle(v.1.0.4), compiler(v.4.5.1), farver(v.2.1.2), stringr(v.1.5.2), textshaping(v.1.0.4), Brobdingnag(v.1.2-9), codetools(v.0.2-20), htmltools(v.0.5.8.1), bayesplot(v.1.14.0), yaml(v.2.3.10), lazyeval(v.0.2.2), plotly(v.4.11.0), pillar(v.1.11.1), tidyr(v.1.3.1), StanHeaders(v.2.32.10), bridgesampling(v.1.1-2), abind(v.1.4-8), nlme(v.3.1-168), posterior(v.1.6.1), rstan(v.2.32.7), tidyselect(v.1.2.1), digest(v.0.6.37), mvtnorm(v.1.3-3), stringi(v.1.8.7), purrr(v.1.0.4), fastmap(v.1.2.0), grid(v.4.5.1), cli(v.3.6.5), magrittr(v.2.0.3), loo(v.2.8.0), pkgbuild(v.1.4.8), withr(v.3.0.2), scales(v.1.4.0), backports(v.1.5.0), rmarkdown(v.2.30), httr(v.1.4.7), matrixStats(v.1.5.0), gridExtra(v.2.3), pbapply(v.1.7-4), coda(v.0.19-4.1), evaluate(v.1.0.5), knitr(v.1.50), pwr(v.1.3-0), viridisLite(v.0.4.2), rstantools(v.2.5.0), rlang(v.1.1.6), Rcpp(v.1.1.0), glue(v.1.8.0), xml2(v.1.4.0), svglite(v.2.2.2), rstudioapi(v.0.17.1), jsonlite(v.2.0.0), R6(v.2.6.1) and systemfonts(v.1.3.1)