From 5364f037a72863ef5ba81e14ba4417f68fd389f9 Mon Sep 17 00:00:00 2001
From: Johannes Ranke
Date: Fri, 18 Nov 2022 19:14:47 +0100
Subject: Make mixed model test data permanent to ensure reproducibility

To ensure that tests on different platforms work on the same data, the
mixed modelling test data previously generated in
tests/testthat/setup_script.R were generated once using the script in
inst/dataset/generation/ds_mixed.R, and are now distributed with the
package.
---
 tests/testthat/test_mixed.R | 32 ++++++++++++++++----------------
 1 file changed, 16 insertions(+), 16 deletions(-)

diff --git a/tests/testthat/test_mixed.R b/tests/testthat/test_mixed.R
index 2d53c6dd..ab8dfc27 100644
--- a/tests/testthat/test_mixed.R
+++ b/tests/testthat/test_mixed.R
@@ -1,22 +1,23 @@
 context("Nonlinear mixed-effects models")
+
 # Round error model parameters as they are not rounded in print methods
 dfop_nlme_1$modelStruct$varStruct$const <-
   signif(dfop_nlme_1$modelStruct$varStruct$const, 3)
 dfop_nlme_1$modelStruct$varStruct$prop <-
   signif(dfop_nlme_1$modelStruct$varStruct$prop, 4)
 
+dfop_sfo_pop <- attr(ds_dfop_sfo, "pop")
+
 test_that("Print methods work", {
   expect_known_output(print(fits[, 2:3], digits = 2), "print_mmkin_parent.txt")
   expect_known_output(print(mixed(mmkin_sfo_1), digits = 2),
     "print_mmkin_sfo_1_mixed.txt")
   expect_known_output(print(dfop_nlme_1, digits = 1), "print_dfop_nlme_1.txt")
+  expect_known_output(print(sfo_saem_1_reduced, digits = 1), "print_sfo_saem_1_reduced.txt")
 
-  # In order to address the platform dependence of the results, we round to two
-  # significant digits before printing
-  dfop_saemix_1_print <- dfop_saemix_1
-  dfop_saemix_1_print$so@results@conf.int[c("estimate", "lower", "upper")] <-
-    signif(dfop_saemix_1_print$so@results@conf.int[c("estimate", "lower", "upper")], 2)
-  expect_known_output(print(dfop_saemix_1_print, digits = 1), "print_dfop_saemix_1.txt")
+  skip_on_cran() # The following test is platform dependent and fails on
+  # win-builder with current (18 Nov 2022) R-devel and on the Fedora CRAN check systems
+  expect_known_output(print(dfop_saem_1, digits = 1), "print_dfop_saem_1.txt")
 })
 
 test_that("nlme results are reproducible to some degree", {
@@ -36,17 +37,16 @@ test_that("nlme results are reproducible to some degree", {
   # k1 and k2 just fail the first test (lower bound of the ci), so we need to exclude it
   dfop_no_k1_k2 <- c("parent_0", "k_m1", "f_parent_to_m1", "g")
   dfop_sfo_pop_no_k1_k2 <- as.numeric(dfop_sfo_pop[dfop_no_k1_k2])
-  dfop_sfo_pop <- as.numeric(dfop_sfo_pop) # to remove names
 
-  ci_dfop_sfo_n <- summary(nlme_biphasic)$confint_back
+  ci_dfop_sfo_n <- summary(nlme_dfop_sfo)$confint_back
 
   expect_true(all(ci_dfop_sfo_n[dfop_no_k1_k2, "lower"] < dfop_sfo_pop_no_k1_k2))
-  expect_true(all(ci_dfop_sfo_n[, "upper"] > dfop_sfo_pop))
+  expect_true(all(ci_dfop_sfo_n[, "upper"] > as.numeric(dfop_sfo_pop)))
 })
 
 test_that("saemix results are reproducible for biphasic fits", {
-  test_summary <- summary(saem_biphasic_s)
+  test_summary <- summary(saem_dfop_sfo_s)
   test_summary$saemixversion <- "Dummy 0.0 for testing"
   test_summary$mkinversion <- "Dummy 0.0 for testing"
   test_summary$Rversion <- "Dummy R version for testing"
@@ -54,33 +54,33 @@
   test_summary$date.summary <- "Dummy date for testing"
   test_summary$time <- c(elapsed = "test time 0")
 
-  expect_known_output(print(test_summary, digits = 1), "summary_saem_biphasic_s.txt")
+  expect_known_output(print(test_summary, digits = 1), "summary_saem_dfop_sfo_s.txt")
 
   dfop_sfo_pop <- as.numeric(dfop_sfo_pop)
   no_k1 <- c(1, 2, 3, 5, 6)
   no_k2 <- c(1, 2, 3, 4, 6)
   no_k1_k2 <- c(1, 2, 3, 6)
 
-  ci_dfop_sfo_s_s <- summary(saem_biphasic_s)$confint_back
+  ci_dfop_sfo_s_s <- summary(saem_dfop_sfo_s)$confint_back
   expect_true(all(ci_dfop_sfo_s_s[, "lower"] < dfop_sfo_pop))
   expect_true(all(ci_dfop_sfo_s_s[, "upper"] > dfop_sfo_pop))
 
   # k2 is not fitted well
-  ci_dfop_sfo_s_m <- summary(saem_biphasic_m)$confint_back
+  ci_dfop_sfo_s_m <- summary(saem_dfop_sfo_m)$confint_back
   expect_true(all(ci_dfop_sfo_s_m[no_k2, "lower"] < dfop_sfo_pop[no_k2]))
   expect_true(all(ci_dfop_sfo_s_m[no_k1, "upper"] > dfop_sfo_pop[no_k1]))
 
   # I tried to only do few iterations in routine tests as this is so slow
   # but then deSolve fails at some point (presumably at the switch between
   # the two types of iterations)
-  #saem_biphasic_2 <- saem(mmkin_biphasic, solution_type = "deSolve",
+  #saem_dfop_sfo_2 <- saem(mmkin_dfop_sfo, solution_type = "deSolve",
   #  control = list(nbiter.saemix = c(10, 5), nbiter.burn = 5), quiet = TRUE)
 
   skip("Fitting with saemix takes around 10 minutes when using deSolve")
-  saem_biphasic_2 <- saem(mmkin_biphasic, solution_type = "deSolve", quiet = TRUE)
+  saem_dfop_sfo_2 <- saem(mmkin_dfop_sfo, solution_type = "deSolve", quiet = TRUE)
 
   # As with the analytical solution, k1 and k2 are not fitted well
-  ci_dfop_sfo_s_d <- summary(saem_biphasic_2)$confint_back
+  ci_dfop_sfo_s_d <- summary(saem_dfop_sfo_2)$confint_back
   expect_true(all(ci_dfop_sfo_s_d[no_k2, "lower"] < dfop_sfo_pop[no_k2]))
   expect_true(all(ci_dfop_sfo_s_d[no_k1, "upper"] > dfop_sfo_pop[no_k1]))
 })
--
cgit v1.2.1
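Note: the generation script inst/dataset/generation/ds_mixed.R referenced in the commit
message is not part of this patch. As a rough illustration of the approach it describes
(simulate the mixed-model test data once with a fixed seed, attach the population
parameters as a "pop" attribute, which the tests read via attr(ds_dfop_sfo, "pop"), and
save the result as an .rda file that is distributed with the package instead of being
regenerated in tests/testthat/setup_script.R), here is a minimal, self-contained R
sketch. All object names, parameter values and file paths in it are illustrative
assumptions, not the actual mkin code.

## Illustrative sketch only -- not the actual inst/dataset/generation/ds_mixed.R.
## Simulate parent decline data following a DFOP model for a few hypothetical
## datasets, attach invented population parameters as an attribute, and save
## everything to an .rda file that could be shipped with a package.
set.seed(123456)  # fixed seed so rerunning the script reproduces the same data

dfop <- function(t, parent_0, k1, k2, g) {
  # Analytical solution of the DFOP (double first-order in parallel) parent model
  parent_0 * (g * exp(-k1 * t) + (1 - g) * exp(-k2 * t))
}

pop <- c(parent_0 = 100, k1 = 0.05, k2 = 0.01, g = 0.5)  # illustrative values
sampling_times <- c(0, 1, 3, 7, 14, 28, 60, 90, 120)

ds_dfop_example <- lapply(1:5, function(i) {
  # Dataset specific rate constants: lognormal variation around the population values
  k1_i <- pop[["k1"]] * exp(rnorm(1, sd = 0.3))
  k2_i <- pop[["k2"]] * exp(rnorm(1, sd = 0.3))
  value <- dfop(sampling_times, pop[["parent_0"]], k1_i, k2_i, pop[["g"]]) +
    rnorm(length(sampling_times), sd = 2)  # constant measurement error
  data.frame(name = "parent", time = sampling_times, value = value)
})
attr(ds_dfop_example, "pop") <- pop  # mirrors how the tests access attr(ds_dfop_sfo, "pop")

## Storing the generated data in the package sources means all platforms test
## on identical numbers instead of regenerating them at test time.
save(ds_dfop_example, file = "ds_dfop_example.rda", version = 2)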