From a14237fc1580b09f8772cd3330b0a445785e48ac Mon Sep 17 00:00:00 2001 From: Johannes Ranke Date: Fri, 18 Nov 2022 10:09:28 +0100 Subject: Round parameters with signif() before printing This will hopefully make the test pass on all relevant platforms. --- tests/testthat/test_mixed.R | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'tests/testthat/test_mixed.R') diff --git a/tests/testthat/test_mixed.R b/tests/testthat/test_mixed.R index 646b6110..2d53c6dd 100644 --- a/tests/testthat/test_mixed.R +++ b/tests/testthat/test_mixed.R @@ -11,7 +11,12 @@ test_that("Print methods work", { expect_known_output(print(mixed(mmkin_sfo_1), digits = 2), "print_mmkin_sfo_1_mixed.txt") expect_known_output(print(dfop_nlme_1, digits = 1), "print_dfop_nlme_1.txt") - expect_known_output(print(dfop_saemix_1, digits = 1), "print_dfop_saemix_1.txt") + # In order to address the platform dependence of the results, we round to two + # significant digits before printing + dfop_saemix_1_print <- dfop_saemix_1 + dfop_saemix_1_print$so@results@conf.int[c("estimate", "lower", "upper")] <- + signif(dfop_saemix_1_print$so@results@conf.int[c("estimate", "lower", "upper")], 2) + expect_known_output(print(dfop_saemix_1_print, digits = 1), "print_dfop_saemix_1.txt") }) test_that("nlme results are reproducible to some degree", { -- cgit v1.2.1 From 5364f037a72863ef5ba81e14ba4417f68fd389f9 Mon Sep 17 00:00:00 2001 From: Johannes Ranke Date: Fri, 18 Nov 2022 19:14:47 +0100 Subject: Make mixed model test data permanent to ensure reproducibility To ensure that tests on different platforms work on the same data, the mixed modelling test data previously generated in tests/testthat/setup_script.R were generated once using the script in inst/dataset/generation/ds_mixed.R, and are now distributed with the package. 
--- tests/testthat/test_mixed.R | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) (limited to 'tests/testthat/test_mixed.R') diff --git a/tests/testthat/test_mixed.R b/tests/testthat/test_mixed.R index 2d53c6dd..ab8dfc27 100644 --- a/tests/testthat/test_mixed.R +++ b/tests/testthat/test_mixed.R @@ -1,22 +1,23 @@ context("Nonlinear mixed-effects models") + # Round error model parameters as they are not rounded in print methods dfop_nlme_1$modelStruct$varStruct$const <- signif(dfop_nlme_1$modelStruct$varStruct$const, 3) dfop_nlme_1$modelStruct$varStruct$prop <- signif(dfop_nlme_1$modelStruct$varStruct$prop, 4) +dfop_sfo_pop <- attr(ds_dfop_sfo, "pop") + test_that("Print methods work", { expect_known_output(print(fits[, 2:3], digits = 2), "print_mmkin_parent.txt") expect_known_output(print(mixed(mmkin_sfo_1), digits = 2), "print_mmkin_sfo_1_mixed.txt") expect_known_output(print(dfop_nlme_1, digits = 1), "print_dfop_nlme_1.txt") + expect_known_output(print(sfo_saem_1_reduced, digits = 1), "print_sfo_saem_1_reduced.txt") - # In order to address the platform dependence of the results, we round to two - # significant digits before printing - dfop_saemix_1_print <- dfop_saemix_1 - dfop_saemix_1_print$so@results@conf.int[c("estimate", "lower", "upper")] <- - signif(dfop_saemix_1_print$so@results@conf.int[c("estimate", "lower", "upper")], 2) - expect_known_output(print(dfop_saemix_1_print, digits = 1), "print_dfop_saemix_1.txt") + skip_on_cran() # The following test is platform dependent and fails on + # win-builder with current (18 Nov 2022) R-devel and on the Fedora CRAN check systems + expect_known_output(print(dfop_saem_1, digits = 1), "print_dfop_saem_1.txt") }) test_that("nlme results are reproducible to some degree", { @@ -36,17 +37,16 @@ test_that("nlme results are reproducible to some degree", { # k1 and k2 just fail the first test (lower bound of the ci), so we need to exclude it dfop_no_k1_k2 <- c("parent_0", "k_m1", 
"f_parent_to_m1", "g") dfop_sfo_pop_no_k1_k2 <- as.numeric(dfop_sfo_pop[dfop_no_k1_k2]) - dfop_sfo_pop <- as.numeric(dfop_sfo_pop) # to remove names - ci_dfop_sfo_n <- summary(nlme_biphasic)$confint_back + ci_dfop_sfo_n <- summary(nlme_dfop_sfo)$confint_back expect_true(all(ci_dfop_sfo_n[dfop_no_k1_k2, "lower"] < dfop_sfo_pop_no_k1_k2)) - expect_true(all(ci_dfop_sfo_n[, "upper"] > dfop_sfo_pop)) + expect_true(all(ci_dfop_sfo_n[, "upper"] > as.numeric(dfop_sfo_pop))) }) test_that("saemix results are reproducible for biphasic fits", { - test_summary <- summary(saem_biphasic_s) + test_summary <- summary(saem_dfop_sfo_s) test_summary$saemixversion <- "Dummy 0.0 for testing" test_summary$mkinversion <- "Dummy 0.0 for testing" test_summary$Rversion <- "Dummy R version for testing" @@ -54,33 +54,33 @@ test_that("saemix results are reproducible for biphasic fits", { test_summary$date.summary <- "Dummy date for testing" test_summary$time <- c(elapsed = "test time 0") - expect_known_output(print(test_summary, digits = 1), "summary_saem_biphasic_s.txt") + expect_known_output(print(test_summary, digits = 1), "summary_saem_dfop_sfo_s.txt") dfop_sfo_pop <- as.numeric(dfop_sfo_pop) no_k1 <- c(1, 2, 3, 5, 6) no_k2 <- c(1, 2, 3, 4, 6) no_k1_k2 <- c(1, 2, 3, 6) - ci_dfop_sfo_s_s <- summary(saem_biphasic_s)$confint_back + ci_dfop_sfo_s_s <- summary(saem_dfop_sfo_s)$confint_back expect_true(all(ci_dfop_sfo_s_s[, "lower"] < dfop_sfo_pop)) expect_true(all(ci_dfop_sfo_s_s[, "upper"] > dfop_sfo_pop)) # k2 is not fitted well - ci_dfop_sfo_s_m <- summary(saem_biphasic_m)$confint_back + ci_dfop_sfo_s_m <- summary(saem_dfop_sfo_m)$confint_back expect_true(all(ci_dfop_sfo_s_m[no_k2, "lower"] < dfop_sfo_pop[no_k2])) expect_true(all(ci_dfop_sfo_s_m[no_k1, "upper"] > dfop_sfo_pop[no_k1])) # I tried to only do few iterations in routine tests as this is so slow # but then deSolve fails at some point (presumably at the switch between # the two types of iterations) - #saem_biphasic_2 <- 
saem(mmkin_biphasic, solution_type = "deSolve", + #saem_dfop_sfo_2 <- saem(mmkin_biphasic, solution_type = "deSolve", # control = list(nbiter.saemix = c(10, 5), nbiter.burn = 5), quiet = TRUE) skip("Fitting with saemix takes around 10 minutes when using deSolve") - saem_biphasic_2 <- saem(mmkin_biphasic, solution_type = "deSolve", quiet = TRUE) + saem_dfop_sfo_2 <- saem(mmkin_dfop_sfo, solution_type = "deSolve", quiet = TRUE) # As with the analytical solution, k1 and k2 are not fitted well - ci_dfop_sfo_s_d <- summary(saem_biphasic_2)$confint_back + ci_dfop_sfo_s_d <- summary(saem_dfop_sfo_2)$confint_back expect_true(all(ci_dfop_sfo_s_d[no_k2, "lower"] < dfop_sfo_pop[no_k2])) expect_true(all(ci_dfop_sfo_s_d[no_k1, "upper"] > dfop_sfo_pop[no_k1])) }) -- cgit v1.2.1 From 5ec8e1bad1aa7d79ee9c19bdd50be07f81a14278 Mon Sep 17 00:00:00 2001 From: Johannes Ranke Date: Fri, 18 Nov 2022 22:11:02 +0100 Subject: Move two saem fits from setup script to skipped tests Save winbuilder/CRAN check time... 
--- tests/testthat/test_mixed.R | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) (limited to 'tests/testthat/test_mixed.R') diff --git a/tests/testthat/test_mixed.R b/tests/testthat/test_mixed.R index ab8dfc27..8c257738 100644 --- a/tests/testthat/test_mixed.R +++ b/tests/testthat/test_mixed.R @@ -46,6 +46,9 @@ test_that("nlme results are reproducible to some degree", { test_that("saemix results are reproducible for biphasic fits", { + skip_on_cran() + saem_dfop_sfo_s <- saem(mmkin_dfop_sfo, transformations = "saemix", quiet = TRUE) + test_summary <- summary(saem_dfop_sfo_s) test_summary$saemixversion <- "Dummy 0.0 for testing" test_summary$mkinversion <- "Dummy 0.0 for testing" @@ -65,11 +68,6 @@ test_that("saemix results are reproducible for biphasic fits", { expect_true(all(ci_dfop_sfo_s_s[, "lower"] < dfop_sfo_pop)) expect_true(all(ci_dfop_sfo_s_s[, "upper"] > dfop_sfo_pop)) - # k2 is not fitted well - ci_dfop_sfo_s_m <- summary(saem_dfop_sfo_m)$confint_back - expect_true(all(ci_dfop_sfo_s_m[no_k2, "lower"] < dfop_sfo_pop[no_k2])) - expect_true(all(ci_dfop_sfo_s_m[no_k1, "upper"] > dfop_sfo_pop[no_k1])) - # I tried to only do few iterations in routine tests as this is so slow # but then deSolve fails at some point (presumably at the switch between # the two types of iterations) -- cgit v1.2.1 From 64e245a16ec22cf864fcb5bae3b4b2fb2eee5905 Mon Sep 17 00:00:00 2001 From: Johannes Ranke Date: Sat, 19 Nov 2022 00:17:33 +0100 Subject: Confirm which test fails on R-devel --- tests/testthat/test_mixed.R | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'tests/testthat/test_mixed.R') diff --git a/tests/testthat/test_mixed.R b/tests/testthat/test_mixed.R index 8c257738..39a332f5 100644 --- a/tests/testthat/test_mixed.R +++ b/tests/testthat/test_mixed.R @@ -16,7 +16,8 @@ test_that("Print methods work", { expect_known_output(print(sfo_saem_1_reduced, digits = 1), "print_sfo_saem_1_reduced.txt") skip_on_cran() # The following test is platform 
dependent and fails on - # win-builder with current (18 Nov 2022) R-devel and on the Fedora CRAN check systems + # win-builder with current (18 Nov 2022) R-devel, on the Linux R-devel CRAN check systems + # and also using R-devel locally expect_known_output(print(dfop_saem_1, digits = 1), "print_dfop_saem_1.txt") }) -- cgit v1.2.1