Diffstat (limited to 'CakeSummary.R')
-rw-r--r--  CakeSummary.R  37
1 file changed, 19 insertions(+), 18 deletions(-)
diff --git a/CakeSummary.R b/CakeSummary.R
index 70f01e6..5849a4a 100644
--- a/CakeSummary.R
+++ b/CakeSummary.R
@@ -3,8 +3,8 @@
# and display the summary itself.
# Parts modified from package mkin
-# Parts Tessella for Syngenta: Copyright (C) 2011-2016 Syngenta
-# Tessella Project Reference: 6245, 7247, 8361, 7414
+# Parts Tessella for Syngenta: Copyright (C) 2011-2020 Syngenta
+# Tessella Project Reference: 6245, 7247, 8361, 7414, 10091
# The CAKE R modules are free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -100,8 +100,8 @@ CakeFOMCBackCalculatedDT<-function(parms.all) {
# type - type of compartment
# obs_var - compartment name
# parms.all - list of parameters
-# sannMaxIter - the maximum amount of iterations to do for SANN
-CakeDT<-function(type, obs_var, parms.all, sannMaxIter) {
+# dfopDtMaxIter - the maximum number of iterations to use for the DFOP DT calculation
+CakeDT<-function(type, obs_var, parms.all, dfopDtMaxIter) {
if (type == "SFO") {
k_name = paste("k", obs_var, sep="_")
k_tot = parms.all[k_name]
@@ -122,21 +122,19 @@ CakeDT<-function(type, obs_var, parms.all, sannMaxIter) {
fraction <- g * exp( - k1 * t) + (1 - g) * exp( - k2 * t)
(fraction - (1 - x/100))^2
}
-
- DTminBounds <- 0.001
- # Determine a decent starting point for numerical iteration. The latter two terms are also lower bounds for DT50.
- DT50min <- max(0.001, (1 / k1) * log(g * 2), (1 / k2) * log((1 - g) * 2))
+ # Determine a decent starting point for numerical iteration. These are lower bounds for DT50.
+ DT50min <- max((1 / k1) * log(g * 2), (1 / k2) * log((1 - g) * 2))
- # Set the starting point for the numerical solver to a be a bit smaller than the computed minimum as the R optim function seems to get confused if the min is
- # greater than the answer it's trying to converge to (up to its level of accuracy).
+ # Set the starting point for the numerical solver to be a bit smaller than the computed minimum.
DT50Initial <- DT50min * 0.9
# Results greater than 10,000 are not interesting. The R optim routine also handles very large values unreliably and can claim to converge when it is nowhere near.
if (DT50min > 10000){
DT50 = ">10,000"
}else{
- DT50.temp <- optim(DT50Initial, f, method="SANN", control=list(fnscale=0.05, maxit=sannMaxIter), x = 50, lower = DTminBounds)
+ DT50.temp <- optim(DT50Initial, f, method="Nelder-Mead", control=list(maxit=dfopDtMaxIter, abstol=1E-10), x = 50)
+
DT50.o = DT50.temp$par
DT50.converged = DT50.temp$convergence == 0
DT50 = ifelse(!DT50.converged || DT50.o <= 0, NA, DT50.o)
@@ -146,17 +144,16 @@ CakeDT<-function(type, obs_var, parms.all, sannMaxIter) {
}
}
- # Determine a decent starting point for numerical iteration. The latter two terms are also lower bounds for DT90.
- DT90min <- max(0.001, (1 / k1) * log(g * 10), (1 / k2) * log((1 - g) * 10))
+ # Determine a decent starting point for numerical iteration. These are lower bounds for DT90.
+ DT90min <- max((1 / k1) * log(g * 10), (1 / k2) * log((1 - g) * 10))
- # Set the starting point for the numerical solver to a be a bit smaller than the computed minimum as the R optim function seems to get confused if the min is
- # greater than the answer it's trying to converge to (up to its level of accuracy).
+ # Set the starting point for the numerical solver to be a bit smaller than the computed minimum.
DT90Initial <- DT90min * 0.9
if (DT90min > 10000){
DT90 = ">10,000"
}else{
- DT90.temp <- optim(DT90Initial, f, method="SANN", control=list(fnscale=0.05, maxit=sannMaxIter), x = 90, lower = DTminBounds)
+ DT90.temp <- optim(DT90Initial, f, method="Nelder-Mead", control=list(maxit=dfopDtMaxIter, abstol=1E-10), x = 90)
DT90.o = DT90.temp$par
DT90.converged = DT90.temp$convergence == 0
DT90 = ifelse(!DT90.converged || DT90.o <= 0, NA, DT90.o)
@@ -321,7 +318,8 @@ summary.CakeFit <- function(object, data = TRUE, distimes = TRUE, halflives = TR
dimnames(param) <- list(pnames, c("Estimate", "Std. Error",
"t value", "Pr(>t)", "Lower CI (90%)", "Upper CI (90%)", "Lower CI (95%)", "Upper CI (95%)"))
- ans <- list(residuals = object$residuals,
+ ans <- list(ssr = object$ssr,
+ residuals = object$residuals,
residualVariance = resvar,
sigma = sqrt(resvar),
modVariance = modVariance,
@@ -332,7 +330,8 @@ summary.CakeFit <- function(object, data = TRUE, distimes = TRUE, halflives = TR
par = param)
} else {
param <- cbind(param)
- ans <- list(residuals = object$residuals,
+ ans <- list(ssr = object$ssr,
+ residuals = object$residuals,
residualVariance = resvar,
sigma = sqrt(resvar),
modVariance = modVariance,
@@ -382,6 +381,8 @@ print.summary.CakeFit <- function(x, digits = max(3, getOption("digits") - 3), .
cat("\nOptimised parameters:\n")
printCoefmat(x$par, digits = digits, ...)
}
+
+ cat("\nSum of squared residuals:", format(signif(x$ssr, digits)))
cat("\nResidual standard error:",
format(signif(x$sigma, digits)), "on", rdf, "degrees of freedom\n")
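For reference, below is a minimal standalone sketch of the DFOP DT calculation as it looks after this change: the closed-form lower bounds seed a Nelder-Mead search on the squared residual of the DFOP decline curve, replacing the previous SANN approach. The function name dfop_dt, the example values of k1, k2 and g, and the maxIter default (standing in for dfopDtMaxIter) are illustrative assumptions and not part of CakeSummary.R.

# Sketch only: mirrors the patched CakeDT logic for the DFOP case.
dfop_dt <- function(x, k1, k2, g, maxIter = 1000) {
  # Squared distance between the DFOP decline curve and the target fraction.
  f <- function(t, x) {
    fraction <- g * exp(-k1 * t) + (1 - g) * exp(-k2 * t)
    (fraction - (1 - x / 100))^2
  }

  # Each exponential term alone gives a lower bound on DTx (x = 50 or 90);
  # the larger bound, scaled back slightly, is the starting point.
  DTmin <- max((1 / k1) * log(g * 100 / (100 - x)),
               (1 / k2) * log((1 - g) * 100 / (100 - x)))
  if (DTmin > 10000) return(">10,000")

  # Note: R warns that one-dimensional Nelder-Mead can be unreliable,
  # but this mirrors the call made in the patch.
  fit <- optim(DTmin * 0.9, f, method = "Nelder-Mead",
               control = list(maxit = maxIter, abstol = 1e-10), x = x)
  if (fit$convergence != 0 || fit$par <= 0) NA else fit$par
}

dfop_dt(50, k1 = 0.1, k2 = 0.01, g = 0.6)   # DT50 for the assumed parameters
dfop_dt(90, k1 = 0.1, k2 = 0.01, g = 0.6)   # DT90 for the assumed parameters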
