aboutsummaryrefslogtreecommitdiff
path: root/man
diff options
context:
space:
mode:
Diffstat (limited to 'man')
-rw-r--r--man/IORE.solution.Rd19
-rw-r--r--man/add_err.Rd8
-rw-r--r--man/logLik.mkinfit.Rd29
-rw-r--r--man/mccall81_245T.Rd6
-rw-r--r--man/mkin_long_to_wide.Rd6
-rw-r--r--man/mkin_wide_to_long.Rd4
-rw-r--r--man/mkinfit.Rd177
-rw-r--r--man/plot.mkinfit.Rd4
-rw-r--r--man/plot.mmkin.Rd6
-rw-r--r--man/summary.mkinfit.Rd12
10 files changed, 89 insertions, 182 deletions
diff --git a/man/IORE.solution.Rd b/man/IORE.solution.Rd
index 13c81302..65760f95 100644
--- a/man/IORE.solution.Rd
+++ b/man/IORE.solution.Rd
@@ -29,15 +29,16 @@
Media
}
\examples{
- plot(function(x) IORE.solution(x, 100, 0.2, 1.3), 0, 2,
- ylim = c(0, 100))
- fit.fomc <- mkinfit("FOMC", FOCUS_2006_C, quiet = TRUE)
- fit.iore <- mkinfit("IORE", FOCUS_2006_C, quiet = TRUE)
- fit.iore.deS <- mkinfit("IORE", FOCUS_2006_C, solution_type = "deSolve", quiet = TRUE)
+ plot(function(x) IORE.solution(x, 100, 0.2, 1.3), 0, 2, ylim = c(0, 100))
+ \dontrun{
+ fit.fomc <- mkinfit("FOMC", FOCUS_2006_C, quiet = TRUE)
+ fit.iore <- mkinfit("IORE", FOCUS_2006_C, quiet = TRUE)
+ fit.iore.deS <- mkinfit("IORE", FOCUS_2006_C, solution_type = "deSolve", quiet = TRUE)
- print(data.frame(coef(fit.fomc), coef(fit.iore), coef(fit.iore.deS),
- row.names = paste("model par", 1:3)))
- print(rbind(fomc = endpoints(fit.fomc)$distimes, iore = endpoints(fit.iore)$distimes,
- iore.deS = endpoints(fit.iore)$distimes))
+ print(data.frame(fit.fomc$par, fit.iore$par, fit.iore.deS$par,
+ row.names = paste("model par", 1:4)))
+ print(rbind(fomc = endpoints(fit.fomc)$distimes, iore = endpoints(fit.iore)$distimes,
+                    iore.deS = endpoints(fit.iore.deS)$distimes))
+ }
}
\keyword{ manip }
diff --git a/man/add_err.Rd b/man/add_err.Rd
index e098d95c..506c6fdc 100644
--- a/man/add_err.Rd
+++ b/man/add_err.Rd
@@ -78,14 +78,12 @@ d_SFO_SFO_err <- add_err(d_SFO_SFO, function(x) 10, n = 3, seed = 123456789 )
# Name the datasets for nicer plotting
names(d_SFO_SFO_err) <- paste("Dataset", 1:3)
-# Name the model in the list of models (with only one member in this case)
-# for nicer plotting later on.
-# Be quiet and use the faster Levenberg-Marquardt algorithm, as the datasets
-# are easy and examples are run often. Use only one core not to offend CRAN
+# Name the model in the list of models (with only one member in this case) for
+# nicer plotting later on. Be quiet and use only one core not to offend CRAN
# checks
f_SFO_SFO <- mmkin(list("SFO-SFO" = m_SFO_SFO),
d_SFO_SFO_err, cores = 1,
- quiet = TRUE, method.modFit = "Marq")
+ quiet = TRUE)
plot(f_SFO_SFO)
diff --git a/man/logLik.mkinfit.Rd b/man/logLik.mkinfit.Rd
index 736ccd1e..5e910c2e 100644
--- a/man/logLik.mkinfit.Rd
+++ b/man/logLik.mkinfit.Rd
@@ -5,27 +5,13 @@
}
\description{
This function simply calculates the product of the likelihood densities
- calculated using \code{\link{dnorm}}, i.e. assuming normal distribution.
+ calculated using \code{\link{dnorm}}, i.e. assuming normal distribution,
+    with the mean predicted by the degradation model, and the
+ standard deviation predicted by the error model.
The total number of estimated parameters returned with the value
of the likelihood is calculated as the sum of fitted degradation
model parameters and the fitted error model parameters.
-
- For the case of unweighted least squares fitting, we calculate one
- constant standard deviation from the residuals using \code{\link{sd}}
- and add one to the number of fitted degradation model parameters.
-
- For the case of manual weighting, we use the weight given for each
- observation as standard deviation in calculating its likelihood
- and the total number of estimated parameters is equal to the
- number of fitted degradation model parameters.
-
- In the case of iterative reweighting, the variances obtained by this
- procedure are used in the likelihood calculations, and the number of
- estimated parameters is obtained by the number of degradation model
- parameters plus the number of variance model parameters, i.e. the number of
- observed variables if the reweighting method is "obs", and two if the
- reweighting method is "tc".
}
\usage{
\method{logLik}{mkinfit}(object, ...)
@@ -54,13 +40,10 @@
m1 = mkinsub("SFO")
)
d_t <- FOCUS_2006_D
- d_t[23:24, "value"] <- c(NA, NA) # can't cope with zero values at the moment
f_nw <- mkinfit(sfo_sfo, d_t, quiet = TRUE) # no weighting (weights are unity)
- f_obs <- mkinfit(sfo_sfo, d_t, reweight.method = "obs", quiet = TRUE)
- f_tc <- mkinfit(sfo_sfo, d_t, reweight.method = "tc", quiet = TRUE)
- d_t$err <- d_t$value # Manual weighting assuming sigma ~ y
- f_man <- mkinfit(sfo_sfo, d_t, err = "err", quiet = TRUE)
- AIC(f_nw, f_obs, f_tc, f_man)
+ f_obs <- mkinfit(sfo_sfo, d_t, error_model = "obs", quiet = TRUE)
+ f_tc <- mkinfit(sfo_sfo, d_t, error_model = "tc", quiet = TRUE)
+ AIC(f_nw, f_obs, f_tc)
}
}
\author{
diff --git a/man/mccall81_245T.Rd b/man/mccall81_245T.Rd
index 22368d6d..00a05781 100644
--- a/man/mccall81_245T.Rd
+++ b/man/mccall81_245T.Rd
@@ -13,10 +13,10 @@
\format{
A dataframe containing the following variables.
\describe{
- \item{\code{name}}{the name of the compound observed. Note that T245 is used as
+ \item{\code{name}}{the name of the compound observed. Note that T245 is used as
an acronym for 2,4,5-T. T245 is a legitimate object name
in R, which is necessary for specifying models using
- \code{\link{mkinmod}}.}
+ \code{\link{mkinmod}}.}
\item{\code{time}}{a numeric vector containing sampling times in days after
treatment}
\item{\code{value}}{a numeric vector containing concentrations in percent of applied radioactivity}
@@ -35,13 +35,13 @@
fit.1 <- mkinfit(SFO_SFO_SFO, subset(mccall81_245T, soil == "Commerce"), quiet = TRUE)
summary(fit.1)$bpar
endpoints(fit.1)
- # No convergence, no covariance matrix ...
# k_phenol_sink is really small, therefore fix it to zero
fit.2 <- mkinfit(SFO_SFO_SFO, subset(mccall81_245T, soil == "Commerce"),
parms.ini = c(k_phenol_sink = 0),
fixed_parms = "k_phenol_sink", quiet = TRUE)
summary(fit.2)$bpar
endpoints(fit.1)
+ plot_sep(fit.2)
}
}
\keyword{datasets}
diff --git a/man/mkin_long_to_wide.Rd b/man/mkin_long_to_wide.Rd
index 5dd335de..c83f7c78 100644
--- a/man/mkin_long_to_wide.Rd
+++ b/man/mkin_long_to_wide.Rd
@@ -7,9 +7,9 @@
mkin_long_to_wide(long_data, time = "time", outtime = "time")
}
\description{
- This function takes a dataframe in the long form as required by \code{\link{modCost}}
- and converts it into a dataframe with one independent variable and several
- dependent variables as columns.
+ This function takes a dataframe in the long form, i.e. with a row
+ for each observed value, and converts it into a dataframe with one
+ independent variable and several dependent variables as columns.
}
\arguments{
\item{long_data}{
diff --git a/man/mkin_wide_to_long.Rd b/man/mkin_wide_to_long.Rd
index b531c41c..dc523755 100644
--- a/man/mkin_wide_to_long.Rd
+++ b/man/mkin_wide_to_long.Rd
@@ -8,7 +8,7 @@ mkin_wide_to_long(wide_data, time = "t")
}
\description{
This function simply takes a dataframe with one independent variable and several
- dependent variable and converts it into the long form as required by \code{\link{modCost}}.
+ dependent variable and converts it into the long form as required by \code{\link{mkinfit}}.
}
\arguments{
\item{wide_data}{
@@ -20,7 +20,7 @@ mkin_wide_to_long(wide_data, time = "t")
}
}
\value{
- Dataframe in long format as needed for \code{\link{modCost}}.
+ Dataframe in long format as needed for \code{\link{mkinfit}}.
}
\author{
Johannes Ranke
diff --git a/man/mkinfit.Rd b/man/mkinfit.Rd
index 59bb5e5f..d9efe05f 100644
--- a/man/mkinfit.Rd
+++ b/man/mkinfit.Rd
@@ -4,17 +4,16 @@
Fit a kinetic model to data with one or more state variables
}
\description{
- This function uses the Flexible Modelling Environment package
- \code{\link{FME}} to create a function calculating the model cost, i.e. the
- deviation between the kinetic model and the observed data. This model cost is
- then minimised using the Port algorithm \code{\link{nlminb}},
- using the specified initial or fixed parameters and starting values.
- Per default, parameters in the kinetic models are internally transformed in order
- to better satisfy the assumption of a normal distribution of their estimators.
- In each step of the optimsation, the kinetic model is solved using the
- function \code{\link{mkinpredict}}. The variance of the residuals for each
- observed variable can optionally be iteratively reweighted until convergence
- using the argument \code{reweight.method = "obs"}.
+ This function maximises the likelihood of the observed data using
+ the Port algorithm \code{\link{nlminb}}, and the specified initial or fixed
+    parameters and starting values. In each step of the optimisation, the kinetic
+ model is solved using the function \code{\link{mkinpredict}}. The parameters
+ of the selected error model are fitted simultaneously with the degradation
+ model parameters, as both of them are arguments of the likelihood function.
+
+ Per default, parameters in the kinetic models are internally transformed in
+ order to better satisfy the assumption of a normal distribution of their
+ estimators.
}
\usage{
mkinfit(mkinmod, observed,
@@ -25,36 +24,31 @@ mkinfit(mkinmod, observed,
solution_type = c("auto", "analytical", "eigen", "deSolve"),
method.ode = "lsoda",
use_compiled = "auto",
- method.modFit = c("Port", "Marq", "SANN", "Nelder-Mead", "BFGS", "CG", "L-BFGS-B"),
- maxit.modFit = "auto",
- control.modFit = list(),
+ control = list(eval.max = 300, iter.max = 200),
transform_rates = TRUE,
transform_fractions = TRUE,
- plot = FALSE, quiet = FALSE, err = NULL,
- weight = c("none", "manual", "std", "mean", "tc"),
- tc = c(sigma_low = 0.5, rsd_high = 0.07),
- scaleVar = FALSE,
+ quiet = FALSE,
atol = 1e-8, rtol = 1e-10, n.outtimes = 100,
- error_model = c("auto", "obs", "tc", "const"),
+ error_model = c("const", "obs", "tc"),
trace_parms = FALSE, ...)
}
\arguments{
\item{mkinmod}{
A list of class \code{\link{mkinmod}}, containing the kinetic model to be
fitted to the data, or one of the shorthand names ("SFO", "FOMC", "DFOP",
- "HS", "SFORB"). If a shorthand name is given, a parent only degradation
+ "HS", "SFORB", "IORE"). If a shorthand name is given, a parent only degradation
model is generated for the variable with the highest value in
\code{observed}.
}
\item{observed}{
- The observed data. It has to be in the long format as described in
- \code{\link{modFit}}, i.e. the first column called "name" must contain the
- name of the observed variable for each data point. The second column must
- contain the times of observation, named "time". The third column must be
- named "value" and contain the observed values. Optionally, a further column
- can contain weights for each data point. Its name must be passed as a
- further argument named \code{err} which is then passed on to
- \code{\link{modFit}}.
+ A dataframe with the observed data. The first column called "name" must
+ contain the name of the observed variable for each data point. The second
+ column must contain the times of observation, named "time". The third
+ column must be named "value" and contain the observed values. Zero values
+ in the "value" column will be removed, with a warning, in order to
+ avoid problems with fitting the two-component error model. This is not
+ expected to be a problem, because in general, values of zero are not
+    observed in degradation data, since there is a lower limit of detection.
}
\item{parms.ini}{
A named vector of initial values for the parameters, including parameters
@@ -102,10 +96,10 @@ mkinfit(mkinmod, observed,
solution of the model is used. This is only implemented for simple
degradation experiments with only one state variable, i.e. with no
metabolites. The default is "auto", which uses "analytical" if possible,
- otherwise "eigen" if the model can be expressed using eigenvalues and
- eigenvectors, and finally "deSolve" for the remaining models (time
- dependence of degradation rates and metabolites). This argument is passed
- on to the helper function \code{\link{mkinpredict}}.
+ otherwise "deSolve" if a compiler is present, and "eigen" if no
+ compiler is present and the model can be expressed using eigenvalues and
+ eigenvectors. This argument is passed on to the helper function
+ \code{\link{mkinpredict}}.
}
\item{method.ode}{
The solution method passed via \code{\link{mkinpredict}} to
@@ -114,37 +108,11 @@ mkinfit(mkinmod, observed,
}
\item{use_compiled}{
If set to \code{FALSE}, no compiled version of the \code{\link{mkinmod}}
- model is used, in the calls to \code{\link{mkinpredict}} even if
- a compiled verion is present.
- }
- \item{method.modFit}{
- The optimisation method passed to \code{\link{modFit}}.
-
- In order to optimally deal with problems where local minima occur, the
- "Port" algorithm is now used per default as it is less prone to get trapped
- in local minima and depends less on starting values for parameters than
- the Levenberg Marquardt variant selected by "Marq". However, "Port" needs
- more iterations.
-
- The former default "Marq" is the Levenberg Marquardt algorithm
- \code{\link{nls.lm}} from the package \code{minpack.lm} and usually needs
- the least number of iterations.
-
- The "Pseudo" algorithm is not included because it needs finite parameter bounds
- which are currently not supported.
-
- The "Newton" algorithm is not included because its number of iterations
- can not be controlled by \code{control.modFit} and it does not appear
- to provide advantages over the other algorithms.
+ model is used in the calls to \code{\link{mkinpredict}} even if a compiled
+ version is present.
}
- \item{maxit.modFit}{
- Maximum number of iterations in the optimisation. If not "auto", this will
- be passed to the method called by \code{\link{modFit}}, overriding
- what may be specified in the next argument \code{control.modFit}.
- }
- \item{control.modFit}{
- Additional arguments passed to the optimisation method used by
- \code{\link{modFit}}.
+ \item{control}{
+ A list of control arguments passed to \code{\link{nlminb}}.
}
\item{transform_rates}{
Boolean specifying if kinetic rate constants should be transformed in the
@@ -152,8 +120,8 @@ mkinfit(mkinmod, observed,
assumption of normal distribution of the estimator. If TRUE, also
alpha and beta parameters of the FOMC model are log-transformed, as well
as k1 and k2 rate constants for the DFOP and HS models and the break point
- tb of the HS model.
- If FALSE, zero is used as a lower bound for the rates in the optimisation.
+ tb of the HS model. If FALSE, zero is used as a lower bound for the rates
+ in the optimisation.
}
\item{transform_fractions}{
Boolean specifying if formation fractions constants should be transformed in the
@@ -164,28 +132,9 @@ mkinfit(mkinmod, observed,
data. The transformation used for these transformations is the
\code{\link{ilr}} transformation.
}
- \item{plot}{
- Should the observed values and the numerical solutions be plotted at each
- stage of the optimisation?
- }
\item{quiet}{
- Suppress printing out the current model cost after each improvement?
- }
- \item{err }{either \code{NULL}, or the name of the column with the
- \emph{error} estimates, used to weigh the residuals (see details of
- \code{\link{modCost}}); if \code{NULL}, then the residuals are not weighed.
- }
- \item{weight}{
- only if \code{err}=\code{NULL}: how to weight the residuals, one of "none",
- "std", "mean", see details of \code{\link{modCost}}, or "tc" for the
- two component error model. The option "manual" is available for
- the case that \code{err}!=\code{NULL}, but it is not necessary to specify it.
- }
- \item{tc}{The two components of the error model as used for (initial)
- weighting}.
- \item{scaleVar}{
- Will be passed to \code{\link{modCost}}. Default is not to scale Variables
- according to the number of observations.
+ Suppress printing out the current value of the negative log-likelihood
+ after each improvement?
}
\item{atol}{
Absolute error tolerance, passed to \code{\link{ode}}. Default is 1e-8,
@@ -202,36 +151,32 @@ mkinfit(mkinmod, observed,
The default value is 100.
}
\item{error_model}{
- If the error model is "auto", the generalised error model described by Ranke
- et al. (2019) is used for specifying the likelihood function. Simplications
- of this error model are tested as well and the model yielding the lowest
- AIC is returned.
+ If the error model is "const", a constant standard deviation
+ is assumed.
If the error model is "obs", each observed variable is assumed to have its
- own variance.
+ own variance.
- If the error model is "tc" (two-component error model).
- When using this method, a two component error model similar to the
- one described by Rocke and Lorenzato (1995) is used for setting up
- the likelihood function, as described in the abovementioned paper.
- Note that this model deviates from the model by Rocke and Lorenzato, as
- their model implies that the errors follow a lognormal distribution for
- large values, not a normal distribution as assumed by this method.
+ If the error model is "tc" (two-component error model), a two component
+ error model similar to the one described by Rocke and Lorenzato (1995) is
+ used for setting up the likelihood function. Note that this model deviates
+ from the model by Rocke and Lorenzato, as their model implies that the
+ errors follow a lognormal distribution for large values, not a normal
+ distribution as assumed by this method.
}
\item{trace_parms}{
Should a trace of the parameter values be listed?
}
\item{\dots}{
- Further arguments that will be passed to \code{\link{modFit}}.
+ Further arguments that will be passed on to \code{\link{deSolve}}.
}
}
\value{
- A list with "mkinfit" and "modFit" in the class attribute.
- A summary can be obtained by \code{\link{summary.mkinfit}}.
+ A list with "mkinfit" in the class attribute. A summary can be obtained by
+ \code{\link{summary.mkinfit}}.
}
\seealso{
- Plotting methods \code{\link{plot.mkinfit}} and
- \code{\link{mkinparplot}}.
+ Plotting methods \code{\link{plot.mkinfit}} and \code{\link{mkinparplot}}.
Comparisons of models fitted to the same data can be made using \code{\link{AIC}}
by virtue of the method \code{\link{logLik.mkinfit}}.
@@ -240,12 +185,6 @@ mkinfit(mkinmod, observed,
\code{\link{mmkin}}.
}
\note{
- The implementation of iteratively reweighted least squares is inspired by the
- work of the KinGUII team at Bayer Crop Science (Walter Schmitt and Zhenglei
- Gao). A similar implemention can also be found in CAKE 2.0, which is the
- other GUI derivative of mkin, sponsored by Syngenta.
-}
-\note{
When using the "IORE" submodel for metabolites, fitting with
"transform_rates = TRUE" (the default) often leads to failures of the
numerical ODE solver. In this situation it may help to switch off the
@@ -312,25 +251,11 @@ SFO_SFO.ff <- mkinmod(parent = mkinsub("SFO", "m1"),
m1 = mkinsub("SFO"), use_of_ff = "max")
f.noweight <- mkinfit(SFO_SFO.ff, FOCUS_2006_D, quiet = TRUE)
summary(f.noweight)
-f.irls <- mkinfit(SFO_SFO.ff, FOCUS_2006_D, reweight.method = "obs", quiet = TRUE)
-summary(f.irls)
-f.w.mean <- mkinfit(SFO_SFO.ff, FOCUS_2006_D, weight = "mean", quiet = TRUE)
-summary(f.w.mean)
-f.w.value <- mkinfit(SFO_SFO.ff, subset(FOCUS_2006_D, value != 0), err = "value",
- quiet = TRUE)
-summary(f.w.value)
+f.obs <- mkinfit(SFO_SFO.ff, FOCUS_2006_D, error_model = "obs", quiet = TRUE)
+summary(f.obs)
+f.tc <- mkinfit(SFO_SFO.ff, FOCUS_2006_D, error_model = "tc", quiet = TRUE)
+summary(f.tc)
}
-\dontrun{
-# Manual weighting
-dw <- FOCUS_2006_D
-errors <- c(parent = 2, m1 = 1)
-dw$err.man <- errors[FOCUS_2006_D$name]
-f.w.man <- mkinfit(SFO_SFO.ff, dw, err = "err.man", quiet = TRUE)
-summary(f.w.man)
-f.w.man.irls <- mkinfit(SFO_SFO.ff, dw, err = "err.man", quiet = TRUE,
- reweight.method = "obs")
-summary(f.w.man.irls)
-}
}
\keyword{ optimize }
diff --git a/man/plot.mkinfit.Rd b/man/plot.mkinfit.Rd
index 42f2559c..733cdf76 100644
--- a/man/plot.mkinfit.Rd
+++ b/man/plot.mkinfit.Rd
@@ -106,10 +106,10 @@ plot_sep(fit, sep_obs = TRUE, show_residuals = TRUE, show_errmin = TRUE, \dots)
}
\examples{
# One parent compound, one metabolite, both single first order, path from
-# parent to sink included, use Levenberg-Marquardt for speed
+# parent to sink included
SFO_SFO <- mkinmod(parent = mkinsub("SFO", "m1", full = "Parent"),
m1 = mkinsub("SFO", full = "Metabolite M1" ))
-fit <- mkinfit(SFO_SFO, FOCUS_2006_D, quiet = TRUE, method.modFit = "Marq")
+fit <- mkinfit(SFO_SFO, FOCUS_2006_D, quiet = TRUE)
plot(fit)
plot(fit, show_residuals = TRUE)
diff --git a/man/plot.mmkin.Rd b/man/plot.mmkin.Rd
index 23f35c4b..b3312292 100644
--- a/man/plot.mmkin.Rd
+++ b/man/plot.mmkin.Rd
@@ -48,10 +48,10 @@
Johannes Ranke
}
\examples{
- # Only use one core not to offend CRAN checks, use Levenberg-Marquardt for speed
- fits <- mmkin(c("FOMC", "HS"),
+ # Only use one core not to offend CRAN checks
+ fits <- mmkin(c("FOMC", "HS"),
list("FOCUS B" = FOCUS_2006_B, "FOCUS C" = FOCUS_2006_C), # named list for titles
- cores = 1, quiet = TRUE, method.modFit = "Marq")
+ cores = 1, quiet = TRUE)
plot(fits[, "FOCUS C"])
plot(fits["FOMC", ])
diff --git a/man/summary.mkinfit.Rd b/man/summary.mkinfit.Rd
index 3d71917e..cbc9098c 100644
--- a/man/summary.mkinfit.Rd
+++ b/man/summary.mkinfit.Rd
@@ -5,10 +5,10 @@
Summary method for class "mkinfit"
}
\description{
- Lists model equations, the summary as returned by \code{\link{summary.modFit}},
- the chi2 error levels calculated according to FOCUS guidance (2006) as far
- as defined therein, and optionally the data, consisting of observed, predicted
- and residual values.
+ Lists model equations, initial parameter values, optimised parameters with some
+ uncertainty statistics, the chi2 error levels calculated according to FOCUS
+ guidance (2006) as defined therein, formation fractions, DT50 values and
+ optionally the data, consisting of observed, predicted and residual values.
}
\usage{
\method{summary}{mkinfit}(object, data = TRUE, distimes = TRUE, alpha = 0.05, ...)
@@ -39,8 +39,7 @@
}
}
\value{
- The summary function returns a list derived from
- \code{\link{summary.modFit}}, with components, among others
+ The summary function returns a list with components, among others
\item{version, Rversion}{The mkin and R versions used}
\item{date.fit, date.summary}{The dates where the fit and the summary were produced}
\item{use_of_ff}{Was maximum or minimum use made of formation fractions}
@@ -54,6 +53,7 @@
\item{errmin }{The chi2 error levels for each observed variable.}
\item{bparms.ode }{All backtransformed ODE parameters, for use as starting parameters for
related models.}
+ \item{errparms }{Error model parameters. }
\item{ff }{The estimated formation fractions derived from the fitted model.}
\item{distimes }{The DT50 and DT90 values for each observed variable.}
\item{SFORB}{If applicable, eigenvalues of SFORB components of the model.}

Contact - Imprint