-rw-r--r--  DESCRIPTION              |   4
-rw-r--r--  R/calplot.R              |  11
-rw-r--r--  R/inverse.predict.lm.R   |  22
-rw-r--r--  R/lod.R                  |  23
-rw-r--r--  R/loq.R                  |   2
-rw-r--r--  inst/doc/chemCal-001.pdf |   4
-rw-r--r--  inst/doc/chemCal.log     |   8
-rw-r--r--  inst/doc/chemCal.pdf     | bin 107539 -> 107306 bytes
-rw-r--r--  man/inverse.predict.Rd   |  16
-rw-r--r--  man/lod.Rd               |  55
-rw-r--r--  man/loq.Rd               |  76
11 files changed, 153 insertions, 68 deletions
diff --git a/DESCRIPTION b/DESCRIPTION
index 9f45d05..8dc884e 100644
--- a/DESCRIPTION
+++ b/DESCRIPTION
@@ -1,6 +1,6 @@
Package: chemCal
-Version: 0.05-8
-Date: 2006-05-12
+Version: 0.05-11
+Date: 2006-05-16
Title: Calibration functions for analytical chemistry
Author: Johannes Ranke <jranke@uni-bremen.de>
Maintainer: Johannes Ranke <jranke@uni-bremen.de>
diff --git a/R/calplot.R b/R/calplot.R
index feb9727..2deed5a 100644
--- a/R/calplot.R
+++ b/R/calplot.R
@@ -21,9 +21,10 @@ calplot.lm <- function(object, xlim = "auto", ylim = "auto",
m <- object
level <- 1 - alpha
- x <- m$model$x
- y <- m$model$y
- newdata <- data.frame(x = seq(0,max(x),length=250))
+ y <- m$model[[1]]
+ x <- m$model[[2]]
+ newdata <- list(x = seq(0,max(x),length=250))
+ names(newdata) <- names(m$model)[[2]]
pred.lim <- predict(m, newdata, interval = "prediction",level=level)
conf.lim <- predict(m, newdata, interval = "confidence",level=level)
if (xlim == "auto") xlim = c(0,max(x))
@@ -36,9 +37,9 @@ calplot.lm <- function(object, xlim = "auto", ylim = "auto",
ylim = ylim
)
points(x,y, pch = 21, bg = "yellow")
- matlines(newdata$x, pred.lim, lty = c(1, 4, 4),
+ matlines(newdata[[1]], pred.lim, lty = c(1, 4, 4),
col = c("black", "red", "red"))
- matlines(newdata$x, conf.lim, lty = c(1, 3, 3),
+ matlines(newdata[[1]], conf.lim, lty = c(1, 3, 3),
col = c("black", "green4", "green4"))
legend(min(x),
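
Note: the point of this hunk is that calplot.lm() previously assumed the
fitted model used variables literally named x and y. In a model frame,
column 1 is always the response and column 2 the single predictor, so
positional indexing works for any variable names. A minimal sketch with
invented data (conc and signal are hypothetical names):

    d <- data.frame(conc = c(0, 1, 2, 4, 8),
                    signal = c(0.1, 1.2, 2.1, 4.3, 7.9))
    m <- lm(signal ~ conc, data = d)
    names(m$model)                 # "signal" "conc": response first
    y <- m$model[[1]]              # response values, whatever their name
    x <- m$model[[2]]              # predictor values, whatever their name
    newdata <- list(x = seq(0, max(x), length = 250))
    names(newdata) <- names(m$model)[[2]]  # predict() needs the real name
    head(predict(m, newdata, interval = "prediction"))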
diff --git a/R/inverse.predict.lm.R b/R/inverse.predict.lm.R
index 2ead9fb..e5f014c 100644
--- a/R/inverse.predict.lm.R
+++ b/R/inverse.predict.lm.R
@@ -17,32 +17,36 @@ inverse.predict.default <- function(object, newdata, ...,
inverse.predict.lm <- function(object, newdata, ...,
ws = "auto", alpha = 0.05, ss = "auto")
{
+ yname = names(object$model)[[1]]
+ xname = names(object$model)[[2]]
if (ws == "auto") {
ws <- ifelse(length(object$weights) > 0, mean(object$weights), 1)
}
if (length(object$weights) > 0) {
- wx <- split(object$weights,object$model$x)
+ wx <- split(object$weights,object$model[[xname]])
w <- sapply(wx,mean)
} else {
- w <- rep(1,length(split(object$model$y,object$model$x)))
+ w <- rep(1,length(split(object$model[[yname]],object$model[[xname]])))
}
.inverse.predict(object = object, newdata = newdata,
- ws = ws, alpha = alpha, ss = ss, w = w)
+ ws = ws, alpha = alpha, ss = ss, w = w, xname = xname, yname = yname)
}
inverse.predict.rlm <- function(object, newdata, ...,
ws = "auto", alpha = 0.05, ss = "auto")
{
+ yname = names(object$model)[[1]]
+ xname = names(object$model)[[2]]
if (ws == "auto") {
ws <- mean(object$w)
}
- wx <- split(object$weights,object$model$x)
+ wx <- split(object$weights,object$model[[xname]])
w <- sapply(wx,mean)
.inverse.predict(object = object, newdata = newdata,
- ws = ws, alpha = alpha, ss = ss, w = w)
+ ws = ws, alpha = alpha, ss = ss, w = w, xname = xname, yname = yname)
}
-.inverse.predict <- function(object, newdata, ws, alpha, ss, w)
+.inverse.predict <- function(object, newdata, ws, alpha, ss, w, xname, yname)
{
if (length(object$coef) > 2)
stop("More than one independent variable in your model - not implemented")
@@ -53,12 +57,12 @@ inverse.predict.rlm <- function(object, newdata, ...,
ybars <- mean(newdata)
m <- length(newdata)
- yx <- split(object$model$y,object$model$x)
+ yx <- split(object$model[[yname]],object$model[[xname]])
n <- length(yx)
df <- n - length(objects$coef)
x <- as.numeric(names(yx))
ybar <- sapply(yx,mean)
- yhatx <- split(object$fitted.values,object$model$x)
+ yhatx <- split(object$fitted.values,object$model[[xname]])
yhat <- sapply(yhatx,mean)
se <- sqrt(sum(w*(ybar - yhat)^2)/df)
if (ss == "auto") {
@@ -67,7 +71,7 @@ inverse.predict.rlm <- function(object, newdata, ...,
ss <- ss
}
- b1 <- object$coef[["x"]]
+ b1 <- object$coef[[xname]]
ybarw <- sum(w * ybar)/sum(w)
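
Note: this is the same device as in calplot.R. Instead of hard-coding
object$model$x and object$model$y, the response and predictor names are
read off the model frame once and passed down to .inverse.predict(), so
weighted models fitted with arbitrary variable names work too. A short
sketch with invented replicate data:

    d <- data.frame(conc = rep(c(1, 2, 3), each = 2),
                    signal = c(1.1, 0.9, 2.2, 1.8, 3.1, 2.9))
    m <- lm(signal ~ conc, data = d)
    yname <- names(m$model)[[1]]   # "signal"
    xname <- names(m$model)[[2]]   # "conc"
    yx <- split(m$model[[yname]], m$model[[xname]])  # replicates per level
    sapply(yx, mean)               # mean response at each calibration level
    m$coef[[xname]]                # the slope, looked up by its real name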
diff --git a/R/lod.R b/R/lod.R
index 1bb7981..73f5353 100644
--- a/R/lod.R
+++ b/R/lod.R
@@ -10,18 +10,25 @@ lod.default <- function(object, ..., alpha = 0.05, beta = 0.05)
lod.lm <- function(object, ..., alpha = 0.05, beta = 0.05)
{
- y0 <- predict(object, data.frame(x = 0), interval="prediction",
- level = 1 - alpha)
+ xname <- names(object$model)[[2]]
+ newdata <- data.frame(0)
+ names(newdata) <- xname
+ y0 <- predict(object, newdata, interval="prediction",
+ level = 1 - 2 * alpha )
yc <- y0[[1,"upr"]]
xc <- inverse.predict(object,yc)[["Prediction"]]
f <- function(x)
{
- # Here I need the variance of y values as a function of x or
- # y values
- # Strangely, setting the confidence level to 0.5 does not result
- # in a zero confidence or prediction interval
+ newdata <- data.frame(x)
+ names(newdata) <- xname
+ pi.y <- predict(object, newdata, interval = "prediction",
+ level = 1 - 2 * beta)
+ yd <- pi.y[[1,"lwr"]]
+ (yd - yc)^2
}
- lod.x <- optimize(f,interval=c(0,max(object$model$x)))$minimum
- lod.y <- predict(object, data.frame(x = lod.x))
+ lod.x <- optimize(f,interval=c(0,max(object$model[[xname]])))$minimum
+ newdata <- data.frame(x = lod.x)
+ names(lod.x) <- xname
+ lod.y <- predict(object, data.frame(lod.x))
return(list(x = lod.x, y = lod.y))
}
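
Note: the rewritten lod.lm() encodes a two-step construction. First the
critical signal value yc is taken as the one-sided upper prediction bound
at x = 0 (predict.lm() only builds two-sided intervals, hence
level = 1 - 2 * alpha). Then the detection limit is the x value where the
one-sided lower prediction bound reaches yc, found by minimising the
squared difference. A self-contained sketch with invented calibration data:

    d <- data.frame(x = rep(0:4, each = 2),
                    y = c(0.02, 0.08, 1.0, 1.2, 2.1, 1.9, 3.2, 2.8, 4.1, 3.9))
    m <- lm(y ~ x, data = d)
    alpha <- 0.05; beta <- 0.05
    y0 <- predict(m, data.frame(x = 0), interval = "prediction",
                  level = 1 - 2 * alpha)
    yc <- y0[[1, "upr"]]           # decision limit on the signal axis
    f <- function(x) {
      pi.y <- predict(m, data.frame(x = x), interval = "prediction",
                      level = 1 - 2 * beta)
      (pi.y[[1, "lwr"]] - yc)^2    # zero where the lower bound meets yc
    }
    optimize(f, interval = c(0, max(d$x)))$minimum   # detection limit (x)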
diff --git a/R/loq.R b/R/loq.R
index 33e9556..c493a64 100644
--- a/R/loq.R
+++ b/R/loq.R
@@ -5,7 +5,7 @@ loq <- function(object, ..., alpha = 0.05, k = 3, n = 1, w = "auto")
loq.default <- function(object, ..., alpha = 0.05, k = 3, n = 1, w = "auto")
{
- stop("lod is only implemented for univariate lm objects.")
+ stop("loq is only implemented for univariate lm objects.")
}
loq.lm <- function(object, ..., alpha = 0.05, k = 3, n = 1, w = "auto")
diff --git a/inst/doc/chemCal-001.pdf b/inst/doc/chemCal-001.pdf
index 8375fd5..748b5e1 100644
--- a/inst/doc/chemCal-001.pdf
+++ b/inst/doc/chemCal-001.pdf
@@ -2,8 +2,8 @@
%ρ\r
1 0 obj
<<
-/CreationDate (D:20060515234125)
-/ModDate (D:20060515234125)
+/CreationDate (D:20060516214219)
+/ModDate (D:20060516214219)
/Title (R Graphics Output)
/Producer (R 2.3.0)
/Creator (R)
diff --git a/inst/doc/chemCal.log b/inst/doc/chemCal.log
index 785a8cf..d399d73 100644
--- a/inst/doc/chemCal.log
+++ b/inst/doc/chemCal.log
@@ -1,4 +1,4 @@
-This is pdfeTeX, Version 3.141592-1.21a-2.2 (Web2C 7.5.4) (format=pdflatex 2006.4.8) 15 MAY 2006 23:41
+This is pdfeTeX, Version 3.141592-1.21a-2.2 (Web2C 7.5.4) (format=pdflatex 2006.4.5) 16 MAY 2006 21:42
entering extended mode
**chemCal.tex
(./chemCal.tex
@@ -325,8 +325,8 @@ LaTeX Font Info: External font `cmex10' loaded for size
[2 <./chemCal-001.pdf>]
[3] (./chemCal.aux) )
Here is how much of TeX's memory you used:
- 3764 strings out of 94500
- 51832 string characters out of 1175772
+ 3764 strings out of 94499
+ 51832 string characters out of 1175814
96290 words of memory out of 1000000
6863 multiletter control sequences out of 10000+50000
21774 words of font info for 52 fonts, out of 500000 for 2000
@@ -346,4 +346,4 @@ type1/bluesky/cm/cmbx12.pfb> </var/cache/fonts/pk/ljfour/jknappen/tc/tctt1000.6
xmf-tetex/fonts/type1/bluesky/cm/cmtt10.pfb></usr/share/texmf-tetex/fonts/type1
/bluesky/cm/cmr10.pfb></usr/share/texmf-tetex/fonts/type1/bluesky/cm/cmr12.pfb>
</usr/share/texmf-tetex/fonts/type1/bluesky/cm/cmr17.pfb>
-Output written on chemCal.pdf (3 pages, 107539 bytes).
+Output written on chemCal.pdf (3 pages, 107306 bytes).
diff --git a/inst/doc/chemCal.pdf b/inst/doc/chemCal.pdf
index e73cc52..6036bd3 100644
--- a/inst/doc/chemCal.pdf
+++ b/inst/doc/chemCal.pdf
Binary files differ
diff --git a/man/inverse.predict.Rd b/man/inverse.predict.Rd
index 8c2be9c..5be0250 100644
--- a/man/inverse.predict.Rd
+++ b/man/inverse.predict.Rd
@@ -57,14 +57,14 @@
p. 200
}
\examples{
-data(massart97ex3)
-attach(massart97ex3)
-yx <- split(y,factor(x))
-s <- round(sapply(yx,sd),digits=2)
-w <- round(1/(s^2),digits=3)
-weights <- w[factor(x)]
-m <- lm(y ~ x,w=weights)
+ data(massart97ex3)
+ attach(massart97ex3)
+ yx <- split(y,factor(x))
+ s <- round(sapply(yx,sd),digits=2)
+ w <- round(1/(s^2),digits=3)
+ weights <- w[factor(x)]
+ m <- lm(y ~ x,w=weights)
-inverse.predict(m,15,ws = 1.67)
+ inverse.predict(m,15,ws = 1.67)
}
\keyword{manip}
diff --git a/man/lod.Rd b/man/lod.Rd
index e6ce345..15f9603 100644
--- a/man/lod.Rd
+++ b/man/lod.Rd
@@ -3,14 +3,9 @@
\alias{lod.lm}
\alias{lod.rlm}
\alias{lod.default}
-\alias{loq}
-\alias{loq.lm}
-\alias{loq.rlm}
-\alias{loq.default}
-\title{Estimate a limit of detection (LOD) or quantification (LOQ)}
+\title{Estimate a limit of detection (LOD)}
\usage{
- lod(object, \dots, alpha = 0.05, k = 1, n = 1, w = "auto")
- loq(object, \dots, alpha = 0.05, k = 3, n = 1, w = "auto")
+ lod(object, \dots, alpha = 0.05, beta = 0.05)
}
\arguments{
\item{object}{
@@ -19,40 +14,42 @@
with model formula \code{y ~ x} or \code{y ~ x - 1},
optionally from a weighted regression.
}
- \item{alpha}{
- The error tolerance for the prediction of x values in the calculation.
- }
\item{\dots}{
Placeholder for further arguments that might be needed by
future implementations.
}
- \item{k}{
- The inverse of the maximum relative error tolerated at the
- desired LOD/LOQ.
- }
- \item{n}{
- The number of replicate measurements for which the LOD/LOQ should be
- specified.
+ \item{alpha}{
+ The error tolerance for the decision limit (critical value).
}
- \item{w}{
- The weight that should be attributed to the LOD/LOQ. Defaults
- to one for unweighted regression, and to the mean of the weights
- for weighted regression. See \code{\link{massart97ex3}} for
- an example how to take advantage of knowledge about the variance function.
+ \item{beta}{
+ The error tolerance beta for the detection limit.
}
}
\value{
- The estimated limit of detection for a model used for calibration.
-}
+ A list containig the corresponding x and y values of the estimated limit of
+ detection of a model used for calibration. }
\description{
- A useful operationalisation of a lower limit L of a measurement method is
- simply the solution of the equation
- \deqn{L = k c(L)}{L = k * c(L)}
- where c(L) is half of the lenght of the confidence interval at the limit L.
+ The decision limit (German: Nachweisgrenze) is defined as the signal or
+ analyte concentration that is significantly different from the blank signal
+ with a first order error alpha (one-sided significance test).
+ The detection limit, or more precise, the minimum detectable value
+ (German: Erfassungsgrenze), is then defined as the signal or analyte
+ concentration where the probability that the signal is not detected although
+ the analyte is present (type II or false negative error), is beta (also a
+ one-sided significance test).
+}
+\references{
+ J. Inczedy, T. Lengyel, and A.M. Ure (2002) International Union of Pure and
+ Applied Chemistry Compendium of Analytical Nomenclature: Definitive Rules.
+ Web edition.
}
\examples{
data(din32645)
m <- lm(y ~ x, data = din32645)
- lod(m)
+ # The decision limit (critical value) is obtained by using beta = 0.5:
+ lod(m, alpha = 0.01, beta = 0.5) # approx. Nachweisgrenze in Dintest 2002
+ lod(m, alpha = 0.01, beta = 0.01)
+ # In the latter case (Erfassungsgrenze), we get a slight deviation from
+ # Dintest 2002 test data.
}
\keyword{manip}
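
Note on the level = 1 - 2 * alpha idiom used throughout: one end of a
two-sided interval at level 1 - 2 * alpha is exactly a one-sided bound with
error alpha, because both constructions reduce to the same t quantile. This
is also why beta = 0.5 in the example above reproduces the decision limit:
level = 1 - 2 * 0.5 = 0 collapses the prediction band onto the fitted line.
A quick check:

    alpha <- 0.05
    df <- 8                        # arbitrary degrees of freedom
    qt(1 - alpha, df)              # one-sided critical value, error alpha
    level <- 1 - 2 * alpha
    qt((1 + level) / 2, df)        # quantile behind the two-sided interval
                                   # at that level -- identical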
diff --git a/man/loq.Rd b/man/loq.Rd
new file mode 100644
index 0000000..1030399
--- /dev/null
+++ b/man/loq.Rd
@@ -0,0 +1,76 @@
+\name{loq}
+\alias{loq}
+\alias{loq.lm}
+\alias{loq.rlm}
+\alias{loq.default}
+\title{Estimate a limit of quantification (LOQ)}
+\usage{
+ loq(object, \dots, alpha = 0.05, k = 3, n = 1, w = "auto")
+}
+\arguments{
+ \item{object}{
+ A univariate model object of class \code{\link{lm}} or
+ \code{\link[MASS:rlm]{rlm}}
+ with model formula \code{y ~ x} or \code{y ~ x - 1},
+ optionally from a weighted regression.
+ }
+ \item{alpha}{
+ The error tolerance for the prediction of x values in the calculation.
+ }
+ \item{\dots}{
+ Placeholder for further arguments that might be needed by
+ future implementations.
+ }
+ \item{k}{
+ The inverse of the maximum relative error tolerated at the
+ desired LOQ.
+ }
+ \item{n}{
+ The number of replicate measurements for which the LOQ should be
+ specified.
+ }
+ \item{w}{
+ The weight that should be attributed to the LOQ. Defaults
+ to one for unweighted regression, and to the mean of the weights
+ for weighted regression. See \code{\link{massart97ex3}} for
+ an example how to take advantage of knowledge about the
+ variance function.
+ }
+}
+\value{
+ The estimated limit of quantification for a model used for calibration.
+}
+\description{
+ A useful operationalisation of a limit of quantification is simply the
+ solution of the equation
+ \deqn{L = k c(L)}{L = k * c(L)}
+ where c(L) is half of the length of the confidence interval at the limit L as
+ estimated by \code{\link{inverse.predict}}. By virtue of this formula, the
+ limit of detection is the x value, where the relative error
+ of the quantification with the given calibration model is 1/k.
+}
+\examples{
+ data(massart97ex3)
+ attach(massart97ex3)
+ m0 <- lm(y ~ x)
+ loq(m0)
+
+ # Now we use a weighted regression
+ yx <- split(y,factor(x))
+ s <- round(sapply(yx,sd),digits=2)
+ w <- round(1/(s^2),digits=3)
+ weights <- w[factor(x)]
+ mw <- lm(y ~ x,w=weights)
+ loq(mw)
+
+ # In order to define the weight at the loq, we can use
+ # the variance function 1/y for the model
+ mwy <- lm(y ~ x, w = 1/y)
+
+ # Let's do this with one iteration only
+ loq(mwy, w = 1 / predict(mwy,list(x = loq(mwy))))
+
+ # We can get better by doing replicate measurements
+ loq(mwy, n = 3, w = 1 / predict(mwy,list(x = loq(mwy))))
+}
+\keyword{manip}
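
Note: the defining equation L = k c(L) can be checked after the fact; at
the estimated LOQ, the half-width of the inverse-prediction confidence
interval should be about L / k. A sketch, assuming (as in later chemCal
versions) that inverse.predict() reports this half-width under the name
"Confidence":

    library(chemCal)
    data(massart97ex3)
    m <- lm(y ~ x, data = massart97ex3)
    k <- 3
    L <- loq(m, k = k)
    y.L <- predict(m, data.frame(x = L))   # signal at the estimated LOQ
    # "Confidence" is an assumption about the return value, see above
    c.L <- inverse.predict(m, y.L)[["Confidence"]]
    L / c.L                                # should come out close to k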
