[R] How to use mle or similar with integrate?

Spencer Graves spencer.graves at pdf.com
Sat Jun 24 21:50:20 CEST 2006


	  Could you use 'optim' directly?  This won't return a nice object of 
class 'mle', but it will give you parameter estimates.
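
	  For instance, here is a minimal sketch of that approach, using the
same Poisson example as in the 'mle' examples farther down (the names
'nll' and 'op' are just illustrative):

      x <- 0:10
      y <- c(26, 17, 13, 12, 20, 5, 9, 8, 5, 4, 8)
      nll <- function(p)                    # p = c(ymax, xhalf)
          -sum(dpois(y, lambda=p[1]/(1+x/p[2]), log=TRUE))
      op <- optim(c(ymax=15, xhalf=6), nll, method="BFGS", hessian=TRUE)
      op$par                                # parameter estimates
      sqrt(diag(solve(op$hessian)))         # approximate standard errors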

	  If you want an object of class 'mle', have you worked through the 
examples in the help file?

	  Also, are your arguments "a" and "b" scalars?  If so, did you try 
passing them to your 'minuslogl' function in the 'fixed' argument?
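
	  As a hedged sketch of that suggestion (the data 'z', the function
'nll', and the parameter names here are made up for illustration): with
the standard 'mle', scalar constants can be supplied through 'fixed' so
that only the remaining arguments are estimated.

      library(stats4)
      set.seed(1)
      z <- rnorm(50, mean=5, sd=2)
      ## 'a' and 'b' are treated as known scalar constants, not parameters
      nll <- function(mu=0, a=2, b=3)
          -sum(dnorm(z, mean=mu, sd=sqrt(a+b), log=TRUE))
      fit.ab <- mle(nll, start=list(mu=mean(z)), fixed=list(a=2, b=3))
      coef(fit.ab)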

	  If this won't work for you, you can try the functions 'mle.' and 
'profile.mle' below.  If I'm not mistaken, the standard 'mle' requires 
the 'fixed' argument to be a named list of scalars.  I don't remember 
for sure, but I think the 'mle.' function below differs from the 
standard 'mle' only in allowing 'fixed' arguments of length greater than 
1 and then dropping them from operations that require scalars.  My 
'profile.mle' is modified from the function in 'stats4' to make it less 
likely to terminate with an error, at least for the applications I used.

	  Hope this helps.
	  Spencer Graves
p.s.  I thought about sending these functions to the package maintainer, 
but the "Maintainer" for the "stats4" package is the "R Core Team 
<R-core at r-project.org>", and I decided not to bother that whole group.

#######################################
mle. <-
function(minuslogl, start = formals(minuslogl), method = "BFGS",
     fixed = list(), ...)
{
     call <- match.call()
     n <- names(fixed)
     fullcoef <- formals(minuslogl)
     if (any(!n %in% names(fullcoef)))
         stop("some named arguments in 'fixed' are not arguments to the supplied log-likelihood")
     fullcoef[n] <- fixed
     if (!missing(start) && (!is.list(start) || is.null(names(start))))
         stop("'start' must be a named list")
     start[n] <- NULL
     start <- sapply(start, eval.parent)
     nm <- names(start)
     oo <- match(nm, names(fullcoef))
     if (any(is.na(oo)))
         stop("some named arguments in 'start' are not arguments to the supplied log-likelihood")
     start <- start[order(oo)]
     f <- function(p) {
         l <- as.list(p)
         names(l) <- nm
         l[n] <- fixed
         do.call("minuslogl", l)
     }
     dots <- list(...)
     oout <- optim(start, f, method = method, hessian = TRUE,
         ...)
     oout$fixed <- fixed
     oout$dots <- list(...)
     coef <- oout$par
     vcov <- if (length(coef))
         solve(oout$hessian)
     else matrix(numeric(0), 0, 0)
     min <- oout$value
     fcoef <- fullcoef
     fcoef[nm] <- coef
     len.coef <- sapply(fcoef, length)
#   Drop "fixed" arguments of length != 1, as they
#   almost certainly relate to arguments of "minuslogl"
#   that are NOT parameters to be estimated.
     f.coef <- fcoef[len.coef==1]
     newMLE <- new("mle", call = call, coef = coef, fullcoef = unlist(f.coef),
         vcov = vcov, min = min, details = oout, minuslogl = minuslogl,
         method = method)
     newMLE
}

#########################
#getMethods("profile", "package:stats4")

profile.mle <- function (fitted, ...){
     .local <- function (fitted, which = 1:p, maxsteps = 100,
         alpha = 0.01, zmax = sqrt(qchisq(1 - alpha/2, p)), del = zmax/5,
         trace = FALSE, ...)
     {
       onestep <- function(step, call, fixed) {
             bi <- B0[i] + sgn * step * del * std.err[i]
#           Additional fix for profile
#           Get original fix
             {
               if(is.list(fixed) && (length(fixed)>0)){
                 fixed[[pi]] <- NULL
                 k. <- length(fixed)
                 fix2 <- vector(k.+1, mode="list")
                 names(fix2) <- c(names(fixed), pi)
                 fix2[1:k.] <- fixed
                 fix2[k.+1] <- bi
               }
               else{
                 fix2 <- list(bi)
                 names(fix2) <- pi
               }
             }
             call$fixed <- fix2
             pfit <- try(eval.parent(call, 2), silent = TRUE)
             if (inherits(pfit, "try-error"))
                 return(NA)
             else {
                  zz <- 2 * (pfit@min - fitted@min)
                 ri <- pv0
                  ri[, names(pfit@coef)] <- pfit@coef
                 ri[, pi] <- bi
                 if(zz<=0){
                   msg0 <- paste("Profiling has found a better ",
                       "solution (log(LR) = ", zz, " <0), so the ",
                       "original fit had not converged:  ",
                       "This will be ignored for now, but try ",
                       "mle(..., start=list(", sep="")
                   pars <- paste(Pnames, coef(pfit), sep="=")
                   pars. <- paste(pars, collapse=", ")
                   msg1 <- paste(msg0, pars., ")).\n", sep="")
                   warning(msg1)
                 }
                 z0 <- max(zz, 0)
                 z <- sgn * sqrt(z0)
                 pvi <<- rbind(pvi, ri)
                 zi <<- c(zi, z)
               }
             if (trace)
                 cat(bi, z, "\n")
             list(z = z, zfit = pfit)
########### end of subfunction 'onestep' ##########
         }
       betterSum <- function(fit){
         if((!inherits(fit, "try-error")) && (class(fit)=="mle")){
           fitMin <- fit@min
           if(!is.numeric(fitMin)){
             warning("'mle' returned 'min' of class ", class(fitMin),
                  "; must be numeric.")
             return(B1.)
           }
           if((nMin <- length(fitMin))!=1){
             warning("'mle' returned 'min' of length ", nMin,
                  "; must be 1")
             return(B1.)
           }
           if(fitMin < B1.$minuslogl)
             return(list(coef=fit@fullcoef[Pnames], minuslogl=fit@min,
                nImprovementsFound=B1.$nImprovementsFound+1))
         }
         B1.
########### end of subfunction 'betterSum' ##########
       }
         summ <- summary(fitted)
         std.err <- summ@coef[, "Std. Error"]
         Pnames <- names(B0 <- fitted@coef)
         pv0 <- t(as.matrix(B0))
         B0. <- list(coef=B0, minuslogl=fitted@min)
         B1. <- list(coef=B0, minuslogl=fitted@min,
              nImprovementsFound=0)
         p <- length(Pnames)
         prof <- vector("list", length = length(which))
         names(prof) <- Pnames[which]
         call <- fitted@call
         call$minuslogl <- fitted@minuslogl
         call$method <- fitted@method
         ndeps <- eval.parent(call$control$ndeps)
         parscale <- eval.parent(call$control$parscale)
         fixed <- fitted@details$fixed
         dots <- fitted@details$dots
         if(length(dots)>0)
           for(i.. in names(dots))
             call[[i..]] <- dots[[i..]]
         for (i in which) {
             zi <- 0
             pvi <- pv0
             pi <- Pnames[i]
             if (!is.null(ndeps))
                 call$control$ndeps <- ndeps[-i]
             if (!is.null(parscale))
                 call$control$parscale <- parscale[-i]
             for (sgn in c(-1, 1)) {
                 if (trace)
                   cat("\nParameter:", pi, c("down", "up")[(sgn +
                     1)/2 + 1], "\n")
                 step <- 0
                 z <- 0
                 call$start <- as.list(B0)
                 lastz <- 0
                 while ((step <- step + 1) < maxsteps && abs(z) <
                   zmax) {
                   z. <- onestep(step, call, fixed)
                   B1. <- betterSum(z.$zfit)
                   z <- z.$z
                   if (is.na(z))
                     break
                   lastz <- z
                 }
                 if (abs(lastz) < zmax) {
                   for (dstep in c(0.2, 0.4, 0.6, 0.8, 0.9)) {
                     z. <- onestep(step-1+dstep, call, fixed)
                     B1. <- betterSum(z.$zfit)
                     z <- z.$z
                     if (is.na(z) || abs(z) > zmax)
                       break
                   }
                 }
                 else if (length(zi) < 5) {
                   mxstep <- step - 1
                   step <- 0.5
                   while ((step <- step + 1) < mxstep){
                        z. <- onestep(step, call, fixed)
                        B1. <- betterSum(z.$zfit)
#                      z <- z.$z
                      }
                 }
             }
             si <- order(pvi[, i])
             prof[[pi]] <- data.frame(z = zi[si])
             prof[[pi]]$par.vals <- pvi[si, , drop = FALSE]
         }
         new("profile.mle", profile = prof, summary = summ)
     }
     .local(fitted, ...)
}
###################################################
# mle examples (the 'stats4' package provides mle and the "mle" class):
      library(stats4)
      x <- 0:10
      y <- c(26, 17, 13, 12, 20, 5, 9, 8, 5, 4, 8)
      ll <- function(ymax=15, xhalf=6)
          -sum(stats::dpois(y, lambda=ymax/(1+x/xhalf), log=TRUE))
      (fit <- mle(ll))
      (fit. <- mle.(ll))
      (fit.5 <- mle.(ll, fixed=list(xhalf=6)))

      summary(fit)
      summary(fit.)
      logLik(fit)
      logLik(fit.)
      vcov(fit)
      vcov(fit.)
      plot(profile(fit), absVal=FALSE)
      plot(prof. <- profile.mle(fit.), absVal=FALSE)
      confint(fit)
      confint(prof.)

      ## use bounded optimization
      ## the lower bounds are really > 0, but we use >=0 to stress-test profiling
      (fit1 <- mle(ll, method="L-BFGS-B", lower=c(0, 0)))
      (fit1. <- mle.(ll, method="L-BFGS-B", lower=c(0, 0)))
      plot(profile(fit1), absVal=FALSE)
      plot(profile.mle(fit1), absVal=FALSE)

      ## a better parametrization:
      ll2 <- function(lymax=log(15), lxhalf=log(6))
          -sum(stats::dpois(y, lambda=exp(lymax)/(1+x/exp(lxhalf)), log=TRUE))
      (fit2 <- mle(ll2))
      (fit2. <- mle.(ll2))
      plot(profile(fit2), absVal=FALSE)
      plot(prof2 <- profile.mle(fit2), absVal=FALSE)
      exp(confint(fit2))
      exp(confint(prof2))

      ll. <- function(ymax=15, xhalf=6, x, y)
          -sum(stats::dpois(y, lambda=ymax/(1+x/xhalf), log=TRUE))

fit <- mle(ll)
fit. <- mle.(ll)
all.equal(fit, fit.)

prof <- profile(fit)
plot(prof)
with(prof@profile$xhalf,
      plot(par.vals[,2], z, type="l"))
with(prof@profile$xhalf,
      plot(par.vals[,2], abs(z), type="l"))
with(prof@profile$xhalf,
      plot(par.vals[,1], abs(z), type="l"))

## undebug(profile.mle)   # leftover from an interactive debugging session
prof. <- profile.mle(fit.)
plot(prof.)

(fit.xy <- mle.(ll., start=list(ymax=20, xhalf=3),
             fixed=list(x=x, y=y)))
(prof.fit <- profile.mle(fit.xy))
plot(prof.fit)

logLik(fit.xy)
logLik(fit)
logLik(fit.)

mle2 <- function(x, y){
   mle.(ll., start=list(ymax=20, xhalf=3),
        fixed=list(x=x, y=y))
}
fit2 <- mle2(x, y)
prof2 <- profile.mle(fit2)
plot(prof2)
confint(prof2)

mle3 <- function(x, y){
   do.call('mle.',
     list(ll., start=list(ymax=20, xhalf=3),
         fixed=list(x=x, y=y)))
}
fit3 <- mle3(x, y)
prof3 <- profile.mle(fit3)
confint(prof3)
plot(prof3)

mle4 <- function(x, y){
   eval(mle.(ll., start=list(ymax=20, xhalf=3),
         fixed=list(x=x, y=y)))
}
fit4 <- mle4(x, y)
prof4 <- profile.mle(fit4)
confint(prof4)

mle5 <- function(x, y){
   mle.call <- paste(
      "mle.(ll.,",
      "start=list(ymax=20, xhalf=3),",
      "fixed=list(x=x, y=y))")
   mle.parsed <- parse(text=mle.call)
   eval(mle.parsed)
}

## debug(mle5)   # uncomment to step through mle5 interactively
fit5 <- mle5(x, y)
prof5 <- profile.mle(fit5)
confint(prof5)
plot(prof5)

#######################################
Rainer M Krug wrote:
> Hi
> 
> I have the following formula (I hope it is clear - if not, I can try to
> do better next time):
> 
> h(x, a, b) =
>   integral(alpha = 0 to pi/2) of
>     [ integral(x = D/sin(alpha) to Inf) of f(x, a, b) dx ]
>   dalpha
> 
> and I want to do an mle with it.
> I know how to use mle() and I also know about integrate(). My problem is
> how to pass the parameter values a and b to the integrate function.
> 
> In other words, how can I write
> 
> h <- function...
> 
> so that I can estimate a and b?
> 
> Thanks,
> 
> Rainer
> 
>
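
	  Regarding the 'integrate' part of the question: arguments supplied
to integrate() after the limits are passed on to the integrand, so the
current values of 'a' and 'b' can be forwarded at each likelihood
evaluation.  A minimal sketch follows; the integrand 'f', the constant
'D', and the helper 'h' are made up for illustration, not the poster's
actual model.

      f <- function(x, a, b) a*exp(-b*x)       # hypothetical integrand
      D <- 1
      h <- function(a, b) {
          inner <- function(alpha)             # integral over x for one alpha
              integrate(f, lower=D/sin(alpha), upper=Inf, a=a, b=b)$value
          ## integrate() calls its integrand with a vector of abscissae,
          ## so the outer integrand is vectorized over alpha with sapply()
          integrate(function(alpha) sapply(alpha, inner),
                    lower=0, upper=pi/2)$value
      }
      h(a=2, b=3)                              # value of the double integral
      ## h() could then be used inside the minus-log-likelihood passed to mle()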


