########################################################################################## ## Functions to fit space time models using mcglm approach ############################### ## Author: Wagner Hugo Bonat LEG/IMADA ################################################### ## Date: 26/03/2015 ###################################################################### ########################################################################################## ########################################################################################## ## Build a Voronoi tecelation ############################################################ ########################################################################################## voronoi <- function(coords.x1,coords.x2,poligono){ xy <- xy.coords(coords.x1,coords.x2) xy <- data.frame(x = xy$x, y = xy$y) boundary <- as(poligono, "gpc.poly") dummies <- data.frame(x = c(-1, -1, 1, 1), y = c(-1, 1, -1,1)) * 10 * max(abs(xy)) xy <- rbind(xy, dummies) vpolys <- voronoi.polygons(voronoi.mosaic(xy, duplicate = "remove")) vpolys <- lapply(vpolys, as, "gpc.poly") subpolys <- lapply(vpolys, intersect, boundary) mapa = append.poly(subpolys[[1]],subpolys[[2]]) for(i in 3:length(coords.x1)){ mapa = append.poly(mapa,subpolys[[i]])} return(subpolys)} ########################################################################################## ## Convert one object from grp class to sp class ######################################### ########################################################################################## grp2sp <- function(subpolys,ID){ lista = list() lista.pols2 <- list() for(i in 1:length(subpolys)){ obje = get.pts(subpolys[[i]]) obje = data.frame(obje[[1]]$x,obje[[1]]$y) names(obje) = c("x","y") substi = dim(obje)[1] obje[substi+1,]=obje[1,] lista[[1]] = Polygon(obje) lista.pols = Polygons(lista,ID= ID[i]) print(i) lista.pols2[[i]] = lista.pols } final = SpatialPolygons(lista.pols2) return(final)} ########################################################################################## ## Log link function ##################################################################### ########################################################################################## my.log <- function(beta, X, extra, derivative = TRUE){ saida <- list() Xbeta = X%*%beta if(is.null(extra)){saida[[1]] <- exp(Xbeta)} if(!is.null(extra)){saida[[1]] <- exp(Xbeta + log(extra))} n <- length(saida[[1]]) mu <- as.numeric(saida[[1]]) if(derivative == TRUE){saida[[2]] <- t(Diagonal(n,mu)%*%X)} saida[[3]] <- as.numeric(saida[[1]]) return(saida) } ########################################################################################## ## Generic score estimating function - Regression parameters ############################# ########################################################################################## quasi.score <- function(E.Y, Y, t.D, inv.C){ e1 = t.D%*%inv.C esc <- e1%*%(Y-E.Y) Sensi <- e1%*%t(t.D) saida <- list("Score" = esc, "S.beta" = -Sensi) return(saida) } ########################################################################################## ## Build a structure matrix for CAR models based on a list of neighbors ################### ########################################################################################## mat.espacial <- function(list.viz,ncol,nrow){ vizi <- Matrix(0,nrow=nrow,ncol=ncol, sparse = TRUE) diagonal <- c() for(i in 1:ncol){ diagonal[i] <- length(list.viz[[i]])} diag(vizi) <- diagonal for(i in 1:ncol){ vizi[i,c(list.viz[[i]])] <- -1} return(vizi)} ########################################################################################## ## Build a structure matrix of a first order random walk ################################# ########################################################################################## mat.temporal <- function(nrow,ncol){ R <- Matrix(0,nrow=nrow,ncol=ncol, sparse = TRUE) ## Restrições de borda R[1,c(1,2)] <- c(1,-1) R[ncol,c(ncol-1,ncol)] <- c(-1,1) ## Corpo da matriz n <- ncol-1 for(i in 2:n){ R[i,c(i-1,i,i+1)] <- c(-1,2,-1)} R <- as(R, "symmetricMatrix") return(R)} ########################################################################################## ## Inverse of square root of V and its derivative ######################################## ########################################################################################## inv.V.sqrt <- function(power,mu){ n.obs <- length(mu) sqrt.V <- Diagonal(n.obs, sqrt(1/mu^power)) D.sqrt.V <- Diagonal(n.obs, - ((mu^power)*log(mu))/ (2* (mu^power)^(3/2))) saida <- list("V.sqrt" = sqrt.V, "D.V.sqrt" = D.sqrt.V) return(saida) } ########################################################################################## ## Multiply each matrix for parameters and sum to get the Omega matrix ################### ########################################################################################## build.SXU <- function(par, XU){ if(class(XU) != "list"){XU <- list(XU)} #if(length(XU) != length(par)){print("Incorrect number of parameters")} saida <- mapply('*', XU, par, SIMPLIFY = FALSE) saida <- Reduce("+",saida) ## Sum all matrices inside the list saida <- as(saida,"symmetricMatrix") return(saida) } ########################################################################################## ## Invese of Omega matrix ############################################################### ########################################################################################## build.invOmega <- function(tau.vector, list.R){ inv.Omega <- build.SXU(par = tau.vector, XU = list.R) return(inv.Omega) } ########################################################################################## ## Sandwich matrices multiplication ###################################################### ########################################################################################## sandwich <- function(interno, externo, sign = "positive"){ if(sign == "positive"){saida <- externo%*%interno%*%externo} if(sign == "negative"){saida <- -(externo%*%interno%*%externo)} return(saida) } ########################################################################################## ## Inverse of C and weight matrices ###################################################### ########################################################################################## build.invC <- function(tau.vector, power, mu, list.R, power.fixed = FALSE){ invs.V <- inv.V.sqrt(power = power, mu = mu) #tau.vector <- exp(tau.vector) inv.Omega <- build.invOmega(tau.vector = tau.vector, list.R = list.R) #for(i in 1:length(tau.vector)){list.R[[i]] <- tau.vector[i]*list.R[[i]]} inv.C <- sandwich(interno = inv.Omega, externo = invs.V$V.sqrt, sign = "positive") W.tau <- lapply(list.R, sandwich, externo = invs.V$V.sqrt, sign = "negative") if(power.fixed == FALSE){ W.power <- -2*(invs.V$D.V.sqrt%*%inv.Omega%*%invs.V$V.sqrt) W.tau <- c(W.power,W.tau) } #diag(inv.C) <- diag(inv.C)+0.001 saida <- list("inv.C" = inv.C, "W" = W.tau) return(saida) } ########################################################################################## ## Core of Pearson estimating function ################################################### ########################################################################################## core.pearson.cholesky <- function(W, traco, res){ saida <- t(res)%*%W%*%res - traco return(saida) } core.pearson.inla <- function(W, res, C){ saida <- t(res)%*%W%*%res - sum(W*C) return(saida) } multiplica <- function(W,C){ saida <- W%*%C return(saida)} multiplica.trace <- function(W,C){ saida <- sum(W*C) return(saida)} traco <- function(M){ tr <- sum(diag(M)) return(tr)} build.Sensitive <- function(WC){ npar <- length(WC) Sensitive <- matrix(NA, ncol = npar, nrow = npar) for(i in 1:npar){ for(j in 1:npar){ Sensitive[i,j] <- -sum(WC[[i]]*WC[[j]])} } return(Sensitive) } ########################################################################################## ## Inverse using RcppArmadillo ########################################################### ########################################################################################## #src <- ' #arma::mat X = Rcpp::as(X_); #arma::mat saida = inv_sympd(X); # return(wrap(saida)); #' #mySolve <- cxxfunction(signature(X_="numeric"), body = src, plugin="RcppArmadillo") ########################################################################################## ## Pearson estimating function ########################################################### ########################################################################################## pearson.function <- function(tau.vector, power, mu, Y, list.R, power.fixed, strategy){ res <- Y - mu MAT <- build.invC(tau.vector = tau.vector, power = power, mu = mu, list.R = list.R, power.fixed = power.fixed) if(strategy == "chol1"){ chol.invC <- chol(MAT$inv.C) #s.C <- solve(chol.invC) #C <- s.C%*%s.C #C = solve(MAT$inv.C) C <- chol2inv(chol.invC) rm(chol.invC) gc(reset = TRUE) WC <- lapply(MAT$W, multiplica, C) tr.escore <- lapply(WC, traco) npar <- length(c(power,tau.vector)) Sensitive <- build.Sensitive(WC) escore <- c() for(i in 1:npar){ escore[i] <- as.numeric(core.pearson.cholesky(W = MAT$W[[i]], res = res, traco = tr.escore[[i]])) } Variability <- V.cov2(cov.parameter = c(power,tau.vector), mu = mu, MAT = MAT, C=C, Y = Y, power.fixed = FALSE) saida <- list("escore" = escore, "Sensitive" = Sensitive, "Variability" = Variability) } if(strategy == "inla"){ C <- inla.qinv(MAT$inv.C) escore <- lapply(MAT$W, core.pearson.inla, res = res, C=C) escore <- unlist(lapply(escore, as.numeric)) WC <- lapply(MAT$W, multiplica, C) Sensitive <- build.Sensitive(WC) Variability <- V.cov2(cov.parameter = c(power,tau.vector), mu = mu, MAT = MAT, C=C, Y = Y, power.fixed = FALSE) saida <- list("escore" = escore, "Sensitive" = Sensitive, "Variability" = Variability) } if(strategy == "simple"){ C <- inla.qinv(MAT$inv.C) pearson <- function(par, mu, list.R, power.fixed = FALSE){ tau.vector <- par[2:length(par)] power = par[1] MAT <- build.invC(tau.vector = tau.vector, power = power, mu = mu, list.R = list.R, power.fixed = power.fixed) C <- inla.qinv(MAT$inv.C) escore <- lapply(MAT$W, core.pearson.inla, res = res, C=C) escore <- unlist(lapply(escore, as.numeric)) return(escore) } escore <- pearson(par = c(power, tau.vector), mu = mu, list.R = list.R) Sensitive <- gradient(pearson, x = c(power,tau.vector), centered = TRUE, mu = mu, list.R = list.R) Variability <- V.cov3(cov.parameter = c(power,tau.vector), mu = mu, MAT = MAT, C=C, Sen = Sensitive, Y = Y, power.fixed = FALSE) saida <- list("escore" = escore, "Sensitive" = Sensitive, "Variability" = Variability) } if(strategy == "Richardson"){ C <- inla.qinv(MAT$inv.C) pearson <- function(par, mu, list.R, power.fixed = FALSE){ #print(par) tau.vector <- par[2:length(par)] power = par[1] MAT <- build.invC(tau.vector = tau.vector, power = power, mu = mu, list.R = list.R, power.fixed = power.fixed) C <- inla.qinv(MAT$inv.C) escore <- lapply(MAT$W, core.pearson.inla, res = res, C=C) escore <- unlist(lapply(escore, as.numeric)) return(escore) } escore <- pearson(par = c(power, tau.vector), mu = mu, list.R = list.R) Sensitive <- jacobian(pearson, x = c(power,tau.vector), method = "Richardson", mu = mu, list.R = list.R) Variability <- V.cov3(cov.parameter = c(power,tau.vector), mu = mu, MAT = MAT, C=C, Sen = Sensitive, Y = Y, power.fixed = FALSE) saida <- list("escore" = escore, "Sensitive" = Sensitive, "Variability" = Variability) } if(strategy == "empirical"){ C <- Y%*%t(Y) escore <- lapply(MAT$W, multiplica.trace, C=C) escore <- unlist(lapply(escore, as.numeric)) WC <- lapply(MAT$W, multiplica, C) Sensitive <- build.Sensitive(WC) Variability <- V.cov2(cov.parameter = c(power,tau.vector), mu = mu, MAT = MAT, C=C, Y = Y, power.fixed = FALSE) saida <- list("escore" = escore, "Sensitive" = Sensitive, "Variability" = Variability) } return(saida) } ########################################################################################## ## Chaser algorithm ###################################################################### ########################################################################################## chaser <- function(beta.ini, tau.vector, power, Y, X, list.R, tol=0.0001, max.iter = 20, extra, power.fixed = FALSE, strategy = "chol"){ cov.ini <- c(power,tau.vector) solucao.beta <- matrix(NA, max.iter,length(beta.ini)) solucao.cov <- matrix(NA, max.iter, length(cov.ini)) solucao.beta[1,] <- beta.ini solucao.cov[1,] <- cov.ini for(i in 2:max.iter){ mean.struc <- my.log(beta = beta.ini, X = X, extra = extra) MAT = build.invC(power = cov.ini[1], tau.vector = cov.ini[2:length(cov.ini)], mu = mean.struc[[1]], list.R = list.R, power.fixed = power.fixed) temp.invC <- MAT$inv.C beta.temp <- quasi.score(E.Y = mean.struc[[1]], Y = Y, t.D = mean.struc[[2]], inv.C = temp.invC) #print(sum(beta.temp$Score)) solucao.beta[i,] <- as.numeric(beta.ini - solve(beta.temp$S.beta, beta.temp$Score)) mean.struc <- my.log(beta = solucao.beta[i,], X = X, extra = extra) res = Y - mean.struc[[1]] cov.temp <- try(pearson.function(tau.vector = cov.ini[2:length(cov.ini)], power = cov.ini[1], mu = mean.struc[[1]], Y = Y, list.R = list.R, power.fixed = power.fixed, strategy = strategy)) solucao.cov[i,] <- as.numeric(cov.ini - solve(cov.temp$Sensitive, cov.temp$escore)) #print(round(cov.temp$escore,4)) beta.ini <- solucao.beta[i,] cov.ini <- solucao.cov[i,] print(round(cov.ini,3)) tolera <- abs(c(solucao.beta[i,],solucao.cov[i,]) - c(solucao.beta[i-1,],solucao.cov[i-1,])) if(all(tolera < tol) == TRUE)break } saida <<- list("IterationRegression" = solucao.beta, "IterarionCovariance" = solucao.cov, "Regression" = beta.ini, "Covariance" = cov.ini) return(saida) } ########################################################################################## ## Pearson escore ######################################################################## ########################################################################################## pearson.escore <- function(par, mu, Y, list.R, power.fixed, strategy){ escore <- pearson.function(tau.vector = par[2:length(par)], power = par[1], mu = mu, Y = Y, list.R = list.R, power.fixed = power.fixed, strategy = strategy)$escore return(as.numeric(escore)) } ########################################################################################## ## Variance-covariance matrix regression parameters ###################################### ########################################################################################## V.beta <- function(mean.struc, Y, MAT){ S.beta = quasi.score(E.Y = mean.struc[[1]], Y = Y, t.D = mean.struc[[2]], inv.C = MAT$inv.C)$S.beta V.b <- -S.beta return(V.b) } ########################################################################################## ## Variance-covariance matrix covariance parameters ###################################### ########################################################################################## V.cov <- function(cov.parameter, mu, list.R, Y, power.fixed = FALSE, strategy){ MAT <- build.invC(tau.vector = cov.parameter[2:length(cov.parameter)], power = cov.parameter[1], mu = mu, list.R = list.R, power.fixed = power.fixed) C <- inla.qinv(MAT$inv.C) if(strategy == "chol1"){C <- chol2inv(chol(MAT$inv.C))} k4 <- (Y - mu)^4 - 3*diag(C)^2 n.par <- length(cov.parameter) Var.cov <- matrix(NA, ncol = n.par, nrow = n.par) for(i in 1:n.par){ for(j in 1:n.par){ W1 <- MAT$W[[i]] W2 <- MAT$W[[j]] T1 <- W1%*%C T2 <- W2%*%C Var.cov[i,j] <- 2*sum(T1*T2) + sum(k4*diag(W1)*diag(W2)) } } return(Var.cov) } ########################################################################################## ## Auxiliary function to compute thirth moment ########################################### ########################################################################################## covprod <- function(A, res, W){ res =as.numeric(res) saida <- (res%*%W%*%res)%*%(t(res)%*%A) return(as.numeric(saida)) } ######################################################################################### ## Cross covariance matrix between regression and covariance parameters ################# ########################################################################################## V.c.b <- function(mean.struc, MAT, res){ n.beta <- dim(mean.struc[[2]])[1] n.cov <- length(MAT$W) Wlist <- MAT$W A = mean.struc[[2]]%*%MAT$inv.C n.beta <- dim(A)[1] mat.V.c.b <- matrix(NA, ncol = n.cov, nrow = n.beta) for(j in 1:n.beta){ for(i in 1:n.cov){ mat.V.c.b[j,i] <- covprod(A[j,], Wlist[[i]], r = res) } } return(mat.V.c.b) } ########################################################################################## ## Joint variance-covariance matrix ###################################################### ########################################################################################## VarCov <- function(mean.struc, cov.parameter, list.R, MAT, Y, strategy){ V.b <- V.beta(mean.struc = mean.struc, Y = Y, MAT = MAT) V.c <- V.cov(cov.parameter, mu = mean.struc[[1]], Y = Y, list.R = list.R, strategy = strategy) res = Y - mean.struc[[1]] V.cross <- V.c.b(mean.struc = mean.struc, MAT = MAT, res = res) p1 <- rbind(as.matrix(V.b), as.matrix(t(V.cross))) p2 <- rbind(as.matrix(V.cross), as.matrix(t(V.c))) COV.mat <- cbind(p1,p2) return(COV.mat) } ########################################################################################## ## Cross sensitivity matrix ############################################################## ########################################################################################## S.cov.beta <- function(beta, cov.parameter, mean.struc, X, list.R, MAT){ D.beta = D.C.beta(beta = beta, cov.parameter = cov.parameter, mu = mean.struc[[1]], X = X, list.R = list.R) n.beta = length(beta) n.cov <- length(cov.parameter) S.c.b <- matrix(NA, ncol = n.beta, nrow = n.cov) for(i in 1:n.cov){ for(j in 1:n.beta){ S.c.b[i,j] <- -sum(MAT$W[[i]]*D.beta[[j]]) } } return(S.c.b) } ########################################################################################## ## Derivative of V{-1/2} with respect beta ############################################### ########################################################################################## D.V.beta <- function(beta, power.hat, X){ saida <- list() N <- dim(X)[1] for(i in 1:length(beta)){ p1 <- exp(-((X%*%beta)*power.hat)/2) saida[[i]] <- Diagonal(N, -power.hat*X[,i]*p1)} return(saida) } ########################################################################################## ## Derivative of C with respect to beta ################################################## ########################################################################################## D.C.beta <- function(beta,cov.parameter, mu, X, list.R){ invs.V <- inv.V.sqrt(power = cov.parameter[1], mu = mu) inv.Omega <- build.invOmega(tau.vector = cov.parameter[2:length(cov.parameter)], list.R = list.R) D.invV.beta <- D.V.beta(beta = beta, power.hat = cov.parameter[1], X = X) invC <- invs.V$V.sqrt%*%inv.Omega%*%invs.V$V.sqrt C <- inla.qinv(invC) D.invC.beta <- list() n.par <- length(beta) for(i in 1:n.par){ D.invC.beta[[i]] <- D.invV.beta[[i]]%*%inv.Omega%*%invs.V$V.sqrt + invs.V$V.sqrt%*%inv.Omega%*%D.invV.beta[[i]]} D.C.beta2 <- list() for(i in 1:n.par){ D.C.beta2[[i]] <- -C%*%D.invC.beta[[i]]%*%C } return(D.C.beta2) } ########################################################################################## ## Joint sensitivity matrix ############################################################## ########################################################################################## inv.joint.sensitivity <- function(beta, cov.parameter, Y, list.R, mean.struc, MAT, strategy){ inv.S.beta <- solve(quasi.score(E.Y = mean.struc[[1]] , Y = Y, t.D = mean.struc[[2]], inv.C = MAT$inv.C)$S.beta) res <- Y - mean.struc[[1]] inv.S.cov = solve(pearson.function(tau.vector = cov.parameter[2:length(cov.parameter)], power = cov.parameter[1], mu = mean.struc[[1]], Y = as.numeric(Y), list.R = list.R, power.fixed = FALSE, strategy = strategy)$Sensitive) S.c.b = S.cov.beta(beta = beta, cov.parameter = cov.parameter, mean.struc = mean.struc, X = X, list.R = list.R, MAT = MAT) tt <- dim(S.c.b) mat0 <- matrix(0, ncol = tt[1], nrow = tt[2]) cross.term <- -inv.S.cov%*%S.c.b%*%inv.S.beta p1 <- rbind(as.matrix(inv.S.beta),as.matrix(cross.term)) p2 <- rbind(mat0, as.matrix(inv.S.cov)) S.j <- cbind(p1,p2) return(as.matrix(S.j)) } vcov.cglm <- function(saida.model, X, Y, list.R, extra, strategy){ mean.struc <- my.log(beta = saida.model$Regression, X = X, extra = extra) n.cov <- length(saida.model$Covariance) MAT <- build.invC(tau.vector = saida.model$Covariance[2:n.cov], power = saida.model$Covariance[1], mu = mean.struc[[1]], list.R = list.R) Variability <- VarCov(mean.struc = mean.struc, cov.parameter = saida.model$Covariance, list.R = list.R, MAT = MAT, Y = Y, strategy = strategy) Sensibility <- inv.joint.sensitivity(beta = saida.model$Regression, cov.parameter = saida.model$Covariance, Y = Y, list.R = list.R, mean.struc = mean.struc, MAT = MAT, strategy = strategy) VCOV <- Sensibility%*%Variability%*%t(Sensibility) return(VCOV) } summary.cglm <- function(saida.model, X, Y, list.R, extra, strategy){ VCOV <<- vcov.cglm(saida.model = saida.model, X = X, Y = Y, list.R = list.R, extra = extra, strategy = strategy) std.error <- sqrt(diag(VCOV)) n.beta <- length(saida.model$Regression) Regression <- round(data.frame("Estimates" = saida.model$Regression, "Std.error" = std.error[1:n.beta], "Z.value" = saida.model$Regression/std.error[1:n.beta]),4) n.tot <- length(c(saida.model$Regression,saida.model$Covariance)) Covariance <- round(data.frame("Estimates" = saida.model$Covariance, "Std.error" = std.error[c(n.beta+1):n.tot], "Z.value" = saida.model$Covariance/std.error[c(n.beta+1):n.tot]),4) saida <- list("Regression" = Regression, "Covariance" = Covariance) return(saida) } ########################################################################################## ## Initial values ######################################################################## ########################################################################################## mcglm.initial <- function(Y, X, list.R, extra){ fit <- glm(Y ~ X, family = quasi(link = "log", variance = "mu")) if(class(extra) == "numeric"){fit <- glm(Y ~ X-1, family = quasi(link = "log", variance = "mu"), offset = log(extra))} mean.struc <- my.log(beta = coef(fit), X = X, extra = extra) mu <- mean.struc[[1]] n.tau <- length(list.R) MAT <- build.invC(tau.vector = rep(1, n.tau) , power = 1, mu = mu, list.R = list.R, power.fixed = power.fixed) temp <- pearson.function(tau.vector = rep(1, n.tau), power = 1, Y = Y, mu = mu, list.R = list.R, power.fixed = FALSE, strategy = "empirical") } pearson.function2 <- function(tau.vector, power, mu, Y, list.R, power.fixed, strategy){ res <- Y - mu MAT <- build.invC(tau.vector = tau.vector, power = power, mu = mu, list.R = list.R, power.fixed = power.fixed) if(strategy == "inla"){ C <- inla.qinv(MAT$inv.C) escore <- lapply(MAT$W, core.pearson.inla, res = res, C=C) escore <- unlist(lapply(escore, as.numeric)) #WC <- lapply(MAT$W, multiplica, C) #Sensitive <- build.Sensitive(WC) #Variability <- V.cov2(cov.parameter = c(power,tau.vector), mu = mu, MAT = MAT, C=C, Y = Y, power.fixed = FALSE) saida <- list("escore" = escore)} return(saida) } ########################################################################################## ## Pearson residual ###################################################################### ########################################################################################## residuals.cglm <- function(model, X, Y, list.R){ mu <- my.log(beta = model$Regression$Estimates , X = X, extra = NULL) eta <- X%*%model$Regression$Estimates cov.parameter <- model$Covariance$Estimates MAT <- build.invC(tau.vector = cov.parameter[2:length(cov.parameter)], power = cov.parameter[1], mu = mu[[1]], list.R = list.R, power.fixed = FALSE) C <- inla.qinv(MAT$inv.C) ordinario <- Y- mu[[1]] res.pearson <- ordinario/sqrt(diag(C)) saida <- data.frame("Fitted" = mu[[1]], "Residuals" = res.pearson, "Eta" = eta) return(saida) } ########################################################################################## ## Plot cglm ############################################################################# ########################################################################################## plot.cglm <- function(model, X, Y, list.R){ res = residuals.cglm(model = model, X = X, Y = Y, list.R = list.R) par(mfrow = c(2,2)) plot(res$Residuals ~ res$Eta, xlab = "Predict values", ylab = "Residuals values") qqnorm(res$Residuals,asp=1, ylab = "Pearson residuals") qqline(res$Residuals) plot(res$Residuals ~ Y, xlab = "Observed values", ylab = "Residuals values") plot(Y ~ res$Fitted, ylab = "Observed values", xlab = "Fitted values") } ########################################################################################## ## Generic function to fit cGLM ########################################################## ########################################################################################## cglm <- function(formula, data, tau.ini, power.ini, list.R, strategy,max.iter = 100, lambda){ X <- model.matrix(formula, data = data) mod <- model.frame(formula, data = data) Y = model.response(mod) beta.ini <- coef(glm(formula, family = quasi(link = "log", variance = "mu"), data=data)) modelo <- chaser.rc(beta.ini = beta.ini, tau.vector = tau.ini, power = power.ini, Y = Y, X = X, list.R = list.R, extra = NULL, strategy = strategy, max.iter = max.iter, lambda = lambda) mod.summary <- summary.cglm(saida.model = modelo, X = X, Y = Y, list.R = list.R, extra = NULL,strategy = strategy) mu <- my.log(beta = modelo$Regression, X = X, extra = NULL) esc <- pearson.function(tau.vector = modelo$Covariance[2:length(modelo$Covariance)], power = modelo$Covariance[1], mu = mu[[1]], Y = Y, list.R = list.R, power.fixed =FALSE,strategy = strategy) saida <- list("Diagnostic" = esc, "Model" = mod.summary) return(saida) } ########################################################################################## ## RC algorithm ########################################################################## ########################################################################################## chaser.rc <- function(beta.ini, tau.vector, power, Y, X, list.R, tol=0.0001, max.iter = 20, extra, power.fixed = FALSE, strategy = "chol", lambda = 1){ cov.ini <- c(power,tau.vector) solucao.beta <- matrix(NA, max.iter,length(beta.ini)) solucao.cov <- matrix(NA, max.iter, length(cov.ini)) solucao.beta[1,] <- beta.ini solucao.cov[1,] <- cov.ini for(i in 2:max.iter){ mean.struc <- my.log(beta = beta.ini, X = X, extra = extra) MAT = build.invC(power = cov.ini[1], tau.vector = cov.ini[2:length(cov.ini)], mu = mean.struc[[1]], list.R = list.R, power.fixed = power.fixed) beta.temp <- quasi.score(E.Y = mean.struc[[1]], Y = Y, t.D = mean.struc[[2]], inv.C = MAT$inv.C) solucao.beta[i,] <- as.numeric(beta.ini - solve(beta.temp$S.beta, beta.temp$Score)) mean.struc <- my.log(beta = solucao.beta[i,], X = X, extra = extra) res = Y - mean.struc[[1]] saida1 <- pearson.function(tau.vector = cov.ini[2:length(cov.ini)], power = cov.ini[1], mu = mean.struc[[1]], Y = Y, list.R = list.R, power.fixed = power.fixed, strategy = strategy) if(lambda == "empirical"){tunning <- as.numeric(1/((t(saida1$escore)%*%saida1$escore)/length(cov.ini)))} if(class(lambda) != "character"){tunning = lambda} for(j in 1:100){ step <- solve(tunning*saida1$escore%*%t(saida1$escore)%*%solve(saida1$Variability)%*%saida1$Sensitive + saida1$Sensitive)%*%saida1$escore prox <- cov.ini - step temp.num <- try(pearson.function(tau.vector = prox[2:length(prox)], power = prox[1], mu = mean.struc[[1]], Y = Y, list.R = list.R, power.fixed = power.fixed, strategy = strategy)) if(class(temp.num) != "try-error"){ cov.ini <- prox break } #step[which.max(abs(step))] <- 0.5*step[which.max(abs(step))] tunning <- tunning + 0.01 } #print(i) print(saida1$escore) #print(saida1$escore) cov.ini <- as.numeric(prox) beta.ini <- solucao.beta[i,] solucao.cov[i,] <- cov.ini print(round(cov.ini,3)) tolera <- abs(c(solucao.beta[i,],solucao.cov[i,]) - c(solucao.beta[i-1,],solucao.cov[i-1,])) if(all(tolera < tol) == TRUE)break } saida <<- list("IterationRegression" = solucao.beta, "IterarionCovariance" = solucao.cov, "Regression" = beta.ini, "Covariance" = cov.ini) return(saida) } ########################################################################################## ## Variance-covariance matrix covariance parameters ###################################### ########################################################################################## V.cov2 <- function(cov.parameter, mu, MAT, C, Y, power.fixed = FALSE){ k4 <- (Y - mu)^4 - 3*diag(C)^2 n.par <- length(cov.parameter) Var.cov <- matrix(NA, ncol = n.par, nrow = n.par) for(i in 1:n.par){ for(j in 1:n.par){ W1 <- MAT$W[[i]] W2 <- MAT$W[[j]] T1 <- W1%*%C T2 <- W2%*%C Var.cov[i,j] <- 2*sum(T1*T2) + sum(k4*diag(W1)*diag(W2)) } } return(Var.cov) } V.cov3 <- function(cov.parameter, mu, MAT, C, Y, Sen, power.fixed = FALSE){ k4 <- (Y - mu)^4 - 3*diag(C)^2 n.par <- length(cov.parameter) Var.cov <- matrix(NA, ncol = n.par, nrow = n.par) for(i in 1:n.par){ for(j in 1:n.par){ W1 <- MAT$W[[i]] W2 <- MAT$W[[j]] Var.cov[i,j] <- -2*Sen[i,j] + sum(k4*diag(W1)*diag(W2)) } } return(Var.cov) }