source("gensimu.r") source("suppIT.r") source("suppITP1new.r") library(penalizedSVM) library(svmpath) library(lars) library(glmnet) library(gcdnet) ##example exnum=9 s=7 run = 50 #number of replicates n = 400 #training size d = 20000 d0 = 12 set.seed(2010) impcoef = runif(d0,0,0.5) #notation impid = 1:d0 ntune = n #tuning size ntest = 20*n ntot = n+ntune+ntest #M = 50 # number of iterations #Make M = n/epsilon + 1 M = (1000*n)+1 ind = 1 # 1 is small KK, 2 is large KK #tuning parameter range lam1 = 5^seq(-5,5,0.2) lam2 = 0 ## solution ## sum1: (beta, beta0) ## sum2: selection frequency ## sum3: (testerr, type I error, type II error, comp time) ## selected index #itLsum1 = matrix(0,run,d+1) #ITL programming (iteratively update beta0 with lasso penalty, lam2=0) #itLsum2 = matrix(0,run,d) #itLsum3 = matrix(0,run,4) # it2Lsum1brad = matrix(0,run,d+1) #IT2L programming (do not iteratively update beta0 with lasso penalty, lam2=0) # it2Lsum2brad = matrix(0,run,d) # it2Lsum3brad = matrix(0,run,4) # it2LMbrad = rep(0,run) it2Lsum1bradgreed = matrix(0,run,d+1) #IT2L programming (do not iteratively update beta0 with lasso penalty, lam2=0) it2Lsum2bradgreed = matrix(0,run,d) it2Lsum3bradgreed = matrix(0,run,4) it2LMbradgreed = rep(0,run) #it2Lsum1 = matrix(0,run,d+1) #IT2L programming (do not iteratively update beta0 with lasso penalty, lam2=0) #it2Lsum2 = matrix(0,run,d) #it2Lsum3 = matrix(0,run,4) #itp1newsum1 = matrix(0,run,d+1) #ITnew+power 1 #itp1newsum2 = matrix(0,run,d) #itp1newsum3 = matrix(0,run,4) # itp1new2sum1brad = matrix(0,run,d+1) #ITnew+power 1 # itp1new2sum2brad = matrix(0,run,d) # itp1new2sum3brad = matrix(0,run,4) # itp1new2Mbrad = rep(0, run) itp1new2sum1bradgreed = matrix(0,run,d+1) #ITnew+power 1 itp1new2sum2bradgreed = matrix(0,run,d) itp1new2sum3bradgreed = matrix(0,run,4) itp1new2Mbradgreed = rep(0, run) #itp1new2sum1 = matrix(0,run,d+1) #ITnew+power 1 #itp1new2sum2 = matrix(0,run,d) #itp1new2sum3 = matrix(0,run,4) #lssum1 = matrix(0,run,d+1) #least 
square + LASSO #lssum2 = matrix(0,run,d) #lssum3 = matrix(0,run,4) logitsum1 = matrix(0,run,d+1) #logistic regression + LASSO logitsum2 = matrix(0,run,d) #GLM NET logitsum3 = matrix(0,run,4) # logitEnetsum1 = matrix(0,run,d+1) #logistic regression + Enet # logitEnetsum2 = matrix(0,run,d) #GLM NET # logitEnetsum3 = matrix(0,run,4) # hhsvmsum1.lam.5 = matrix(0,run,d+1) #HHSVMnet # hhsvmsum2.lam.5 = matrix(0,run,d) #GCD NET # hhsvmsum3.lam.5 = matrix(0,run,4) #lambda2 = 0.5 # hhsvmsum1.lam1 = matrix(0,run,d+1) #HHSVMnet # hhsvmsum2.lam1 = matrix(0,run,d) #GCD NET # hhsvmsum3.lam1 = matrix(0,run,4) #lambda2 = 1 # hhsvmsum1.lam2 = matrix(0,run,d+1) #HHSVMnet # hhsvmsum2.lam2 = matrix(0,run,d) #GCD NET # hhsvmsum3.lam2 = matrix(0,run,4) #lambda2 = 2 # logitGCDsum1.lam.5 = matrix(0,run,d+1) #logitGCD # logitGCDsum2.lam.5 = matrix(0,run,d) #GCD NET # logitGCDsum3.lam.5 = matrix(0,run,4) #lambda2 = 0.5 # logitGCDsum1.lam1 = matrix(0,run,d+1) #logitGCD # logitGCDsum2.lam1 = matrix(0,run,d) #GCD NET # logitGCDsum3.lam1 = matrix(0,run,4) #lambda2 = 1 # logitGCDsum1.lam2 = matrix(0,run,d+1) #logitGCD # logitGCDsum2.lam2 = matrix(0,run,d) #GCD NET # logitGCDsum3.lam2 = matrix(0,run,4) #lambda2 = 2 # sqsvmsum1.lam.5 = matrix(0,run,d+1) #sqsvm # sqsvmsum2.lam.5 = matrix(0,run,d) #GCD NET # sqsvmsum3.lam.5 = matrix(0,run,4) #lambda2 = 0.5 # sqsvmsum1.lam1 = matrix(0,run,d+1) #sqsvm # sqsvmsum2.lam1 = matrix(0,run,d) #GCD NET # sqsvmsum3.lam1 = matrix(0,run,4) #lambda2 = 1 # sqsvmsum1.lam2 = matrix(0,run,d+1) #sqsvm # sqsvmsum2.lam2 = matrix(0,run,d) #GCD NET # sqsvmsum3.lam2 = matrix(0,run,4) #lambda2 = 2 # scadsum1 = matrix(0,run,d+1) #SCAD +SVM # scadsum2 = matrix(0,run,d) # scadsum3 = matrix(0,run,4) # lpsvmsum1 = matrix(0,run,d+1) #L1 SVM # lpsvmsum2 = matrix(0,run,d) # lpsvmsum3 = matrix(0,run,4) svmsum1 = matrix(0,run,d+1) #standard SVM svmsum2 = matrix(1,run,d) svmsum3 = matrix(0,run,4) #itLRsum1 = matrix(0,run,d+1) #ITL+Rfit programming (iteratively update beta0, lam2=0) 
## (Commented-out containers for the rank-based Rfit variants — itLR*,
## it2LR*, itp1newR*, itp1new2R* — and baysum3 (logistic-link Bayes rule)
## are omitted here.)
Tbaysum3 <- matrix(0, run, 4)  # Bayes rule under the probit link

print("Making Cov Matrix...")
S <- genS(p = d, q = d0, rho = .5, ctype = 3)

## Main simulation loop: one iteration per replicate.
## NOTE(review): loop variable renamed from `rep` to `irep` — the original
## shadowed base::rep() (it still worked only because R skips non-function
## bindings when resolving a call).
for (irep in 1:run) {
  myseed <- 2000 + irep
  print(irep)

  print("Generating Data...")
  yxtrain <- gendata(exnum, myseed, n, d, impcoef, S)
  yxtune <- gendata(exnum, myseed + 1, ntune, d, impcoef, S)
  yxtest <- gendata(exnum, myseed + 2, ntest, d, impcoef, S)
  yxtot <- rbind(yxtrain, yxtune, yxtest)
  xxtot <- yxtot[, -1]  # column 1 is the response

  ## Center and scale the pooled design matrix, then round for stability
  xxtot <- scale(xxtot)
  xxtot <- round(xxtot, 6)

  ## Responses coded as +/-1 by gendata()
  y <- yxtrain[, 1]
  ytune <- yxtune[, 1]
  ytest <- yxtest[, 1]

  ## Train / tune / test splits of the standardized design
  x <- xxtot[1:n, ]
  xtune <- xxtot[(n + 1):(n + ntune), ]
  xtest <- xxtot[(n + ntune + 1):ntot, ]

  ## 0/1-coded responses for the logistic-regression fits.
  ## FIX(review): `yy` and `yytest` were used below but never defined in the
  ## file as received (their definitions were apparently lost); restored here
  ## consistently with the surviving `yytune = (ytune+1)/2` line.
  yy <- (y + 1) / 2
  yytune <- (ytune + 1) / 2
  yytest <- (ytest + 1) / 2

  ## NOTE(review): the calls that fill it2L*bradgreed / itp1new2*bradgreed
  ## (the IT solvers from suppIT.r / suppITP1new.r) appear to be missing from
  ## this copy of the file — restore them from the original source before
  ## running, otherwise those summary rows remain all zero.

  ## L1 logistic regression (lasso) via glmnet --------------------------------
  print("L1 logistic regression")
  time1 <- proc.time()
  ss <- seq(0.01, 0.2, by = 0.01)  # candidate lambda values
  nrep <- length(ss)
  logfit <- glmnet(x, yy, family = "binomial", nlambda = nrep)
  logcoef <- coef(logfit, ss)      # (d+1) x nrep; intercept in row 1
  tunelogist <- rep(0, nrep)
  for (j in 1:nrep) {
    ## Tuning error: misclassification rate on the tuning set at lambda ss[j]
    tunelogist[j] <- mean(yytune != (cbind(1, xtune) %*% logcoef[, j] > 0))
  }
  bestss <- ss[which.min(tunelogist)]
  bestlogit <- coef(logfit, bestss)
  logitcoef <- bestlogit[-1]
  logitint <- bestlogit[1]
  logitsum3[irep, 4] <- (proc.time() - time1)[[1]]   # user CPU time
  logitsum1[irep, ] <- c(logitcoef, logitint)        # beta first, beta0 last
  logitsum2[irep, ] <- 1 * (logitcoef != 0)
  logitsum3[irep, 1] <- mean(yytest != (cbind(1, xtest) %*% bestlogit > 0))
  logitsum3[irep, 2:3] <- varsum(logitcoef, impid)   # type I / type II counts

  ## Standard (non-sparse) SVM via svmpath ------------------------------------
  print("SVM")
  lambda1 <- 10^seq(-4, 4, by = 1)
  tunesvm <- rep(0, length(lambda1))
  time1 <- proc.time()
  svmfit <- svmpath(x, y)
  svmpred <- predict(svmfit, xtune, lambda = lambda1, type = "class")
  for (nj in 1:length(lambda1)) {
    tunesvm[nj] <- mean(ytune != svmpred[, nj])
  }
  bestlam <- lambda1[which.min(tunesvm)]
  svmsum3[irep, 4] <- (proc.time() - time1)[[1]]
  svmsum3[irep, 1] <- mean(ytest != predict(svmfit, xtest, lambda = bestlam,
                                            type = "class"))
  ## SVM performs no selection: 0 informative missed counts as incorrect
  ## zeros; all d - d0 noise variables are "selected".
  svmsum3[irep, 2:3] <- c(0, d - d0)

  ## (Large commented-out sections retained in the original — least squares +
  ## lasso, Bayes rule, elastic-net logistic, SCAD-SVM, L1-SVM, and the
  ## gcdnet methods hhsvm / logitGCD / sqsvm each with lambda2 in
  ## {0.5, 1, 2} — are omitted here. Dead locals `logerr` and the unused
  ## `slogcoef` column normalization were also removed.)
}  # End simulation loop
## Aggregate results across replicates ---------------------------------------

## Mean estimated coefficients (beta1..betad, beta0) per method
result1brad <- rbind(
  apply(it2Lsum1bradgreed, 2, mean),
  apply(itp1new2sum1bradgreed, 2, mean),
  apply(svmsum1, 2, mean),
  apply(logitsum1, 2, mean)
)

## Selection counts: number of replicates selecting each variable.
## FIX(review): the original used `apply(logitsum2, 2, mean)` while every
## other row used `sum`, mixing frequencies and counts in one table; made
## consistent with `sum`.
result2brad <- rbind(
  apply(it2Lsum2bradgreed, 2, sum),
  apply(itp1new2sum2bradgreed, 2, sum),
  apply(svmsum2, 2, sum),
  apply(logitsum2, 2, sum)
)

## Mean (and standard-error) test error, type I/II errors, and timing
result3brad <- rbind(
  apply(it2Lsum3bradgreed, 2, mean),
  apply(itp1new2sum3bradgreed, 2, mean),
  apply(svmsum3, 2, mean),
  apply(logitsum3, 2, mean),
  apply(Tbaysum3, 2, mean)
)
result4brad <- rbind(
  sqrt(apply(it2Lsum3bradgreed, 2, var)),
  sqrt(apply(itp1new2sum3bradgreed, 2, var)),
  sqrt(apply(svmsum3, 2, var)),
  sqrt(apply(logitsum3, 2, var)),
  sqrt(apply(Tbaysum3, 2, var))
) / sqrt(run)

## Mean / standard error of the iteration counts for the IT solvers
result5brad <- rbind(mean(it2LMbradgreed), mean(itp1new2Mbradgreed))
result6brad <- rbind(sd(it2LMbradgreed), sd(itp1new2Mbradgreed)) / sqrt(run)

## Dimension names for the report tables
rname <- c("gClassic2", "gClassic1", "SVM", "GLMNET-Lasso")
rname3 <- c("gClassic2", "gClassic1", "SVM", "GLMNET-Lasso", "Bayes")
rname4 <- c("gClassic2", "gClassic1")
cname1 <- paste("X", c(1:d, 0), sep = "")  # X1..Xd then X0 (intercept last)
cname2 <- paste("X", 1:d, sep = "")
cname3 <- c("Test Error", "Incor-0", "Incor-non0", "Time")
cname4 <- c("M")

dimnames(result1brad) <- list(rname, cname1)
dimnames(result2brad) <- list(rname, cname2)
dimnames(result3brad) <- list(rname3, cname3)
dimnames(result4brad) <- list(rname3, cname3)
dimnames(result5brad) <- list(rname4, cname4)
dimnames(result6brad) <- list(rname4, cname4)

## Write the summary report to a file named ex<exnum>s<s>n<n>d<d>
filename <- paste("ex", exnum, "s", s, "n", n, "d", d, sep = "")
sink(filename)
cat(paste("Average Performance: n=", n), paste("d=", d, sep = ""),
    paste("d0=", d0, sep = ""), paste("ex", exnum, sep = ""),
    paste("KK", ind, sep = ""), "\n")
print("Estimated Coefficients")
print(result1brad)
print("Variable Selection")
print(result2brad)
print("Mean Errors and Computation Time")
print(result3brad)
print("Sd Errors and Computation Time")
print(result4brad)
print("Number of Iterations M, mean")
print(result5brad)
print("Number of Iterations M, sd errors")
print(result6brad)
sink()