How do I replace the bootstrap step in the R package randomForest?


To directly alter the sampling of randomForest (type="regression"): learn basic C programming, download the source package randomForest_4.6-10.tar.gz from CRAN (on Windows install Rtools, on OSX install Xcode), install and open RStudio, start a new project of type "package", unpack the tar.gz into the project folder, look into the src folder, open regrf.c, and check out lines 151 and 163. Write your new sampling strategy, press Ctrl+Shift+B occasionally to rebuild/compile and overwrite the installed randomForest library, correct the reported compile errors, test now and then whether the package still works, and spend some hours figuring out the old uninformative code. You may also want to change the DESCRIPTION file, the NAMESPACE file and a few other references so the package is renamed to randomForestMod. Rebuild, voilà.
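If you prefer the console over RStudio's build button, the rebuild/install cycle can also be run from R. A minimal sketch, assuming the renamed source folder randomForestMod sits in the working directory and keeps the 4.6-10 version number:

#rebuild and install the modified package from source
#(folder name and tarball version are assumptions, adjust to your setup)
system("R CMD build randomForestMod")  #produces randomForestMod_4.6-10.tar.gz
install.packages("randomForestMod_4.6-10.tar.gz", repos=NULL, type="source")
library(randomForestMod)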

An easier way, which leaves randomForest unchanged, is described below. Any trees with the same feature inputs can be patched together with the function randomForest::combine, so you can design your sampling regime in pure R code. I actually thought it was a bad idea, but for this very naive simulation it works with similar or slightly better performance! Remember not to predict the absolute target value, but a stationary derivative instead, such as the relative or absolute change. If you predict the absolute value, RF will fall back to predicting that tomorrow will be something pretty close to today, which is trivial, useless information.
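To see what randomForest::combine does in isolation, here is a minimal sketch on toy data (the names xx/yy are just for illustration): two forests grown on the same feature space, each from its own slice of the data, are pooled into a single forest.

library(randomForest)
xx = matrix(rnorm(200),100,2); yy = rnorm(100)
rfA = randomForest(xx[ 1:50 ,], yy[ 1:50 ], ntree=50) #forest on first slice
rfB = randomForest(xx[51:100,], yy[51:100], ntree=50) #forest on second slice
rfAB = randomForest::combine(rfA,rfB)
rfAB$ntree #100: the trees are simply pooled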


library(randomForest)
library(doParallel) #on Linux, the parallel package with mclapply is a good alternative

#parallel backend ftw
nCPU = detectCores()
cl = makeCluster(nCPU)
registerDoParallel(cl)

#simulated time series(y) with time roll and lag=1
timepoints=1000;var=6;noise.factor=.2

#past to present orientation    
y = sin((1:timepoints)*pi/30) * 1000 +
    sin((1:timepoints)*pi/40) * 1000 + 1:timepoints
y = y+rnorm(timepoints,sd=sd(y))*noise.factor
plot(y,type="l")

#convert to absolute change, with lag=1
dy = c(0,y[-1]-y[-length(y)]) # c(0,t2-t1,t3-t2,...)
dy = dy + rnorm(timepoints)*sd(dy)*noise.factor #add noise to the changes

#compute the lag matrix: lags 1 to 40 of dy
dX = sapply(1:40,function(i){
  getTheseLags = (1:timepoints) - i
  getTheseLags[getTheseLags<1] = NA #remove before start timePoints
  dx.lag.i = dy[getTheseLags]
})
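#dX[t,i] equals dy[t-i]: column i is the change series lagged i steps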
dX[is.na(dX)]=-100 #quick fix for lags that reach before the start of the series
pairs(data.frame(dy,dX[,1:5]),cex=.2)#data structure

#make train- and test-set
train=1:600
dy.train = dy[ train]
dy.test  = dy[-train]
dX.train  = dX[ train,]
dX.test   = dX[-train,]

#classic rf
rf = randomForest(dX.train,dy.train,ntree=500)
print(rf)

#like base function split, but for a vector and without mixing the order
split2 = function(aVector,splits=31) {
  lVector = length(aVector)
  mod = lVector %% splits
  lBlocks = rep(floor(lVector/splits),splits)
  if(mod!=0) lBlocks[1:mod] = lBlocks[1:mod] + 1
  lapply(1:splits,function(i) {
    Stop  = sum(lBlocks[1:i])
    Start = Stop - lBlocks[i] + 1
    aVector[Start:Stop]
  })
}  
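#quick illustration: split2 preserves order and balances block sizes,
#e.g. split2(1:10,splits=3) yields the blocks 1:4, 5:7 and 8:10
#str(split2(1:10,splits=3))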

nBlocks=10 #combine does not support blocks of unequal size
rfBlocks = foreach(aBlock = split2(train,splits=nBlocks),
                   .combine=randomForest::combine,
                   .packages=("randomForest")) %dopar% {
                     dXblock = dX.train[aBlock,] ; dyblock = dy.train[aBlock]
                     rf = randomForest(x=dXblock,y=dyblock,sampsize=length(dyblock),
                                       replace=T,ntree=50)
                   }
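#rfBlocks is now a single randomForest object pooling all 10*50 = 500 trees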
print(rfBlocks)

#predict test, make results table
results = data.frame(predBlock   = predict(rfBlocks,newdata=dX.test),
                     true=dy.test,
                     predBootstrap = predict(rf,newdata=dX.test))
plot(results[,1:2],xlab="predicted change (test set)",
     ylab="true change",
     main="black: block train, blue: bootstrap")
points(results[,3:2],col="blue") #bootstrap predictions in blue

#prediction results
print(cor(results)^2)


stopCluster(cl)#close cluster

So for me the solution wasn't editing the existing randomForest function. Instead I coded the block-wise bootstrap myself, using the split2 function given by Soren H. Welling to create the blocks. Once I had my data bootstrapped block-wise, I looked for a package (rpart) that fits just a single regression tree, and aggregated the trees' predictions myself (taking the means).

On my actual data the result is a slight but consistent improvement over normal random forest performance, in terms of RMSPE.

For the code below, the performance seems to be a coin toss.

Taking Soren's code as an example, it looks a bit like this:

library(randomForest)
library(doParallel) #on Linux, the parallel package with mclapply is a good alternative
library(rpart)

#parallel backend ftw
nCPU = detectCores()
cl = makeCluster(nCPU)
registerDoParallel(cl)

#simulated time series(y) with time roll and lag=1
timepoints=1000;var=6;noise.factor=.2

#past to present orientation    
y = sin((1:timepoints)*pi/30) * 1000 +
  sin((1:timepoints)*pi/40) * 1000 + 1:timepoints
y = y+rnorm(timepoints,sd=sd(y))*noise.factor
plot(y,type="l")

#convert to absolute change, with lag=1
dy = c(0,y[-1]-y[-length(y)]) # c(0,t2-t1,t3-t2,...)
dy = dy + rnorm(timepoints)*sd(dy)*noise.factor #add noise to the changes

#compute the lag matrix: lags 1 to 40 of dy
dX = sapply(1:40,function(i){
  getTheseLags = (1:timepoints) - i
  getTheseLags[getTheseLags<1] = NA #remove before start timePoints
  dx.lag.i = dy[getTheseLags]
})
dX[is.na(dX)]=-100 #quick fix for lags that reach before the start of the series
pairs(data.frame(dy,dX[,1:5]),cex=.2)#data structure

#make train- and test-set
train=1:600
dy.train = dy[ train]
dy.test  = dy[-train]
dX.train  = dX[ train,]
dX.test   = dX[-train,]

#classic rf
rf = randomForest(dX.train,dy.train,ntree=500)
print(rf)

#like base function split, but for a vector and without mixing the order
split2 = function(aVector,splits=31) {
  lVector = length(aVector)
  mod = lVector %% splits
  lBlocks = rep(floor(lVector/splits),splits)
  if(mod!=0) lBlocks[1:mod] = lBlocks[1:mod] + 1
  lapply(1:splits,function(i) {
    Stop  = sum(lBlocks[1:i])
    Start = Stop - lBlocks[i] + 1
    aVector[Start:Stop]
  })
}  


#create a list of block-wise bootstrapped samples
aBlock <- list()
numTrees <- 500
splits <- 40
for (ttt in 1:numTrees){

  aBlock[[ttt]] <- unlist(
    sample(
      split2(1:nrow(dX.train),splits=splits),
      splits,
      replace=T
    )
  )
}
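#each aBlock[[ttt]] is a vector of 600 row indices: 40 of the 15-row
#blocks drawn with replacement, i.e. one block-wise bootstrap sample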

#put data into a dataframe so rpart understands it
df1 <- data.frame(dy.train, dX.train)
#fit one regression tree per block-bootstrap sample and predict the test set;
#rfBlocks becomes a list of 500 prediction vectors, not a forest
rfBlocks = foreach(aBlock = aBlock,
                   .packages=("rpart")) %dopar% {
                     dBlock = df1[aBlock,]
                     fit = rpart(dy.train ~ ., data=dBlock, method="anova")
                     predict(fit, newdata=data.frame(dX.test)) #returned per block
                   }

#predict test, make results table
#use rowMeans to aggregate the block-wise predictions
results = data.frame(predBlock   = rowMeans(do.call(cbind.data.frame, rfBlocks)),
                     true=dy.test,
                     predBootstrap = predict(rf,newdata=dX.test)
                     )
plot(results[,1:2],xlab="predicted change (test set)",
     ylab="true change",
     main="black: block train, blue: bootstrap")
points(results[,3:2],col="blue") #bootstrap predictions in blue

#prediction results
print(cor(results)^2)
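#RMSPE, the metric discussed above; a small added check comparing both
#models on the test-set predictions in the results table
rmspe = function(pred,true) sqrt(mean((pred-true)^2))
print(rmspe(results$predBlock,    results$true))
print(rmspe(results$predBootstrap,results$true))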


stopCluster(cl)#close cluster