Question
I am trying to optimize my code to run GLMs multiple times, and I would like to leverage parallelization, either with foreach
or some other more efficient approach.
As you can see, the for
loop takes about 800 seconds to run 270,000 GLMs, while foreach
with %dopar%
counterintuitively takes forever (it either crashes or I force it to stop after a couple of hours).
Thanks for your help.
Jinesh
library(data.table)
library(parallel)
library(doParallel)
library(foreach)
scen_bin <- expand.grid(n = c(10, 20, 30), rate1 = c(0.1, 0.2, 0.3),
                        rate2 = c(0.5, 0.6, 0.9))
rep <- 10000
scen_sims <- rbindlist(replicate(rep, scen_bin, simplify = FALSE),
                       idcol = TRUE)
## placeholder list column to hold the fitted models
scen_sims[, glm := list(c(1L, 2L))]
for (i in 1:270000) {
    set(scen_sims, i, "glm",
        list(glm(formula = c(rbinom(scen_sims$n[i], 1L, scen_sims$rate1[i]),
                             rbinom(scen_sims$n[i], 1L, scen_sims$rate2[i])) ~
                     factor(c(rep("Trt", scen_sims$n[i]), rep("Cont", scen_sims$n[i]))),
                 family = "binomial")))
}
## a parallel backend must be registered, or %dopar% falls back to sequential execution
cl <- makeCluster(detectCores() - 1L)
registerDoParallel(cl)

split_scen_sims <- split(scen_sims, seq(1, 270000, length.out = 1000))
jh <- foreach(x = 1:1000, .packages = c("data.table")) %dopar% {
    jh <- split_scen_sims[[x]]
    for (i in seq_len(nrow(jh))) {  # each chunk holds 270 rows, not 270000
        set(jh, i, "glm",
            list(glm(formula = c(rbinom(jh$n[i], 1L, jh$rate1[i]),
                                 rbinom(jh$n[i], 1L, jh$rate2[i])) ~
                         factor(c(rep("Trt", jh$n[i]), rep("Cont", jh$n[i]))),
                     family = "binomial")))
    }
    return(jh)
}
stopCluster(cl)
Answer 1:
The first thing to note is that using the extract function $
within a loop makes this perform poorly. It would be better to 1) make a function and then 2) use a regular data.table
call.
fx_make_glm = function(drug, treat_rate, control, Comparator_Rates) {
    glm(formula = c(rbinom(drug, 1L, treat_rate),
                    rbinom(control, 1L, Comparator_Rates)) ~
            factor(c(rep("Trt", drug), rep("Cont", control))),
        family = "binomial")
}
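A quick single-call check, using the values from the first row of scen_bin (n = 10, rate1 = 0.1, rate2 = 0.5):

## fits one logistic regression: 10 treated draws at rate 0.1 vs 10 control draws at rate 0.5
fx_make_glm(10, 0.1, 10, 0.5)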
This greatly simplifies the rest. I will use Map,
which loops through each element of the variables of interest (n is passed twice because the simulated design uses the same sample size for both arms):
scen_sims[, glm := list(Map(fx_make_glm, n, rate1, n, rate2))]
Unfortunately, that still didn't provide as much of a performance gain as hoped :(
Unit: seconds
     expr  min   lq mean median   uq  max neval
  OP_loop 3.01 3.21 3.21   3.22 3.26 3.36     5
 map_call 2.64 2.89 2.90   2.92 2.96 3.08     5
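The benchmarking call itself is not shown in the answer; a minimal sketch of how the comparison could be reproduced with microbenchmark (the wrapper below is an assumption, timing both approaches on the 50-rep data) is:

library(microbenchmark)

## assumed benchmark wrapper; times = 5L matches neval in the output above
microbenchmark(
    OP_loop = for (i in seq_len(nrow(scen_sims))) {
        set(scen_sims, i, "glm",
            list(fx_make_glm(scen_sims$n[i], scen_sims$rate1[i],
                             scen_sims$n[i], scen_sims$rate2[i])))
    },
    map_call = scen_sims[, glm := list(Map(fx_make_glm, n, rate1, n, rate2))],
    times = 5L
)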
My parallel package of choice is future.apply
- just put future_
in front of your *apply
series and you have parallel evaluation:
library(future.apply)
plan(multiprocess)
system.time({
    scen_sims[, glm := list(future_Map(fx_make_glm, n, rate1, n, rate2))]
})
   user  system elapsed
   1.22    0.13    3.22
## truncated the microbenchmark call
Unit: seconds
            expr  min   lq mean median   uq  max neval
         OP_loop 2.93 2.98 3.08   3.00 3.18 3.32     5
        map_call 2.65 2.70 2.94   2.89 3.18 3.25     5
 future_map_call 2.84 3.24 3.37   3.43 3.49 3.85     5
I am on Windows with 2 cores / 4 threads, where plan(multiprocess) resolves to background R sessions, so the data-transfer overhead can eat up the gains for many small fits - which matches future_map_call coming out slightly slower above. If I were on Linux, I would try plan(multicore)
to see whether forking processes is more productive.
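A minimal sketch of that forked-worker variant, assuming a Linux/macOS session (the worker count here is an assumption):

library(future.apply)
## multicore forks the current R process; it is unavailable on Windows
plan(multicore, workers = availableCores() - 1)
scen_sims[, glm := list(future_Map(fx_make_glm, n, rate1, n, rate2))]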
Data generation:
library(data.table)
## generate data
scen_bin <- expand.grid(n = c(10, 20, 30), rate1 = c(0.1, 0.2, 0.3),
                        rate2 = c(0.5, 0.6, 0.9))
rep <- 50L
scen_sims <- rbindlist(replicate(rep, scen_bin, simplify = FALSE),
                       idcol = TRUE)
scen_sims[, glm := list(c(1L, 2L))]
Source: https://stackoverflow.com/questions/60498786/process-optimisation-of-code-within-dopar