Libraries

Set random seed for reproducibility

set.seed(1234)
library(tidyverse)  # data manipulation and plotting
library(lubridate)  # date parsing
library(ggpubr)     # publication-ready ggplot helpers
library(lime)       # ML local interpretation
library(caret)      # ML model building
library(ranger)     # fast random forest backend
library(vip)        # variable importance plots
library(pdp)        # partial dependence plots

Data

Read in data

all.df <- read.csv("./data/aml.all.df.csv")

Convert dates

all.df$dot <- ymd(all.df$dot)
all.df$dor <- ymd(all.df$dor)
all.df$bdate <- ymd(all.df$bdate)
all.df$pdate <- ymd(all.df$pdate)

Convert all character strings to factors

all.df <- all.df %>% mutate_if(is.character,as.factor)
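
mutate_if() is superseded in current dplyr; the same conversion can be written with across() (a sketch, equivalent to the line above):

all.df <- all.df %>% mutate(across(where(is.character), as.factor))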

Make the outcome a two-level factor (relapse yes/no), with "yes" as the first level

all.df$rbin <- factor(all.df$rbin, levels = c("yes", "no"))

Filter out any tests that are post-relapse

all.df <- all.df[which(all.df$bdate < all.df$dor | is.na(all.df$dor)), ]

Filter out relapse >720 days

all.df <- all.df[which(all.df$rbin == "no" | all.df$rtime < 720),]

Filter out any missing tests

all.df <- all.df[!is.na(all.df$bmc_cdw) & !is.na(all.df$bmc_cd3) & 
                   !is.na(all.df$bmc_cd15) & !is.na(all.df$bmc_cd34) &
                   !is.na(all.df$pbc_cdw) & !is.na(all.df$pbc_cd3) & 
                   !is.na(all.df$pbc_cd15) & !is.na(all.df$pbc_cd34),]
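
Equivalently, assuming a recent dplyr, if_all() can express the same completeness requirement in a single filter step:

all.df <- all.df %>%
  filter(if_all(c(bmc_cdw, bmc_cd3, bmc_cd15, bmc_cd34,
                  pbc_cdw, pbc_cd3, pbc_cd15, pbc_cd34), ~ !is.na(.x)))
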
all.df <- all.df %>%
  select(rbin, sex, txage, 
         rstatprtx, ghgp, tbi, 
         bmc_cdw, bmc_cd3, bmc_cd15, bmc_cd34, 
         pbc_cdw, pbc_cd3, pbc_cd15, pbc_cd34, ID)

all.df <- all.df %>% 
  mutate_if(is.character, as.factor)  %>% 
  mutate_if(is.integer, as.numeric) %>%
  # mutate(abd = tolower(abd)) %>%
  drop_na() %>%
  droplevels()

Set up for LIME plots

Set up random forest through caret

all.df2 <- all.df %>%
  select(-ID)

fit.caret <- train(
  rbin ~ ., 
  data = all.df2, 
  method = 'rf'
)
fit.caret
## Random Forest 
## 
## 102 samples
##  13 predictor
##   2 classes: 'yes', 'no' 
## 
## No pre-processing
## Resampling: Bootstrapped (25 reps) 
## Summary of sample sizes: 102, 102, 102, 102, 102, 102, ... 
## Resampling results across tuning parameters:
## 
##   mtry  Accuracy   Kappa    
##    2    0.8059948  0.2008426
##   10    0.8158994  0.3713088
##   19    0.8147371  0.3971010
## 
## Accuracy was used to select the optimal model using the largest value.
## The final value used for the model was mtry = 10.
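
The fit above uses caret's default bootstrap resampling with accuracy as the selection metric. A minimal sketch of an alternative setup with explicit repeated cross-validation and ROC as the metric (ctrl and fit.caret.cv are illustrative names, not part of the original analysis):

ctrl <- trainControl(method = "repeatedcv", number = 5, repeats = 5,
                     classProbs = TRUE, summaryFunction = twoClassSummary)
fit.caret.cv <- train(rbin ~ ., data = all.df2, method = "rf",
                      metric = "ROC", trControl = ctrl)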

VIP

Optional standalone randomForest model, used below for the permutation importance (the caret fit could probably be used instead)

fit.rf <- randomForest::randomForest(
  rbin ~ ., 
  data = all.df2)
pfun <- function(object, newdata) {
  # Need a vector of predicted class probabilities for probability-based
  # metrics such as ROC AUC
  predict(object, newdata = newdata, type = "prob")[, "yes"]
}
vis <- vi(fit.rf, method = "permute", train = all.df2, target = "rbin", 
          metric = "roc_auc", pred_wrapper = pfun, 
          reference_class = "no", nsim = 100) 
## Warning: Consider setting the `event_level` argument when using "roc_auc" as
## the metric; see `?vip::vi_permute` for details. Defaulting to `event_level =
## "first"`.
vip(vis, geom = "boxplot") # Figure 12

p <- ggplot(vis, aes(reorder(Variable, Importance), Importance)) + 
  geom_bar(stat="identity", color="black", 
           position=position_dodge()) + 
  geom_errorbar(aes(ymin = Importance-StDev, 
                    ymax = Importance+StDev), width = 0.2) +
  coord_flip() + theme_bw() + scale_x_discrete(name = "Variable")
print(p)
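
The pdp package is loaded above but not otherwise used. As a minimal sketch (the choice of bmc_cd34 is illustrative only), a partial dependence plot on the predicted relapse-probability scale could be added here:

pd <- pdp::partial(fit.rf, pred.var = "bmc_cd34", prob = TRUE,
                   which.class = 1L,  # class 1 = "yes" (relapse)
                   train = all.df2)
autoplot(pd) + theme_bw()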

Explainers

explainer_caret <- lime(all.df2, fit.caret, n_bins = 5)
summary(explainer_caret)
##                      Length Class  Mode     
## model                25     train  list     
## preprocess            1     -none- function 
## bin_continuous        1     -none- logical  
## n_bins                1     -none- numeric  
## quantile_bins         1     -none- logical  
## use_density           1     -none- logical  
## feature_type         14     -none- character
## bin_cuts             14     -none- list     
## feature_distribution 14     -none- list

All patients

# Generate and plot a LIME explanation for each patient
all_patients <- unique(all.df$ID)
for (i in seq_along(all_patients)) {
  patientID <- which(all.df$ID == all_patients[i])
  
  explanation_caret <- explain(
    x = all.df2[patientID,], 
    explainer = explainer_caret, 
    n_permutations = 5000,
    dist_fun = "gower",
    kernel_width = .75,
    n_features = 10, 
    feature_select = "highest_weights",
    labels = "yes"
  )
  
  p1 <- plot_features(explanation_caret) + 
    ggtitle(paste("Patient", all_patients[i])) + 
    scale_fill_manual(values = c('firebrick', 'steelblue'), drop = FALSE)
  print(p1)
  #ggsave(paste0("./lime_plots/patient_",i,".pdf"), plot = p1)
}
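
Beyond the per-patient plots, lime's plot_explanations() can summarise every case in a single heatmap. A minimal sketch (explanation_all is an illustrative name, not part of the original script):

explanation_all <- explain(
  x = all.df2,
  explainer = explainer_caret,
  n_permutations = 5000,
  dist_fun = "gower",
  kernel_width = .75,
  n_features = 10,
  feature_select = "highest_weights",
  labels = "yes"
)
plot_explanations(explanation_all)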

