From f8d228e0acda9d53ab22cd34c13ab4814ed3a534 Mon Sep 17 00:00:00 2001 From: Egill Fridgeirsson Date: Mon, 28 Jun 2021 16:34:39 +0200 Subject: [PATCH 001/140] added sparse matrix conversion using RTorch --- R/sparseRTorch.R | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) create mode 100644 R/sparseRTorch.R diff --git a/R/sparseRTorch.R b/R/sparseRTorch.R new file mode 100644 index 0000000..b03fdef --- /dev/null +++ b/R/sparseRTorch.R @@ -0,0 +1,24 @@ +source('R/Formatting.R') + +toSparseRTorch <- function(plpData, population, map=NULL, temporal=T){ + + newCovariateData <- MapCovariates(plpData$covariateData, + population, + mapping=map) + + indices <- newCovariateData$covariates %>% select(rowId, covariateId, timeId) %>% collect() %>% as.matrix() + values <- newCovariateData$covariates %>% select(covariateValue) %>% collect() %>% as.matrix() + + indicesTensor <- torch::torch_tensor(indices, dtype=torch::torch_long())$t() + valuesTensor <- torch::torch_tensor(values)$squeeze() + + sparseMatrix <- torch::torch_sparse_coo_tensor(indices=indicesTensor, + values=valuesTensor) + results = list( + data=sparseMatrix, + covariateRef=as.data.frame(newCovariateData$covariateRef), + map=as.data.frame(newCovariateData$mapping)) + + return(results) + +} \ No newline at end of file From 05ca2308d6b42ccb052ab631501dc50714e0f714 Mon Sep 17 00:00:00 2001 From: Egill Fridgeirsson Date: Wed, 30 Jun 2021 14:02:35 +0200 Subject: [PATCH 002/140] fixed bugs in pytorch sparse matrix conversion --- R/Formatting.R | 41 ++++++++++++++++++++++++----------------- inst/python/TorchMap.py | 32 +++++++++++++++----------------- 2 files changed, 39 insertions(+), 34 deletions(-) diff --git a/R/Formatting.R b/R/Formatting.R index 600c24b..3ed62a3 100644 --- a/R/Formatting.R +++ b/R/Formatting.R @@ -183,7 +183,7 @@ toSparseM <- function(plpData,population, map=NULL, temporal=F){ } # restricts to pop and saves/creates mapping -MapCovariates <- function(covariateData,population, mapping){ +MapCovariates <- function(covariateData,population, mapping=NULL){ # to remove check notes #covariateId <- oldCovariateId <- newCovariateId <- NULL @@ -196,7 +196,7 @@ MapCovariates <- function(covariateData,population, mapping){ ParallelLogger::logTrace('restricting to population for speed and mapping') if(is.null(mapping)){ mapping <- data.frame(oldCovariateId = as.data.frame(covariateData$covariateRef %>% dplyr::select(.data$covariateId)), - newCovariateId = 1:nrow(covariateData$covariateRef)) + newCovariateId = 1:nrow(as.data.frame(covariateData$covariateRef))) } if(sum(colnames(mapping)%in%c('oldCovariateId','newCovariateId'))!=2){ colnames(mapping) <- c('oldCovariateId','newCovariateId') @@ -236,6 +236,7 @@ MapCovariates <- function(covariateData,population, mapping){ #' @param map A covariate map (telling us the column number for covariates) #' @param temporal Whether to include timeId into tensor #' @param pythonExePath Location of python exe you want to use +#' @param nonTemporalCovs If non-temporal covariates (such as age or sex) should be included in temporal sparse matrix #' @examples #' #TODO #' @@ -249,7 +250,8 @@ MapCovariates <- function(covariateData,population, mapping){ #' } #' #' @export -toSparseTorchPython <- function(plpData,population, map=NULL, temporal=F, pythonExePath=NULL){ +toSparseTorchPython <- function(plpData,population, map=NULL, temporal=F, pythonExePath=NULL, + nonTemporalCovs=F){ map_python_initiate <- map_python <- function(){return(NULL)} @@ -273,7 +275,7 @@ toSparseTorchPython <- 
function(plpData,population, map=NULL, temporal=F, python maxT <- NULL if(temporal){ - maxT <- as.data.frame(newcovariateData$covariates$timeId %>% dplyr::summarise(max = max(.data$id, na.rm=T))) + maxT <- as.data.frame(newcovariateData$covariates %>% dplyr::summarise(max = max(timeId, na.rm = TRUE)))$max ParallelLogger::logDebug(paste0('Max timeId: ', maxT)) } @@ -286,10 +288,7 @@ toSparseTorchPython <- function(plpData,population, map=NULL, temporal=F, python dataEnv <- e # adding to remove <<- #dataPlp <<- map_python_initiate(maxCol = as.integer(maxCol), - dataPlp <- map_python_initiate(maxCol = as.integer(maxCol), - maxRow = as.integer(maxRow), - maxT= as.integer(maxT)) - + dataPlp <- NULL convertData <- function(batch, temporal=T, dataEnv) { if(temporal){ #dataPlp <<- map_python(matrix = dataPlp , @@ -312,8 +311,9 @@ toSparseTorchPython <- function(plpData,population, map=NULL, temporal=F, python if(temporal==T){ # add the age and non-temporal data timeIds <- unique(plpData$timeRef$timeId) + normFactors <- attr(plpData$covariateData, 'metaData')$normFactors for(timeId in timeIds){ - tempData <- addAgeTemp(timeId, newcovariateData) + tempData <- addAgeTemp(timeId, newcovariateData, plpData$timeRef, normFactors) if(!is.null(tempData)){ Andromeda::batchApply(tempData, convertData,temporal =T, batchSize = 100000, dataEnv=dataEnv) } @@ -415,30 +415,37 @@ reformatPerformance <- function(train, test, analysisId){ # helpers for converting temporal PLP data to matrix/tensor -addAgeTemp <- function(timeId, newcovariateData, timeRef){ +addAgeTemp <- function(time, newcovariateData, timeRef, normFactors){ - startDay <- as.data.frame(timeRef[timeRef$timeId==timeId,])$startDay + startDay <- as.data.frame(timeRef[timeRef$timeId==time,])$startDay ageId <- as.data.frame(newcovariateData$mapping %>% dplyr::filter(.data$oldCovariateId == 1002) %>% dplyr::select(.data$newCovariateId))$newCovariateId + if (!is.null(normFactors)) { + normFactorAge <- normFactors %>% filter(covariateId == 1002) %>% pull(maxValue) + } + else{ + normFactorAge <- 1 + } + ageData <- newcovariateData$covariates%>% # changed from plpData$covariateData dplyr::filter(.data$covariateId == ageId) %>% - dplyr::mutate(covariateValueNew = .data$covariateValue*365 + startDay, - timeId = timeId) %>% + dplyr::mutate(covariateValueNew = .data$covariateValue + startDay / (365 * normFactorAge), + timeId = time) %>% dplyr::select(- .data$covariateValue) %>% dplyr::rename(covariateValue = .data$covariateValueNew) %>% dplyr::select(.data$rowId,.data$covariateId,.data$covariateValue, .data$timeId) - if(nrow(ageData)==0){ + if(nrow(as.data.frame(ageData))==0){ return(NULL) } return(ageData) } -addNonAgeTemp <- function(timeId, newcovariateData){ +addNonAgeTemp <- function(time, newcovariateData){ ageId <- as.data.frame(newcovariateData$mapping %>% dplyr::filter(.data$oldCovariateId == 1002) %>% @@ -447,10 +454,10 @@ addNonAgeTemp <- function(timeId, newcovariateData){ otherTempCovs <- newcovariateData$covariates%>% dplyr::filter(is.na(.data$timeId)) %>% dplyr::filter(.data$covariateId != ageId) %>% - dplyr::mutate(timeId = timeId) %>% + dplyr::mutate(timeId = time) %>% dplyr::select(.data$rowId,.data$covariateId,.data$covariateValue,.data$timeId) - if(nrow(otherTempCovs)==0){ + if(nrow(as.data.frame(otherTempCovs))==0){ return(NULL) } return(otherTempCovs) diff --git a/inst/python/TorchMap.py b/inst/python/TorchMap.py index ef8feac..cf6cf0e 100644 --- a/inst/python/TorchMap.py +++ b/inst/python/TorchMap.py @@ -1,19 +1,17 @@ import torch -def 
map_python_initiate(maxCol,maxRow, maxT=None): - if maxT != None: - matrix = torch.sparse.FloatTensor(torch.LongTensor([[0,0],[0,1],[0,0]]), torch.FloatTensor([0.,0.]), torch.Size([maxRow,maxCol,maxT])) - else: - matrix = torch.sparse.FloatTensor(torch.LongTensor([[0,0],[0,1]]), torch.FloatTensor([0.,0.]), torch.Size([maxRow,maxCol])) - return matrix - -def map_python(matrix, datas, maxCol,maxRow, maxT=None): - if maxT != None: - indexes= datas[:,0:3]-1 - matrixt = torch.sparse.FloatTensor(torch.LongTensor(indexes.T), torch.FloatTensor(datas[:,3]), torch.Size([maxRow,maxCol, maxT])) - matrix = matrix.add(matrixt) - else: - indexes= datas[:,0:2]-1 - matrixt = torch.sparse.FloatTensor(torch.LongTensor(indexes.T), torch.FloatTensor(datas[:,2]), torch.Size([maxRow,maxCol])) - matrix = matrix.add(matrixt) - return matrix + +def map_python(datas, maxCol, maxRow, maxT=None, matrix=None): + if maxT is not None: + indexes = datas[:, 0:3] - 1 + matrixt = torch.sparse.FloatTensor(torch.LongTensor(indexes.T), torch.FloatTensor(datas[:, 3]), + torch.Size([maxRow, maxCol, maxT])) + else: + indexes = datas[:, 0:2] - 1 + matrixt = torch.sparse.FloatTensor(torch.LongTensor(indexes.T), torch.FloatTensor(datas[:, 2]), + torch.Size([maxRow, maxCol])) + if matrix is None: + return matrix + else: + matrix = matrix.add(matrixt) + return matrix From b644804fee23a52626f53b64927f0eb15f06c271 Mon Sep 17 00:00:00 2001 From: Egill Fridgeirsson Date: Wed, 30 Jun 2021 15:20:57 +0200 Subject: [PATCH 003/140] add option to not add non-temporal covariates to sparse matrix --- R/Formatting.R | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/R/Formatting.R b/R/Formatting.R index 3ed62a3..ebaa7c1 100644 --- a/R/Formatting.R +++ b/R/Formatting.R @@ -309,6 +309,7 @@ toSparseTorchPython <- function(plpData,population, map=NULL, temporal=F, python } if(temporal==T){ + if (nonTemporalCovs==T) { # add the age and non-temporal data timeIds <- unique(plpData$timeRef$timeId) normFactors <- attr(plpData$covariateData, 'metaData')$normFactors @@ -324,7 +325,7 @@ toSparseTorchPython <- function(plpData,population, map=NULL, temporal=F, python } tempData <- NULL } - + } # add the rest tempData <- newcovariateData$covariates %>% dplyr::filter(.data$timeId!=0) %>% @@ -423,6 +424,7 @@ addAgeTemp <- function(time, newcovariateData, timeRef, normFactors){ dplyr::filter(.data$oldCovariateId == 1002) %>% dplyr::select(.data$newCovariateId))$newCovariateId + #check if age has been normalized if (!is.null(normFactors)) { normFactorAge <- normFactors %>% filter(covariateId == 1002) %>% pull(maxValue) } From 1c4118487117076f1db066bf29c428f7547f2455 Mon Sep 17 00:00:00 2001 From: ted9219 Date: Mon, 12 Jul 2021 10:51:14 +0000 Subject: [PATCH 004/140] write a generic code with torch --- .gitignore | 4 + R/DeepNNTorch.R | 495 ++++++++++++++++++++++++++++++++++++++++++++++++ R/Predict.R | 24 +++ R/SaveLoadPlp.R | 3 + 4 files changed, 526 insertions(+) create mode 100644 .gitignore create mode 100644 R/DeepNNTorch.R diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..5b6a065 --- /dev/null +++ b/.gitignore @@ -0,0 +1,4 @@ +.Rproj.user +.Rhistory +.RData +.Ruserdata diff --git a/R/DeepNNTorch.R b/R/DeepNNTorch.R new file mode 100644 index 0000000..812f3fb --- /dev/null +++ b/R/DeepNNTorch.R @@ -0,0 +1,495 @@ +# @file DeepNNTorch.R +# +# Copyright 2020 Observational Health Data Sciences and Informatics +# +# This file is part of PatientLevelPrediction +# +# Licensed under the Apache License, Version 2.0 (the 
"License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +#' Create setting for DeepNN model using Torch for R +#' +#' @param units The number of units of the deep network - as a list of vectors +#' @param layer_dropout The layer dropout rate (regularisation) +#' @param lr Learning rate +#' @param decay Learning rate decay over each update. +#' @param outcome_weight The weight of the outcome class in the loss function +#' @param batch_size The number of data points to use per training batch +#' @param epochs Number of times to iterate over dataset +#' @param seed Random seed used by deep learning model +#' +#' @examples +#' \dontrun{ +#' model <- setDeepNN() +#' } +#' @export +setDeepNNTorch <- function(units=list(c(128, 64), 128), layer_dropout=c(0.2), + lr =c(1e-4), decay=c(1e-5), outcome_weight = c(1.0), batch_size = c(100), + epochs= c(100), seed=NULL ){ + + # ensure_installed("torch") + + # if(class(indexFolder)!='character') + # stop('IndexFolder must be a character') + # if(length(indexFolder)>1) + # stop('IndexFolder must be one') + # + # if(class(units)!='numeric') + # stop('units must be a numeric value >0 ') + # if(units<1) + # stop('units must be a numeric value >0 ') + # + # #if(length(units)>1) + # # stop('units can only be a single value') + # + # if(class(recurrent_dropout)!='numeric') + # stop('dropout must be a numeric value >=0 and <1') + # if( (recurrent_dropout<0) | (recurrent_dropout>=1)) + # stop('dropout must be a numeric value >=0 and <1') + # if(class(layer_dropout)!='numeric') + # stop('layer_dropout must be a numeric value >=0 and <1') + # if( (layer_dropout<0) | (layer_dropout>=1)) + # stop('layer_dropout must be a numeric value >=0 and <1') + # if(class(lr)!='numeric') + # stop('lr must be a numeric value >0') + # if(lr<=0) + # stop('lr must be a numeric value >0') + # if(class(decay)!='numeric') + # stop('decay must be a numeric value >=0') + # if(decay<=0) + # stop('decay must be a numeric value >=0') + # if(class(outcome_weight)!='numeric') + # stop('outcome_weight must be a numeric value >=0') + # if(outcome_weight<=0) + # stop('outcome_weight must be a numeric value >=0') + # if(class(batch_size)!='numeric') + # stop('batch_size must be an integer') + # if(batch_size%%1!=0) + # stop('batch_size must be an integer') + # if(class(epochs)!='numeric') + # stop('epochs must be an integer') + # if(epochs%%1!=0) + # stop('epochs must be an integer') + # if(!class(seed)%in%c('numeric','NULL')) + # stop('Invalid seed') + #if(class(UsetidyCovariateData)!='logical') + # stop('UsetidyCovariateData must be an TRUE or FALSE') + + param <- expand.grid(units=units, + layer_dropout=layer_dropout, + lr =lr, decay=decay, outcome_weight=outcome_weight,epochs= epochs, + seed=ifelse(is.null(seed),'NULL', seed)) + param$units1=unlist(lapply(param$units, function(x) x[1])) + param$units2=unlist(lapply(param$units, function(x) x[2])) + param$units3=unlist(lapply(param$units, function(x) x[3])) + + result <- list(model='fitDeepNNTorch', param=split(param, + 
1:(length(units)*length(layer_dropout)*length(lr)*length(decay)*length(outcome_weight)*length(epochs)*max(1,length(seed)))), + name='DeepNNTorch' + ) + + class(result) <- 'modelSettings' + return(result) +} + + +fitDeepNNTorch <- function(plpData,population, param, search='grid', quiet=F, + outcomeId, cohortId, ...){ + # check plpData is coo format: + if (!FeatureExtraction::isCovariateData(plpData$covariateData)){ + stop('DeepNNTorch requires correct covariateData') + } + if(!is.null(plpData$timeRef)){ + warning('Data temporal but current deepNNTorch uses non-temporal data...') + # This can be changed after supporting the temporal covariates. + } + + metaData <- attr(population, 'metaData') + if(!is.null(population$indexes)) + population <- population[population$indexes>0,] + attr(population, 'metaData') <- metaData + + start<-Sys.time() + + result<- toSparseM(plpData,population,map=NULL, temporal=F) + data <- result$data + + #one-hot encoding + y <- population$outcomeCount + y[y>0] <- 1 + population$y <- cbind(matrix(y), matrix(abs(y-1))) + + + # do cross validation to find hyperParameter + datas <- list(population=population, plpData=data) + hyperParamSel <- list() + + for(i in 1:length(param)){ + hyperParamSel[[i]] <- do.call(trainDeepNNTorch, c(param[i][[1]],datas,train = TRUE)) + } + + hyperSummary <- cbind(do.call(rbind, lapply(hyperParamSel, function(x) x$hyperSum))) + hyperSummary <- as.data.frame(hyperSummary) + hyperSummary$auc <- unlist(lapply(hyperParamSel, function (x) x$auc)) + hyperParamSel<-unlist(lapply(hyperParamSel, function(x) x$auc)) + + #now train the final model and return coef + bestInd <- which.max(abs(unlist(hyperParamSel)-0.5))[1] + finalModel<-do.call(trainDeepNNTorch, c(param[bestInd][[1]],datas, train=FALSE)) + + covariateRef <- as.data.frame(plpData$covariateData$covariateRef) + incs <- rep(1, nrow(covariateRef)) + covariateRef$included <- incs + covariateRef$covariateValue <- rep(0, nrow(covariateRef)) + + #modelTrained <- file.path(outLoc) + param.best <- param[bestInd][[1]] + + comp <- start-Sys.time() + + # train prediction + prediction <- finalModel$prediction + finalModel$prediction <- NULL + + # return model location + result <- list(model = finalModel$model, + trainCVAuc = -1, # ToDo decide on how to deal with this + hyperParamSearch = hyperSummary, + modelSettings = list(model='fitDeepNN',modelParameters=param.best), + metaData = plpData$metaData, + populationSettings = attr(population, 'metaData'), + outcomeId=outcomeId, + cohortId=cohortId, + varImp = covariateRef, + trainingTime =comp, + covariateMap=result$map, + predictionTrain = prediction + ) + class(result) <- 'plpModel' + attr(result, 'type') <- 'deepNNTorch' + attr(result, 'predictionType') <- 'binary' + + return(result) +} + +trainDeepNNTorch <-function(plpData, population, + units1=128, units2= NA, units3=NA, + layer_dropout=0.2, + lr =1e-4, decay=1e-5, outcome_weight = 1.0, batch_size = 100, + epochs= 100, seed=NULL, train=TRUE){ + + ParallelLogger::logInfo(paste('Training deep neural network using Torch with ',length(unique(population$indexes)),' fold CV')) + if(!is.null(population$indexes) && train==T){ + index_vect <- unique(population$indexes) + perform <- c() + + # create prediction matrix to store all predictions + predictionMat <- population + predictionMat$value <- 0 + attr(predictionMat, "metaData") <- list(predictionType = "binary") + + + for(index in 1:length(index_vect)){ + ParallelLogger::logInfo(paste('Fold ',index, ' -- with ', sum(population$indexes!=index),'train 
rows')) + + if(is.na(units2)){ + net <- torch::nn_module( + "classic_net", + + initialize = function(){ + self$linear1 = torch::nn_linear(ncol(plpData), units1) + self$linear2 = torch::nn_linear(units1, 2) + self$softmax = torch::nn_softmax(2) + }, + + forward = function(x){ + x %>% + self$linear1() %>% + torch::nnf_dropout(p = layer_dropout) %>% + self$linear2() %>% + torch::nnf_dropout(p = layer_dropout) %>% + self$softmax() + } + ) + model <- net() + } else if(is.na(units3)){ + net <- torch::nn_module( + "classic_net", + + initialize = function(){ + self$linear1 = torch::nn_linear(ncol(plpData), units1) + self$linear2 = torch::nn_linear(units1, units2) + self$linear3 = torch::nn_linear(units2, 2) + self$softmax = torch::nn_softmax(2) + }, + + forward = function(x){ + x %>% + self$linear1() %>% + torch::nnf_dropout(p = layer_dropout) %>% + self$linear2() %>% + torch::nnf_dropout(p = layer_dropout) %>% + self$linear3() %>% + self$softmax() + } + ) + model <- net() + } else{ + net <- torch::nn_module( + "classic_net", + + initialize = function(){ + self$linear1 = torch::nn_linear(ncol(plpData), units1) + self$linear2 = torch::nn_linear(units1, units2) + self$linear3 = torch::nn_linear(units2, units3) + self$linear4 = torch::nn_linear(units3, 2) + self$softmax = torch::nn_softmax(2) + }, + + forward = function(x){ + x %>% + self$linear1() %>% + torch::nnf_dropout(p = layer_dropout) %>% + self$linear2() %>% + torch::nnf_dropout(p = layer_dropout) %>% + self$linear3() %>% + torch::nnf_dropout(p = layer_dropout) %>% + self$linear4() %>% + self$softmax() + + } + ) + model <- net() + } + + + # Prepare model for training + data <- plpData[population$rowId[population$indexes!=index],] + + + #Extract validation set first - 10k people or 5% + valN <- min(10000,sum(population$indexes!=index)*0.05) + val_rows<-sample(1:sum(population$indexes!=index), valN, replace=FALSE) + train_rows <- c(1:sum(population$indexes!=index))[-val_rows] + + rows <- sample(train_rows, batch_size, replace = F) + y <- population$y[population$indexes!=index,1:2][rows,] + + x_train <- torch_tensor(as.matrix(data[rows,]), dtype = torch_float()) + y_train <- torch_tensor(y, dtype = torch_float()) + pred_temp <- model(x_train) + + print(model) + cat( + " Dimensions Prediction: ", pred_temp$shape," - Object type Prediction: ", as.character(pred_temp$dtype), "\n", + "Dimensions Label: ", y_train$shape," - Object type Label: ", as.character(y_train$dtype), "\n" + ) + + criterion = torch::nn_bce_loss() #Binary crossentropy only + optimizer = optim_adam(model$parameters, lr = lr) + + # Need earlyStopping + for(i in 1:epochs){ + optimizer$zero_grad() + y_pred = model(x_train) + loss = criterion(y_pred, y_train) + loss$backward() + optimizer$step() + + if(i%%10 == 0){ + # winners = y_pred$argmax(dim = 2) + 1 + # winners = y_pred + # corrects = (winners = y_train) + # accuracy = corrects$sum()$item() / y_train$size()[1] + # cat("Epoch:", i, "Loss:", loss$item(), " Accuracy:", accuracy, "\n") + + cat("Epoch:", i, "Loss:", loss$item(), "\n") + + } + } + + model$eval() + + maxVal <- sum(population$indexes == index) + batches <- lapply(1:ceiling(maxVal/batch_size), function(x) ((x-1)*batch_size+1):min((x*batch_size), maxVal)) + prediction <- population[population$indexes == index,] + prediction$value <- 0 + + for(batch in batches){ + b <- torch_tensor(as.matrix(plpData[population$rowId[population$indexes == index],][batch,,drop = F]), dtype = torch_float()) + pred <- model(b) + prediction$value[batch] <- as.array(pred$to())[,1] + } + + 
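      # population$y was built above as cbind(outcome, 1 - outcome), so column 1 of the
      # softmax output is the predicted probability of the outcome class; it is used as
      # the fold-level risk estimate when computing the AUC below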
attr(prediction, "metaData") <- list(predictionType = "binary") + aucVal <- computeAuc(prediction) + perform <- c(perform,aucVal) + + predictionMat$value[population$indexes == index] <- prediction$value + } + + auc <- computeAuc(predictionMat) + foldPerm <- perform + + # Output ---------------------------------------------------------------- + param.val <- paste('units1: ',units1,'units2: ',units2,'units3: ',units3, + 'layer_dropout: ',layer_dropout,'-- lr: ', lr, + '-- decay: ', decay, '-- batch_size: ',batch_size, '-- epochs: ', epochs) + ParallelLogger::logInfo('==========================================') + ParallelLogger::logInfo(paste0('DeepNNTorch with parameters:', param.val,' obtained an AUC of ',auc)) + ParallelLogger::logInfo('==========================================') + + } else{ + if(is.na(units2)){ + net <- torch::nn_module( + "classic_net", + + initialize = function(){ + self$linear1 = torch::nn_linear(ncol(plpData), units1) + self$linear2 = torch::nn_linear(units1, 2) + self$softmax = torch::nn_softmax(2) + }, + + forward = function(x){ + x %>% + self$linear1() %>% + torch::nnf_dropout(p = layer_dropout) %>% + self$linear2() %>% + torch::nnf_dropout(p = layer_dropout) %>% + self$softmax() + } + ) + model <- net() + } else if(is.na(units3)){ + net <- torch::nn_module( + "classic_net", + + initialize = function(){ + self$linear1 = torch::nn_linear(ncol(plpData), units1) + self$linear2 = torch::nn_linear(units1, units2) + self$linear3 = torch::nn_linear(units2, 2) + self$softmax = torch::nn_softmax(2) + }, + + forward = function(x){ + x %>% + self$linear1() %>% + torch::nnf_dropout(p = layer_dropout) %>% + self$linear2() %>% + torch::nnf_dropout(p = layer_dropout) %>% + self$linear3() %>% + self$softmax() + } + ) + model <- net() + } else{ + net <- torch::nn_module( + "classic_net", + + initialize = function(){ + self$linear1 = torch::nn_linear(ncol(plpData), units1) + self$linear2 = torch::nn_linear(units1, units2) + self$linear3 = torch::nn_linear(units2, units3) + self$linear4 = torch::nn_linear(units3, 2) + self$softmax = torch::nn_softmax(2) + }, + + forward = function(x){ + x %>% + self$linear1() %>% + torch::nnf_dropout(p = layer_dropout) %>% + self$linear2() %>% + torch::nnf_dropout(p = layer_dropout) %>% + self$linear3() %>% + torch::nnf_dropout(p = layer_dropout) %>% + self$linear4() %>% + self$softmax() + + } + ) + model <- net() + } + + + # Prepare model for training + data <- plpData + + + #Extract validation set first - 10k people or 5% + valN <- min(10000,nrow(population)*0.05) + val_rows<-sample(1:nrow(population), valN, replace=FALSE) + train_rows <- c(1:nrow(population))[-val_rows] + + rows <- sample(train_rows, batch_size, replace = F) + y <- population$y[,1:2][rows,] + + x_train <- torch_tensor(as.matrix(data[rows,]), dtype = torch_float()) + y_train <- torch_tensor(y, dtype = torch_float()) + pred_temp <- model(x_train) + + print(model) + cat( + "Dimensions Prediction: ", pred_temp$shape, " - Object type Prediction: ", as.character(pred_temp$dtype), "\n", + "Dimensions Label: ", y_train$shape," - Object type Label: ", as.character(y_train$dtype), "\n" + ) + + criterion = torch::nn_bce_loss() #Binary crossentropy only + optimizer = optim_adam(model$parameters, lr = lr) + + # Need earlyStopping + for(i in 1:epochs){ + optimizer$zero_grad() + y_pred = model(x_train) + loss = criterion(y_pred, y_train) + loss$backward() + optimizer$step() + + if(i%%10 == 0){ + # winners = y_pred$argmax(dim = 2) + 1 + # winners = y_pred + # corrects = (winners = y_train) + # 
accuracy = corrects$sum()$item() / y_train$size()[1] + # cat("Epoch:", i, "Loss:", loss$item(), " Accuracy:", accuracy, "\n") + + cat("Epoch:", i, "Loss:", loss$item(), "\n") + + } + } + + model$eval() + + #batch prediction + maxVal <- nrow(population) + batches <- lapply(1:ceiling(maxVal/batch_size), function(x) ((x-1)*batch_size+1):min((x*batch_size), maxVal)) + prediction <- population + prediction$value <- 0 + + for(batch in batches){ + b <- torch_tensor(as.matrix(plpData[batch,,drop = F]), dtype = torch_float()) + pred <- model(b) + prediction$value[batch] <- as.array(pred$to())[,1] + } + + attr(prediction, "metaData") <- list(predictionType = "binary") + auc <- computeAuc(prediction) + foldPerm <- auc + predictionMat <- prediction + } + + result <- list(model=model, + auc=auc, + prediction = predictionMat, + hyperSum = unlist(list(units1=units1,units2=units2,units3=units3, + layer_dropout=layer_dropout,lr =lr, decay=decay, + batch_size = batch_size, epochs= epochs))) + return(result) +} diff --git a/R/Predict.R b/R/Predict.R index caad604..b65a1ee 100644 --- a/R/Predict.R +++ b/R/Predict.R @@ -535,6 +535,30 @@ predict.deepMulti <- function(plpModel, population, plpData, ...){ } } +predict.deepNNTorch <- function(plpModel, population, plpData, ...){ + # ensure_installed("torch") + + ParallelLogger::logDebug(paste0('timeRef null: ',is.null(plpData$timeRef))) + ParallelLogger::logTrace('temporal') + result<-toSparseM(plpData,population,map=plpModel$covariateMap) + + data <-result$data[population$rowId,,] + + batch_size <- min(10000, length(population$rowId)) + maxVal <- length(population$rowId) + batches <- lapply(1:ceiling(maxVal/batch_size), function(x) ((x-1)*batch_size+1):min((x*batch_size),maxVal)) + prediction <- population + prediction$value <- 0 + + for(batch in batches){ + b <- torch_tensor(as.matrix(data[batch,,]), dtype = torch_float()) + pred <- plpModel$model(b) + prediction$value[batch] <- as.array(pred$to())[,1] + } + + prediction <- prediction[,colnames(prediction)%in%c('rowId','subjectId','cohortStartDate','outcomeCount','indexes', 'value')] # need to fix no index issue + return(prediction) +} #' Create predictive probabilities #' diff --git a/R/SaveLoadPlp.R b/R/SaveLoadPlp.R index 4b7e6d6..274d9ad 100644 --- a/R/SaveLoadPlp.R +++ b/R/SaveLoadPlp.R @@ -458,6 +458,9 @@ savePlpModel <- function(plpModel, dirPath){ if(attr(plpModel, 'type')=='deepMulti'){ saveRDS(attr(plpModel, 'inputs'), file = file.path(dirPath, "inputs_attr.rds")) } + if(attr(plpModel, 'type')=='deepNNTorch'){ + torch::torch_save(model, file = file.path(dirPath, "model.rt")) + } } else if(attr(plpModel, 'type') == "xgboost"){ # fixing xgboost save/load issue xgboost::xgb.save(model = plpModel$model, fname = file.path(dirPath, "model")) From e3f8c3158b805ed09871153c44048f8d178c98e8 Mon Sep 17 00:00:00 2001 From: ted9219 Date: Mon, 12 Jul 2021 11:29:59 +0000 Subject: [PATCH 005/140] add library name for torch function --- R/DeepNNTorch.R | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/R/DeepNNTorch.R b/R/DeepNNTorch.R index 812f3fb..612d27d 100644 --- a/R/DeepNNTorch.R +++ b/R/DeepNNTorch.R @@ -284,8 +284,8 @@ trainDeepNNTorch <-function(plpData, population, rows <- sample(train_rows, batch_size, replace = F) y <- population$y[population$indexes!=index,1:2][rows,] - x_train <- torch_tensor(as.matrix(data[rows,]), dtype = torch_float()) - y_train <- torch_tensor(y, dtype = torch_float()) + x_train <- torch::torch_tensor(as.matrix(data[rows,]), dtype = 
torch::torch_float()) + y_train <- torch::torch_tensor(y, dtype = torch::torch_float()) pred_temp <- model(x_train) print(model) @@ -295,9 +295,11 @@ trainDeepNNTorch <-function(plpData, population, ) criterion = torch::nn_bce_loss() #Binary crossentropy only - optimizer = optim_adam(model$parameters, lr = lr) + optimizer = torch::optim_adam(model$parameters, lr = lr) # Need earlyStopping + # Need setting decay + for(i in 1:epochs){ optimizer$zero_grad() y_pred = model(x_train) @@ -325,7 +327,7 @@ trainDeepNNTorch <-function(plpData, population, prediction$value <- 0 for(batch in batches){ - b <- torch_tensor(as.matrix(plpData[population$rowId[population$indexes == index],][batch,,drop = F]), dtype = torch_float()) + b <- torch::torch_tensor(as.matrix(plpData[population$rowId[population$indexes == index],][batch,,drop = F]), dtype = torch::torch_float()) pred <- model(b) prediction$value[batch] <- as.array(pred$to())[,1] } @@ -432,8 +434,8 @@ trainDeepNNTorch <-function(plpData, population, rows <- sample(train_rows, batch_size, replace = F) y <- population$y[,1:2][rows,] - x_train <- torch_tensor(as.matrix(data[rows,]), dtype = torch_float()) - y_train <- torch_tensor(y, dtype = torch_float()) + x_train <- torch::torch_tensor(as.matrix(data[rows,]), dtype = torch::torch_float()) + y_train <- torch::torch_tensor(y, dtype = torch::torch_float()) pred_temp <- model(x_train) print(model) @@ -443,7 +445,7 @@ trainDeepNNTorch <-function(plpData, population, ) criterion = torch::nn_bce_loss() #Binary crossentropy only - optimizer = optim_adam(model$parameters, lr = lr) + optimizer = torch::optim_adam(model$parameters, lr = lr) # Need earlyStopping for(i in 1:epochs){ @@ -474,7 +476,7 @@ trainDeepNNTorch <-function(plpData, population, prediction$value <- 0 for(batch in batches){ - b <- torch_tensor(as.matrix(plpData[batch,,drop = F]), dtype = torch_float()) + b <- torch::torch_tensor(as.matrix(plpData[batch,,drop = F]), dtype = torch::torch_float()) pred <- model(b) prediction$value[batch] <- as.array(pred$to())[,1] } From 2d9494e0029e1e68f58765d32c4d69491d827fbe Mon Sep 17 00:00:00 2001 From: Egill Fridgeirsson Date: Mon, 12 Jul 2021 14:27:26 +0200 Subject: [PATCH 006/140] added resnet model with tokenized features --- R/ResNet.R | 284 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 284 insertions(+) create mode 100644 R/ResNet.R diff --git a/R/ResNet.R b/R/ResNet.R new file mode 100644 index 0000000..c7531d3 --- /dev/null +++ b/R/ResNet.R @@ -0,0 +1,284 @@ +# setResNet <- function(numLayers=4, sizeHidden=64, hiddenFactor=1, +# residualDropout=0.2, hiddenDropout=0.2, +# normalization='BatchNorm', activation='RelU', +# sizeEmbedding=64, weightDecay=1e-4, +# learningRate=3e-4, seed=42) { +# +# if (!is.null(seed)) { +# seed <- as.integer(sample(1e5, 1)) +# } +# +# param <- split(expand.grid(numLayers=numLayers, sizeHidden=sizeHidden, +# hiddenFactor=hiddenFactor, +# residualDropout=residualDropout, +# hiddenDropout=hiddenDropout, +# sizeEmbedding=sizeEmbedding, wei)) +# results <- list(model='fitResNet', param=param, name='ResNet') +# +# class(results) <- 'modelSettings' +# +# return(results) +# +# } + + +# fitResNet <- function(population, plpData, param, search='Random', numSearch=1, +# quiet=F) { +# +# toSparse <- toSparseM(plpData, population) +# sparseMatrix <- toSparse$data +# +# outLoc <- createTempModelLoc() +# #do cross validation to find hyperParameter +# hyperParamSel <- lapply(param, function(x) do.call(trainResNet, +# listAppend(x, list(plpData=sparseMatrix, +# 
population = population, +# train=TRUE, +# modelOutput=outLoc, +# quiet = quiet)) )) +# hyperSummary <- cbind(do.call(rbind, param), unlist(hyperParamSel)) +# +# +# +# +# } + +# # trainResNet <- function(population, plpData, modelOutput, train=T) { +# +# +# } + +ResLayer <- torch::nn_module( + name='ResLayer', + + initialize=function(sizeHidden, resHidden, normalization, + activation, hiddenDropout=NULL, residualDropout=NULL){ + self$norm <- normalization(sizeHidden) + self$linear0 <- torch::nn_linear(sizeHidden, resHidden) + self$linear1 <- torch::nn_linear(resHidden, sizeHidden) + + self$activation <- activation + self$hiddenDropout <- hiddenDropout + self$residualDropout <- residualDropout + + }, + + forward=function(x) { + z <- x + z <- self$norm(z) + z <- self$linear0(z) + z <- self$activation(z) + if (!is.null(self$hiddenDropout)) { + z <- torch::nnf_dropout(z, p=self$hiddenDropout) + } + z <- self$linear1(z) + if (!is.null(self$residualDropout)) { + z <- torch::nnf_dropout(z, p=self$residualDropout) + } + x <- z + x + return(x) + } +) + +ResNet <- torch::nn_module( + name='ResNet', + + initialize=function(n_features, sizeEmbedding, sizeHidden, numLayers, + hiddenFactor, activation, normalization, hiddenDropout=NULL, + residualDropout=NULL, d_out=1) { + # n_features - 1 because only binary features are embedded (not Age) + # ages is concatenated with the embedding output + # need to extend to support other numerical features + self$embedding <- torch::nn_linear(n_features - 1, sizeEmbedding, bias=F) + self$first_layer <- torch::nn_linear(sizeEmbedding + 1, sizeHidden) + + resHidden <- sizeHidden * hiddenFactor + + #TODO make this prettier , residualBlock class + #TODO + self$layers <- torch::nn_module_list(lapply(1:numLayers, + function (x) ResLayer(sizeHidden, resHidden, + normalization, activation, + hiddenDropout, + residualDropout))) + self$lastNorm <- normalization(sizeHidden) + self$head <- torch::nn_linear(sizeHidden, d_out) + + self$lastAct <- activation + + + + }, + + forward=function(x_num, x_cat) { + x_cat <- self$embedding(x_cat) + x <- torch::torch_cat(list(x_cat, x_num), dim=2L) + x <- self$first_layer(x) + + for (i in 1:length(self$layers)) { + x <- self$layers[[i]](x) + } + x <- self$lastNorm(x) + x <- self$lastAct(x) + x <- self$head(x) + x <- x$squeeze(-1) + return(x) + } +) + +Estimator <- torch::nn_module( + name = 'Estimator', + initialize = function(baseModel, modelParameters, fitParameters, + optimizer=torch::optim_adam, + criterion=torch::nn_bce_with_logits_loss, + device='cpu'){ + self$device <- device + self$model <- do.call(baseModel, modelParameters) + self$modelParameters <- modelParameters + + self$epochs <- self$item_or_defaults(fitParameters, 'epochs', 10) + self$learningRate <- self$item_or_defaults(fitParameters,'lr', 2e-4) + self$l2Norm <- self$item_or_defaults(fitParameters, 'l2', 1e-5) + + self$resultsDir <- self$item_or_defaults(fitParameters, 'resultsDir', './results') + dir.create(self$resultsDir) + self$prefix <- self$item_or_defaults(fitParameters, 'prefix', 'resnet') + + self$previousEpochs <- self$item_or_defaults(fitParameters, 'previousEpochs', 0) + + self$optimizer <- optimizer(params=self$model$parameters, + lr=self$learningRate, weight_decay=self$l2Norm) + self$criterion <- criterion() + }, + + # fits the estimator + fit = function(dataloader, testDataloader) { + valLosses = c() + valAUCs = c() + for (epoch in 1:self$epochs) { + self$fit_epoch(dataloader) + scores <- self$score_epoch(testDataloader) + + currentEpoch <- epoch + 
self$previousEpochs + + ParallelLogger::logInfo('Epochs: ', currentEpoch, ' | Val AUC: ', + round(scores$auc,3), ' | Val Loss: ', round(scores$loss,2), ' LR: ', + self$optimizer$param_groups[[1]]$lr) + valLosses <- c(valLosses, scores$loss) + valAUCs <- c(valAUCs, scores$auc) + + torch::torch_save(list( + modelState_dict=self$model$state_dict(), + modelHyperparameters=self$modelParameters), + file.path(self$resultsDir, paste0(self$prefix, '_epochs:', currentEpoch, + '_auc:', round(scores$auc,3), '_val_loss:', + round(scores$loss,2)))) + } + write.csv(data.frame(epochs=1:self$epochs, loss=valLosses, auc=valAUCs), + file.path(self$resultsDir, 'log.txt')) + }, + + # trains for one epoch + fit_epoch = function(dataloader){ + t = Sys.time() + batch_loss = 0 + i=1 + self$model$train() + for (b in torch::enumerate(dataloader)) { + cat = b[[1]]$to(device=self$device) + num = b[[2]]$to(device=self$device) + target= b[[3]]$to(device=self$device) + + out = self$model(num, cat) + loss = self$criterion(out, target) + + batch_loss = batch_loss + loss + if (i %% 10 == 0) { + elapsed_time <- Sys.time() - t + ParallelLogger::logInfo('Loss: ', round((batch_loss/10)$item(), 3), ' | Time: ', + round(elapsed_time,digits = 2), units(elapsed_time)) + t = Sys.time() + batch_loss = 0 + } + + loss$backward() + self$optimizer$step() + self$optimizer$zero_grad() + i = i + 1 + } + + }, + + # calculates loss and auc after training for one epoch + score_epoch = function(dataloader){ + torch::with_no_grad({ + loss = c() + predictions = c() + targets = c() + self$model$eval() + for (b in torch::enumerate(dataloader)) { + cat = b[[1]]$to(device=self$device) + num = b[[2]]$to(device=self$device) + target = b[[3]]$to(device=self$device) + + pred = self$model(num, cat) + predictions = c(predictions, as.array(pred)) + targets = c(targets, as.array(target)) + loss = c(loss, self$criterion(pred, target)$item()) + } + mean_loss = mean(loss) + predictionsClass = list(values=predictions, outcomeCount=targets) + attr(predictionsClass, 'metaData')$predictionType <-'binary' + auc = computeAuc(predictionsClass) + }) + return(list(loss=mean_loss, auc=auc)) + }, + + # predicts and outputs the probabilities + # predict_proba = function(dataloader) { + # + # }, + + # predicts and outputs the class + # predict = function(dataloader){ + # + # }, + + # select item from list, and if it's null sets a default + item_or_defaults = function (list, item, default = NULL) { + value = list[[item]] + if (is.null(value)) default else value + }, + +) + +Dataset <- torch::dataset( + name = 'Dataset', + + initialize=function(data, labels, indices, numericalIndex) { + matrix <- data[indices,] + + tensor <- torch::torch_tensor(as.matrix(matrix), dtype=torch::torch_float32()) + + self$labels <- torch::torch_tensor(labels)[indices] + + notNumIndex <- 1:tensor$shape[2] != numericalIndex + self$cat <- tensor[, notNumIndex] + self$num <- tensor[,numericalIndex, drop=F] + + }, + + .getitem = function(item) { + return(list(self$cat[item,], + self$num[item,], + self$labels[item])) + }, + + .length = function() { + self$labels$shape[1] + } +) + + + + From ed2116b750a497cfd43851533ad6c47a159ef447 Mon Sep 17 00:00:00 2001 From: Jenna Reps Date: Mon, 12 Jul 2021 08:46:25 -0400 Subject: [PATCH 007/140] updating temporal sql - adding vocab group code - adding start/end day (Q - do we want to include things that start before @start_day prior to index but end after - i.e., overlapping events?) 
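The time_id added by this SQL buckets each event by the number of @time_part units between the event start date and the cohort index date, divided by @time_interval. A rough R analogue of that bucketing, using the month/interval-of-1 settings that extras/example.R adopts later in this series (an illustrative sketch only, not code from the patch; SQL Server's DATEDIFF(month, ...) counts calendar-month boundaries crossed):

monthsBetween <- function(eventDate, indexDate) {
  # count calendar-month boundaries crossed, mirroring DATEDIFF(month, event, index)
  (as.integer(format(indexDate, "%Y")) - as.integer(format(eventDate, "%Y"))) * 12 +
    (as.integer(format(indexDate, "%m")) - as.integer(format(eventDate, "%m")))
}
timeInterval <- 1
timeId <- floor(monthsBetween(as.Date("2009-10-20"), as.Date("2010-01-05")) / timeInterval)  # gives 3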
--- .../sql_server/DomainConceptGroupTemporal.sql | 167 ++++++++++++++++++ inst/sql/sql_server/DomainConceptTemporal.sql | 8 + 2 files changed, 175 insertions(+) create mode 100644 inst/sql/sql_server/DomainConceptGroupTemporal.sql diff --git a/inst/sql/sql_server/DomainConceptGroupTemporal.sql b/inst/sql/sql_server/DomainConceptGroupTemporal.sql new file mode 100644 index 0000000..39de55e --- /dev/null +++ b/inst/sql/sql_server/DomainConceptGroupTemporal.sql @@ -0,0 +1,167 @@ +IF OBJECT_ID('tempdb..#groups', 'U') IS NOT NULL +DROP TABLE #groups; + +{@domain_table == 'drug_exposure' | @domain_table == 'drug_era'} ? { + SELECT DISTINCT descendant_concept_id, + ancestor_concept_id + INTO #groups + FROM @cdm_database_schema.concept_ancestor + INNER JOIN @cdm_database_schema.concept + ON ancestor_concept_id = concept_id + WHERE ((vocabulary_id = 'ATC' + AND LEN(concept_code) IN (1, 3, 4, 5)) + OR (standard_concept = 'S' + {@domain_table == 'drug_era'} ? { AND concept_class_id = 'Ingredient'} + AND domain_id = 'Drug')) + AND concept_id != 0 + {@excluded_concept_table != ''} ? { AND descendant_concept_id NOT IN (SELECT id FROM @excluded_concept_table)} + {@included_concept_table != ''} ? { AND descendant_concept_id IN (SELECT id FROM @included_concept_table)} + {@excluded_concept_table != ''} ? { AND ancestor_concept_id NOT IN (SELECT id FROM @excluded_concept_table)} + {@included_concept_table != ''} ? { AND ancestor_concept_id IN (SELECT id FROM @included_concept_table)} + ; +} + +{@domain_table == 'condition_occurrence' | @domain_table == 'condition_era'} ? { + SELECT DISTINCT descendant_concept_id, + ancestor_concept_id + INTO #groups + FROM @cdm_database_schema.concept_ancestor + INNER JOIN ( + SELECT concept_id + FROM @cdm_database_schema.concept + INNER JOIN ( + SELECT * + FROM @cdm_database_schema.concept_ancestor + WHERE ancestor_concept_id = 441840 /* SNOMED clinical finding */ + AND (min_levels_of_separation > 2 + OR descendant_concept_id IN (433736, 433595, 441408, 72404, 192671, 137977, 434621, 437312, 439847, 4171917, 438555, 4299449, 375258, 76784, 40483532, 4145627, 434157, 433778, 258449, 313878) + ) + ) temp + ON concept_id = descendant_concept_id + WHERE concept_name NOT LIKE '%finding' + AND concept_name NOT LIKE 'Disorder of%' + AND concept_name NOT LIKE 'Finding of%' + AND concept_name NOT LIKE 'Disease of%' + AND concept_name NOT LIKE 'Injury of%' + AND concept_name NOT LIKE '%by site' + AND concept_name NOT LIKE '%by body site' + AND concept_name NOT LIKE '%by mechanism' + AND concept_name NOT LIKE '%of body region' + AND concept_name NOT LIKE '%of anatomical site' + AND concept_name NOT LIKE '%of specific body structure%' + AND domain_id = 'Condition' + {@excluded_concept_table != ''} ? { AND concept_id NOT IN (SELECT id FROM @excluded_concept_table)} + {@included_concept_table != ''} ? { AND concept_id IN (SELECT id FROM @included_concept_table)} + ) valid_groups + ON ancestor_concept_id = valid_groups.concept_id + {@excluded_concept_table != '' | @included_concept_table != ''} ? { + WHERE + {@excluded_concept_table != ''} ? { + ancestor_concept_id NOT IN (SELECT id FROM @excluded_concept_table) + AND descendant_concept_id NOT IN (SELECT id FROM @excluded_concept_table) + } + {@included_concept_table != ''} ? { + {@excluded_concept_table != ''} ? 
{ AND } : { }ancestor_concept_id IN (SELECT id FROM @included_concept_table) + AND descendant_concept_id IN (SELECT id FROM @included_concept_table) + } + } + ; +} + +-- Feature construction +SELECT +CAST(ancestor_concept_id AS BIGINT) * 1000 + @analysis_id AS covariate_id, + time_id, + duration, +{@aggregated} ? { + cohort_definition_id, + COUNT(*) AS sum_value +} : { + row_id, + 1 AS covariate_value +} +INTO @covariate_table +FROM ( + SELECT DISTINCT ancestor_concept_id, + + FLOOR(DATEDIFF(@time_part, @cdm_database_schema.@domain_table.@domain_start_date, cohort.cohort_start_date)*1.0/@time_interval ) as time_id, + {@aggregated} ? { + max( + } + + CASE WHEN @cdm_database_schema.@domain_table.@domain_start_date <= cohort.cohort_start_date + THEN + DATEDIFF(@time_part, @cdm_database_schema.@domain_table.@domain_start_date, @cdm_database_schema.@domain_table.@domain_start_date)*1.0/@time_interval + ELSE + DATEDIFF(@time_part, @cdm_database_schema.@domain_table.@domain_start_date, cohort.cohort_start_date)*1.0/@time_interval + END + {@aggregated} ? { + ) + } + as duration, + + {@aggregated} ? { + cohort_definition_id, + cohort.subject_id, + cohort.cohort_start_date + } : { + cohort.@row_id_field AS row_id + } + FROM @cohort_table cohort + INNER JOIN @cdm_database_schema.@domain_table + ON cohort.subject_id = @domain_table.person_id + INNER JOIN #groups + ON @domain_concept_id = descendant_concept_id + WHERE @domain_start_date <= DATEADD(DAY, @end_day, cohort.cohort_start_date) + AND @domain_start_date >= DATEADD(DAY, @start_day, cohort.cohort_start_date) + AND @domain_concept_id != 0 + {@sub_type == 'inpatient'} ? { AND condition_type_concept_id IN (38000183, 38000184, 38000199, 38000200)} + {@included_cov_table != ''} ? { AND CAST(ancestor_concept_id AS BIGINT) * 1000 + @analysis_id IN (SELECT id FROM @included_cov_table)} + {@cohort_definition_id != -1} ? { AND cohort.cohort_definition_id IN (@cohort_definition_id)} +) temp +{@aggregated} ? { + GROUP BY cohort_definition_id, + ancestor_concept_id, + time_id, + duration + } +} +; +TRUNCATE TABLE #groups; + +DROP TABLE #groups; + +-- Reference construction +INSERT INTO #cov_ref ( +covariate_id, +covariate_name, +analysis_id, +concept_id +) +SELECT covariate_id, + CAST(CONCAT('@domain_table group: ', CASE WHEN concept_name IS NULL THEN 'Unknown concept' ELSE concept_name END {@sub_type == 'inpatient'} ? 
{, ' (inpatient)'}) AS VARCHAR(512)) AS covariate_name, +@analysis_id AS analysis_id, +CAST((covariate_id - @analysis_id) / 1000 AS INT) AS concept_id +FROM ( + SELECT DISTINCT covariate_id + FROM @covariate_table +) t1 +LEFT JOIN @cdm_database_schema.concept +ON concept_id = CAST((covariate_id - @analysis_id) / 1000 AS INT); + +INSERT INTO #analysis_ref ( +analysis_id, +analysis_name, +domain_id, +start_day, +end_day, +is_binary, +missing_means_zero +) +SELECT @analysis_id AS analysis_id, +CAST('@analysis_name' AS VARCHAR(512)) AS analysis_name, +CAST('@domain_id' AS VARCHAR(20)) AS domain_id, +@start_day AS start_day, +@end_day AS end_day, +CAST('Y' AS VARCHAR(1)) AS is_binary, +CAST(NULL AS VARCHAR(1)) AS missing_means_zero; \ No newline at end of file diff --git a/inst/sql/sql_server/DomainConceptTemporal.sql b/inst/sql/sql_server/DomainConceptTemporal.sql index a3ebc2a..b49107b 100644 --- a/inst/sql/sql_server/DomainConceptTemporal.sql +++ b/inst/sql/sql_server/DomainConceptTemporal.sql @@ -1,3 +1,6 @@ +{DEFAULT @end_day == 0} +{DEFAULT @start_day == -99999} + -- Feature construction SELECT CAST(@domain_concept_id AS BIGINT) * 1000 + @analysis_id AS covariate_id, @@ -40,6 +43,7 @@ FROM ( ON cohort.subject_id = @domain_table.person_id WHERE @domain_start_date <= DATEADD(DAY, @end_day, cohort.cohort_start_date) + AND @domain_start_date >= DATEADD(DAY, @start_day, cohort.cohort_start_date) AND @domain_concept_id != 0 {@sub_type == 'inpatient'} ? { AND condition_type_concept_id IN (38000183, 38000184, 38000199, 38000200)} @@ -80,11 +84,15 @@ INSERT INTO #analysis_ref ( analysis_id, analysis_name, domain_id, +start_day, +end_day, is_binary, missing_means_zero ) SELECT @analysis_id AS analysis_id, CAST('@analysis_name' AS VARCHAR(512)) AS analysis_name, CAST('@domain_id' AS VARCHAR(20)) AS domain_id, +@start_day AS start_day, +@end_day AS end_day, CAST('Y' AS VARCHAR(1)) AS is_binary, CAST(NULL AS VARCHAR(1)) AS missing_means_zero; \ No newline at end of file From d72f35d1511634232d5b2ab65ba4749c1b47b230 Mon Sep 17 00:00:00 2001 From: Jenna Reps Date: Thu, 22 Jul 2021 12:20:26 -0400 Subject: [PATCH 008/140] updated for new FeatureExtraction - fixed description issue - added example.R in extras for initial testing (requires FeatureExtraction sequence branch) - merged in the data conversion code and updated sparseRTorch (how to use non-temporal features?) 
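The reworked toSparseRTorch() further down in this patch returns the temporal covariates as a sparse COO tensor (sparseMat$dataTemporal) whose dimensions follow the index columns rowId, covariateId, timeId, i.e. people by covariates by time. A minimal usage sketch extending the example.R added below, assuming the data are small enough to densify in one go (an illustration, not part of the patch):

sparseMat <- toSparseRTorch(plpData, population, map = NULL, temporal = TRUE)
denseTemporal <- sparseMat$dataTemporal$to_dense()  # people x covariates x time; assumes torch exposes to_dense() on the sparse tensor
firstBatch <- denseTemporal[1:100, , ]              # slice out the first 100 rows as a mini-batch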
--- DESCRIPTION | 10 ++- NAMESPACE | 31 ++++++++++ R/sparseRTorch.R | 55 ++++++++++++++--- extras/example.R | 44 ++++++++++++++ man/getPlpData.Rd | 118 ++++++++++++++++++++++++++++++++++++ man/loadPlpData.Rd | 27 +++++++++ man/loadPlpFromCsv.Rd | 17 ++++++ man/loadPlpModel.Rd | 17 ++++++ man/loadPlpResult.Rd | 17 ++++++ man/loadPrediction.Rd | 17 ++++++ man/predictAndromeda.Rd | 36 +++++++++++ man/predictPlp.Rd | 27 +++++++++ man/predictProbabilities.Rd | 26 ++++++++ man/savePlpData.Rd | 29 +++++++++ man/savePlpModel.Rd | 19 ++++++ man/savePlpResult.Rd | 19 ++++++ man/savePlpToCsv.Rd | 19 ++++++ man/savePrediction.Rd | 21 +++++++ man/setCIReNN.Rd | 93 ++++++++++++++++++++++++++++ man/setCNNTorch.Rd | 36 +++++++++++ man/setCovNN.Rd | 48 +++++++++++++++ man/setCovNN2.Rd | 48 +++++++++++++++ man/setDeepNN.Rd | 42 +++++++++++++ man/setDeepNNTorch.Rd | 42 +++++++++++++ man/setRNNTorch.Rd | 35 +++++++++++ man/toSparseM.Rd | 38 ++++++++++++ man/toSparseRTorch.Rd | 37 +++++++++++ man/toSparseTorchPython.Rd | 49 +++++++++++++++ man/transferLearning.Rd | 54 +++++++++++++++++ 29 files changed, 1058 insertions(+), 13 deletions(-) create mode 100644 NAMESPACE create mode 100644 extras/example.R create mode 100644 man/getPlpData.Rd create mode 100644 man/loadPlpData.Rd create mode 100644 man/loadPlpFromCsv.Rd create mode 100644 man/loadPlpModel.Rd create mode 100644 man/loadPlpResult.Rd create mode 100644 man/loadPrediction.Rd create mode 100644 man/predictAndromeda.Rd create mode 100644 man/predictPlp.Rd create mode 100644 man/predictProbabilities.Rd create mode 100644 man/savePlpData.Rd create mode 100644 man/savePlpModel.Rd create mode 100644 man/savePlpResult.Rd create mode 100644 man/savePlpToCsv.Rd create mode 100644 man/savePrediction.Rd create mode 100644 man/setCIReNN.Rd create mode 100644 man/setCNNTorch.Rd create mode 100644 man/setCovNN.Rd create mode 100644 man/setCovNN2.Rd create mode 100644 man/setDeepNN.Rd create mode 100644 man/setDeepNNTorch.Rd create mode 100644 man/setRNNTorch.Rd create mode 100644 man/toSparseM.Rd create mode 100644 man/toSparseRTorch.Rd create mode 100644 man/toSparseTorchPython.Rd create mode 100644 man/transferLearning.Rd diff --git a/DESCRIPTION b/DESCRIPTION index 42e28a4..a6f0e51 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -1,7 +1,6 @@ Package: DeepPatientLevelPrediction Type: Package -Title: Package for deep learning patient level prediction using data in the OMOP Common Data - Model +Title: Deep learning function for patient level prediction using data in the OMOP Common Data Model Version: 0.0.1 Date: 2021-06-07 Authors@R: c( @@ -9,10 +8,8 @@ Authors@R: c( person("Seng", "Chan You", role = c("aut")), person("Egill", "Friogeirsson", role = c("aut")) ) - Maintainer: Jenna Reps -Description: A package for creating deep learning patient level prediction models following -the OHDSI PatientLevelPrediction framework. +Description: A package for creating deep learning patient level prediction models following the OHDSI PatientLevelPrediction framework. 
License: Apache License 2.0 URL: https://ohdsi.github.io/PatientLevelPrediction, https://github.com/OHDSI/DeepPatientLevelPrediction BugReports: https://github.com/OHDSI/DeepPatientLevelPrediction/issues @@ -33,6 +30,7 @@ Imports: RSQLite, slam, SqlRender (>= 1.1.3), + torch, tibble, tidyr, Suggests: @@ -46,4 +44,4 @@ Remotes: LinkingTo: Rcpp NeedsCompilation: yes RoxygenNote: 7.1.1 -Encoding: UTF-8 +Encoding: UTF-8 \ No newline at end of file diff --git a/NAMESPACE b/NAMESPACE new file mode 100644 index 0000000..8c54858 --- /dev/null +++ b/NAMESPACE @@ -0,0 +1,31 @@ +# Generated by roxygen2: do not edit by hand + +S3method(print,plpData) +S3method(print,summary.plpData) +S3method(summary,plpData) +export(getPlpData) +export(loadPlpData) +export(loadPlpFromCsv) +export(loadPlpModel) +export(loadPlpResult) +export(loadPrediction) +export(predictAndromeda) +export(predictPlp) +export(predictProbabilities) +export(savePlpData) +export(savePlpModel) +export(savePlpResult) +export(savePlpToCsv) +export(savePrediction) +export(setCIReNN) +export(setCNNTorch) +export(setCovNN) +export(setCovNN2) +export(setDeepNN) +export(setDeepNNTorch) +export(setRNNTorch) +export(toSparseM) +export(toSparseRTorch) +export(toSparseTorchPython) +export(transferLearning) +importFrom(zeallot,"%<-%") diff --git a/R/sparseRTorch.R b/R/sparseRTorch.R index b03fdef..3548234 100644 --- a/R/sparseRTorch.R +++ b/R/sparseRTorch.R @@ -1,21 +1,62 @@ source('R/Formatting.R') +#' Convert the plpData in COO format into a sparse Torch tensor +#' +#' @description +#' Converts the standard plpData to a sparse tensor for Torch +#' +#' @details +#' This function converts the covariate file from COO format into a sparse Torch tensor +#' @param plpData An object of type \code{plpData} with covariate in coo format - the patient level prediction +#' data extracted from the CDM. 
+#' @param population The population to include in the matrix +#' @param map A covariate map (telling us the column number for covariates) +#' @param temporal Whether you want to convert temporal data +#' @examples +#' #TODO +#' +#' @return +#' Returns a list, containing the data as a sparse matrix, the plpData covariateRef +#' and a data.frame named map that tells us what covariate corresponds to each column +#' This object is a list with the following components: \describe{ +#' \item{data}{A sparse matrix with the rows corresponding to each person in the plpData and the columns corresponding to the covariates.} +#' \item{covariateRef}{The plpData covariateRef.} +#' \item{map}{A data.frame containing the data column ids and the corresponding covariateId from covariateRef.} +#' } +#' +#' @export toSparseRTorch <- function(plpData, population, map=NULL, temporal=T){ newCovariateData <- MapCovariates(plpData$covariateData, population, mapping=map) - indices <- newCovariateData$covariates %>% select(rowId, covariateId, timeId) %>% collect() %>% as.matrix() - values <- newCovariateData$covariates %>% select(covariateValue) %>% collect() %>% as.matrix() + if(temporal){ + indicesTemporal <- newCovariateData$covariates %>% filter(!is.na(.data$timeId)) %>% mutate(timeId = .data$timeId+1) %>% select(.data$rowId, .data$covariateId, .data$timeId) %>% collect() %>% as.matrix() + valuesTemporal <- newCovariateData$covariates %>% filter(!is.na(.data$timeId)) %>% select(.data$covariateValue) %>% collect() %>% as.matrix() + + indicesTensor <- torch::torch_tensor(indicesTemporal, dtype=torch::torch_long())$t() + valuesTensor <- torch::torch_tensor(valuesTemporal)$squeeze() + + sparseMatrixTemporal <- torch::torch_sparse_coo_tensor(indices=indicesTensor, + values=valuesTensor) + + indicesNonTemporal <- newCovariateData$covariates %>% filter(is.na(.data$timeId)) %>% select(.data$rowId, .data$covariateId) %>% collect() %>% as.matrix() + valuesNonTemporal <- newCovariateData$covariates %>% filter(is.na(.data$timeId)) %>% select(.data$covariateValue) %>% collect() %>% as.matrix() + + } else{ + sparseMatrixTemporal <- NULL + indicesNonTemporal <- newCovariateData$covariates %>% select(.data$rowId, .data$covariateId) %>% collect() %>% as.matrix() + valuesNonTemporal <- newCovariateData$covariates %>% select(.data$covariateValue) %>% collect() %>% as.matrix() + } - indicesTensor <- torch::torch_tensor(indices, dtype=torch::torch_long())$t() - valuesTensor <- torch::torch_tensor(values)$squeeze() - - sparseMatrix <- torch::torch_sparse_coo_tensor(indices=indicesTensor, + indicesTensor <- torch::torch_tensor(indicesNonTemporal, dtype=torch::torch_long())$t() + valuesTensor <- torch::torch_tensor(valuesNonTemporal)$squeeze() + sparseMatrixNonTemporal <- torch::torch_sparse_coo_tensor(indices=indicesTensor, values=valuesTensor) results = list( - data=sparseMatrix, + data = sparseMatrixNonTemporal, + dataTemporal = sparseMatrixTemporal, covariateRef=as.data.frame(newCovariateData$covariateRef), map=as.data.frame(newCovariateData$mapping)) diff --git a/extras/example.R b/extras/example.R new file mode 100644 index 0000000..a6cf56a --- /dev/null +++ b/extras/example.R @@ -0,0 +1,44 @@ +# testing code (requires sequential branch of FeatureExtraction): +library(FeatureExtraction) +library(DeepPatientLevelPrediction) +connectionDetails <- Eunomia::getEunomiaConnectionDetails() +Eunomia::createCohorts(connectionDetails) + +covSet <- createTemporalSequenceCovariateSettings(useDemographicsGender = T, + useDemographicsAge = T, 
+ useDemographicsRace = T, + useDemographicsEthnicity = T, + useDemographicsAgeGroup = T, + useConditionEraGroupStart = T, + useDrugEraStart = T, + timePart = 'month', + timeInterval = 1, + sequenceEndDay = -1, + sequenceStartDay = -365*5) + + +plpData <- PatientLevelPrediction::getPlpData(connectionDetails = connectionDetails, + cdmDatabaseSchema = "main", + cohortId = 1, + outcomeIds = 3, + cohortDatabaseSchema = "main", + cohortTable = "cohort", + outcomeDatabaseSchema = "main", + outcomeTable = "cohort", + firstExposureOnly = T, + washoutPeriod = 365, + covariateSettings = covSet + ) + +population <- PatientLevelPrediction::createStudyPopulation(plpData = plpData, + outcomeId = 3, + requireTimeAtRisk = F, + riskWindowStart = 1, + riskWindowEnd = 365) + +sparseMat <- toSparseRTorch(plpData, population, map=NULL, temporal=T) + +# code to train models + + + diff --git a/man/getPlpData.Rd b/man/getPlpData.Rd new file mode 100644 index 0000000..6bea686 --- /dev/null +++ b/man/getPlpData.Rd @@ -0,0 +1,118 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/SaveLoadPlp.R +\name{getPlpData} +\alias{getPlpData} +\title{Get the patient level prediction data from the server} +\usage{ +getPlpData( + connectionDetails, + cdmDatabaseSchema, + oracleTempSchema = cdmDatabaseSchema, + cohortId, + outcomeIds, + studyStartDate = "", + studyEndDate = "", + cohortDatabaseSchema = cdmDatabaseSchema, + cohortTable = "cohort", + outcomeDatabaseSchema = cdmDatabaseSchema, + outcomeTable = "cohort", + cdmVersion = "5", + firstExposureOnly = FALSE, + washoutPeriod = 0, + sampleSize = NULL, + covariateSettings, + excludeDrugsFromCovariates = FALSE +) +} +\arguments{ +\item{connectionDetails}{An R object of type\cr\code{connectionDetails} created using the +function \code{createConnectionDetails} in the +\code{DatabaseConnector} package.} + +\item{cdmDatabaseSchema}{The name of the database schema that contains the OMOP CDM +instance. Requires read permissions to this database. On SQL +Server, this should specifiy both the database and the schema, +so for example 'cdm_instance.dbo'.} + +\item{oracleTempSchema}{For Oracle only: the name of the database schema where you want +all temporary tables to be managed. Requires create/insert +permissions to this database.} + +\item{cohortId}{A unique identifier to define the at risk cohort. CohortId is +used to select the cohort_concept_id in the cohort-like table.} + +\item{outcomeIds}{A list of cohort_definition_ids used to define outcomes (-999 mean no outcome gets downloaded).} + +\item{studyStartDate}{A calendar date specifying the minimum date that a cohort index +date can appear. Date format is 'yyyymmdd'.} + +\item{studyEndDate}{A calendar date specifying the maximum date that a cohort index +date can appear. Date format is 'yyyymmdd'. Important: the study +end data is also used to truncate risk windows, meaning no outcomes +beyond the study end date will be considered.} + +\item{cohortDatabaseSchema}{The name of the database schema that is the location where the +cohort data used to define the at risk cohort is available. +Requires read permissions to this database.} + +\item{cohortTable}{The tablename that contains the at risk cohort. cohortTable has +format of COHORT table: cohort_concept_id, SUBJECT_ID, +COHORT_START_DATE, COHORT_END_DATE.} + +\item{outcomeDatabaseSchema}{The name of the database schema that is the location where +the data used to define the outcome cohorts is available. 
+Requires read permissions to this database.} + +\item{outcomeTable}{The tablename that contains the outcome cohorts. Expectation is +outcomeTable has format of COHORT table: +COHORT_DEFINITION_ID, SUBJECT_ID, COHORT_START_DATE, +COHORT_END_DATE.} + +\item{cdmVersion}{Define the OMOP CDM version used: currently support "4", "5" and "6".} + +\item{firstExposureOnly}{Should only the first exposure per subject be included? Note that +this is typically done in the \code{createStudyPopulation} function, +but can already be done here for efficiency reasons.} + +\item{washoutPeriod}{The mininum required continuous observation time prior to index +date for a person to be included in the at risk cohort. Note that +this is typically done in the \code{createStudyPopulation} function, +but can already be done here for efficiency reasons.} + +\item{sampleSize}{If not NULL, only this number of people will be sampled from the target population (Default NULL)} + +\item{covariateSettings}{An object of type \code{covariateSettings} as created using the +\code{createCovariateSettings} function in the +\code{FeatureExtraction} package.} + +\item{excludeDrugsFromCovariates}{A redundant option} +} +\value{ +Returns an object of type \code{plpData}, containing information on the cohorts, their +outcomes, and baseline covariates. Information about multiple outcomes can be captured at once for +efficiency reasons. This object is a list with the following components: \describe{ +\item{outcomes}{A data frame listing the outcomes per person, including the time to event, and +the outcome id. Outcomes are not yet filtered based on risk window, since this is done at +a later stage.} \item{cohorts}{A data frame listing the persons in each cohort, listing their +exposure status as well as the time to the end of the observation period and time to the end of the +cohort (usually the end of the exposure era).} \item{covariates}{An ffdf object listing the +baseline covariates per person in the two cohorts. This is done using a sparse representation: +covariates with a value of 0 are omitted to save space.} \item{covariateRef}{An ffdf object describing the covariates that have been extracted.} +\item{metaData}{A list of objects with information on how the cohortMethodData object was +constructed.} } The generic \code{()} and \code{summary()} functions have been implemented for this object. +} +\description{ +This function executes a large set of SQL statements against the database in OMOP CDM format to +extract the data needed to perform the analysis. +} +\details{ +Based on the arguments, the at risk cohort data is retrieved, as well as outcomes +occurring in these subjects. The at risk cohort is identified through +user-defined cohorts in a cohort table either inside the CDM instance or in a separate schema. +Similarly, outcomes are identified +through user-defined cohorts in a cohort table either inside the CDM instance or in a separate +schema. Covariates are automatically extracted from the appropriate tables within the CDM. +If you wish to exclude concepts from covariates you will need to +manually add the concept_ids and descendants to the \code{excludedCovariateConceptIds} of the +\code{covariateSettings} argument. 
+}
diff --git a/man/loadPlpData.Rd b/man/loadPlpData.Rd
new file mode 100644
index 0000000..2240222
--- /dev/null
+++ b/man/loadPlpData.Rd
@@ -0,0 +1,27 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/SaveLoadPlp.R
+\name{loadPlpData}
+\alias{loadPlpData}
+\title{Load the cohort data from a folder}
+\usage{
+loadPlpData(file, readOnly = TRUE)
+}
+\arguments{
+\item{file}{The name of the folder containing the data.}
+
+\item{readOnly}{If true, the data is opened read only.}
+}
+\value{
+An object of class plpData.
+}
+\description{
+\code{loadPlpData} loads an object of type plpData from a folder in the file
+system.
+}
+\details{
+The data will be read from a set of files in the folder specified by the user.
+}
+\examples{
+# todo
+
+}
diff --git a/man/loadPlpFromCsv.Rd b/man/loadPlpFromCsv.Rd
new file mode 100644
index 0000000..1808d66
--- /dev/null
+++ b/man/loadPlpFromCsv.Rd
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/SaveLoadPlp.R
+\name{loadPlpFromCsv}
+\alias{loadPlpFromCsv}
+\title{Loads parts of the plp result saved as csv files for transparent sharing}
+\usage{
+loadPlpFromCsv(dirPath)
+}
+\arguments{
+\item{dirPath}{The directory with the results as csv files}
+}
+\description{
+Loads parts of the plp result saved as csv files for transparent sharing
+}
+\details{
+Load the main results from csv files into a runPlp object
+}
diff --git a/man/loadPlpModel.Rd b/man/loadPlpModel.Rd
new file mode 100644
index 0000000..c3ebe52
--- /dev/null
+++ b/man/loadPlpModel.Rd
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/SaveLoadPlp.R
+\name{loadPlpModel}
+\alias{loadPlpModel}
+\title{loads the plp model}
+\usage{
+loadPlpModel(dirPath)
+}
+\arguments{
+\item{dirPath}{The location of the model}
+}
+\description{
+loads the plp model
+}
+\details{
+Loads a plp model that was saved using \code{savePlpModel()}
+}
diff --git a/man/loadPlpResult.Rd b/man/loadPlpResult.Rd
new file mode 100644
index 0000000..3e6fb69
--- /dev/null
+++ b/man/loadPlpResult.Rd
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/SaveLoadPlp.R
+\name{loadPlpResult}
+\alias{loadPlpResult}
+\title{Loads the evaluation dataframe}
+\usage{
+loadPlpResult(dirPath)
+}
+\arguments{
+\item{dirPath}{The directory where the evaluation was saved}
+}
+\description{
+Loads the evaluation dataframe
+}
+\details{
+Loads the evaluation
+}
diff --git a/man/loadPrediction.Rd b/man/loadPrediction.Rd
new file mode 100644
index 0000000..06e51ca
--- /dev/null
+++ b/man/loadPrediction.Rd
@@ -0,0 +1,17 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/SaveLoadPlp.R
+\name{loadPrediction}
+\alias{loadPrediction}
+\title{Loads the prediction dataframe}
+\usage{
+loadPrediction(fileLocation)
+}
+\arguments{
+\item{fileLocation}{The location with the saved prediction}
+}
+\description{
+Loads the prediction dataframe
+}
+\details{
+Loads the prediction RDS file
+}
diff --git a/man/predictAndromeda.Rd b/man/predictAndromeda.Rd
new file mode 100644
index 0000000..0903869
--- /dev/null
+++ b/man/predictAndromeda.Rd
@@ -0,0 +1,36 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Predict.R
+\name{predictAndromeda}
+\alias{predictAndromeda}
+\title{Generated predictions from a regression model}
+\usage{
+predictAndromeda(
+  coefficients,
+  population,
+  covariateData,
+  modelType = "logistic"
+)
+} +\arguments{ +\item{coefficients}{A names numeric vector where the names are the covariateIds, except for the +first value which is expected to be the intercept.} + +\item{population}{A data frame containing the population to do the prediction for} + +\item{covariateData}{An andromeda object containing the covariateData with predefined columns +(see below).} + +\item{modelType}{Current supported types are "logistic", "poisson", "cox" or "survival".} +} +\description{ +Generated predictions from a regression model +} +\details{ +These columns are expected in the outcome object: \tabular{lll}{ \verb{rowId} \tab(integer) \tab +Row ID is used to link multiple covariates (x) to a single outcome (y) \cr \verb{time} \tab(real) +\tab For models that use time (e.g. Poisson or Cox regression) this contains time \cr \tab +\tab(e.g. number of days) \cr } These columns are expected in the covariates object: \tabular{lll}{ +\verb{rowId} \tab(integer) \tab Row ID is used to link multiple covariates (x) to a single outcome +(y) \cr \verb{covariateId} \tab(integer) \tab A numeric identifier of a covariate \cr +\verb{covariateValue} \tab(real) \tab The value of the specified covariate \cr } +} diff --git a/man/predictPlp.Rd b/man/predictPlp.Rd new file mode 100644 index 0000000..f1718a2 --- /dev/null +++ b/man/predictPlp.Rd @@ -0,0 +1,27 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/Predict.R +\name{predictPlp} +\alias{predictPlp} +\title{predictPlp} +\usage{ +predictPlp(plpModel, population, plpData, index = NULL) +} +\arguments{ +\item{plpModel}{An object of type \code{plpModel} - a patient level prediction model} + +\item{population}{The population created using createStudyPopulation() who will have their risks predicted} + +\item{plpData}{An object of type \code{plpData} - the patient level prediction +data extracted from the CDM.} + +\item{index}{A data frame containing rowId: a vector of rowids and index: a vector of doubles the same length as the rowIds. If used, only the rowIds with a negative index value are used to calculate the prediction.} +} +\value{ +A dataframe containing the prediction for each person in the population with an attribute metaData containing prediction details. +} +\description{ +Predict the risk of the outcome using the input plpModel for the input plpData +} +\details{ +The function applied the trained model on the plpData to make predictions +} diff --git a/man/predictProbabilities.Rd b/man/predictProbabilities.Rd new file mode 100644 index 0000000..affba63 --- /dev/null +++ b/man/predictProbabilities.Rd @@ -0,0 +1,26 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/Predict.R +\name{predictProbabilities} +\alias{predictProbabilities} +\title{Create predictive probabilities} +\usage{ +predictProbabilities(predictiveModel, population, covariateData) +} +\arguments{ +\item{predictiveModel}{An object of type \code{predictiveModel} as generated using +\code{\link{fitPlp}}.} + +\item{population}{The population to calculate the prediction for} + +\item{covariateData}{The covariateData containing the covariates for the population} +} +\value{ +The value column in the result data.frame is: logistic: probabilities of the outcome, poisson: +Poisson rate (per day) of the outome, survival: hazard rate (per day) of the outcome. +} +\description{ +Create predictive probabilities +} +\details{ +Generates predictions for the population specified in plpData given the model. 
+}
diff --git a/man/savePlpData.Rd b/man/savePlpData.Rd
new file mode 100644
index 0000000..2562b0f
--- /dev/null
+++ b/man/savePlpData.Rd
@@ -0,0 +1,29 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/SaveLoadPlp.R
+\name{savePlpData}
+\alias{savePlpData}
+\title{Save the cohort data to folder}
+\usage{
+savePlpData(plpData, file, envir = NULL, overwrite = F)
+}
+\arguments{
+\item{plpData}{An object of type \code{plpData} as generated using
+\code{getPlpData}.}
+
+\item{file}{The name of the folder where the data will be written. The folder should
+not yet exist.}
+
+\item{envir}{The environment in which to evaluate variables when saving}
+
+\item{overwrite}{Whether to force overwrite an existing file}
+}
+\description{
+\code{savePlpData} saves an object of type plpData to folder.
+}
+\details{
+The data will be written to a set of files in the folder specified by the user.
+}
+\examples{
+# todo
+
+}
diff --git a/man/savePlpModel.Rd b/man/savePlpModel.Rd
new file mode 100644
index 0000000..3ef435a
--- /dev/null
+++ b/man/savePlpModel.Rd
@@ -0,0 +1,19 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/SaveLoadPlp.R
+\name{savePlpModel}
+\alias{savePlpModel}
+\title{Saves the plp model}
+\usage{
+savePlpModel(plpModel, dirPath)
+}
+\arguments{
+\item{plpModel}{A trained classifier returned by running \code{runPlp()$model}}
+
+\item{dirPath}{A location to save the model to}
+}
+\description{
+Saves the plp model
+}
+\details{
+Saves the plp model to a user specified folder
+}
diff --git a/man/savePlpResult.Rd b/man/savePlpResult.Rd
new file mode 100644
index 0000000..133bd18
--- /dev/null
+++ b/man/savePlpResult.Rd
@@ -0,0 +1,19 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/SaveLoadPlp.R
+\name{savePlpResult}
+\alias{savePlpResult}
+\title{Saves the result from runPlp into the location directory}
+\usage{
+savePlpResult(result, dirPath)
+}
+\arguments{
+\item{result}{The result of running runPlp()}
+
+\item{dirPath}{The directory to save the csv}
+}
+\description{
+Saves the result from runPlp into the location directory
+}
+\details{
+Saves the result from runPlp into the location directory
+}
diff --git a/man/savePlpToCsv.Rd b/man/savePlpToCsv.Rd
new file mode 100644
index 0000000..018c0aa
--- /dev/null
+++ b/man/savePlpToCsv.Rd
@@ -0,0 +1,19 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/SaveLoadPlp.R
+\name{savePlpToCsv}
+\alias{savePlpToCsv}
+\title{Save parts of the plp result as a csv for transparent sharing}
+\usage{
+savePlpToCsv(result, dirPath)
+}
+\arguments{
+\item{result}{An object of class runPlp with development or validation results}
+
+\item{dirPath}{The directory to save the results as csv files}
+}
+\description{
+Save parts of the plp result as a csv for transparent sharing
+}
+\details{
+Saves the main results as a csv (these files can be read by the shiny app)
+}
diff --git a/man/savePrediction.Rd b/man/savePrediction.Rd
new file mode 100644
index 0000000..2e7b4a2
--- /dev/null
+++ b/man/savePrediction.Rd
@@ -0,0 +1,21 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/SaveLoadPlp.R
+\name{savePrediction}
+\alias{savePrediction}
+\title{Saves the prediction dataframe to RDS}
+\usage{
+savePrediction(prediction, dirPath, fileName = "prediction.rds")
+}
+\arguments{
+\item{prediction}{The prediction data.frame}
+
+\item{dirPath}{The directory to save the prediction RDS}
+
+\item{fileName}{The name of
the RDS file that will be saved in dirPath}
+}
+\description{
+Saves the prediction dataframe to RDS
+}
+\details{
+Saves the prediction data frame returned by predict.R to an RDS file and returns the fileLocation where the prediction is saved
+}
diff --git a/man/setCIReNN.Rd b/man/setCIReNN.Rd
new file mode 100644
index 0000000..4fa2654
--- /dev/null
+++ b/man/setCIReNN.Rd
@@ -0,0 +1,93 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/CIReNN.R
+\name{setCIReNN}
+\alias{setCIReNN}
+\title{Create setting for CIReNN model}
+\usage{
+setCIReNN(
+  numberOfRNNLayer = c(1),
+  units = c(128, 64),
+  recurrentDropout = c(0.2),
+  layerDropout = c(0.2),
+  lr = c(1e-04),
+  decay = c(1e-05),
+  outcomeWeight = c(0),
+  batchSize = c(100),
+  epochs = c(100),
+  earlyStoppingMinDelta = c(1e-04),
+  earlyStoppingPatience = c(10),
+  bayes = T,
+  useDeepEnsemble = F,
+  numberOfEnsembleNetwork = 5,
+  useVae = T,
+  vaeDataSamplingProportion = 0.1,
+  vaeValidationSplit = 0.2,
+  vaeBatchSize = 100L,
+  vaeLatentDim = 10L,
+  vaeIntermediateDim = 256L,
+  vaeEpoch = 100L,
+  vaeEpislonStd = 1,
+  useGPU = FALSE,
+  maxGPUs = 2,
+  seed = 1234
+)
+}
+\arguments{
+\item{numberOfRNNLayer}{The number of RNN layers, only 1, 2, or 3 layers available now. e.g. 1, c(1,2), c(1,2,3)}
+
+\item{units}{The number of units of RNN layer - as a list of vectors}
+
+\item{recurrentDropout}{The recurrent dropout rate (regularisation)}
+
+\item{layerDropout}{The layer dropout rate (regularisation)}
+
+\item{lr}{Learning rate}
+
+\item{decay}{Learning rate decay over each update.}
+
+\item{outcomeWeight}{The weight of the outcome class in the loss function. Default is 0, which will be replaced by balanced weight.}
+
+\item{batchSize}{The number of data points to use per training batch}
+
+\item{epochs}{Number of times to iterate over dataset}
+
+\item{earlyStoppingMinDelta}{minimum change in the monitored quantity to qualify as an improvement for early stopping, i.e. an absolute change of less than min_delta in loss of validation data, will count as no improvement.}
+
+\item{earlyStoppingPatience}{Number of epochs with no improvement after which training will be stopped.}
+
+\item{bayes}{logical (either TRUE or FALSE) value for using Bayesian Drop Out Layer to measure uncertainty. If it is TRUE, both Epistemic and Aleatoric uncertainty will be measured through Bayesian Drop Out layer}
+
+\item{useDeepEnsemble}{logical (either TRUE or FALSE) value for using Deep Ensemble (Lakshminarayanan et al., 2017) to measure uncertainty. It cannot be used together with Bayesian deep learning.}
+
+\item{numberOfEnsembleNetwork}{Integer. Number of networks used for Deep Ensemble (Lakshminarayanan et al recommended 5).}
+
+\item{useVae}{logical (either TRUE or FALSE) value for using Variational AutoEncoder before RNN}
+
+\item{vaeDataSamplingProportion}{Data sampling proportion for VAE}
+
+\item{vaeValidationSplit}{Validation split proportion for VAE}
+
+\item{vaeBatchSize}{batch size for VAE}
+
+\item{vaeLatentDim}{Number of latent dimensions for VAE}
+
+\item{vaeIntermediateDim}{Number of intermediate dimensions for VAE}
+
+\item{vaeEpoch}{Number of times to iterate over dataset for VAE}
+
+\item{vaeEpislonStd}{Epsilon}
+
+\item{useGPU}{logical (either TRUE or FALSE) value. If you have GPUs in your machine, and want to use multiple GPU for deep learning, set this value as TRUE}
+
+\item{maxGPUs}{Integer, If you will use GPU, how many GPUs will be used for deep learning in VAE?
GPU parallelisation for deep learning will be activated only when parallel vae is true. Integer >= 2 or list of integers, number of GPUs or list of GPU IDs on which to create model replicas.} + +\item{seed}{Random seed used by deep learning model} +} +\description{ +Create setting for CIReNN model +} +\examples{ +\dontrun{ +model.CIReNN <- setCIReNN() +} +} diff --git a/man/setCNNTorch.Rd b/man/setCNNTorch.Rd new file mode 100644 index 0000000..9f7309b --- /dev/null +++ b/man/setCNNTorch.Rd @@ -0,0 +1,36 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/CNNTorch.R +\name{setCNNTorch} +\alias{setCNNTorch} +\title{Create setting for CNN model with python} +\usage{ +setCNNTorch( + nbfilters = c(16, 32), + epochs = c(20, 50), + seed = 0, + class_weight = 0, + type = "CNN" +) +} +\arguments{ +\item{nbfilters}{The number of filters} + +\item{epochs}{The number of epochs} + +\item{seed}{A seed for the model} + +\item{class_weight}{The class weight used for imbalanced data: + 0: Inverse ratio between positives and negatives +-1: Focal loss} + +\item{type}{It can be normal 'CNN', 'CNN_LSTM', CNN_MLF' with multiple kernels with different kernel size, +'CNN_MIX', 'ResNet' and 'CNN_MULTI'} +} +\description{ +Create setting for CNN model with python +} +\examples{ +\dontrun{ +model.cnnTorch <- setCNNTorch() +} +} diff --git a/man/setCovNN.Rd b/man/setCovNN.Rd new file mode 100644 index 0000000..4ebb5ec --- /dev/null +++ b/man/setCovNN.Rd @@ -0,0 +1,48 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/CovNN.R +\name{setCovNN} +\alias{setCovNN} +\title{Create setting for multi-resolution CovNN model (stucture based on https://arxiv.org/pdf/1608.00647.pdf CNN1)} +\usage{ +setCovNN( + batchSize = 1000, + outcomeWeight = 1, + lr = 1e-05, + decay = 1e-06, + dropout = 0, + epochs = 10, + filters = 3, + kernelSize = 10, + loss = "binary_crossentropy", + seed = NULL +) +} +\arguments{ +\item{batchSize}{The number of samples to used in each batch during model training} + +\item{outcomeWeight}{The weight assined to the outcome (make greater than 1 to reduce unballanced label issue)} + +\item{lr}{The learning rate} + +\item{decay}{The decay of the learning rate} + +\item{dropout}{[currently not used] the dropout rate for regularisation} + +\item{epochs}{The number of times data is used to train the model (e.g., epoches=1 means data only used once to train)} + +\item{filters}{The number of columns output by each convolution} + +\item{kernelSize}{The number of time dimensions used for each convolution} + +\item{loss}{The loss function implemented} + +\item{seed}{The random seed} +} +\description{ +Create setting for multi-resolution CovNN model (stucture based on https://arxiv.org/pdf/1608.00647.pdf CNN1) +} +\examples{ +\dontrun{ +model.CovNN <- setCovNN() +} +} diff --git a/man/setCovNN2.Rd b/man/setCovNN2.Rd new file mode 100644 index 0000000..76d4376 --- /dev/null +++ b/man/setCovNN2.Rd @@ -0,0 +1,48 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/CovNN2.R +\name{setCovNN2} +\alias{setCovNN2} +\title{Create setting for CovNN2 model - convolution across input and time - https://arxiv.org/pdf/1608.00647.pdf} +\usage{ +setCovNN2( + batchSize = 1000, + outcomeWeight = 1, + lr = 1e-05, + decay = 1e-06, + dropout = 0, + epochs = 10, + filters = 3, + kernelSize = 10, + loss = "binary_crossentropy", + seed = NULL +) +} +\arguments{ +\item{batchSize}{The number of samples to used in each batch during model training} + 
+\item{outcomeWeight}{The weight assined to the outcome (make greater than 1 to reduce unballanced label issue)} + +\item{lr}{The learning rate} + +\item{decay}{The decay of the learning rate} + +\item{dropout}{[currently not used] the dropout rate for regularisation} + +\item{epochs}{The number of times data is used to train the model (e.g., epoches=1 means data only used once to train)} + +\item{filters}{The number of columns output by each convolution} + +\item{kernelSize}{The number of time dimensions used for each convolution} + +\item{loss}{The loss function implemented} + +\item{seed}{The random seed} +} +\description{ +Create setting for CovNN2 model - convolution across input and time - https://arxiv.org/pdf/1608.00647.pdf +} +\examples{ +\dontrun{ +model.CovNN <- setCovNN() +} +} diff --git a/man/setDeepNN.Rd b/man/setDeepNN.Rd new file mode 100644 index 0000000..a676bc6 --- /dev/null +++ b/man/setDeepNN.Rd @@ -0,0 +1,42 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/DeepNN.R +\name{setDeepNN} +\alias{setDeepNN} +\title{Create setting for DeepNN model} +\usage{ +setDeepNN( + units = list(c(128, 64), 128), + layer_dropout = c(0.2), + lr = c(1e-04), + decay = c(1e-05), + outcome_weight = c(1), + batch_size = c(100), + epochs = c(100), + seed = NULL +) +} +\arguments{ +\item{units}{The number of units of the deep network - as a list of vectors} + +\item{layer_dropout}{The layer dropout rate (regularisation)} + +\item{lr}{Learning rate} + +\item{decay}{Learning rate decay over each update.} + +\item{outcome_weight}{The weight of the outcome class in the loss function} + +\item{batch_size}{The number of data points to use per training batch} + +\item{epochs}{Number of times to iterate over dataset} + +\item{seed}{Random seed used by deep learning model} +} +\description{ +Create setting for DeepNN model +} +\examples{ +\dontrun{ +model <- setDeepNN() +} +} diff --git a/man/setDeepNNTorch.Rd b/man/setDeepNNTorch.Rd new file mode 100644 index 0000000..9a6a3a1 --- /dev/null +++ b/man/setDeepNNTorch.Rd @@ -0,0 +1,42 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/DeepNNTorch.R +\name{setDeepNNTorch} +\alias{setDeepNNTorch} +\title{Create setting for DeepNN model using Torch for R} +\usage{ +setDeepNNTorch( + units = list(c(128, 64), 128), + layer_dropout = c(0.2), + lr = c(1e-04), + decay = c(1e-05), + outcome_weight = c(1), + batch_size = c(100), + epochs = c(100), + seed = NULL +) +} +\arguments{ +\item{units}{The number of units of the deep network - as a list of vectors} + +\item{layer_dropout}{The layer dropout rate (regularisation)} + +\item{lr}{Learning rate} + +\item{decay}{Learning rate decay over each update.} + +\item{outcome_weight}{The weight of the outcome class in the loss function} + +\item{batch_size}{The number of data points to use per training batch} + +\item{epochs}{Number of times to iterate over dataset} + +\item{seed}{Random seed used by deep learning model} +} +\description{ +Create setting for DeepNN model using Torch for R +} +\examples{ +\dontrun{ +model <- setDeepNN() +} +} diff --git a/man/setRNNTorch.Rd b/man/setRNNTorch.Rd new file mode 100644 index 0000000..00550db --- /dev/null +++ b/man/setRNNTorch.Rd @@ -0,0 +1,35 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/RNNTorch.R +\name{setRNNTorch} +\alias{setRNNTorch} +\title{Create setting for RNN model with python} +\usage{ +setRNNTorch( + hidden_size = c(50, 100), + epochs = c(20, 50), + seed = 0, + 
class_weight = 0, + type = "RNN" +) +} +\arguments{ +\item{hidden_size}{The hidden size} + +\item{epochs}{The number of epochs} + +\item{seed}{A seed for the model} + +\item{class_weight}{The class weight used for imbalanced data: + 0: Inverse ratio between positives and negatives +-1: Focal loss} + +\item{type}{It can be normal 'RNN', 'BiRNN' (bidirectional RNN) and 'GRU'} +} +\description{ +Create setting for RNN model with python +} +\examples{ +\dontrun{ +model.rnnTorch <- setRNNTorch() +} +} diff --git a/man/toSparseM.Rd b/man/toSparseM.Rd new file mode 100644 index 0000000..7b04fee --- /dev/null +++ b/man/toSparseM.Rd @@ -0,0 +1,38 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/Formatting.R +\name{toSparseM} +\alias{toSparseM} +\title{Convert the plpData in COO format into a sparse R matrix} +\usage{ +toSparseM(plpData, population, map = NULL, temporal = F) +} +\arguments{ +\item{plpData}{An object of type \code{plpData} with covariate in coo format - the patient level prediction +data extracted from the CDM.} + +\item{population}{The population to include in the matrix} + +\item{map}{A covariate map (telling us the column number for covariates)} + +\item{temporal}{Whether you want to convert temporal data} +} +\value{ +Returns a list, containing the data as a sparse matrix, the plpData covariateRef +and a data.frame named map that tells us what covariate corresponds to each column +This object is a list with the following components: \describe{ +\item{data}{A sparse matrix with the rows corresponding to each person in the plpData and the columns corresponding to the covariates.} +\item{covariateRef}{The plpData covariateRef.} +\item{map}{A data.frame containing the data column ids and the corresponding covariateId from covariateRef.} +} +} +\description{ +Converts the standard plpData to a sparse matrix +} +\details{ +This function converts the covariate file from ffdf in COO format into a sparse matrix from +the package Matrix +} +\examples{ +#TODO + +} diff --git a/man/toSparseRTorch.Rd b/man/toSparseRTorch.Rd new file mode 100644 index 0000000..f112ea6 --- /dev/null +++ b/man/toSparseRTorch.Rd @@ -0,0 +1,37 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/sparseRTorch.R +\name{toSparseRTorch} +\alias{toSparseRTorch} +\title{Convert the plpData in COO format into a sparse Torch tensor} +\usage{ +toSparseRTorch(plpData, population, map = NULL, temporal = T) +} +\arguments{ +\item{plpData}{An object of type \code{plpData} with covariate in coo format - the patient level prediction +data extracted from the CDM.} + +\item{population}{The population to include in the matrix} + +\item{map}{A covariate map (telling us the column number for covariates)} + +\item{temporal}{Whether you want to convert temporal data} +} +\value{ +Returns a list, containing the data as a sparse matrix, the plpData covariateRef +and a data.frame named map that tells us what covariate corresponds to each column +This object is a list with the following components: \describe{ +\item{data}{A sparse matrix with the rows corresponding to each person in the plpData and the columns corresponding to the covariates.} +\item{covariateRef}{The plpData covariateRef.} +\item{map}{A data.frame containing the data column ids and the corresponding covariateId from covariateRef.} +} +} +\description{ +Converts the standard plpData to a sparse tensor for Torch +} +\details{ +This function converts the covariate file from COO format into a sparse Torch tensor 
+} +\examples{ +#TODO + +} diff --git a/man/toSparseTorchPython.Rd b/man/toSparseTorchPython.Rd new file mode 100644 index 0000000..fd32393 --- /dev/null +++ b/man/toSparseTorchPython.Rd @@ -0,0 +1,49 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/Formatting.R +\name{toSparseTorchPython} +\alias{toSparseTorchPython} +\title{Convert the plpData in COO format into a sparse python matrix using torch.sparse} +\usage{ +toSparseTorchPython( + plpData, + population, + map = NULL, + temporal = F, + pythonExePath = NULL, + nonTemporalCovs = F +) +} +\arguments{ +\item{plpData}{An object of type \code{plpData} with covariate in coo format - the patient level prediction +data extracted from the CDM.} + +\item{population}{The population to include in the matrix} + +\item{map}{A covariate map (telling us the column number for covariates)} + +\item{temporal}{Whether to include timeId into tensor} + +\item{pythonExePath}{Location of python exe you want to use} + +\item{nonTemporalCovs}{If non-temporal covariates (such as age or sex) should be included in temporal sparse matrix} +} +\value{ +Returns a list, containing the python object name of the sparse matrix, the plpData covariateRef +and a data.frame named map that tells us what covariate corresponds to each column +This object is a list with the following components: \describe{ +\item{data}{The python object name containing a sparse matrix with the rows corresponding to each person in the plpData and the columns corresponding to the covariates.} +\item{covariateRef}{The plpData covariateRef.} +\item{map}{A data.frame containing the data column ids and the corresponding covariateId from covariateRef.} +} +} +\description{ +Converts the standard plpData to a sparse matrix firectly into python +} +\details{ +This function converts the covariate file from ffdf in COO format into a sparse matrix from +the package Matrix +} +\examples{ +#TODO + +} diff --git a/man/transferLearning.Rd b/man/transferLearning.Rd new file mode 100644 index 0000000..1842179 --- /dev/null +++ b/man/transferLearning.Rd @@ -0,0 +1,54 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/DeepNN.R +\name{transferLearning} +\alias{transferLearning} +\title{[Under development] Transfer learning} +\usage{ +transferLearning( + plpResult, + plpData, + population, + fixLayers = T, + includeTop = F, + addLayers = c(100, 10), + layerDropout = c(T, T), + layerActivation = c("relu", "softmax"), + outcomeWeight = 1, + batchSize = 10000, + epochs = 20 +) +} +\arguments{ +\item{plpResult}{The plp result when training a kersa deep learning model on big data} + +\item{plpData}{The new data to fine tune the model on} + +\item{population}{The population for the new data} + +\item{fixLayers}{boolean specificying whether to fix weights in model being transferred} + +\item{includeTop}{If TRUE the final layer of the model being transferred is removed} + +\item{addLayers}{vector specifying nodes in each layer to add e.g. 
c(100,10) will add another layer with 100 nodels and then a final layer with 10} + +\item{layerDropout}{Add dropout to each new layer (binary vector length of addLayers)} + +\item{layerActivation}{Activation function for each new layer (string vector length of addLayers)} + +\item{outcomeWeight}{The weight to assign the class 1 when training the model} + +\item{batchSize}{Size of each batch for updating layers} + +\item{epochs}{Number of epoches to run} +} +\description{ +[Under development] Transfer learning +} +\examples{ +\dontrun{ +modelSet <- setDeepNN() +plpResult <- runPlp(plpData, population, modelSettings = modelSet, ...) + +transferLearning(...) +} +} From d76fcbc1f92913df0cbbaf48b7e4ba3006dd9bbc Mon Sep 17 00:00:00 2001 From: Jenna Reps Date: Fri, 23 Jul 2021 15:50:52 -0400 Subject: [PATCH 009/140] restructuring code - restructuring updates to DeepNNTorch - added example code to initially test --- NAMESPACE | 1 + R/DeepNNTorch.R | 309 ++++++++++++++--------------------------------- R/Topolgies.R | 79 ++++++++++++ R/helpers.R | 68 +++++++++++ extras/example.R | 38 +++++- 5 files changed, 273 insertions(+), 222 deletions(-) create mode 100644 R/Topolgies.R create mode 100644 R/helpers.R diff --git a/NAMESPACE b/NAMESPACE index 8c54858..bd822c7 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -3,6 +3,7 @@ S3method(print,plpData) S3method(print,summary.plpData) S3method(summary,plpData) +export(fitDeepNNTorch) export(getPlpData) export(loadPlpData) export(loadPlpFromCsv) diff --git a/R/DeepNNTorch.R b/R/DeepNNTorch.R index 612d27d..a6f11e2 100644 --- a/R/DeepNNTorch.R +++ b/R/DeepNNTorch.R @@ -101,7 +101,7 @@ setDeepNNTorch <- function(units=list(c(128, 64), 128), layer_dropout=c(0.2), return(result) } - +#' @export fitDeepNNTorch <- function(plpData,population, param, search='grid', quiet=F, outcomeId, cohortId, ...){ # check plpData is coo format: @@ -112,6 +112,11 @@ fitDeepNNTorch <- function(plpData,population, param, search='grid', quiet=F, warning('Data temporal but current deepNNTorch uses non-temporal data...') # This can be changed after supporting the temporal covariates. } + if(!is.null(plpData$metaData$call$covariateSettings$temporalSequence)){ + if(plpData$metaData$call$covariateSettings$temporalSequence){ + warning('Data temporal but current deepNNTorch uses non-temporal data...') + # This can be changed after supporting the temporal covariates. 
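+    # (note: this only raises a warning and the function keeps running; as the message
+    # says, the current deepNNTorch implementation uses the covariates non-temporally)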
+ }} metaData <- attr(population, 'metaData') if(!is.null(population$indexes)) @@ -184,12 +189,13 @@ fitDeepNNTorch <- function(plpData,population, param, search='grid', quiet=F, trainDeepNNTorch <-function(plpData, population, units1=128, units2= NA, units3=NA, layer_dropout=0.2, - lr =1e-4, decay=1e-5, outcome_weight = 1.0, batch_size = 100, - epochs= 100, seed=NULL, train=TRUE){ + lr =1e-4, decay=1e-5, outcome_weight = 1.0, batch_size = 10000, + epochs= 100, seed=NULL, train=TRUE,...){ - ParallelLogger::logInfo(paste('Training deep neural network using Torch with ',length(unique(population$indexes)),' fold CV')) if(!is.null(population$indexes) && train==T){ index_vect <- unique(population$indexes) + ParallelLogger::logInfo(paste('Training deep neural network using Torch with ',length(index_vect ),' fold CV')) + perform <- c() # create prediction matrix to store all predictions @@ -202,97 +208,28 @@ trainDeepNNTorch <-function(plpData, population, ParallelLogger::logInfo(paste('Fold ',index, ' -- with ', sum(population$indexes!=index),'train rows')) if(is.na(units2)){ - net <- torch::nn_module( - "classic_net", - - initialize = function(){ - self$linear1 = torch::nn_linear(ncol(plpData), units1) - self$linear2 = torch::nn_linear(units1, 2) - self$softmax = torch::nn_softmax(2) - }, - - forward = function(x){ - x %>% - self$linear1() %>% - torch::nnf_dropout(p = layer_dropout) %>% - self$linear2() %>% - torch::nnf_dropout(p = layer_dropout) %>% - self$softmax() - } - ) - model <- net() + model <- singleLayerNN(inputN = ncol(plpData), + layer1 = units1, + outputN = 2, + layer_dropout = layer_dropout) + } else if(is.na(units3)){ - net <- torch::nn_module( - "classic_net", - - initialize = function(){ - self$linear1 = torch::nn_linear(ncol(plpData), units1) - self$linear2 = torch::nn_linear(units1, units2) - self$linear3 = torch::nn_linear(units2, 2) - self$softmax = torch::nn_softmax(2) - }, - - forward = function(x){ - x %>% - self$linear1() %>% - torch::nnf_dropout(p = layer_dropout) %>% - self$linear2() %>% - torch::nnf_dropout(p = layer_dropout) %>% - self$linear3() %>% - self$softmax() - } - ) - model <- net() + model <- doubleLayerNN(inputN = ncol(plpData), + layer1 = units1, + layer2 = units2, + outputN = 2, + layer_dropout = layer_dropout) } else{ - net <- torch::nn_module( - "classic_net", - - initialize = function(){ - self$linear1 = torch::nn_linear(ncol(plpData), units1) - self$linear2 = torch::nn_linear(units1, units2) - self$linear3 = torch::nn_linear(units2, units3) - self$linear4 = torch::nn_linear(units3, 2) - self$softmax = torch::nn_softmax(2) - }, - - forward = function(x){ - x %>% - self$linear1() %>% - torch::nnf_dropout(p = layer_dropout) %>% - self$linear2() %>% - torch::nnf_dropout(p = layer_dropout) %>% - self$linear3() %>% - torch::nnf_dropout(p = layer_dropout) %>% - self$linear4() %>% - self$softmax() - - } - ) - model <- net() + model <- tripleLayerNN(inputN = ncol(plpData), + layer1 = units1, + layer2 = units2, + layer3 = units3, + outputN = 2, + layer_dropout = layer_dropout) } - - # Prepare model for training - data <- plpData[population$rowId[population$indexes!=index],] - - - #Extract validation set first - 10k people or 5% - valN <- min(10000,sum(population$indexes!=index)*0.05) - val_rows<-sample(1:sum(population$indexes!=index), valN, replace=FALSE) - train_rows <- c(1:sum(population$indexes!=index))[-val_rows] - - rows <- sample(train_rows, batch_size, replace = F) - y <- population$y[population$indexes!=index,1:2][rows,] - - x_train <- 
torch::torch_tensor(as.matrix(data[rows,]), dtype = torch::torch_float()) - y_train <- torch::torch_tensor(y, dtype = torch::torch_float()) - pred_temp <- model(x_train) - - print(model) - cat( - " Dimensions Prediction: ", pred_temp$shape," - Object type Prediction: ", as.character(pred_temp$dtype), "\n", - "Dimensions Label: ", y_train$shape," - Object type Label: ", as.character(y_train$dtype), "\n" - ) + # get the rowIds for the train/test/earlyStopping + rowIdSet <- rowIdSets(population, index) criterion = torch::nn_bce_loss() #Binary crossentropy only optimizer = torch::optim_adam(model$parameters, lr = lr) @@ -300,10 +237,18 @@ trainDeepNNTorch <-function(plpData, population, # Need earlyStopping # Need setting decay + # create batch sets + batches <- split(rowIdSet$trainRowIds, ceiling(seq_along(rowIdSet$trainRowIds)/batch_size)) + for(i in 1:epochs){ + for(batchRowIds in batches){ + trainDataBatch <- convertToTorchData(plpData, + population$y, + rowIds = batchRowIds) + optimizer$zero_grad() - y_pred = model(x_train) - loss = criterion(y_pred, y_train) + y_pred = model(trainDataBatch$x) + loss = criterion(y_pred, trainDataBatch$y) loss$backward() optimizer$step() @@ -317,26 +262,23 @@ trainDeepNNTorch <-function(plpData, population, cat("Epoch:", i, "Loss:", loss$item(), "\n") } + } } model$eval() - maxVal <- sum(population$indexes == index) - batches <- lapply(1:ceiling(maxVal/batch_size), function(x) ((x-1)*batch_size+1):min((x*batch_size), maxVal)) - prediction <- population[population$indexes == index,] - prediction$value <- 0 + # batch predict + prediction <- batchPredict(model, + plpData, + population, + predictRowIds = rowIdSet$testRowIds, + batch_size ) - for(batch in batches){ - b <- torch::torch_tensor(as.matrix(plpData[population$rowId[population$indexes == index],][batch,,drop = F]), dtype = torch::torch_float()) - pred <- model(b) - prediction$value[batch] <- as.array(pred$to())[,1] - } - - attr(prediction, "metaData") <- list(predictionType = "binary") aucVal <- computeAuc(prediction) perform <- c(perform,aucVal) - predictionMat$value[population$indexes == index] <- prediction$value + predictionMat <- updatePredictionMat(predictionMat, + prediction) } auc <- computeAuc(predictionMat) @@ -351,137 +293,64 @@ trainDeepNNTorch <-function(plpData, population, ParallelLogger::logInfo('==========================================') } else{ + if(is.na(units2)){ - net <- torch::nn_module( - "classic_net", - - initialize = function(){ - self$linear1 = torch::nn_linear(ncol(plpData), units1) - self$linear2 = torch::nn_linear(units1, 2) - self$softmax = torch::nn_softmax(2) - }, - - forward = function(x){ - x %>% - self$linear1() %>% - torch::nnf_dropout(p = layer_dropout) %>% - self$linear2() %>% - torch::nnf_dropout(p = layer_dropout) %>% - self$softmax() - } - ) - model <- net() + model <- singleLayerNN(inputN = ncol(plpData), + layer1 = units1, + outputN = 2, + layer_dropout = layer_dropout) + } else if(is.na(units3)){ - net <- torch::nn_module( - "classic_net", - - initialize = function(){ - self$linear1 = torch::nn_linear(ncol(plpData), units1) - self$linear2 = torch::nn_linear(units1, units2) - self$linear3 = torch::nn_linear(units2, 2) - self$softmax = torch::nn_softmax(2) - }, - - forward = function(x){ - x %>% - self$linear1() %>% - torch::nnf_dropout(p = layer_dropout) %>% - self$linear2() %>% - torch::nnf_dropout(p = layer_dropout) %>% - self$linear3() %>% - self$softmax() - } - ) - model <- net() + model <- doubleLayerNN(inputN = ncol(plpData), + layer1 = units1, + 
layer2 = units2, + outputN = 2, + layer_dropout = layer_dropout) } else{ - net <- torch::nn_module( - "classic_net", - - initialize = function(){ - self$linear1 = torch::nn_linear(ncol(plpData), units1) - self$linear2 = torch::nn_linear(units1, units2) - self$linear3 = torch::nn_linear(units2, units3) - self$linear4 = torch::nn_linear(units3, 2) - self$softmax = torch::nn_softmax(2) - }, - - forward = function(x){ - x %>% - self$linear1() %>% - torch::nnf_dropout(p = layer_dropout) %>% - self$linear2() %>% - torch::nnf_dropout(p = layer_dropout) %>% - self$linear3() %>% - torch::nnf_dropout(p = layer_dropout) %>% - self$linear4() %>% - self$softmax() - - } - ) - model <- net() + model <- tripleLayerNN(inputN = ncol(plpData), + layer1 = units1, + layer2 = units2, + layer3 = units3, + outputN = 2, + layer_dropout = layer_dropout) } - - # Prepare model for training - data <- plpData - - - #Extract validation set first - 10k people or 5% - valN <- min(10000,nrow(population)*0.05) - val_rows<-sample(1:nrow(population), valN, replace=FALSE) - train_rows <- c(1:nrow(population))[-val_rows] - - rows <- sample(train_rows, batch_size, replace = F) - y <- population$y[,1:2][rows,] - - x_train <- torch::torch_tensor(as.matrix(data[rows,]), dtype = torch::torch_float()) - y_train <- torch::torch_tensor(y, dtype = torch::torch_float()) - pred_temp <- model(x_train) - - print(model) - cat( - "Dimensions Prediction: ", pred_temp$shape, " - Object type Prediction: ", as.character(pred_temp$dtype), "\n", - "Dimensions Label: ", y_train$shape," - Object type Label: ", as.character(y_train$dtype), "\n" - ) + # get the rowIds for the train/earlyStopping + rowIdSet <- rowIdSets(population, index = NULL) criterion = torch::nn_bce_loss() #Binary crossentropy only optimizer = torch::optim_adam(model$parameters, lr = lr) - # Need earlyStopping + # create batch sets + batches <- split(rowIdSet$trainRowIds, ceiling(seq_along(rowIdSet$trainRowIds)/batch_size)) + for(i in 1:epochs){ - optimizer$zero_grad() - y_pred = model(x_train) - loss = criterion(y_pred, y_train) - loss$backward() - optimizer$step() - - if(i%%10 == 0){ - # winners = y_pred$argmax(dim = 2) + 1 - # winners = y_pred - # corrects = (winners = y_train) - # accuracy = corrects$sum()$item() / y_train$size()[1] - # cat("Epoch:", i, "Loss:", loss$item(), " Accuracy:", accuracy, "\n") - - cat("Epoch:", i, "Loss:", loss$item(), "\n") + for(batchRowIds in batches){ + trainDataBatch <- convertToTorchData(plpData, + population$y, + rowIds = batchRowIds) + optimizer$zero_grad() + y_pred = model(trainDataBatch$x) + loss = criterion(y_pred, trainDataBatch$y) + loss$backward() + optimizer$step() + + if(i%%10 == 0){ + cat("Epoch:", i, "Loss:", loss$item(), "\n") + } + } } - model$eval() - #batch prediction - maxVal <- nrow(population) - batches <- lapply(1:ceiling(maxVal/batch_size), function(x) ((x-1)*batch_size+1):min((x*batch_size), maxVal)) - prediction <- population - prediction$value <- 0 - - for(batch in batches){ - b <- torch::torch_tensor(as.matrix(plpData[batch,,drop = F]), dtype = torch::torch_float()) - pred <- model(b) - prediction$value[batch] <- as.array(pred$to())[,1] - } + # batch predict + prediction <- batchPredict(model, + plpData, + population, + predictRowIds = population$rowId, + batch_size ) - attr(prediction, "metaData") <- list(predictionType = "binary") auc <- computeAuc(prediction) foldPerm <- auc predictionMat <- prediction diff --git a/R/Topolgies.R b/R/Topolgies.R new file mode 100644 index 0000000..16d76a2 --- /dev/null +++ 
b/R/Topolgies.R @@ -0,0 +1,79 @@ +singleLayerNN <- function(inputN, layer1, outputN = 2, layer_dropout){ + net <- torch::nn_module( + "classic_net", + + initialize = function(){ + self$linear1 = torch::nn_linear(inputN, layer1) + self$linear2 = torch::nn_linear(layer1, outputN) + self$softmax = torch::nn_softmax(outputN) + }, + + forward = function(x){ + x %>% + self$linear1() %>% + torch::nnf_dropout(p = layer_dropout) %>% + self$linear2() %>% + torch::nnf_dropout(p = layer_dropout) %>% + self$softmax() + } + ) + return(net()) +} + + +doubleLayerNN <- function(inputN, layer1, + layer2, outputN, + layer_dropout){ + net <- torch::nn_module( + "classic_net", + + initialize = function(){ + self$linear1 = torch::nn_linear(inputN, layer1) + self$linear2 = torch::nn_linear(layer1, layer2) + self$linear3 = torch::nn_linear(layer2, outputN) + self$softmax = torch::nn_softmax(outputN) + }, + + forward = function(x){ + x %>% + self$linear1() %>% + torch::nnf_dropout(p = layer_dropout) %>% + self$linear2() %>% + torch::nnf_dropout(p = layer_dropout) %>% + self$linear3() %>% + self$softmax() + } + ) + return(net()) +} + + +tripleLayerNN <- function(inputN, layer1, + layer2, layer3, + outputN, layer_dropout){ + net <- torch::nn_module( + "classic_net", + + initialize = function(){ + self$linear1 = torch::nn_linear(inputN, layer1) + self$linear2 = torch::nn_linear(layer1, layer2) + self$linear3 = torch::nn_linear(layer2, layer3) + self$linear4 = torch::nn_linear(layer3, outputN) + self$softmax = torch::nn_softmax(outputN) + }, + + forward = function(x){ + x %>% + self$linear1() %>% + torch::nnf_dropout(p = layer_dropout) %>% + self$linear2() %>% + torch::nnf_dropout(p = layer_dropout) %>% + self$linear3() %>% + torch::nnf_dropout(p = layer_dropout) %>% + self$linear4() %>% + self$softmax() + + } + ) + model <- net() +} \ No newline at end of file diff --git a/R/helpers.R b/R/helpers.R new file mode 100644 index 0000000..2065f88 --- /dev/null +++ b/R/helpers.R @@ -0,0 +1,68 @@ +rowIdSets <- function(population, + index){ + + if(!is.null(index)){ + testRowIds <- population$rowId[population$indexes==index] + trainRowIds <- population$rowId[population$indexes!=index] + + valN <- min(10000,length(trainRowIds)*0.05) + valSamp <- sample(1:length(trainRowIds), valN, replace=FALSE) + earlyStopRowIds <- trainRowIds[valSamp] + trainRowIds <- trainRowIds[-valSamp] + + datas <- list(testRowIds = testRowIds, + trainRowIds = trainRowIds, + earlyStopRowIds = earlyStopRowIds + ) + }else{ + trainRowIds <- population$rowId + + valN <- min(10000,length(trainRowIds)*0.05) + valSamp <- sample(1:length(trainRowIds), valN, replace=FALSE) + earlyStopRowIds <- trainRowIds[valSamp] + trainRowIds <- trainRowIds[-valSamp] + + datas <- list(trainRowIds = trainRowIds, + earlyStopRowIds = earlyStopRowIds + ) + + } + + return(datas) +} + +convertToTorchData <- function(data, label, rowIds){ + x <- torch::torch_tensor(as.matrix(data[rowIds,]), dtype = torch::torch_float()) + y <- torch::torch_tensor(label, dtype = torch::torch_float()) + return(list(x=x, + y=y)) +} + +batchPredict <- function(model, + plpData, + population, + predictRowIds, + batch_size ){ + maxVal <- length(predictRowIds) + batches <- lapply(1:ceiling(maxVal/batch_size), function(x) ((x-1)*batch_size+1):min((x*batch_size), maxVal)) + prediction <- population[predictRowIds,] + prediction$value <- 0 + + for(batch in batches){ + b <- torch::torch_tensor(as.matrix(plpData[predictRowIds,][batch,,drop = F]), dtype = torch::torch_float()) + pred <- model(b) + 
prediction$value[batch] <- as.array(pred$to())[,1] + } + attr(prediction, "metaData") <- list(predictionType = "binary") + return(prediction) +} + +updatePredictionMat <- function(predictionMat,prediction){ + predictionMat$value[prediction$rowIds] <- prediction$value +} + + + + + + diff --git a/extras/example.R b/extras/example.R index a6cf56a..976b767 100644 --- a/extras/example.R +++ b/extras/example.R @@ -1,10 +1,21 @@ # testing code (requires sequential branch of FeatureExtraction): +rm(list = ls()) library(FeatureExtraction) library(DeepPatientLevelPrediction) connectionDetails <- Eunomia::getEunomiaConnectionDetails() Eunomia::createCohorts(connectionDetails) -covSet <- createTemporalSequenceCovariateSettings(useDemographicsGender = T, +covSet <- createCovariateSettings(useDemographicsGender = T, + useDemographicsAge = T, + useDemographicsRace = T, + useDemographicsEthnicity = T, + useDemographicsAgeGroup = T, + useConditionGroupEraLongTerm = T, + useDrugEraStartLongTerm = T, + endDays = -1 + ) + +covSetT <- createTemporalSequenceCovariateSettings(useDemographicsGender = T, useDemographicsAge = T, useDemographicsRace = T, useDemographicsEthnicity = T, @@ -36,9 +47,32 @@ population <- PatientLevelPrediction::createStudyPopulation(plpData = plpData, riskWindowStart = 1, riskWindowEnd = 365) -sparseMat <- toSparseRTorch(plpData, population, map=NULL, temporal=T) +##sparseMat <- toSparseRTorch(plpData, population, map=NULL, temporal=T) # code to train models +deepset <- setDeepNNTorch(units=list(c(128, 64), 128), layer_dropout=c(0.2), + lr =c(1e-4), decay=c(1e-5), outcome_weight = c(1.0), batch_size = c(100), + epochs= c(1), seed=NULL ) + +library(PatientLevelPrediction) + +#debug(fitDeepNNTorch) +res <- runPlp(population = population, + plpData = plpData, + nfold = 3, + modelSettings = deepset, + savePlpData = F, + savePlpResult = F, + savePlpPlots = F, + saveEvaluation = F) +##predict.customLibrary(libraryName, predictionFunction, inputList){ +## libraryName <- 'PatientLevelPrediction' +## predictionFunction <- "createStudyPopulation" +## predictFun <- get(predictionFunction, envir = rlang::search_envs()[grep(paste0('package:', libraryName), search())][[1]]) +## +## prediction <- do.call(predictFun, inputList) +## return(prediction) +##} From 9aad8593c25f302c5a5d6121fdc7c23d91de33d0 Mon Sep 17 00:00:00 2001 From: Jenna Reps Date: Fri, 23 Jul 2021 17:46:53 -0400 Subject: [PATCH 010/140] updated sparseMDeep to not run in batches - removed batch process from sparseMDeep ( assume people running deep learning have large RAM) --- NAMESPACE | 3 +- R/DeepNN.R | 2 +- R/Formatting.R | 366 +++---------------------- extras/example.R | 22 +- man/{toSparseM.Rd => toSparseMDeep.Rd} | 6 +- man/toSparseTorchPython.Rd | 49 ---- 6 files changed, 61 insertions(+), 387 deletions(-) rename man/{toSparseM.Rd => toSparseMDeep.Rd} (92%) delete mode 100644 man/toSparseTorchPython.Rd diff --git a/NAMESPACE b/NAMESPACE index bd822c7..062102d 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -25,8 +25,7 @@ export(setCovNN2) export(setDeepNN) export(setDeepNNTorch) export(setRNNTorch) -export(toSparseM) +export(toSparseMDeep) export(toSparseRTorch) -export(toSparseTorchPython) export(transferLearning) importFrom(zeallot,"%<-%") diff --git a/R/DeepNN.R b/R/DeepNN.R index 14c57b0..19a5dbe 100644 --- a/R/DeepNN.R +++ b/R/DeepNN.R @@ -119,7 +119,7 @@ fitDeepNN <- function(plpData,population, param, search='grid', quiet=F, start<-Sys.time() - result<- toSparseM(plpData,population,map=NULL, temporal=F) + result<- 
toSparseMDeep(plpData,population,map=NULL, temporal=F) data <- result$data #one-hot encoding diff --git a/R/Formatting.R b/R/Formatting.R index ebaa7c1..d631edd 100644 --- a/R/Formatting.R +++ b/R/Formatting.R @@ -42,18 +42,12 @@ #' } #' #' @export -toSparseM <- function(plpData,population, map=NULL, temporal=F){ +toSparseMDeep <- function(plpData, + population, + map=NULL, + temporal=F){ # check logger - if(length(ParallelLogger::getLoggers())==0){ - logger <- ParallelLogger::createLogger(name = "SIMPLE", - threshold = "INFO", - appenders = list(ParallelLogger::createConsoleAppender(layout = ParallelLogger::layoutTimestamp))) - ParallelLogger::registerLogger(logger) - } - ParallelLogger::logInfo(paste0('starting toSparseM')) - - ParallelLogger::logDebug(paste0('covariates nrow: ', nrow(plpData$covariateData$covariates))) ParallelLogger::logDebug(paste0('covariateRef nrow: ', nrow(plpData$covariateData$covariateRef))) @@ -74,95 +68,51 @@ toSparseM <- function(plpData,population, map=NULL, temporal=F){ # chunk then add if(!temporal){ - ParallelLogger::logInfo(paste0('toSparseM non temporal used')) - data <- Matrix::sparseMatrix(i=1, - j=1, - x=0, - dims=c(maxX,maxY)) - - dataEnv <- environment() - convertData1 <- function(batch,dataEnv) { - data <- get("data", envir = dataEnv) - data <- data + Matrix::sparseMatrix(i=as.data.frame(batch %>% dplyr::select(.data$rowId))$rowId, - j=as.data.frame(batch %>% dplyr::select(.data$covariateId))$covariateId, - x=as.data.frame(batch %>% dplyr::select(.data$covariateValue))$covariateValue, - dims=c(maxX,maxY)) - assign("data", data, envir = dataEnv) - return(NULL) - } - Andromeda::batchApply(newcovariateData$covariates, convertData1, batchSize = 100000, dataEnv = dataEnv) - + + ParallelLogger::logInfo(paste0('Casting plpData into vectors and creating sparseMatrix - you need suffciently large RAM for this ')) + ParallelLogger::logInfo(paste0('toSparseMDeep non temporal used')) + + data <- Matrix::sparseMatrix(i=as.data.frame(newcovariateData$covariates %>% dplyr::select(.data$rowId))$rowId, + j=as.data.frame(newcovariateData$covariates %>% dplyr::select(.data$covariateId))$covariateId, + x=as.data.frame(newcovariateData$covariates %>% dplyr::select(.data$covariateValue))$covariateValue, + dims=c(maxX,maxY)) } else { - ParallelLogger::logInfo(paste0('toSparseM temporal used')) + ParallelLogger::logInfo(paste0('toSparseMDeep temporal used')) - ParallelLogger::logTrace(paste0('Min time:', min(plpData$timeRef$timeId))) - ParallelLogger::logTrace(paste0('Max time:', max(plpData$timeRef$timeId))) + minT <- min(newcovariateData$covariates %>% dplyr::select(.data$timeId) %>% collect(), na.rm = T) + maxT <- max(newcovariateData$covariates %>% dplyr::select(.data$timeId) %>% collect(), na.rm = T) + + ParallelLogger::logTrace(paste0('Min time:', minT)) + ParallelLogger::logTrace(paste0('Max time:', maxT)) # do we want to use for(i in sort(plpData$timeRef$timeId)){ ? 
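+    # looping over every integer from minT to maxT (rather than sort(unique(timeId)) as the
+    # question above asks) keeps an all-zero slice for timeIds with no covariates, so the
+    # time dimension of the resulting array stays contiguous; iterating over the unique
+    # timeIds instead would drop those empty slices.
+    # covariates with a missing (NA) timeId are skipped by this loop (see the
+    # '# add na timeIds - how?' note further down); one option, not implemented here, would
+    # be to filter them with is.na(timeId) and append them as their own slice.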
- for(i in min(plpData$timeRef$timeId):max(plpData$timeRef$timeId)){ + for(i in minT:maxT){ - if(nrow(newcovariateData$covariates %>% dplyr::filter(.data$timeId==i))>0){ - ParallelLogger::logTrace(paste0('Found covariates for timeId ', i)) - - - # initiate the sparse matrix - dataPlp <- Matrix::sparseMatrix(i=1, - j=1, - x=0, - dims=c(maxX, maxY)) - dataEnv <- environment() - ParallelLogger::logTrace(paste0('Initiated Mapping covariates for timeId ', i)) - - - # add function to batch creating matrix from Andromeda data - convertData <- function(batch, dataEnv) { - dataPlp <- get("dataPlp", envir = dataEnv) - dataPlp <- dataPlp + Matrix::sparseMatrix(i=as.double(as.character(batch$rowId)), - j=as.double(as.character(batch$covariateId)), - x=batch$covariateValue, - dims=c(maxX,maxY)) - - assign("dataPlp", dataPlp, envir = dataEnv) - return(NULL) - } - - # add age for each time - tempCovs <- addAgeTemp(timeId = i,newcovariateData, plpData$timeRef) # EDITED adding newCov - if(!is.null(tempCovs)){ - Andromeda::batchApply(tempCovs, convertData, batchSize = 100000, dataEnv=dataEnv) - ParallelLogger::logTrace(paste0('Added any age covariates for timeId ', i)) - } - - # add non age temporal covs for each time - tempCovs <- addNonAgeTemp(timeId = i,newcovariateData) - if(!is.null(tempCovs)){ - Andromeda::batchApply(tempCovs, convertData, batchSize = 100000, dataEnv=dataEnv) - ParallelLogger::logTrace(paste0('Added non-age non-temporal covariates for timeId ', i)) - } - - # add non temporal covs - tempCovs <- newcovariateData$covariates %>% - dplyr::filter(!is.na(.data$timeId)) %>% - dplyr::filter(.data$timeId == i) - Andromeda::batchApply(tempCovs, convertData, batchSize = 100000, dataEnv=dataEnv) + if(newcovariateData$covariates %>% dplyr::filter(.data$timeId==i) %>% dplyr::summarise(n=n()) %>% dplyr::collect() > 0 ){ + ParallelLogger::logInfo(paste0('Found covariates for timeId ', i)) + + dataPlp <- Matrix::sparseMatrix(i= as.integer(as.character(as.data.frame(newcovariateData$covariates %>% dplyr::filter(.data$timeId==i) %>% dplyr::select(.data$rowId))$rowId)), + j= as.integer(as.character(as.data.frame(newcovariateData$covariates %>% dplyr::filter(.data$timeId==i) %>% dplyr::select(.data$covariateId))$covariateId)), + x= as.double(as.character(as.data.frame(newcovariateData$covariates %>% dplyr::filter(.data$timeId==i) %>% dplyr::select(.data$covariateValue))$covariateValue)), + dims=c(maxX,maxY)) data_array <- slam::as.simple_sparse_array(dataPlp) - # remove dataPlp - #dataPlp <<- NULL - ParallelLogger::logTrace(paste0('Dim of data_array: ', paste0(dim(data_array), collapse='-'))) - #extending one more dimesion to the array - data_array<-slam::extend_simple_sparse_array(data_array, MARGIN =c(1L)) - ParallelLogger::logTrace(paste0('Finished Mapping covariates for timeId ', i)) + data_array <- slam::extend_simple_sparse_array(data_array, MARGIN =c(1L)) + ParallelLogger::logInfo(paste0('Finished Mapping covariates for timeId ', i)) + } else { data_array <- tryCatch(slam::simple_sparse_array(i=matrix(c(1,1,1), ncol = 3), - v=0, - dim=c(maxX,1, maxY)) + v=0, + dim=c(maxX,1, maxY)) ) } - #binding arrays along the dimesion - if(i==min(plpData$timeRef$timeId)) { + + # add na timeIds - how? 
+ + #binding arrays along the dimesion + if(i==minT) { result_array <- data_array }else{ result_array <- slam::abind_simple_sparse_array(result_array,data_array,MARGIN=2L) @@ -220,247 +170,3 @@ MapCovariates <- function(covariateData,population, mapping=NULL){ return(newCovariateData) } - - -#' Convert the plpData in COO format into a sparse python matrix using torch.sparse -#' -#' @description -#' Converts the standard plpData to a sparse matrix firectly into python -#' -#' @details -#' This function converts the covariate file from ffdf in COO format into a sparse matrix from -#' the package Matrix -#' @param plpData An object of type \code{plpData} with covariate in coo format - the patient level prediction -#' data extracted from the CDM. -#' @param population The population to include in the matrix -#' @param map A covariate map (telling us the column number for covariates) -#' @param temporal Whether to include timeId into tensor -#' @param pythonExePath Location of python exe you want to use -#' @param nonTemporalCovs If non-temporal covariates (such as age or sex) should be included in temporal sparse matrix -#' @examples -#' #TODO -#' -#' @return -#' Returns a list, containing the python object name of the sparse matrix, the plpData covariateRef -#' and a data.frame named map that tells us what covariate corresponds to each column -#' This object is a list with the following components: \describe{ -#' \item{data}{The python object name containing a sparse matrix with the rows corresponding to each person in the plpData and the columns corresponding to the covariates.} -#' \item{covariateRef}{The plpData covariateRef.} -#' \item{map}{A data.frame containing the data column ids and the corresponding covariateId from covariateRef.} -#' } -#' -#' @export -toSparseTorchPython <- function(plpData,population, map=NULL, temporal=F, pythonExePath=NULL, - nonTemporalCovs=F){ - - map_python_initiate <- map_python <- function(){return(NULL)} - - # check logger - if(length(ParallelLogger::getLoggers())==0){ - logger <- ParallelLogger::createLogger(name = "SIMPLE", - threshold = "INFO", - appenders = list(ParallelLogger::createConsoleAppender(layout = 'layoutTimestamp'))) - ParallelLogger::registerLogger(logger) - } - - newcovariateData <- MapCovariates(plpData$covariateData, - population, - mapping=map) - - ParallelLogger::logDebug(paste0('Max ',as.data.frame(newcovariateData$covariates %>% dplyr::summarise(max = max(.data$covariateId, na.rm=T))))) - ParallelLogger::logDebug(paste0('# cols: ', nrow(newcovariateData$covariateRef))) - ParallelLogger::logDebug(paste0('Max rowId: ', as.data.frame(newcovariateData$covariates %>% dplyr::summarise(max = max(.data$rowId, na.rm=T))))) - - ParallelLogger::logTrace(paste0('Converting data into python sparse matrix...')) - - maxT <- NULL - if(temporal){ - maxT <- as.data.frame(newcovariateData$covariates %>% dplyr::summarise(max = max(timeId, na.rm = TRUE)))$max - ParallelLogger::logDebug(paste0('Max timeId: ', maxT)) - } - - maxCol <- as.data.frame(newcovariateData$mapping %>% dplyr::summarise(max=max(.data$newCovariateId,na.rm = TRUE)))$max - maxRow <- max(population$rowId) - - # source the python fucntion - e <- environment() - reticulate::source_python(system.file(package='PatientLevelPrediction','python','TorchMap.py'), envir = e) - - dataEnv <- e # adding to remove <<- - #dataPlp <<- map_python_initiate(maxCol = as.integer(maxCol), - dataPlp <- NULL - convertData <- function(batch, temporal=T, dataEnv) { - if(temporal){ - #dataPlp <<- 
map_python(matrix = dataPlp , - dataEnv$dataPlp <- map_python(matrix = dataEnv$dataPlp, - datas = as.matrix(as.data.frame(batch %>% dplyr::select(.data$rowId,.data$covariateId,.data$timeId,.data$covariateValue))), - maxCol = as.integer(maxCol), - maxRow = as.integer(maxRow), - maxT = as.integer(maxT)) - }else{ - # dataPlp <<- map_python(matrix = dataPlp , - dataEnv$dataPlp <- map_python(matrix = dataEnv$dataPlp, - datas = as.matrix(as.data.frame(batch %>% dplyr::select(.data$rowId,.data$covariateId,.data$covariateValue))), - maxCol = as.integer(maxCol), - maxRow = as.integer(maxRow), - maxT = NULL) - } - return(NULL) - } - - if(temporal==T){ - if (nonTemporalCovs==T) { - # add the age and non-temporal data - timeIds <- unique(plpData$timeRef$timeId) - normFactors <- attr(plpData$covariateData, 'metaData')$normFactors - for(timeId in timeIds){ - tempData <- addAgeTemp(timeId, newcovariateData, plpData$timeRef, normFactors) - if(!is.null(tempData)){ - Andromeda::batchApply(tempData, convertData,temporal =T, batchSize = 100000, dataEnv=dataEnv) - } - #tempData <- addNonAgeTemp(timeId,plpData.mapped) - what is plpData.mapped? - tempData <- addNonAgeTemp(timeId, newcovariateData) - if(!is.null(tempData)){ - Andromeda::batchApply(tempData, convertData,temporal =T, batchSize = 100000, dataEnv=dataEnv) - } - tempData <- NULL - } - } - # add the rest - tempData <- newcovariateData$covariates %>% - dplyr::filter(.data$timeId!=0) %>% - dplyr::filter(!is.na(.data$timeId)) - Andromeda::batchApply(tempData, convertData,temporal =T, batchSize = 100000, dataEnv=dataEnv) - tempData <- NULL - } else { - Andromeda::batchApply(newcovariateData$covariates, convertData, - temporal =F, batchSize = 100000, dataEnv=dataEnv) - } - ##result <- dataEnv$dataPlp - ##dataPlp <<- NULL - ##dataEnv$dataPlp <- NULL - ParallelLogger::logTrace(paste0('Sparse python tensor converted')) - - result <- list(data=dataPlp, - covariateRef=as.data.frame(newcovariateData$covariateRef), - map=as.data.frame(newcovariateData$mapping)) - return(result) -} - - -# reformat the evaluation -reformatPerformance <- function(train, test, analysisId){ - - ParallelLogger::logInfo(paste0('starting reformatPerformance')) - - nr1 <- length(unlist(train$evaluationStatistics[-1])) - nr2 <- length(unlist(test$evaluationStatistics[-1])) - evaluationStatistics <- cbind(analysisId=rep(analysisId,nr1+nr2), - Eval=c(rep('train', nr1),rep('test', nr2)), - Metric = names(c(unlist(train$evaluationStatistics[-1]), - unlist(test$evaluationStatistics[-1]))), - Value = c(unlist(train$evaluationStatistics[-1]), - unlist(test$evaluationStatistics[-1])) - ) - - - if(!is.null(test$thresholdSummary) & !is.null(train$thresholdSummary)){ - nr1 <- nrow(train$thresholdSummary) - nr2 <- nrow(test$thresholdSummary) - thresholdSummary <- rbind(cbind(analysisId=rep(analysisId,nr1),Eval=rep('train', nr1), - train$thresholdSummary), - cbind(analysisId=rep(analysisId,nr2),Eval=rep('test', nr2), - test$thresholdSummary)) - } else{ - thresholdSummary <- NULL - } - - - if(!is.null(train$demographicSummary) & !is.null(test$demographicSummary)){ - nr1 <- nrow(train$demographicSummary) - nr2 <- nrow(test$demographicSummary) - demographicSummary <- rbind(cbind(analysisId=rep(analysisId,nr1),Eval=rep('train', nr1), - train$demographicSummary), - cbind(analysisId=rep(analysisId,nr2),Eval=rep('test', nr2), - test$demographicSummary)) - } else{ - demographicSummary <- NULL - } - - nr1 <- nrow(train$calibrationSummary) - nr2 <- nrow(test$calibrationSummary) - calibrationSummary <- 
rbind(cbind(analysisId=rep(analysisId,nr1),Eval=rep('train', nr1), - train$calibrationSummary), - cbind(analysisId=rep(analysisId,nr2),Eval=rep('test', nr2), - test$calibrationSummary)) - - if(!is.null(train$predictionDistribution) & !is.null(test$predictionDistribution)){ - nr1 <- nrow(train$predictionDistribution) - nr2 <- nrow(test$predictionDistribution) - predictionDistribution <- rbind(cbind(analysisId=rep(analysisId,nr1),Eval=rep('train', nr1), - train$predictionDistribution), - cbind(analysisId=rep(analysisId,nr2),Eval=rep('test', nr2), - test$predictionDistribution)) - } else { - predictionDistribution <- NULL - } - - result <- list(evaluationStatistics=evaluationStatistics, - thresholdSummary=thresholdSummary, - demographicSummary =demographicSummary, - calibrationSummary=calibrationSummary, - predictionDistribution=predictionDistribution) - - return(result) -} - - -# helpers for converting temporal PLP data to matrix/tensor -addAgeTemp <- function(time, newcovariateData, timeRef, normFactors){ - - startDay <- as.data.frame(timeRef[timeRef$timeId==time,])$startDay - - ageId <- as.data.frame(newcovariateData$mapping %>% - dplyr::filter(.data$oldCovariateId == 1002) %>% - dplyr::select(.data$newCovariateId))$newCovariateId - - #check if age has been normalized - if (!is.null(normFactors)) { - normFactorAge <- normFactors %>% filter(covariateId == 1002) %>% pull(maxValue) - } - else{ - normFactorAge <- 1 - } - - ageData <- newcovariateData$covariates%>% # changed from plpData$covariateData - dplyr::filter(.data$covariateId == ageId) %>% - dplyr::mutate(covariateValueNew = .data$covariateValue + startDay / (365 * normFactorAge), - timeId = time) %>% - dplyr::select(- .data$covariateValue) %>% - dplyr::rename(covariateValue = .data$covariateValueNew) %>% - dplyr::select(.data$rowId,.data$covariateId,.data$covariateValue, .data$timeId) - - if(nrow(as.data.frame(ageData))==0){ - return(NULL) - } - return(ageData) -} - - -addNonAgeTemp <- function(time, newcovariateData){ - - ageId <- as.data.frame(newcovariateData$mapping %>% - dplyr::filter(.data$oldCovariateId == 1002) %>% - dplyr::select(.data$newCovariateId))$newCovariateId - - otherTempCovs <- newcovariateData$covariates%>% - dplyr::filter(is.na(.data$timeId)) %>% - dplyr::filter(.data$covariateId != ageId) %>% - dplyr::mutate(timeId = time) %>% - dplyr::select(.data$rowId,.data$covariateId,.data$covariateValue,.data$timeId) - - if(nrow(as.data.frame(otherTempCovs))==0){ - return(NULL) - } - return(otherTempCovs) -} diff --git a/extras/example.R b/extras/example.R index 976b767..3d8fcc4 100644 --- a/extras/example.R +++ b/extras/example.R @@ -27,8 +27,19 @@ covSetT <- createTemporalSequenceCovariateSettings(useDemographicsGender = T, sequenceEndDay = -1, sequenceStartDay = -365*5) - plpData <- PatientLevelPrediction::getPlpData(connectionDetails = connectionDetails, + cdmDatabaseSchema = "main", + cohortId = 1, + outcomeIds = 3, + cohortDatabaseSchema = "main", + cohortTable = "cohort", + outcomeDatabaseSchema = "main", + outcomeTable = "cohort", + firstExposureOnly = T, + washoutPeriod = 365, + covariateSettings = covSet +) +plpDataT <- PatientLevelPrediction::getPlpData(connectionDetails = connectionDetails, cdmDatabaseSchema = "main", cohortId = 1, outcomeIds = 3, @@ -38,7 +49,7 @@ plpData <- PatientLevelPrediction::getPlpData(connectionDetails = connectionDeta outcomeTable = "cohort", firstExposureOnly = T, washoutPeriod = 365, - covariateSettings = covSet + covariateSettings = covSetT ) population <- 
PatientLevelPrediction::createStudyPopulation(plpData = plpData, @@ -48,6 +59,13 @@ population <- PatientLevelPrediction::createStudyPopulation(plpData = plpData, riskWindowEnd = 365) ##sparseMat <- toSparseRTorch(plpData, population, map=NULL, temporal=T) +x <- toSparseMDeep(plpData ,population, + map=NULL, + temporal=F) + +x2 <- toSparseMDeep(plpDataT ,population, + map=NULL, + temporal=T) # code to train models deepset <- setDeepNNTorch(units=list(c(128, 64), 128), layer_dropout=c(0.2), diff --git a/man/toSparseM.Rd b/man/toSparseMDeep.Rd similarity index 92% rename from man/toSparseM.Rd rename to man/toSparseMDeep.Rd index 7b04fee..4af9719 100644 --- a/man/toSparseM.Rd +++ b/man/toSparseMDeep.Rd @@ -1,10 +1,10 @@ % Generated by roxygen2: do not edit by hand % Please edit documentation in R/Formatting.R -\name{toSparseM} -\alias{toSparseM} +\name{toSparseMDeep} +\alias{toSparseMDeep} \title{Convert the plpData in COO format into a sparse R matrix} \usage{ -toSparseM(plpData, population, map = NULL, temporal = F) +toSparseMDeep(plpData, population, map = NULL, temporal = F) } \arguments{ \item{plpData}{An object of type \code{plpData} with covariate in coo format - the patient level prediction diff --git a/man/toSparseTorchPython.Rd b/man/toSparseTorchPython.Rd deleted file mode 100644 index fd32393..0000000 --- a/man/toSparseTorchPython.Rd +++ /dev/null @@ -1,49 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/Formatting.R -\name{toSparseTorchPython} -\alias{toSparseTorchPython} -\title{Convert the plpData in COO format into a sparse python matrix using torch.sparse} -\usage{ -toSparseTorchPython( - plpData, - population, - map = NULL, - temporal = F, - pythonExePath = NULL, - nonTemporalCovs = F -) -} -\arguments{ -\item{plpData}{An object of type \code{plpData} with covariate in coo format - the patient level prediction -data extracted from the CDM.} - -\item{population}{The population to include in the matrix} - -\item{map}{A covariate map (telling us the column number for covariates)} - -\item{temporal}{Whether to include timeId into tensor} - -\item{pythonExePath}{Location of python exe you want to use} - -\item{nonTemporalCovs}{If non-temporal covariates (such as age or sex) should be included in temporal sparse matrix} -} -\value{ -Returns a list, containing the python object name of the sparse matrix, the plpData covariateRef -and a data.frame named map that tells us what covariate corresponds to each column -This object is a list with the following components: \describe{ -\item{data}{The python object name containing a sparse matrix with the rows corresponding to each person in the plpData and the columns corresponding to the covariates.} -\item{covariateRef}{The plpData covariateRef.} -\item{map}{A data.frame containing the data column ids and the corresponding covariateId from covariateRef.} -} -} -\description{ -Converts the standard plpData to a sparse matrix firectly into python -} -\details{ -This function converts the covariate file from ffdf in COO format into a sparse matrix from -the package Matrix -} -\examples{ -#TODO - -} From 0fc571aeb8f93037d26a9e3f8d83c4e28d12998c Mon Sep 17 00:00:00 2001 From: Egill Fridgeirsson Date: Mon, 26 Jul 2021 14:52:33 +0200 Subject: [PATCH 011/140] Started to integrate the model into existing code --- R/ResNet.R | 237 +++++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 174 insertions(+), 63 deletions(-) diff --git a/R/ResNet.R b/R/ResNet.R index c7531d3..a5fa7d5 100644 --- 
a/R/ResNet.R +++ b/R/ResNet.R @@ -1,52 +1,156 @@ -# setResNet <- function(numLayers=4, sizeHidden=64, hiddenFactor=1, -# residualDropout=0.2, hiddenDropout=0.2, -# normalization='BatchNorm', activation='RelU', -# sizeEmbedding=64, weightDecay=1e-4, -# learningRate=3e-4, seed=42) { -# -# if (!is.null(seed)) { -# seed <- as.integer(sample(1e5, 1)) -# } -# -# param <- split(expand.grid(numLayers=numLayers, sizeHidden=sizeHidden, -# hiddenFactor=hiddenFactor, -# residualDropout=residualDropout, -# hiddenDropout=hiddenDropout, -# sizeEmbedding=sizeEmbedding, wei)) -# results <- list(model='fitResNet', param=param, name='ResNet') -# -# class(results) <- 'modelSettings' -# -# return(results) -# -# } +setResNet <- function(numLayers=1:16, sizeHidden=2^(6:10), hiddenFactor=1:4, + residualDropout=seq(0,0.3,0.05), hiddenDropout=seq(0,0.3,0.05), + normalization='BatchNorm', activation='RelU', + sizeEmbedding=2^(6:9), weightDecay=c(1e-6, 1e-3), + learningRate=c(1e-2,1e-5), seed=42, hyperParamSearch='random', + randomSample=100, device='cpu', batch_size=1024) { + if (!is.null(seed)) { + seed <- as.integer(sample(1e5, 1)) + } + + param <- expand.grid(numLayers=numLayers, sizeHidden=sizeHidden, + hiddenFactor=hiddenFactor, + residualDropout=residualDropout, + hiddenDropout=hiddenDropout, + sizeEmbedding=sizeEmbedding, weightDecay=weightDecay, + learningRate=learningRate) + if (hyperParamSearch=='random'){ + param <- param[sample(nrow(param), randomSample),] + } + + results <- list(model='fitResNet', param=param, name='ResNet') + + class(results) <- 'modelSettings' + + return(results) + +} + + +fitResNet <- function(population, plpData, param, + quiet=F) { + + toSparse <- toSparseM(plpData, population) + sparseMatrix <- toSparse$data + + outLoc <- createTempModelLoc() + #do cross validation to find hyperParameter + hyperParamSel <- lapply(param, function(x) do.call(trainResNet, + listAppend(x, list(plpData=sparseMatrix, + population = population, + train=TRUE, + modelOutput=outLoc, + quiet = quiet)) )) + hyperSummary <- cbind(do.call(rbind, param), unlist(hyperParamSel)) -# fitResNet <- function(population, plpData, param, search='Random', numSearch=1, -# quiet=F) { -# -# toSparse <- toSparseM(plpData, population) -# sparseMatrix <- toSparse$data -# -# outLoc <- createTempModelLoc() -# #do cross validation to find hyperParameter -# hyperParamSel <- lapply(param, function(x) do.call(trainResNet, -# listAppend(x, list(plpData=sparseMatrix, -# population = population, -# train=TRUE, -# modelOutput=outLoc, -# quiet = quiet)) )) -# hyperSummary <- cbind(do.call(rbind, param), unlist(hyperParamSel)) -# -# -# -# -# } + #now train the final model and return coef + bestInd <- which.max(abs(unlist(hyperParamSel)-0.5))[1] + finalModel <- do.call(trainCNNTorch, listAppend(param[[bestInd]], + list(plpData = result$data, + population = Population, + train=FALSE, + modelOutput=outLoc))) + covariateRef <- as.data.frame(plpData$covariateData$covariateRef) + incs <- rep(1, nrow(covariateRef)) + covariateRef$included <- incs + covariateRef$covariateValue <- rep(0, nrow(covariateRef)) + + modelTrained <- file.path(outLoc) + param.best <- param[[bestInd]] + + comp <- start-Sys.time() + + # train prediction + pred <- as.matrix(finalModel) + pred[,1] <- pred[,1] + colnames(pred) <- c('rowId','outcomeCount','indexes', 'value') + pred <- as.data.frame(pred) + attr(pred, "metaData") <- list(predictionType="binary") + + pred$value <- 1-pred$value + prediction <- merge(population, pred[,c('rowId','value')], by='rowId') + + # return 
model location + result <- list(model = modelTrained, + trainCVAuc = -1, # ToDo decide on how to deal with this + hyperParamSearch = hyperSummary, + modelSettings = list(model='fitResNet',modelParameters=param.best), + metaData = plpData$metaData, + populationSettings = attr(population, 'metaData'), + outcomeId=outcomeId, + cohortId=cohortId, + varImp = covariateRef, + trainingTime =comp, + dense=1, + covariateMap=result$map, # I think this is need for new data to map the same? + predictionTrain = prediction + ) + class(result) <- 'plpModel' + attr(result, 'predictionType') <- 'binary' + + return(result) +} -# # trainResNet <- function(population, plpData, modelOutput, train=T) { -# -# -# } +trainResNet <- function(population, plpData, modelOutput, train=T, ...) { + + param <- list(...) + + modelParamNames <- c("numLayers", "sizeHidden", "hiddenFactor", + "residualDropout", "hiddenDropout", "sizeEmbedding") + # TODO can I use lapply here instead of for loops? + modelParam <- list() + for (i in 1:length(modelParamNames)){ + modelParam[[i]] <- param[,modelParamNames[[i]]] + } + names(modelParam) <- modelParamNames + + fitParamNames <- c("weightDecay", "learningRate") + fitParams <- list() + for (i in 1:length(fitParamNames)) { + fitParams[[i]] <- param[, fitParamNames[[i]]] + } + names(fitParams) <- fitParamNames + + fitParams$resultDir <- modelOutput + + sparseM <- toSparseM(plpData, population, temporal=F) + n_features <- nrow(sparseM$data) + modelParams$n_features <- n_features + + # TODO make more general for other variables than only age + numericalIndex <- sparseM$map$newCovariateId[sparseM$map$oldCovariateId==1002] + + if(!is.null(population$indexes) && train==T){ + index_vect <- unique(population$index) + ParallelLogger::logInfo(paste('Training deep neural network using Torch with ',length(index_vect[index_vect>0]),' fold CV')) + + foldAuc <- c() + for(index in 1:length(index_vect)){ + ParallelLogger::logInfo(paste('Fold ',index, ' -- with ', sum(population$indexes!=index),'train rows')) + estimator <- Estimator(baseModel=ResNet, modelParameters=modelParam, + fitParameters=fitParams, device=param$device) + testIndices <- population$rowId[population$index==index] + trainIndices <- population$rowId[(population$index!=index) & (population$index > 0)] + trainDataset <- Dataset(sparseM$data, population$outcomeCount, indices=trainIndices, numericalIndex=numericalIndex) + testDataset <- Dataset(sparseM$data, population$outcomeCount, indices=trainIndices, numericalIndex=numericalIndex) + trainDataloader <- torch::dataloader(trainDataset, batch_size=param$batch_size, shuffle=T, drop_last=TRUE) + testDataloader <- torch::dataloader(testDataset, batch_size=param$batch_size, shuffle=F) + + estimator.fit(trainDataloader, testDataloader) + score <- estimator.score(testDataloader) + + auc <- score$auc + foldAuc <- c(foldAuc, auc) + } + } + + result <- list(model=estimator, + auc = mean(foldauc), + prediction = NULL, + hyperSum = c(modelParam, fitParams)) + return(result) + } ResLayer <- torch::nn_module( name='ResLayer', @@ -58,8 +162,17 @@ ResLayer <- torch::nn_module( self$linear1 <- torch::nn_linear(resHidden, sizeHidden) self$activation <- activation - self$hiddenDropout <- hiddenDropout - self$residualDropout <- residualDropout + if (!is.null(hiddenDropout)){ + self$hiddenDropout <- torch::nn_dropout(p=hiddenDropout) + } + if (!is.null(residualDropout)) + { + self$residualDropout <- torch::nn_dropout(p=residualDropout) + } + + self$activation <- activation() + + }, @@ -69,11 +182,11 @@ 
ResLayer <- torch::nn_module( z <- self$linear0(z) z <- self$activation(z) if (!is.null(self$hiddenDropout)) { - z <- torch::nnf_dropout(z, p=self$hiddenDropout) + z <- self$hiddenDropout(z) } z <- self$linear1(z) if (!is.null(self$residualDropout)) { - z <- torch::nnf_dropout(z, p=self$residualDropout) + z <- self$residualDropout(z) } x <- z + x return(x) @@ -104,9 +217,7 @@ ResNet <- torch::nn_module( self$lastNorm <- normalization(sizeHidden) self$head <- torch::nn_linear(sizeHidden, d_out) - self$lastAct <- activation - - + self$lastAct <- activation() }, @@ -137,8 +248,8 @@ Estimator <- torch::nn_module( self$modelParameters <- modelParameters self$epochs <- self$item_or_defaults(fitParameters, 'epochs', 10) - self$learningRate <- self$item_or_defaults(fitParameters,'lr', 2e-4) - self$l2Norm <- self$item_or_defaults(fitParameters, 'l2', 1e-5) + self$learningRate <- self$item_or_defaults(fitParameters,'learningRate', 1e-3) + self$l2Norm <- self$item_or_defaults(fitParameters, 'weightDecay', 1e-5) self$resultsDir <- self$item_or_defaults(fitParameters, 'resultsDir', './results') dir.create(self$resultsDir) @@ -184,7 +295,7 @@ Estimator <- torch::nn_module( batch_loss = 0 i=1 self$model$train() - for (b in torch::enumerate(dataloader)) { + coro::loop(for (b in dataloader) { cat = b[[1]]$to(device=self$device) num = b[[2]]$to(device=self$device) target= b[[3]]$to(device=self$device) @@ -193,9 +304,9 @@ Estimator <- torch::nn_module( loss = self$criterion(out, target) batch_loss = batch_loss + loss - if (i %% 10 == 0) { + if (i %% 1 == 10) { elapsed_time <- Sys.time() - t - ParallelLogger::logInfo('Loss: ', round((batch_loss/10)$item(), 3), ' | Time: ', + ParallelLogger::logInfo('Loss: ', round((batch_loss/1)$item(), 3), ' | Time: ', round(elapsed_time,digits = 2), units(elapsed_time)) t = Sys.time() batch_loss = 0 @@ -205,7 +316,7 @@ Estimator <- torch::nn_module( self$optimizer$step() self$optimizer$zero_grad() i = i + 1 - } + }) }, @@ -216,16 +327,16 @@ Estimator <- torch::nn_module( predictions = c() targets = c() self$model$eval() - for (b in torch::enumerate(dataloader)) { + coro::loop(for (b in dataloader) { cat = b[[1]]$to(device=self$device) num = b[[2]]$to(device=self$device) target = b[[3]]$to(device=self$device) pred = self$model(num, cat) - predictions = c(predictions, as.array(pred)) - targets = c(targets, as.array(target)) + predictions = c(predictions, as.array(pred$cpu())) + targets = c(targets, as.array(target$cpu())) loss = c(loss, self$criterion(pred, target)$item()) - } + }) mean_loss = mean(loss) predictionsClass = list(values=predictions, outcomeCount=targets) attr(predictionsClass, 'metaData')$predictionType <-'binary' From c3073f5ea71d4dfa1ac595aa8f1cb9a456623c39 Mon Sep 17 00:00:00 2001 From: Egill Fridgeirsson Date: Mon, 9 Aug 2021 09:50:12 +0200 Subject: [PATCH 012/140] More work on resnet model and added a predict function for estimator class --- DESCRIPTION | 9 +- R/Predict.R | 18 +++ R/ResNet.R | 344 +++++++++++++++++++++++++++++++++++----------------- 3 files changed, 255 insertions(+), 116 deletions(-) diff --git a/DESCRIPTION b/DESCRIPTION index 42e28a4..4110255 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -7,12 +7,11 @@ Date: 2021-06-07 Authors@R: c( person("Jenna", "Reps", email = "jreps@its.jnj.com", role = c("aut", "cre")), person("Seng", "Chan You", role = c("aut")), - person("Egill", "Friogeirsson", role = c("aut")) + person("Egill", "Fridgeirsson", role = c("aut")) ) - Maintainer: Jenna Reps Description: A package for creating deep learning patient 
level prediction models following -the OHDSI PatientLevelPrediction framework. + the OHDSI PatientLevelPrediction framework. License: Apache License 2.0 URL: https://ohdsi.github.io/PatientLevelPrediction, https://github.com/OHDSI/DeepPatientLevelPrediction BugReports: https://github.com/OHDSI/DeepPatientLevelPrediction/issues @@ -43,7 +42,5 @@ Suggests: testthat Remotes: ohdsi/FeatureExtraction -LinkingTo: Rcpp -NeedsCompilation: yes RoxygenNote: 7.1.1 -Encoding: UTF-8 +Encoding: UTF-8 \ No newline at end of file diff --git a/R/Predict.R b/R/Predict.R index caad604..1897490 100644 --- a/R/Predict.R +++ b/R/Predict.R @@ -237,6 +237,24 @@ predict.knn <- function(plpData, population, plpModel, ...){ return(prediction) } +#' predict.deepEstimator +#' @description prediction function for models using estimator class +#' @usage predict.deepEstimator(x) +#' @export predict.deepEstimator +predict.deepEstimator <- function(plpModel, population, plpData, ...) { + + sparseMatrix <- toSparseM(plpData, population) + indices <- population$rowId + numericalIndex <- sparseMatrix$map$newCovariateId[sparseMatrix$map$oldCovariateId==1002] + dataset <- Dataset(sparseMatrix$data, population$outcomeCount,indices=indices, + numericalIndex = numericalIndex) + dataloader <- torch::dataloader(dataset, + batch_size=plpModel$modelSettings$modelParameters$batch_size, + shuffle=FALSE, drop_last = FALSE) + prediction <- plpModel$model$predictProba(dataloader) + prediction <- population %>% mutate(value=prediction) + return(prediction) +} predict.deep <- function(plpModel, population, plpData, ...){ ensure_installed("plyr") diff --git a/R/ResNet.R b/R/ResNet.R index a5fa7d5..a1c3cd4 100644 --- a/R/ResNet.R +++ b/R/ResNet.R @@ -1,9 +1,10 @@ +#' @export setResNet <- function(numLayers=1:16, sizeHidden=2^(6:10), hiddenFactor=1:4, residualDropout=seq(0,0.3,0.05), hiddenDropout=seq(0,0.3,0.05), normalization='BatchNorm', activation='RelU', sizeEmbedding=2^(6:9), weightDecay=c(1e-6, 1e-3), learningRate=c(1e-2,1e-5), seed=42, hyperParamSearch='random', - randomSample=100, device='cpu', batch_size=1024) { + randomSample=100, device='cpu', batch_size=1024, epochs=10) { if (!is.null(seed)) { seed <- as.integer(sample(1e5, 1)) @@ -18,7 +19,10 @@ setResNet <- function(numLayers=1:16, sizeHidden=2^(6:10), hiddenFactor=1:4, if (hyperParamSearch=='random'){ param <- param[sample(nrow(param), randomSample),] } - + param$device <- device + param$batch_size <- batch_size + param$epochs <- epochs + results <- list(model='fitResNet', param=param, name='ResNet') class(results) <- 'modelSettings' @@ -27,28 +31,40 @@ setResNet <- function(numLayers=1:16, sizeHidden=2^(6:10), hiddenFactor=1:4, } - +#' @export fitResNet <- function(population, plpData, param, - quiet=F) { - - toSparse <- toSparseM(plpData, population) - sparseMatrix <- toSparse$data - - outLoc <- createTempModelLoc() - #do cross validation to find hyperParameter - hyperParamSel <- lapply(param, function(x) do.call(trainResNet, - listAppend(x, list(plpData=sparseMatrix, - population = population, - train=TRUE, - modelOutput=outLoc, - quiet = quiet)) )) - hyperSummary <- cbind(do.call(rbind, param), unlist(hyperParamSel)) + quiet=F, outcomeId, cohortId, ...) 
{ + + start <- Sys.time() + sparseMatrix <- toSparseM(plpData, population) - #now train the final model and return coef - bestInd <- which.max(abs(unlist(hyperParamSel)-0.5))[1] - finalModel <- do.call(trainCNNTorch, listAppend(param[[bestInd]], - list(plpData = result$data, - population = Population, + + # TODO where to save results? + outLoc <- './results' + + #do cross validation to find hyperParameters + hyperParamSel <- list() + for (i in 1:nrow(param)) { + outLocHP <- file.path(outLoc, paste0('Iteration_', i)) + hyperParamSel[[i]] <- do.call(trainResNet, listAppend(param[i,], list(sparseMatrix =sparseMatrix, + population = population, + train=TRUE, + modelOutput=outLocHP, + quiet = quiet))) + } + hyperSummary <-as.data.frame(cbind(do.call(rbind, lapply(hyperParamSel, function(x) x$hyperSum)))) + hyperSummary$auc <- unlist(lapply(hyperParamSel, function(x) x$auc)) + + scores <- unlist(lapply(hyperParamSel, function(x) x$auc)) + + # now train the final model and return coef + bestInd <- which.max(abs(unlist(scores)-0.5))[1] + uniqueEpochs <- unique(hyperSummary$bestEpochs[[bestInd]]) + param$epochs <- uniqueEpochs[which.max(tabulate(match(hyperSummary$bestEpochs[[bestInd]], uniqueEpochs)))] + outLoc <- file.path(outLoc, paste0('whole_training_set')) + finalModel <- do.call(trainResNet, listAppend(param[bestInd,], + list(sparseMatrix = sparseMatrix, + population = population, train=FALSE, modelOutput=outLoc))) covariateRef <- as.data.frame(plpData$covariateData$covariateRef) @@ -57,23 +73,12 @@ fitResNet <- function(population, plpData, param, covariateRef$covariateValue <- rep(0, nrow(covariateRef)) modelTrained <- file.path(outLoc) - param.best <- param[[bestInd]] - - comp <- start-Sys.time() + param.best <- param[bestInd,] - # train prediction - pred <- as.matrix(finalModel) - pred[,1] <- pred[,1] - colnames(pred) <- c('rowId','outcomeCount','indexes', 'value') - pred <- as.data.frame(pred) - attr(pred, "metaData") <- list(predictionType="binary") - - pred$value <- 1-pred$value - prediction <- merge(population, pred[,c('rowId','value')], by='rowId') - - # return model location - result <- list(model = modelTrained, - trainCVAuc = -1, # ToDo decide on how to deal with this + comp <- Sys.time() - start + # return model location + result <- list(model = finalModel$model, + trainCVAuc = scores[bestInd], hyperParamSearch = hyperSummary, modelSettings = list(model='fitResNet',modelParameters=param.best), metaData = plpData$metaData, @@ -82,73 +87,96 @@ fitResNet <- function(population, plpData, param, cohortId=cohortId, varImp = covariateRef, trainingTime =comp, - dense=1, - covariateMap=result$map, # I think this is need for new data to map the same? - predictionTrain = prediction + covariateMap=sparseMatrix$map, # I think this is need for new data to map the same? + predictionTrain = finalModel$prediction ) class(result) <- 'plpModel' + attr(result, 'type') <- 'deepEstimator' attr(result, 'predictionType') <- 'binary' - return(result) } -trainResNet <- function(population, plpData, modelOutput, train=T, ...) { +#' @export +trainResNet <- function(sparseMatrix, population,...,train=T) { param <- list(...) modelParamNames <- c("numLayers", "sizeHidden", "hiddenFactor", "residualDropout", "hiddenDropout", "sizeEmbedding") + # TODO can I use lapply here instead of for loops? 
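On the TODO above: param is collected with list(...), so it is already a named list and the extraction loop can be replaced by subsetting with the name vector (subsetting a list by names keeps the names); an lapply form is equivalent. A small sketch with made-up hyper-parameter values:

    param <- list(numLayers = 4, sizeHidden = 64, hiddenFactor = 2,
                  residualDropout = 0.1, hiddenDropout = 0.1, sizeEmbedding = 64,
                  weightDecay = 1e-6, learningRate = 1e-3, epochs = 10)

    modelParamNames <- c("numLayers", "sizeHidden", "hiddenFactor",
                         "residualDropout", "hiddenDropout", "sizeEmbedding")

    # single-line replacement for the loop
    modelParam <- param[modelParamNames]

    # equivalent lapply version
    modelParam2 <- setNames(lapply(modelParamNames, function(n) param[[n]]), modelParamNames)

A later commit in this series switches to exactly the single-line subsetting form.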
modelParam <- list() for (i in 1:length(modelParamNames)){ - modelParam[[i]] <- param[,modelParamNames[[i]]] + modelParam[i] <- param[modelParamNames[i]] } names(modelParam) <- modelParamNames - fitParamNames <- c("weightDecay", "learningRate") + fitParamNames <- c("weightDecay", "learningRate", "epochs") fitParams <- list() for (i in 1:length(fitParamNames)) { - fitParams[[i]] <- param[, fitParamNames[[i]]] + fitParams[i] <- param[fitParamNames[i]] } names(fitParams) <- fitParamNames - fitParams$resultDir <- modelOutput - sparseM <- toSparseM(plpData, population, temporal=F) - n_features <- nrow(sparseM$data) - modelParams$n_features <- n_features + n_features <- ncol(sparseMatrix$data) + modelParam$n_features <- n_features # TODO make more general for other variables than only age - numericalIndex <- sparseM$map$newCovariateId[sparseM$map$oldCovariateId==1002] + numericalIndex <- sparseMatrix$map$newCovariateId[sparseMatrix$map$oldCovariateId==1002] - if(!is.null(population$indexes) && train==T){ - index_vect <- unique(population$index) - ParallelLogger::logInfo(paste('Training deep neural network using Torch with ',length(index_vect[index_vect>0]),' fold CV')) - + index_vect <- unique(population$indexes[population$indexes > 0]) + if(train==T){ + ParallelLogger::logInfo(paste('Training deep neural network using Torch with ',length(index_vect),' fold CV')) foldAuc <- c() + foldEpochs <- c() for(index in 1:length(index_vect)){ + fitParams$resultsDir <- file.path(param$modelOutput, paste0('fold_', index)) ParallelLogger::logInfo(paste('Fold ',index, ' -- with ', sum(population$indexes!=index),'train rows')) estimator <- Estimator(baseModel=ResNet, modelParameters=modelParam, fitParameters=fitParams, device=param$device) - testIndices <- population$rowId[population$index==index] - trainIndices <- population$rowId[(population$index!=index) & (population$index > 0)] - trainDataset <- Dataset(sparseM$data, population$outcomeCount, indices=trainIndices, numericalIndex=numericalIndex) - testDataset <- Dataset(sparseM$data, population$outcomeCount, indices=trainIndices, numericalIndex=numericalIndex) + testIndices <- population$rowId[population$indexes==index] + trainIndices <- population$rowId[(population$indexes!=index) & (population$indexes > 0)] + trainDataset <- Dataset(sparseMatrix$data, population$outcomeCount, indices=trainIndices, numericalIndex=numericalIndex) + testDataset <- Dataset(sparseMatrix$data, population$outcomeCount, indices=testIndices, numericalIndex=numericalIndex) trainDataloader <- torch::dataloader(trainDataset, batch_size=param$batch_size, shuffle=T, drop_last=TRUE) testDataloader <- torch::dataloader(testDataset, batch_size=param$batch_size, shuffle=F) - estimator.fit(trainDataloader, testDataloader) - score <- estimator.score(testDataloader) - + score <- estimator$fit(trainDataloader, testDataloader)$score(testDataloader) + bestEpoch <- estimator$bestEpoch auc <- score$auc foldAuc <- c(foldAuc, auc) + foldEpochs <- c(foldEpochs, bestEpoch) } + auc <- mean(foldAuc) + predictions <- NULL + bestEpochs <- list(bestEpochs=foldEpochs) } - - result <- list(model=estimator, - auc = mean(foldauc), - prediction = NULL, - hyperSum = c(modelParam, fitParams)) + else { + ParallelLogger::logInfo('Training deep neural network using Torch on whole training set') + fitParams$resultsDir <- param$modelOutput + estimator <- Estimator(baseModel = ResNet, modelParameters = modelParam, + fitParameters = fitParams, device=param$device) + trainIndices <- population$rowId[population$indexes > 
0] + + trainDataset <- Dataset(sparseMatrix$data, population$outcomeCount, indices=trainIndices, numericalIndex=numericalIndex) + trainDataloader <- torch::dataloader(trainDataset, batch_size=param$batch_size, shuffle=T, drop_last=TRUE) + + estimator$fitWholeTrainingSet(trainDataloader, param$epochs) + dataloader <- torch::dataloader(trainDataset, batch_size = param$batch_size, shuffle=F, drop_last=FALSE) + predictions <- population[trainIndices, ] + predictions$value <- estimator$predictProba(dataloader) + predictionsClass <- list(value=predictions$value, outcomeCount=as.array(trainDataset$labels)) + attr(predictionsClass, 'metaData')$predictionType <-'binary' + auc <- computeAuc(predictionsClass) + bestEpochs <- NULL + } + + result <- list(model=estimator, + auc = auc, + prediction = predictions, + hyperSum = c(modelParam, fitParams, bestEpochs)) + return(result) } @@ -197,18 +225,17 @@ ResNet <- torch::nn_module( name='ResNet', initialize=function(n_features, sizeEmbedding, sizeHidden, numLayers, - hiddenFactor, activation, normalization, hiddenDropout=NULL, + hiddenFactor, activation=torch::nn_relu, + normalization=torch::nn_batch_norm1d, hiddenDropout=NULL, residualDropout=NULL, d_out=1) { # n_features - 1 because only binary features are embedded (not Age) # ages is concatenated with the embedding output - # need to extend to support other numerical features + # TODO need to extend to support other numerical features self$embedding <- torch::nn_linear(n_features - 1, sizeEmbedding, bias=F) self$first_layer <- torch::nn_linear(sizeEmbedding + 1, sizeHidden) resHidden <- sizeHidden * hiddenFactor - #TODO make this prettier , residualBlock class - #TODO self$layers <- torch::nn_module_list(lapply(1:numLayers, function (x) ResLayer(sizeHidden, resHidden, normalization, activation, @@ -247,50 +274,75 @@ Estimator <- torch::nn_module( self$model <- do.call(baseModel, modelParameters) self$modelParameters <- modelParameters - self$epochs <- self$item_or_defaults(fitParameters, 'epochs', 10) - self$learningRate <- self$item_or_defaults(fitParameters,'learningRate', 1e-3) - self$l2Norm <- self$item_or_defaults(fitParameters, 'weightDecay', 1e-5) + self$epochs <- self$itemOrDefaults(fitParameters, 'epochs', 10) + self$learningRate <- self$itemOrDefaults(fitParameters,'learningRate', 1e-3) + self$l2Norm <- self$itemOrDefaults(fitParameters, 'weightDecay', 1e-5) - self$resultsDir <- self$item_or_defaults(fitParameters, 'resultsDir', './results') - dir.create(self$resultsDir) - self$prefix <- self$item_or_defaults(fitParameters, 'prefix', 'resnet') + self$resultsDir <- self$itemOrDefaults(fitParameters, 'resultsDir', './results') + dir.create(self$resultsDir, recursive=TRUE, showWarnings=FALSE) + self$prefix <- self$itemOrDefaults(fitParameters, 'prefix', 'resnet') - self$previousEpochs <- self$item_or_defaults(fitParameters, 'previousEpochs', 0) + self$previousEpochs <- self$itemOrDefaults(fitParameters, 'previousEpochs', 0) self$optimizer <- optimizer(params=self$model$parameters, lr=self$learningRate, weight_decay=self$l2Norm) self$criterion <- criterion() + self$model$to(device=self$device) }, # fits the estimator fit = function(dataloader, testDataloader) { - valLosses = c() - valAUCs = c() + valLosses <- c() + valAUCs <- c() + lr <- c() for (epoch in 1:self$epochs) { - self$fit_epoch(dataloader) - scores <- self$score_epoch(testDataloader) + self$fitEpoch(dataloader) + scores <- self$score(testDataloader) currentEpoch <- epoch + self$previousEpochs - + lr <- c(lr, 
self$optimizer$param_groups[[1]]$lr) ParallelLogger::logInfo('Epochs: ', currentEpoch, ' | Val AUC: ', - round(scores$auc,3), ' | Val Loss: ', round(scores$loss,2), ' LR: ', + round(scores$auc,3), ' | Val Loss: ', + round(scores$loss,3), ' | LR: ', self$optimizer$param_groups[[1]]$lr) valLosses <- c(valLosses, scores$loss) valAUCs <- c(valAUCs, scores$auc) torch::torch_save(list( - modelState_dict=self$model$state_dict(), - modelHyperparameters=self$modelParameters), + modelStateDict=self$model$state_dict(), + modelHyperparameters=self$modelParameters, + epoch=currentEpoch), file.path(self$resultsDir, paste0(self$prefix, '_epochs:', currentEpoch, - '_auc:', round(scores$auc,3), '_val_loss:', - round(scores$loss,2)))) + '_auc:', round(scores$auc,4), '_val_loss:', + round(scores$loss,4)))) } write.csv(data.frame(epochs=1:self$epochs, loss=valLosses, auc=valAUCs), file.path(self$resultsDir, 'log.txt')) + + #TODO here I should extract best epoch from the saved checkpoints + bestModelFile <- self$extractBestModel(metric='val_loss') + bestModel <- torch::torch_load(bestModelFile) + bestModelStateDict <- bestModel$modelStateDict + self$model$load_state_dict(bestModelStateDict) + bestEpoch <- bestModel$epoch + ParallelLogger::logInfo(paste0('Loaded best model from epoch ', bestEpoch)) + self$bestEpoch <- bestEpoch + + invisible(self) + }, + + # Fits whole training set on a specific number of epochs + # TODO What happens when learning rate changes per epochs? + # Ideally I would copy the learning rate strategy from before + fitWholeTrainingSet = function(dataloader, epochs) { + for (epoch in 1:epochs) { + self$fitEpoch(dataloader) + } + }, # trains for one epoch - fit_epoch = function(dataloader){ + fitEpoch = function(dataloader){ t = Sys.time() batch_loss = 0 i=1 @@ -299,11 +351,10 @@ Estimator <- torch::nn_module( cat = b[[1]]$to(device=self$device) num = b[[2]]$to(device=self$device) target= b[[3]]$to(device=self$device) - out = self$model(num, cat) loss = self$criterion(out, target) - batch_loss = batch_loss + loss + batch_loss = batch_loss + loss if (i %% 1 == 10) { elapsed_time <- Sys.time() - t ParallelLogger::logInfo('Loss: ', round((batch_loss/1)$item(), 3), ' | Time: ', @@ -321,42 +372,109 @@ Estimator <- torch::nn_module( }, # calculates loss and auc after training for one epoch - score_epoch = function(dataloader){ + score = function(dataloader){ torch::with_no_grad({ loss = c() predictions = c() targets = c() self$model$eval() coro::loop(for (b in dataloader) { - cat = b[[1]]$to(device=self$device) - num = b[[2]]$to(device=self$device) - target = b[[3]]$to(device=self$device) + b <- self$batchToDevice(b) + cat <- b$cat + num <- b$num + target <- b$target - pred = self$model(num, cat) - predictions = c(predictions, as.array(pred$cpu())) - targets = c(targets, as.array(target$cpu())) - loss = c(loss, self$criterion(pred, target)$item()) + pred <- self$model(num, cat) + predictions <- c(predictions, as.array(pred$cpu())) + targets <- c(targets, as.array(target$cpu())) + loss <- c(loss, self$criterion(pred, target)$item()) }) - mean_loss = mean(loss) - predictionsClass = list(values=predictions, outcomeCount=targets) + mean_loss <- mean(loss) + predictionsClass <- list(value=predictions, outcomeCount=targets) attr(predictionsClass, 'metaData')$predictionType <-'binary' - auc = computeAuc(predictionsClass) + auc <- computeAuc(predictionsClass) }) return(list(loss=mean_loss, auc=auc)) }, # predicts and outputs the probabilities - # predict_proba = function(dataloader) { - # - # }, + 
predictProba = function(dataloader) { + torch::with_no_grad({ + predictions <- c() + self$model$eval() + coro::loop(for (b in dataloader){ + b <- self$batchToDevice(b) + cat <- b$cat + num <- b$num + target <- b$target + pred <- self$model(num,cat) + predictions <- c(predictions, as.array(torch::torch_sigmoid(pred$cpu()))) + }) + }) + return(predictions) + }, + # predicts and outputs the class - # predict = function(dataloader){ - # - # }, + predict = function(dataloader){ + predictions <- self$predict_proba(dataloader) + predicted_class <- torch::torch_argmax(torch::torch_unsqueeze(torch::torch_tensor(predictions), dim=2),dim=2) + return(predicted_class) + }, + + load_best_weight = function(){ + best_model_file <- self$extract_best_model(self$resultsDor) + best_model <- torch::torch_load(best_model_file) + state_dict <- best_model$model_state_dict + epoch <- best_model$epoch + self$model$load_state_dict(state_dict) + ParallelLogger::logInfo(paste('Loaded best model from epoch: ', epoch)) + }, + # extracts best model from the results directory + extractBestModel = function(metric='val_loss'){ + + if (metric=='val_loss') + { + direction = 'min' + } + else + { + direction = 'max' + } + + # goes over checkpoints in folder and extracts metric value from name + checkpoints <- Sys.glob(file.path(self$resultsDir, paste0('*', metric, '*'))) + metric_value <- c() + for (file in checkpoints) { + fileName <- basename(file) + metric_value <- c(metric_value, as.double(strsplit(strsplit(fileName, paste0(metric, ':'))[[1]][2], '_')[[1]][1])) + } + + if (direction == 'max') { + best_index <- which.max(metric_value) + } else if (direction == 'min') { + best_index <- which.min(metric_value) + } + bestModel <- checkpoints[[best_index]] + + return(bestModel) + }, + + + # sends a batch of data to device + ## TODO make agnostic of the form of batch + batchToDevice = function(batch) { + cat <- batch[[1]]$to(device=self$device) + num <- batch[[2]]$to(device=self$device) + target <- batch[[3]]$to(device=self$device) + + result <- list(cat=cat, num=num, target=target) + return(result) + }, + # select item from list, and if it's null sets a default - item_or_defaults = function (list, item, default = NULL) { + itemOrDefaults = function (list, item, default = NULL) { value = list[[item]] if (is.null(value)) default else value }, @@ -371,7 +489,13 @@ Dataset <- torch::dataset( tensor <- torch::torch_tensor(as.matrix(matrix), dtype=torch::torch_float32()) - self$labels <- torch::torch_tensor(labels)[indices] + # if labels have already been restricted to population + if (max(indices)>length(labels)) { + self$labels <- torch::torch_tensor(labels) + } + else { + self$labels <- torch::torch_tensor(labels)[indices] + } notNumIndex <- 1:tensor$shape[2] != numericalIndex self$cat <- tensor[, notNumIndex] From 140e97ccfe80caba2ae049c0d8787f94ac0139e4 Mon Sep 17 00:00:00 2001 From: ted9219 Date: Mon, 23 Aug 2021 13:45:13 +0000 Subject: [PATCH 013/140] add CNNTorch topologies --- DESCRIPTION | 1 + R/Topolgies.R | 79 -------------------- R/Topologies.R | 191 +++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 192 insertions(+), 79 deletions(-) delete mode 100644 R/Topolgies.R create mode 100644 R/Topologies.R diff --git a/DESCRIPTION b/DESCRIPTION index e91b331..2fdbaf0 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -7,6 +7,7 @@ Authors@R: c( person("Jenna", "Reps", email = "jreps@its.jnj.com", role = c("aut", "cre")), person("Seng", "Chan You", role = c("aut")), person("Egill", "Fridgeirsson", role = c("aut")) + 
person("Chungsoo", "Kim", role = c("aut")) ) Maintainer: Jenna Reps Description: A package for creating deep learning patient level prediction models following the OHDSI PatientLevelPrediction framework. diff --git a/R/Topolgies.R b/R/Topolgies.R deleted file mode 100644 index 16d76a2..0000000 --- a/R/Topolgies.R +++ /dev/null @@ -1,79 +0,0 @@ -singleLayerNN <- function(inputN, layer1, outputN = 2, layer_dropout){ - net <- torch::nn_module( - "classic_net", - - initialize = function(){ - self$linear1 = torch::nn_linear(inputN, layer1) - self$linear2 = torch::nn_linear(layer1, outputN) - self$softmax = torch::nn_softmax(outputN) - }, - - forward = function(x){ - x %>% - self$linear1() %>% - torch::nnf_dropout(p = layer_dropout) %>% - self$linear2() %>% - torch::nnf_dropout(p = layer_dropout) %>% - self$softmax() - } - ) - return(net()) -} - - -doubleLayerNN <- function(inputN, layer1, - layer2, outputN, - layer_dropout){ - net <- torch::nn_module( - "classic_net", - - initialize = function(){ - self$linear1 = torch::nn_linear(inputN, layer1) - self$linear2 = torch::nn_linear(layer1, layer2) - self$linear3 = torch::nn_linear(layer2, outputN) - self$softmax = torch::nn_softmax(outputN) - }, - - forward = function(x){ - x %>% - self$linear1() %>% - torch::nnf_dropout(p = layer_dropout) %>% - self$linear2() %>% - torch::nnf_dropout(p = layer_dropout) %>% - self$linear3() %>% - self$softmax() - } - ) - return(net()) -} - - -tripleLayerNN <- function(inputN, layer1, - layer2, layer3, - outputN, layer_dropout){ - net <- torch::nn_module( - "classic_net", - - initialize = function(){ - self$linear1 = torch::nn_linear(inputN, layer1) - self$linear2 = torch::nn_linear(layer1, layer2) - self$linear3 = torch::nn_linear(layer2, layer3) - self$linear4 = torch::nn_linear(layer3, outputN) - self$softmax = torch::nn_softmax(outputN) - }, - - forward = function(x){ - x %>% - self$linear1() %>% - torch::nnf_dropout(p = layer_dropout) %>% - self$linear2() %>% - torch::nnf_dropout(p = layer_dropout) %>% - self$linear3() %>% - torch::nnf_dropout(p = layer_dropout) %>% - self$linear4() %>% - self$softmax() - - } - ) - model <- net() -} \ No newline at end of file diff --git a/R/Topologies.R b/R/Topologies.R new file mode 100644 index 0000000..7ce98b5 --- /dev/null +++ b/R/Topologies.R @@ -0,0 +1,191 @@ +singleLayerNN <- function(inputN, layer1, outputN = 2, layer_dropout){ + net <- torch::nn_module( + "classic_net", + + initialize = function(){ + self$linear1 = torch::nn_linear(inputN, layer1) + self$linear2 = torch::nn_linear(layer1, outputN) + self$softmax = torch::nn_softmax(outputN) + }, + + forward = function(x){ + x %>% + self$linear1() %>% + torch::nnf_dropout(p = layer_dropout) %>% + self$linear2() %>% + torch::nnf_dropout(p = layer_dropout) %>% + self$softmax() + } + ) + return(net()) +} + + +doubleLayerNN <- function(inputN, layer1, + layer2, outputN, + layer_dropout){ + net <- torch::nn_module( + "classic_net", + + initialize = function(){ + self$linear1 = torch::nn_linear(inputN, layer1) + self$linear2 = torch::nn_linear(layer1, layer2) + self$linear3 = torch::nn_linear(layer2, outputN) + self$softmax = torch::nn_softmax(outputN) + }, + + forward = function(x){ + x %>% + self$linear1() %>% + torch::nnf_dropout(p = layer_dropout) %>% + self$linear2() %>% + torch::nnf_dropout(p = layer_dropout) %>% + self$linear3() %>% + self$softmax() + } + ) + return(net()) +} + + +tripleLayerNN <- function(inputN, layer1, + layer2, layer3, + outputN, layer_dropout){ + net <- torch::nn_module( + "classic_net", + + 
initialize = function(){ + self$linear1 = torch::nn_linear(inputN, layer1) + self$linear2 = torch::nn_linear(layer1, layer2) + self$linear3 = torch::nn_linear(layer2, layer3) + self$linear4 = torch::nn_linear(layer3, outputN) + self$softmax = torch::nn_softmax(outputN) + }, + + forward = function(x){ + x %>% + self$linear1() %>% + torch::nnf_dropout(p = layer_dropout) %>% + self$linear2() %>% + torch::nnf_dropout(p = layer_dropout) %>% + self$linear3() %>% + torch::nnf_dropout(p = layer_dropout) %>% + self$linear4() %>% + self$softmax() + + } + ) + model <- net() +} + +# Multi-resolution CNN1 model +# Stucture based on https://arxiv.org/pdf/1608.00647.pdf CNN1 + +MRCovNN_submodel1 <- function(kernel_size){ + net <- torch::nn_module( + "MRCovNN_submodel1", + + initialize = function(){ + self$conv1 <- torch::nn_conv2d(in_channel = 1, + out_channel = 1, + kernel_size = kernel_size) + + }, + + forward = function(x){ + x %>% + torch::nnf_max_pool2d(kernel_size) %>% + self$conv1() %>% + torch::nnf_relu() + } + ) + return(net()) +} + +MRCovNN_submodel2 <- function(kernel_size){ + net <- torch::nn_module( + "MRCovNN_submodel2", + + initialize = function(){ + self$maxPool <- torch::nn_max_pool2d(kernel_size = kernel_size) + self$conv1 <- torch::nn_conv2d(in_channels = 1, + out_channels = 1, + kernel_size = kernel_size) + }, + + forward = function(x){ + x %>% + self$maxPool() %>% + self$conv1() %>% + torch::nnf_relu() + } + ) + return(net()) +} + + +MRCovNN_submodel3 <- function(kernel_size){ + net <- torch::nn_module( + "MRCovNN_submodel3", + + initialize = function(){ + self$maxPool <- torch::nn_max_pool2d(kernel_size = kernel_size) + self$conv1 <- torch::nn_conv2d(in_channels = 1, + out_channels = 1, + kernel_size = kernel_size) + + self$conv2 <- torch::nn_conv2d(in_channels = 1, + out_channels = 1, + kernel_size = kernel_size) + }, + + forward = function(x){ + x %>% + self$conv1() %>% + torch::nnf_relu() %>% + self$maxPool() %>% + self$conv2() %>% + torch::nnf_relu() + } + ) + return(net()) +} + + +# submodel1 = MRCovNN_submodel1(kernelSize = c(4,1)) +# submodel1 = submodel1(x) +# modelList = list(submodel1, submodel2, submodel3) + +MultiResolutionCovNN <- function(modelList = list(submodel1, submodel2, submodel3), + kernelSize, + dropout, + inputN, + layer1, + layer2, + outputN = 2){ + net <- torch::nn_module( + "MultiResolutionCovNN", + + initialize = function(){ + self$linear1 <- torch::nn_linear(inputN, layer1) + self$linear2 <- torch::nn_linear(layer1, layer2) + self$linear3 <- torch::nn_linear(layer2, outputN) + }, + + forward = function(){ + + torch::torch_cat(modelList, 3) %>% + torch::nnf_dropout(p = dropout) %>% + self$linear1() %>% + torch::nnf_relu() %>% + torch::nnf_dropout(p = dropout) %>% + self$linear2() %>% + torch::nnf_relu() %>% + torch::nnf_dropout(p = dropout) %>% + self$linear3() %>% + torch::nnf_softmax(outputN) + + } + ) + return(net()) +} From 49bd2347cabdc0f2b53b7ed464eba8bf4d9f8e7b Mon Sep 17 00:00:00 2001 From: jreps Date: Mon, 4 Oct 2021 09:50:23 -0400 Subject: [PATCH 014/140] buildable - package can be built in R --- DESCRIPTION | 4 +- NAMESPACE | 4 + R/ResNet.R | 228 ++++++++++++++++++----------------- extras/example.R | 56 +++++++-- man/predict.deepEstimator.Rd | 11 ++ 5 files changed, 180 insertions(+), 123 deletions(-) create mode 100644 man/predict.deepEstimator.Rd diff --git a/DESCRIPTION b/DESCRIPTION index e91b331..08549b5 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -32,7 +32,7 @@ Imports: SqlRender (>= 1.1.3), torch, tibble, - tidyr, + tidyr Suggests: 
devtools, keras, @@ -42,4 +42,4 @@ Suggests: Remotes: ohdsi/FeatureExtraction RoxygenNote: 7.1.1 -Encoding: UTF-8 \ No newline at end of file +Encoding: UTF-8 diff --git a/NAMESPACE b/NAMESPACE index 062102d..7e09f9e 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -4,12 +4,14 @@ S3method(print,plpData) S3method(print,summary.plpData) S3method(summary,plpData) export(fitDeepNNTorch) +export(fitResNet) export(getPlpData) export(loadPlpData) export(loadPlpFromCsv) export(loadPlpModel) export(loadPlpResult) export(loadPrediction) +export(predict.deepEstimator) export(predictAndromeda) export(predictPlp) export(predictProbabilities) @@ -25,7 +27,9 @@ export(setCovNN2) export(setDeepNN) export(setDeepNNTorch) export(setRNNTorch) +export(setResNet) export(toSparseMDeep) export(toSparseRTorch) +export(trainResNet) export(transferLearning) importFrom(zeallot,"%<-%") diff --git a/R/ResNet.R b/R/ResNet.R index a1c3cd4..baebf13 100644 --- a/R/ResNet.R +++ b/R/ResNet.R @@ -36,11 +36,14 @@ fitResNet <- function(population, plpData, param, quiet=F, outcomeId, cohortId, ...) { start <- Sys.time() - sparseMatrix <- toSparseM(plpData, population) + #sparseMatrix <- toSparseM(plpData, population) + sparseMatrix <- toSparseMDeep(plpData ,population, + map=NULL, + temporal=F) - # TODO where to save results? - outLoc <- './results' + outLoc <- tempfile(pattern = 'resNet') + dir.create(outLoc) #do cross validation to find hyperParameters hyperParamSel <- list() @@ -103,22 +106,11 @@ trainResNet <- function(sparseMatrix, population,...,train=T) { modelParamNames <- c("numLayers", "sizeHidden", "hiddenFactor", "residualDropout", "hiddenDropout", "sizeEmbedding") - - # TODO can I use lapply here instead of for loops? - modelParam <- list() - for (i in 1:length(modelParamNames)){ - modelParam[i] <- param[modelParamNames[i]] - } - names(modelParam) <- modelParamNames + modelParam <- param[modelParamNames] fitParamNames <- c("weightDecay", "learningRate", "epochs") - fitParams <- list() - for (i in 1:length(fitParamNames)) { - fitParams[i] <- param[fitParamNames[i]] - } - names(fitParams) <- fitParamNames - - + fitParams <- param[fitParamNames] + n_features <- ncol(sparseMatrix$data) modelParam$n_features <- n_features @@ -132,15 +124,33 @@ trainResNet <- function(sparseMatrix, population,...,train=T) { foldEpochs <- c() for(index in 1:length(index_vect)){ fitParams$resultsDir <- file.path(param$modelOutput, paste0('fold_', index)) - ParallelLogger::logInfo(paste('Fold ',index, ' -- with ', sum(population$indexes!=index),'train rows')) - estimator <- Estimator(baseModel=ResNet, modelParameters=modelParam, - fitParameters=fitParams, device=param$device) + + if(!dir.exists(file.path(param$modelOutput, paste0('fold_', index)))){ + dir.create(file.path(param$modelOutput, paste0('fold_', index)), recursive = T) + } + + ParallelLogger::logInfo(paste('Fold ',index, ' -- with ', sum(population$indexes!=index & population$indexes > 0),'train rows')) + estimator <- Estimator(baseModel=ResNet, + modelParameters=modelParam, + fitParameters=fitParams, + device=param$device) testIndices <- population$rowId[population$indexes==index] trainIndices <- population$rowId[(population$indexes!=index) & (population$indexes > 0)] - trainDataset <- Dataset(sparseMatrix$data, population$outcomeCount, indices=trainIndices, numericalIndex=numericalIndex) - testDataset <- Dataset(sparseMatrix$data, population$outcomeCount, indices=testIndices, numericalIndex=numericalIndex) - trainDataloader <- torch::dataloader(trainDataset, 
batch_size=param$batch_size, shuffle=T, drop_last=TRUE) - testDataloader <- torch::dataloader(testDataset, batch_size=param$batch_size, shuffle=F) + trainDataset <- Dataset(sparseMatrix$data[population$rowId,], + population$outcomeCount, + indices= population$rowId%in%trainIndices, + numericalIndex=numericalIndex) + testDataset <- Dataset(sparseMatrix$data[population$rowId,], + population$outcomeCount, + indices = population$rowId%in%testIndices, + numericalIndex = numericalIndex) + trainDataloader <- torch::dataloader(trainDataset, + batch_size=param$batch_size, + shuffle=T, + drop_last=TRUE) + testDataloader <- torch::dataloader(testDataset, + batch_size=param$batch_size, + shuffle=F) score <- estimator$fit(trainDataloader, testDataloader)$score(testDataloader) bestEpoch <- estimator$bestEpoch @@ -149,32 +159,48 @@ trainResNet <- function(sparseMatrix, population,...,train=T) { foldEpochs <- c(foldEpochs, bestEpoch) } auc <- mean(foldAuc) - predictions <- NULL + prediction <- NULL bestEpochs <- list(bestEpochs=foldEpochs) } else { ParallelLogger::logInfo('Training deep neural network using Torch on whole training set') fitParams$resultsDir <- param$modelOutput - estimator <- Estimator(baseModel = ResNet, modelParameters = modelParam, - fitParameters = fitParams, device=param$device) + estimator <- Estimator(baseModel = ResNet, + modelParameters = modelParam, + fitParameters = fitParams, + device=param$device) + trainIndices <- population$rowId[population$indexes > 0] - trainDataset <- Dataset(sparseMatrix$data, population$outcomeCount, indices=trainIndices, numericalIndex=numericalIndex) - trainDataloader <- torch::dataloader(trainDataset, batch_size=param$batch_size, shuffle=T, drop_last=TRUE) - - estimator$fitWholeTrainingSet(trainDataloader, param$epochs) - dataloader <- torch::dataloader(trainDataset, batch_size = param$batch_size, shuffle=F, drop_last=FALSE) - predictions <- population[trainIndices, ] - predictions$value <- estimator$predictProba(dataloader) - predictionsClass <- list(value=predictions$value, outcomeCount=as.array(trainDataset$labels)) - attr(predictionsClass, 'metaData')$predictionType <-'binary' - auc <- computeAuc(predictionsClass) + trainDataset <- Dataset(sparseMatrix$data[population$rowId,], + population$outcomeCount, + indices=population$rowId%in%trainIndices, + numericalIndex=numericalIndex) + trainDataloader <- torch::dataloader(trainDataset, + batch_size=param$batch_size, + shuffle=T, + drop_last=TRUE) + estimator$fitWholeTrainingSet(trainDataloader, + param$epochs) + # get predictions + dataloader <- torch::dataloader(trainDataset, + batch_size = param$batch_size, + shuffle=F, + drop_last=FALSE) + prediction <- population[population$rowId%in%trainIndices, ] + prediction$value <- estimator$predictProba(dataloader) + + #predictionsClass <- data.frame(value=predictions$value, + # outcomeCount=as.array(trainDataset$labels)) + + attr(prediction, 'metaData')$predictionType <-'binary' + auc <- computeAuc(prediction) bestEpochs <- NULL } - result <- list(model=estimator, + result <- list(model = estimator, auc = auc, - prediction = predictions, + prediction = prediction, hyperSum = c(modelParam, fitParams, bestEpochs)) return(result) @@ -200,8 +226,6 @@ ResLayer <- torch::nn_module( self$activation <- activation() - - }, forward=function(x) { @@ -285,7 +309,8 @@ Estimator <- torch::nn_module( self$previousEpochs <- self$itemOrDefaults(fitParameters, 'previousEpochs', 0) self$optimizer <- optimizer(params=self$model$parameters, - lr=self$learningRate, 
weight_decay=self$l2Norm) + lr=self$learningRate, + weight_decay=self$l2Norm) self$criterion <- criterion() self$model$to(device=self$device) }, @@ -294,12 +319,23 @@ Estimator <- torch::nn_module( fit = function(dataloader, testDataloader) { valLosses <- c() valAUCs <- c() + + modelStateDict <- list() + modelHyperparameters <- list() + epoch <- list() + lr <- c() - for (epoch in 1:self$epochs) { + for (epochI in 1:self$epochs) { + + # fit the model self$fitEpoch(dataloader) + + print(self$model$state_dict()$first_layer.weight[1,1:10]) # viewing + + # predict on test data scores <- self$score(testDataloader) - currentEpoch <- epoch + self$previousEpochs + currentEpoch <- epochI + self$previousEpochs lr <- c(lr, self$optimizer$param_groups[[1]]$lr) ParallelLogger::logInfo('Epochs: ', currentEpoch, ' | Val AUC: ', round(scores$auc,3), ' | Val Loss: ', @@ -308,25 +344,25 @@ Estimator <- torch::nn_module( valLosses <- c(valLosses, scores$loss) valAUCs <- c(valAUCs, scores$auc) - torch::torch_save(list( - modelStateDict=self$model$state_dict(), - modelHyperparameters=self$modelParameters, - epoch=currentEpoch), - file.path(self$resultsDir, paste0(self$prefix, '_epochs:', currentEpoch, - '_auc:', round(scores$auc,4), '_val_loss:', - round(scores$loss,4)))) - } - write.csv(data.frame(epochs=1:self$epochs, loss=valLosses, auc=valAUCs), - file.path(self$resultsDir, 'log.txt')) + # here it saves the results to lists rather than files + modelStateDict[[epochI]] <- self$model$state_dict() + modelHyperparameters[[epochI]] <- self$modelParameters + epoch[[epochI]] <- currentEpoch + + } + - #TODO here I should extract best epoch from the saved checkpoints - bestModelFile <- self$extractBestModel(metric='val_loss') - bestModel <- torch::torch_load(bestModelFile) - bestModelStateDict <- bestModel$modelStateDict + #extract best epoch from the saved checkpoints + bestEpochInd <- which.min(valLosses) # change this if a different metric is used + + bestModelStateDict <- modelStateDict[[bestEpochInd]] self$model$load_state_dict(bestModelStateDict) - bestEpoch <- bestModel$epoch - ParallelLogger::logInfo(paste0('Loaded best model from epoch ', bestEpoch)) - self$bestEpoch <- bestEpoch + + bestEpoch <- epoch[[bestEpochInd]] + ParallelLogger::logInfo(paste0('Loaded best model (based on loss) from epoch ', bestEpoch)) + ParallelLogger::logInfo(paste0('ValLoss: ', valLosses[bestEpochInd])) + ParallelLogger::logInfo(paste0('valAUC: ', valAUCs[bestEpochInd])) + self$bestEpoch <- bestEpoch invisible(self) }, @@ -346,16 +382,21 @@ Estimator <- torch::nn_module( t = Sys.time() batch_loss = 0 i=1 + self$model$train() + + print('testing') + coro::loop(for (b in dataloader) { cat = b[[1]]$to(device=self$device) num = b[[2]]$to(device=self$device) - target= b[[3]]$to(device=self$device) + target = b[[3]]$to(device=self$device) out = self$model(num, cat) - loss = self$criterion(out, target) + loss = self$criterion(out, target) + batch_loss = batch_loss + loss - if (i %% 1 == 10) { + if (i %% 10 == 0) { elapsed_time <- Sys.time() - t ParallelLogger::logInfo('Loss: ', round((batch_loss/1)$item(), 3), ' | Time: ', round(elapsed_time,digits = 2), units(elapsed_time)) @@ -390,7 +431,7 @@ Estimator <- torch::nn_module( loss <- c(loss, self$criterion(pred, target)$item()) }) mean_loss <- mean(loss) - predictionsClass <- list(value=predictions, outcomeCount=targets) + predictionsClass <- data.frame(value=predictions, outcomeCount=targets) attr(predictionsClass, 'metaData')$predictionType <-'binary' auc <- computeAuc(predictionsClass) 
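# A brief sketch of the contract assumed at this point (the return statement is not
# shown in this hunk): score() is expected to end with something like
# return(list(loss = mean_loss, auc = auc)).
# The rewritten fit() above reads scores$loss and scores$auc each epoch, keeps every
# epoch's state_dict in memory (modelStateDict[[epochI]]), and restores the checkpoint
# at which.min(valLosses) as the best model, rather than saving torch files to
# resultsDir and re-reading them. predictionsClass is now a data.frame so that
# computeAuc() receives value/outcomeCount columns instead of a plain list.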
}) @@ -431,36 +472,6 @@ Estimator <- torch::nn_module( ParallelLogger::logInfo(paste('Loaded best model from epoch: ', epoch)) }, - # extracts best model from the results directory - extractBestModel = function(metric='val_loss'){ - - if (metric=='val_loss') - { - direction = 'min' - } - else - { - direction = 'max' - } - - # goes over checkpoints in folder and extracts metric value from name - checkpoints <- Sys.glob(file.path(self$resultsDir, paste0('*', metric, '*'))) - metric_value <- c() - for (file in checkpoints) { - fileName <- basename(file) - metric_value <- c(metric_value, as.double(strsplit(strsplit(fileName, paste0(metric, ':'))[[1]][2], '_')[[1]][1])) - } - - if (direction == 'max') { - best_index <- which.max(metric_value) - } else if (direction == 'min') { - best_index <- which.min(metric_value) - } - bestModel <- checkpoints[[best_index]] - - return(bestModel) - }, - # sends a batch of data to device ## TODO make agnostic of the form of batch @@ -485,32 +496,25 @@ Dataset <- torch::dataset( name = 'Dataset', initialize=function(data, labels, indices, numericalIndex) { - matrix <- data[indices,] - - tensor <- torch::torch_tensor(as.matrix(matrix), dtype=torch::torch_float32()) - # if labels have already been restricted to population - if (max(indices)>length(labels)) { - self$labels <- torch::torch_tensor(labels) - } - else { - self$labels <- torch::torch_tensor(labels)[indices] - } + # add labels + self$target <- torch::torch_tensor(labels[indices]) - notNumIndex <- 1:tensor$shape[2] != numericalIndex - self$cat <- tensor[, notNumIndex] - self$num <- tensor[,numericalIndex, drop=F] + # add features + #print(dim(as.matrix(data[indices,]))) ## testing + self$cat <- torch::torch_tensor(as.matrix(data[indices,-numericalIndex, drop = F]), dtype=torch::torch_float32()) + self$num <- torch::torch_tensor(as.matrix(data[indices,numericalIndex, drop = F]), dtype=torch::torch_float32()) }, .getitem = function(item) { - return(list(self$cat[item,], - self$num[item,], - self$labels[item])) + return(list(cat = self$cat[item,], + num = self$num[item,], + target = self$target[item])) }, .length = function() { - self$labels$shape[1] + self$target$size()[[1]] # shape[1] } ) diff --git a/extras/example.R b/extras/example.R index 3d8fcc4..551465b 100644 --- a/extras/example.R +++ b/extras/example.R @@ -1,20 +1,24 @@ # testing code (requires sequential branch of FeatureExtraction): rm(list = ls()) library(FeatureExtraction) +library(PatientLevelPrediction) library(DeepPatientLevelPrediction) connectionDetails <- Eunomia::getEunomiaConnectionDetails() Eunomia::createCohorts(connectionDetails) +temp <- F + covSet <- createCovariateSettings(useDemographicsGender = T, - useDemographicsAge = T, - useDemographicsRace = T, - useDemographicsEthnicity = T, - useDemographicsAgeGroup = T, - useConditionGroupEraLongTerm = T, - useDrugEraStartLongTerm = T, + useDemographicsAge = T, + useDemographicsRace = T, + useDemographicsEthnicity = T, + useDemographicsAgeGroup = T, + useConditionGroupEraLongTerm = T, + useDrugEraStartLongTerm = T, endDays = -1 - ) +) +if(temp){ covSetT <- createTemporalSequenceCovariateSettings(useDemographicsGender = T, useDemographicsAge = T, useDemographicsRace = T, @@ -26,6 +30,7 @@ covSetT <- createTemporalSequenceCovariateSettings(useDemographicsGender = T, timeInterval = 1, sequenceEndDay = -1, sequenceStartDay = -365*5) +} plpData <- PatientLevelPrediction::getPlpData(connectionDetails = connectionDetails, cdmDatabaseSchema = "main", @@ -39,6 +44,8 @@ plpData <- 
PatientLevelPrediction::getPlpData(connectionDetails = connectionDeta washoutPeriod = 365, covariateSettings = covSet ) + +if(temp){ plpDataT <- PatientLevelPrediction::getPlpData(connectionDetails = connectionDetails, cdmDatabaseSchema = "main", cohortId = 1, @@ -51,6 +58,9 @@ plpDataT <- PatientLevelPrediction::getPlpData(connectionDetails = connectionDet washoutPeriod = 365, covariateSettings = covSetT ) +} + + population <- PatientLevelPrediction::createStudyPopulation(plpData = plpData, outcomeId = 3, @@ -59,6 +69,7 @@ population <- PatientLevelPrediction::createStudyPopulation(plpData = plpData, riskWindowEnd = 365) ##sparseMat <- toSparseRTorch(plpData, population, map=NULL, temporal=T) +if(F){ x <- toSparseMDeep(plpData ,population, map=NULL, temporal=F) @@ -66,13 +77,13 @@ x <- toSparseMDeep(plpData ,population, x2 <- toSparseMDeep(plpDataT ,population, map=NULL, temporal=T) +} # code to train models deepset <- setDeepNNTorch(units=list(c(128, 64), 128), layer_dropout=c(0.2), lr =c(1e-4), decay=c(1e-5), outcome_weight = c(1.0), batch_size = c(100), epochs= c(1), seed=NULL ) -library(PatientLevelPrediction) #debug(fitDeepNNTorch) res <- runPlp(population = population, @@ -84,7 +95,34 @@ res <- runPlp(population = population, savePlpPlots = F, saveEvaluation = F) - + + +resSet <- setResNet(numLayers=2, sizeHidden=c(2^3,2^4), hiddenFactor=1:4, + residualDropout=c(0,0,1), + hiddenDropout=c(0,0,1), + normalization='BatchNorm', activation= 'RelU', + sizeEmbedding=2^(6), weightDecay=c(1e-6), + learningRate=c(0.1), seed=42, hyperParamSearch='random', + randomSample=3, + device='cpu', + batch_size=1024, + epochs=10) + +resSet <- setResNet(numLayers=1:16, sizeHidden=2^(6:10), hiddenFactor=1:4, + residualDropout=seq(0,0.3,0.05), hiddenDropout=seq(0,0.3,0.05), + normalization='BatchNorm', activation='RelU', + sizeEmbedding=2^(6:9), weightDecay=c(1e-6, 1e-3), + learningRate=c(1e-2,1e-5), seed=42, hyperParamSearch='random', + randomSample=10, device='cpu', batch_size=1024, epochs=5) + +res2 <- runPlp(population = population, + plpData = plpData, + nfold = 3, + modelSettings = resSet, + savePlpData = F, + savePlpResult = F, + savePlpPlots = F, + saveEvaluation = F) ##predict.customLibrary(libraryName, predictionFunction, inputList){ ## libraryName <- 'PatientLevelPrediction' diff --git a/man/predict.deepEstimator.Rd b/man/predict.deepEstimator.Rd new file mode 100644 index 0000000..f5600f8 --- /dev/null +++ b/man/predict.deepEstimator.Rd @@ -0,0 +1,11 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/Predict.R +\name{predict.deepEstimator} +\alias{predict.deepEstimator} +\title{predict.deepEstimator} +\usage{ +predict.deepEstimator(x) +} +\description{ +prediction function for models using estimator class +} From 4ec0480241801ce1479e245bf0e9f04d5a1992d3 Mon Sep 17 00:00:00 2001 From: jreps Date: Mon, 4 Oct 2021 19:28:04 -0400 Subject: [PATCH 015/140] fixed DeepNN - fixed DeepNN - it now works --- NAMESPACE | 20 +- R/DeepNNTorch.R | 81 +- R/SaveLoadPlp.R | 894 ------------------ R/helpers.R | 31 +- extras/example.R | 3 +- man/getPlpData.Rd | 118 --- man/loadPlpData.Rd | 27 - man/loadPlpFromCsv.Rd | 17 - man/loadPlpModel.Rd | 17 - man/loadPlpResult.Rd | 17 - man/loadPrediction.Rd | 17 - man/predictAndromeda.Rd | 36 - man/predictPlp.Rd | 27 - man/predictProbabilities.Rd | 26 - ...pEstimator.Rd => predict_deepEstimator.Rd} | 4 +- man/savePlpData.Rd | 29 - man/savePlpModel.Rd | 19 - man/savePlpResult.Rd | 19 - man/savePlpToCsv.Rd | 19 - man/savePrediction.Rd | 21 - 20 
files changed, 65 insertions(+), 1377 deletions(-) delete mode 100644 R/SaveLoadPlp.R delete mode 100644 man/getPlpData.Rd delete mode 100644 man/loadPlpData.Rd delete mode 100644 man/loadPlpFromCsv.Rd delete mode 100644 man/loadPlpModel.Rd delete mode 100644 man/loadPlpResult.Rd delete mode 100644 man/loadPrediction.Rd delete mode 100644 man/predictAndromeda.Rd delete mode 100644 man/predictPlp.Rd delete mode 100644 man/predictProbabilities.Rd rename man/{predict.deepEstimator.Rd => predict_deepEstimator.Rd} (79%) delete mode 100644 man/savePlpData.Rd delete mode 100644 man/savePlpModel.Rd delete mode 100644 man/savePlpResult.Rd delete mode 100644 man/savePlpToCsv.Rd delete mode 100644 man/savePrediction.Rd diff --git a/NAMESPACE b/NAMESPACE index 7e09f9e..f563447 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -1,25 +1,9 @@ # Generated by roxygen2: do not edit by hand -S3method(print,plpData) -S3method(print,summary.plpData) -S3method(summary,plpData) export(fitDeepNNTorch) export(fitResNet) -export(getPlpData) -export(loadPlpData) -export(loadPlpFromCsv) -export(loadPlpModel) -export(loadPlpResult) -export(loadPrediction) -export(predict.deepEstimator) -export(predictAndromeda) -export(predictPlp) -export(predictProbabilities) -export(savePlpData) -export(savePlpModel) -export(savePlpResult) -export(savePlpToCsv) -export(savePrediction) +export(predict_deepEstimator) +export(predict_deepNNTorch) export(setCIReNN) export(setCNNTorch) export(setCovNN) diff --git a/R/DeepNNTorch.R b/R/DeepNNTorch.R index a6f11e2..6d9722f 100644 --- a/R/DeepNNTorch.R +++ b/R/DeepNNTorch.R @@ -118,22 +118,21 @@ fitDeepNNTorch <- function(plpData,population, param, search='grid', quiet=F, # This can be changed after supporting the temporal covariates. }} - metaData <- attr(population, 'metaData') - if(!is.null(population$indexes)) + + if(!is.null(population$indexes)){ + metaData <- attr(population, 'metaData') population <- population[population$indexes>0,] - attr(population, 'metaData') <- metaData + attr(population, 'metaData') <- metaData + } + + # make sure popualtion is ordered as this is required for rest of code + population <- population[order(population$rowId),] start<-Sys.time() result<- toSparseM(plpData,population,map=NULL, temporal=F) data <- result$data - #one-hot encoding - y <- population$outcomeCount - y[y>0] <- 1 - population$y <- cbind(matrix(y), matrix(abs(y-1))) - - # do cross validation to find hyperParameter datas <- list(population=population, plpData=data) hyperParamSel <- list() @@ -196,30 +195,34 @@ trainDeepNNTorch <-function(plpData, population, index_vect <- unique(population$indexes) ParallelLogger::logInfo(paste('Training deep neural network using Torch with ',length(index_vect ),' fold CV')) + #initiate values perform <- c() - - # create prediction matrix to store all predictions - predictionMat <- population - predictionMat$value <- 0 - attr(predictionMat, "metaData") <- list(predictionType = "binary") - + prediction <- list() + length(prediction) <- length(index_vect) for(index in 1:length(index_vect)){ - ParallelLogger::logInfo(paste('Fold ',index, ' -- with ', sum(population$indexes!=index),'train rows')) + ParallelLogger::logInfo(paste('Fold ',index, ' -- with ', sum(population$indexes!=index & population$indexes>0),'train rows and ', + sum(population$indexes==index),' validation rows' )) if(is.na(units2)){ + ParallelLogger::logInfo(paste('single layer')) + ParallelLogger::logInfo(paste(ncol(plpData), units1, 2, sep='-')) model <- singleLayerNN(inputN = ncol(plpData), layer1 = 
units1, outputN = 2, layer_dropout = layer_dropout) } else if(is.na(units3)){ + ParallelLogger::logInfo(paste('double layer')) + ParallelLogger::logInfo(paste(ncol(plpData), units1,units2, 2, sep='-')) model <- doubleLayerNN(inputN = ncol(plpData), layer1 = units1, layer2 = units2, outputN = 2, layer_dropout = layer_dropout) } else{ + ParallelLogger::logInfo(paste('triple layer')) + ParallelLogger::logInfo(paste(ncol(plpData), units1, units2, units3, 2, sep='-')) model <- tripleLayerNN(inputN = ncol(plpData), layer1 = units1, layer2 = units2, @@ -243,45 +246,48 @@ trainDeepNNTorch <-function(plpData, population, for(i in 1:epochs){ for(batchRowIds in batches){ trainDataBatch <- convertToTorchData(plpData, - population$y, + population, rowIds = batchRowIds) optimizer$zero_grad() y_pred = model(trainDataBatch$x) - loss = criterion(y_pred, trainDataBatch$y) + ParallelLogger::logInfo(paste(dim(y_pred),collapse = '-')) + ParallelLogger::logInfo(paste(dim(trainDataBatch$y))) + loss = criterion(y_pred[,1], trainDataBatch$y) loss$backward() optimizer$step() - if(i%%10 == 0){ - # winners = y_pred$argmax(dim = 2) + 1 - # winners = y_pred - # corrects = (winners = y_train) - # accuracy = corrects$sum()$item() / y_train$size()[1] - # cat("Epoch:", i, "Loss:", loss$item(), " Accuracy:", accuracy, "\n") - - cat("Epoch:", i, "Loss:", loss$item(), "\n") - + if(i%%1 == 0){ + cat("Epoch:", i, "out of ", epochs , ": Loss:", loss$item(), "\n") } } } - + ParallelLogger::logInfo(paste('Model Trained')) model$eval() # batch predict - prediction <- batchPredict(model, + ParallelLogger::logInfo(paste('Calculating prediction on ', length(rowIdSet$testRowIds), ' test set patients')) + + prediction[[index]] <- batchPredict(model, plpData, - population, + population = population, predictRowIds = rowIdSet$testRowIds, batch_size ) - aucVal <- computeAuc(prediction) + ParallelLogger::logInfo(paste('Prediction on test done')) + + aucVal <- computeAuc(prediction[[index]]) + + ParallelLogger::logInfo(paste('AUC ', aucVal )) perform <- c(perform,aucVal) - predictionMat <- updatePredictionMat(predictionMat, - prediction) + ParallelLogger::logInfo('fold complete') } - auc <- computeAuc(predictionMat) + prediction <- do.call(rbind, prediction) + attr(prediction, "metaData") <- list(predictionType = "binary") + + auc <- computeAuc(prediction) foldPerm <- perform # Output ---------------------------------------------------------------- @@ -327,12 +333,12 @@ trainDeepNNTorch <-function(plpData, population, for(i in 1:epochs){ for(batchRowIds in batches){ trainDataBatch <- convertToTorchData(plpData, - population$y, + population, rowIds = batchRowIds) optimizer$zero_grad() y_pred = model(trainDataBatch$x) - loss = criterion(y_pred, trainDataBatch$y) + loss = criterion(y_pred[,1], trainDataBatch$y) loss$backward() optimizer$step() @@ -353,12 +359,11 @@ trainDeepNNTorch <-function(plpData, population, auc <- computeAuc(prediction) foldPerm <- auc - predictionMat <- prediction } result <- list(model=model, auc=auc, - prediction = predictionMat, + prediction = prediction, hyperSum = unlist(list(units1=units1,units2=units2,units3=units3, layer_dropout=layer_dropout,lr =lr, decay=decay, batch_size = batch_size, epochs= epochs))) diff --git a/R/SaveLoadPlp.R b/R/SaveLoadPlp.R deleted file mode 100644 index 274d9ad..0000000 --- a/R/SaveLoadPlp.R +++ /dev/null @@ -1,894 +0,0 @@ -# @file PlpSaveLoad.R -# -# Copyright 2020 Observational Health Data Sciences and Informatics -# -# This file is part of CohortMethod -# -# Licensed under the 
Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -#' Get the patient level prediction data from the server -#' @description -#' This function executes a large set of SQL statements against the database in OMOP CDM format to -#' extract the data needed to perform the analysis. -#' -#' @details -#' Based on the arguments, the at risk cohort data is retrieved, as well as outcomes -#' occurring in these subjects. The at risk cohort is identified through -#' user-defined cohorts in a cohort table either inside the CDM instance or in a separate schema. -#' Similarly, outcomes are identified -#' through user-defined cohorts in a cohort table either inside the CDM instance or in a separate -#' schema. Covariates are automatically extracted from the appropriate tables within the CDM. -#' If you wish to exclude concepts from covariates you will need to -#' manually add the concept_ids and descendants to the \code{excludedCovariateConceptIds} of the -#' \code{covariateSettings} argument. -#' -#' @param connectionDetails An R object of type\cr\code{connectionDetails} created using the -#' function \code{createConnectionDetails} in the -#' \code{DatabaseConnector} package. -#' @param cdmDatabaseSchema The name of the database schema that contains the OMOP CDM -#' instance. Requires read permissions to this database. On SQL -#' Server, this should specifiy both the database and the schema, -#' so for example 'cdm_instance.dbo'. -#' @param oracleTempSchema For Oracle only: the name of the database schema where you want -#' all temporary tables to be managed. Requires create/insert -#' permissions to this database. -#' @param cohortId A unique identifier to define the at risk cohort. CohortId is -#' used to select the cohort_concept_id in the cohort-like table. -#' @param outcomeIds A list of cohort_definition_ids used to define outcomes (-999 mean no outcome gets downloaded). -#' @param studyStartDate A calendar date specifying the minimum date that a cohort index -#' date can appear. Date format is 'yyyymmdd'. -#' @param studyEndDate A calendar date specifying the maximum date that a cohort index -#' date can appear. Date format is 'yyyymmdd'. Important: the study -#' end data is also used to truncate risk windows, meaning no outcomes -#' beyond the study end date will be considered. -#' @param cohortDatabaseSchema The name of the database schema that is the location where the -#' cohort data used to define the at risk cohort is available. -#' Requires read permissions to this database. -#' @param cohortTable The tablename that contains the at risk cohort. cohortTable has -#' format of COHORT table: cohort_concept_id, SUBJECT_ID, -#' COHORT_START_DATE, COHORT_END_DATE. -#' @param outcomeDatabaseSchema The name of the database schema that is the location where -#' the data used to define the outcome cohorts is available. -#' Requires read permissions to this database. -#' @param outcomeTable The tablename that contains the outcome cohorts. 
Expectation is -#' outcomeTable has format of COHORT table: -#' COHORT_DEFINITION_ID, SUBJECT_ID, COHORT_START_DATE, -#' COHORT_END_DATE. -#' @param cdmVersion Define the OMOP CDM version used: currently support "4", "5" and "6". -#' @param firstExposureOnly Should only the first exposure per subject be included? Note that -#' this is typically done in the \code{createStudyPopulation} function, -#' but can already be done here for efficiency reasons. -#' @param washoutPeriod The mininum required continuous observation time prior to index -#' date for a person to be included in the at risk cohort. Note that -#' this is typically done in the \code{createStudyPopulation} function, -#' but can already be done here for efficiency reasons. -#' @param sampleSize If not NULL, only this number of people will be sampled from the target population (Default NULL) -#' -#' @param covariateSettings An object of type \code{covariateSettings} as created using the -#' \code{createCovariateSettings} function in the -#' \code{FeatureExtraction} package. -#' @param excludeDrugsFromCovariates A redundant option -#' -#' @return -#' Returns an object of type \code{plpData}, containing information on the cohorts, their -#' outcomes, and baseline covariates. Information about multiple outcomes can be captured at once for -#' efficiency reasons. This object is a list with the following components: \describe{ -#' \item{outcomes}{A data frame listing the outcomes per person, including the time to event, and -#' the outcome id. Outcomes are not yet filtered based on risk window, since this is done at -#' a later stage.} \item{cohorts}{A data frame listing the persons in each cohort, listing their -#' exposure status as well as the time to the end of the observation period and time to the end of the -#' cohort (usually the end of the exposure era).} \item{covariates}{An ffdf object listing the -#' baseline covariates per person in the two cohorts. This is done using a sparse representation: -#' covariates with a value of 0 are omitted to save space.} \item{covariateRef}{An ffdf object describing the covariates that have been extracted.} -#' \item{metaData}{A list of objects with information on how the cohortMethodData object was -#' constructed.} } The generic \code{()} and \code{summary()} functions have been implemented for this object. 
-#' -#' @export -getPlpData <- function(connectionDetails, - cdmDatabaseSchema, - oracleTempSchema = cdmDatabaseSchema, - cohortId, - outcomeIds, - studyStartDate = "", - studyEndDate = "", - cohortDatabaseSchema = cdmDatabaseSchema, - cohortTable = "cohort", - outcomeDatabaseSchema = cdmDatabaseSchema, - outcomeTable = "cohort", - cdmVersion = "5", - firstExposureOnly = FALSE, - washoutPeriod = 0, - sampleSize = NULL, - covariateSettings, - excludeDrugsFromCovariates = FALSE) { - if (studyStartDate != "" && regexpr("^[12][0-9]{3}[01][0-9][0-3][0-9]$", studyStartDate) == -1) - stop("Study start date must have format YYYYMMDD") - if (studyEndDate != "" && regexpr("^[12][0-9]{3}[01][0-9][0-3][0-9]$", studyEndDate) == -1) - stop("Study end date must have format YYYYMMDD") - if(!is.null(sampleSize)){ - if(!class(sampleSize) %in% c('numeric', 'integer')) - stop("sampleSize must be numeric") - } - - if(is.null(cohortId)) - stop('User must input cohortId') - if(length(cohortId)>1) - stop('Currently only supports one cohortId at a time') - if(is.null(outcomeIds)) - stop('User must input outcomeIds') - #ToDo: add other checks the inputs are valid - - connection <- DatabaseConnector::connect(connectionDetails) - on.exit(DatabaseConnector::disconnect(connection)) - dbms <- connectionDetails$dbms - - writeLines("\nConstructing the at risk cohort") - if(!is.null(sampleSize)) writeLines(paste("\n Sampling ",sampleSize, " people")) - renderedSql <- SqlRender::loadRenderTranslateSql("CreateCohorts.sql", - packageName = "PatientLevelPrediction", - dbms = dbms, - oracleTempSchema = oracleTempSchema, - cdm_database_schema = cdmDatabaseSchema, - cohort_database_schema = cohortDatabaseSchema, - cohort_table = cohortTable, - cdm_version = cdmVersion, - cohort_id = cohortId, - study_start_date = studyStartDate, - study_end_date = studyEndDate, - first_only = firstExposureOnly, - washout_period = washoutPeriod, - use_sample = !is.null(sampleSize), - sample_number=sampleSize - ) - DatabaseConnector::executeSql(connection, renderedSql) - - writeLines("Fetching cohorts from server") - start <- Sys.time() - cohortSql <- SqlRender::loadRenderTranslateSql("GetCohorts.sql", - packageName = "PatientLevelPrediction", - dbms = dbms, - oracleTempSchema = oracleTempSchema, - cdm_version = cdmVersion) - cohorts <- DatabaseConnector::querySql(connection, cohortSql) - colnames(cohorts) <- SqlRender::snakeCaseToCamelCase(colnames(cohorts)) - metaData.cohort <- list(cohortId = cohortId, - studyStartDate = studyStartDate, - studyEndDate = studyEndDate) - - if(nrow(cohorts)==0) - stop('Target population is empty') - - delta <- Sys.time() - start - writeLines(paste("Loading cohorts took", signif(delta, 3), attr(delta, "units"))) - - #covariateSettings$useCovariateCohortIdIs1 <- TRUE - covariateData <- FeatureExtraction::getDbCovariateData(connection = connection, - oracleTempSchema = oracleTempSchema, - cdmDatabaseSchema = cdmDatabaseSchema, - cdmVersion = cdmVersion, - cohortTable = "#cohort_person", - cohortTableIsTemp = TRUE, - rowIdField = "row_id", - covariateSettings = covariateSettings) - # add indexes for tidyCov + covariate summary - Andromeda::createIndex(covariateData$covariates, c('rowId'), - indexName = 'covariates_rowId') - Andromeda::createIndex(covariateData$covariates, c('covariateId'), - indexName = 'covariates_covariateId') - Andromeda::createIndex(covariateData$covariates, c('covariateId', 'covariateValue'), - indexName = 'covariates_covariateId_value') - - if(max(outcomeIds)!=-999){ - writeLines("Fetching 
outcomes from server") - start <- Sys.time() - outcomeSql <- SqlRender::loadRenderTranslateSql("GetOutcomes.sql", - packageName = "PatientLevelPrediction", - dbms = dbms, - oracleTempSchema = oracleTempSchema, - cdm_database_schema = cdmDatabaseSchema, - outcome_database_schema = outcomeDatabaseSchema, - outcome_table = outcomeTable, - outcome_ids = outcomeIds, - cdm_version = cdmVersion) - outcomes <- DatabaseConnector::querySql(connection, outcomeSql) - colnames(outcomes) <- SqlRender::snakeCaseToCamelCase(colnames(outcomes)) - metaData.outcome <- data.frame(outcomeIds =outcomeIds) - attr(outcomes, "metaData") <- metaData.outcome - if(nrow(outcomes)==0) - stop('No Outcomes') - - metaData.cohort$attrition <- getCounts2(cohorts,outcomes, "Original cohorts") - attr(cohorts, "metaData") <- metaData.cohort - - delta <- Sys.time() - start - writeLines(paste("Loading outcomes took", signif(delta, 3), attr(delta, "units"))) - } else { - outcomes <- NULL - } - - - - - # Remove temp tables: - renderedSql <- SqlRender::loadRenderTranslateSql("RemoveCohortTempTables.sql", - packageName = "PatientLevelPrediction", - dbms = dbms, - oracleTempSchema = oracleTempSchema) - DatabaseConnector::executeSql(connection, renderedSql, progressBar = FALSE, reportOverallTime = FALSE) - #DatabaseConnector::disconnect(connection) - - metaData <- covariateData$metaData - metaData$call <- match.call() - metaData$call$connectionDetails = connectionDetails - metaData$call$connection = NULL - metaData$call$cdmDatabaseSchema = cdmDatabaseSchema - metaData$call$oracleTempSchema = oracleTempSchema - metaData$call$cohortId = cohortId - metaData$call$outcomeIds = outcomeIds - metaData$call$studyStartDate = studyStartDate - metaData$call$studyEndDate = studyEndDate - metaData$call$cohortDatabaseSchema = cohortDatabaseSchema - metaData$call$cohortTable = cohortTable - metaData$call$outcomeDatabaseSchema = outcomeDatabaseSchema - metaData$call$outcomeTable = outcomeTable - metaData$call$cdmVersion = cdmVersion - metaData$call$firstExposureOnly = firstExposureOnly - metaData$call$washoutPeriod = washoutPeriod - metaData$call$covariateSettings= covariateSettings - metaData$call$sampleSize = sampleSize - - # create the temporal settings (if temporal use) - timeReference <- NULL - if(!is.null(covariateSettings$temporal)){ - if(covariateSettings$temporal){ - # make sure time days populated - if(length(covariateSettings$temporalStartDays)>0){ - timeReference = data.frame(timeId=1:length(covariateSettings$temporalStartDays), - startDay = covariateSettings$temporalStartDays, - endDay = covariateSettings$temporalEndDays) - } - }} - - - result <- list(cohorts = cohorts, - outcomes = outcomes, - covariateData = covariateData, - timeRef = timeReference, - metaData = metaData) - - class(result) <- "plpData" - return(result) -} - - -#' Save the cohort data to folder -#' -#' @description -#' \code{savePlpData} saves an object of type plpData to folder. -#' -#' @param plpData An object of type \code{plpData} as generated using -#' \code{getPlpData}. -#' @param file The name of the folder where the data will be written. The folder should -#' not yet exist. -#' @param envir The environment for to evaluate variables when saving -#' @param overwrite Whether to force overwrite an existing file -#' @details -#' The data will be written to a set of files in the folder specified by the user. 
-#' -#' @examples -#' # todo -#' -#' @export -savePlpData <- function(plpData, file, envir=NULL, overwrite=F) { - if (missing(plpData)) - stop("Must specify plpData") - if (missing(file)) - stop("Must specify file") - if (!class(plpData) %in% c("plpData","plpData.libsvm" )) - stop("Data not of class plpData") - if(dir.exists(file.path(file, "covariates"))){ - stop('Folder to save covariates already exists...') - } - - if(!dir.exists(file)){ - dir.create(file) - } - - # save the actual values in the metaData - # TODO - only do this if exists in parent or environ - if(is.null(plpData$metaData$call$sampleSize)){ # fixed a bug when sampleSize is NULL - plpData$metaData$call$sampleSize <- 'NULL' - } - for(i in 2:length(plpData$metaData$call)){ - if(!is.null(plpData$metaData$call[[i]])) - plpData$metaData$call[[i]] <- eval(plpData$metaData$call[[i]], envir = envir) - } - - #FeatureExtraction::saveCovariateData(covariateData = plpData$covariateData, file = file.path(file, "covariates")) - Andromeda::saveAndromeda(plpData$covariateData, file = file.path(file, "covariates"), maintainConnection = T) - saveRDS(plpData$timeRef, file = file.path(file, "timeRef.rds")) - saveRDS(plpData$cohorts, file = file.path(file, "cohorts.rds")) - saveRDS(plpData$outcomes, file = file.path(file, "outcomes.rds")) - saveRDS(plpData$metaData, file = file.path(file, "metaData.rds")) -} - -#' Load the cohort data from a folder -#' -#' @description -#' \code{loadPlpData} loads an object of type plpData from a folder in the file -#' system. -#' -#' @param file The name of the folder containing the data. -#' @param readOnly If true, the data is opened read only. -#' -#' @details -#' The data will be written to a set of files in the folder specified by the user. -#' -#' @return -#' An object of class plpData. -#' -#' @examples -#' # todo -#' -#' @export -loadPlpData <- function(file, readOnly = TRUE) { - if (!file.exists(file)) - stop(paste("Cannot find folder", file)) - if (!file.info(file)$isdir) - stop(paste("Not a folder", file)) - - result <- list(covariateData = FeatureExtraction::loadCovariateData(file = file.path(file, "covariates")), - timeRef = readRDS(file.path(file, "timeRef.rds")), - cohorts = readRDS(file.path(file, "cohorts.rds")), - outcomes = readRDS(file.path(file, "outcomes.rds")), - metaData = readRDS(file.path(file, "metaData.rds"))) - - class(result) <- "plpData" - - return(result) -} - - -#' @export -print.plpData <- function(x, ...) 
{ - writeLines("plpData object") - writeLines("") - writeLines(paste("At risk concept ID:", attr(x$cohorts, "metaData")$cohortId)) - writeLines(paste("Outcome concept ID(s):", paste(attr(x$outcomes, "metaData")$outcomeIds, collapse = ","))) -} - -#' @method summary plpData -#' @export -summary.plpData <- function(object,...){ - people <- length(unique(object$cohorts$subjectId)) - outcomeCounts <- data.frame(outcomeId = attr(object$outcomes, "metaData")$outcomeIds, - eventCount = 0, - personCount = 0) - for (i in 1:nrow(outcomeCounts)) { - outcomeCounts$eventCount[i] <- sum(object$outcomes$outcomeId == attr(object$outcomes, "metaData")$outcomeIds[i]) - outcomeCounts$personCount[i] <- length(unique(object$outcomes$rowId[object$outcomes$outcomeId == attr(object$outcomes, "metaData")$outcomeIds[i]])) - } - - covDetails <- FeatureExtraction::summary(object$covariateData) - result <- list(metaData = append(append(object$metaData, attr(object$cohorts, "metaData")), attr(object$outcomes, "metaData")), - people = people, - outcomeCounts = outcomeCounts, - covariateCount = covDetails$covariateCount, - covariateValueCount = covDetails$covariateValueCount) - class(result) <- "summary.plpData" - return(result) -} - -#' @export -print.summary.plpData <- function(x, ...) { - writeLines("plpData object summary") - writeLines("") - writeLines(paste("At risk cohort concept ID:", x$metaData$cohortId)) - writeLines(paste("Outcome concept ID(s):", x$metaData$outcomeIds, collapse = ",")) - writeLines("") - writeLines(paste("People:", paste(x$people))) - writeLines("") - writeLines("Outcome counts:") - outcomeCounts <- x$outcomeCounts - rownames(outcomeCounts) <- outcomeCounts$outcomeId - outcomeCounts$outcomeId <- NULL - colnames(outcomeCounts) <- c("Event count", "Person count") - stats::printCoefmat(outcomeCounts) - writeLines("") - writeLines("Covariates:") - writeLines(paste("Number of covariates:", x$covariateCount)) - writeLines(paste("Number of non-zero covariate values:", x$covariateValueCount)) -} - - -#' Saves the plp model -#' -#' @details -#' Saves the plp model to a user specificed folder -#' -#' @param plpModel A trained classifier returned by running \code{runPlp()$model} -#' @param dirPath A location to save the model to -#' -#' @export -savePlpModel <- function(plpModel, dirPath){ - if (missing(plpModel)) - stop("Must specify plpModel") - if (missing(dirPath)) - stop("Must specify directory path") - if (class(plpModel) != "plpModel") - stop("Not a plpModel") - - if(!dir.exists(dirPath)) dir.create(dirPath) - - - # If model is saved on hard drive move it... 
- #============================================================ - moveFile <- moveHdModel(plpModel, dirPath ) - if(!moveFile){ - ParallelLogger::logError('Moving model files error') - } - #============================================================ - - - # if deep (keras) then save hdfs - if(attr(plpModel, 'type')%in%c('deep', 'deepMulti','deepEnsemble')){ - - if(attr(plpModel, 'type')=='deepEnsemble'){ - tryCatch( - {#saveRDS(plpModel, file = file.path(dirPath, "deepEnsemble_model.rds")) - for (i in seq(plpModel$modelSettings$modelParameters$numberOfEnsembleNetwork)){ - model<-keras::serialize_model(plpModel$model[[i]], include_optimizer = TRUE) - keras::save_model_hdf5(model, filepath = file.path(dirPath, "keras_model",i)) - }},error=function(e) NULL - ) - } - if(attr(plpModel, 'type')=='deep'){ - keras::save_model_hdf5(plpModel$model, filepath = file.path(dirPath, "keras_model")) - } - if(attr(plpModel, 'type')=='deepMulti'){ - saveRDS(attr(plpModel, 'inputs'), file = file.path(dirPath, "inputs_attr.rds")) - } - if(attr(plpModel, 'type')=='deepNNTorch'){ - torch::torch_save(model, file = file.path(dirPath, "model.rt")) - } - } else if(attr(plpModel, 'type') == "xgboost"){ - # fixing xgboost save/load issue - xgboost::xgb.save(model = plpModel$model, fname = file.path(dirPath, "model")) - } else { - saveRDS(plpModel$model, file = file.path(dirPath, "model.rds")) - } - #saveRDS(plpModel$predict, file = file.path(dirPath, "transform.rds")) - saveRDS(NULL, file = file.path(dirPath, "transform.rds")) - saveRDS(plpModel$index, file = file.path(dirPath, "index.rds")) - saveRDS(plpModel$trainCVAuc, file = file.path(dirPath, "trainCVAuc.rds")) - saveRDS(plpModel$hyperParamSearch, file = file.path(dirPath, "hyperParamSearch.rds")) - saveRDS(plpModel$modelSettings, file = file.path(dirPath, "modelSettings.rds")) - saveRDS(plpModel$metaData, file = file.path(dirPath, "metaData.rds")) - saveRDS(plpModel$populationSettings, file = file.path(dirPath, "populationSettings.rds")) - saveRDS(plpModel$trainingTime, file = file.path(dirPath, "trainingTime.rds")) - saveRDS(plpModel$varImp, file = file.path(dirPath, "varImp.rds")) - saveRDS(plpModel$dense, file = file.path(dirPath, "dense.rds")) - saveRDS(plpModel$cohortId, file = file.path(dirPath, "cohortId.rds")) - saveRDS(plpModel$outcomeId, file = file.path(dirPath, "outcomeId.rds")) - saveRDS(plpModel$analysisId, file = file.path(dirPath, "analysisId.rds")) - #if(!is.null(plpModel$covariateMap)) - saveRDS(plpModel$covariateMap, file = file.path(dirPath, "covariateMap.rds")) - - attributes <- list(type=attr(plpModel, 'type'), predictionType=attr(plpModel, 'predictionType') ) - saveRDS(attributes, file = file.path(dirPath, "attributes.rds")) - - -} - -moveHdModel <- function(plpModel, dirPath ){ - #================================================================== - # if python then move pickle - #================================================================== - if(attr(plpModel, 'type') %in% c('pythonOld','pythonReticulate', 'pythonAuto') ){ - if(!dir.exists(file.path(dirPath,'python_model'))) - dir.create(file.path(dirPath,'python_model')) - for(file in dir(plpModel$model)){ #DOES THIS CORRECTLY TRANSFER AUTOENCODER BITS? 
- file.copy(file.path(plpModel$model,file), - file.path(dirPath,'python_model'), overwrite=TRUE, recursive = FALSE, - copy.mode = TRUE, copy.date = FALSE) - } - } - - #================================================================== - # if sagemaker then move pickle - #================================================================== - if(attr(plpModel, 'type') =='sagemaker'){ - if(!dir.exists(file.path(dirPath,'sagemaker_model'))) - dir.create(file.path(dirPath,'sagemaker_model')) - for(file in dir(plpModel$model$loc)){ - file.copy(file.path(plpModel$model$loc,file), - file.path(dirPath,'sagemaker_model'), overwrite=TRUE, recursive = FALSE, - copy.mode = TRUE, copy.date = FALSE) - } - } - - #================================================================== - # if knn then move model - #================================================================== - if(attr(plpModel, 'type') =='knn'){ - if(!dir.exists(file.path(dirPath,'knn_model'))) - dir.create(file.path(dirPath,'knn_model')) - for(file in dir(plpModel$model)){ - file.copy(file.path(plpModel$model,file), - file.path(dirPath,'knn_model'), overwrite=TRUE, recursive = FALSE, - copy.mode = TRUE, copy.date = FALSE) - } - } - - return(TRUE) -} - -#' loads the plp model -#' -#' @details -#' Loads a plp model that was saved using \code{savePlpModel()} -#' -#' @param dirPath The location of the model -#' -#' @export -loadPlpModel <- function(dirPath) { - if (!file.exists(dirPath)) - stop(paste("Cannot find folder", dirPath)) - if (!file.info(dirPath)$isdir) - stop(paste("Not a folder", dirPath)) - - hyperParamSearch <- tryCatch(readRDS(file.path(dirPath, "hyperParamSearch.rds")), - error=function(e) NULL) - # add in these as they got dropped - outcomeId <- tryCatch(readRDS(file.path(dirPath, "outcomeId.rds")), - error=function(e) NULL) - cohortId <- tryCatch(readRDS(file.path(dirPath, "cohortId.rds")), - error=function(e) NULL) - dense <- tryCatch(readRDS(file.path(dirPath, "dense.rds")), - error=function(e) NULL) - covariateMap <- tryCatch(readRDS(file.path(dirPath, "covariateMap.rds")), - error=function(e) NULL) - analysisId <- tryCatch(readRDS(file.path(dirPath, "analysisId.rds")), - error=function(e) NULL) - - if(file.exists(file.path(dirPath, "keras_model"))){ - ensure_installed("keras") - model <- keras::load_model_hdf5(file.path(dirPath, "keras_model")) - } else if(readRDS(file.path(dirPath, "attributes.rds"))$type == "xgboost"){ - ensure_installed("xgboost") - # fixing xgboost save/load issue - model <- xgboost::xgb.load(file.path(dirPath, "model")) - } else { - model <- readRDS(file.path(dirPath, "model.rds")) - } - - result <- list(model = model, - modelSettings = readRDS(file.path(dirPath, "modelSettings.rds")), - hyperParamSearch = hyperParamSearch, - trainCVAuc = readRDS(file.path(dirPath, "trainCVAuc.rds")), - metaData = readRDS(file.path(dirPath, "metaData.rds")), - populationSettings= readRDS(file.path(dirPath, "populationSettings.rds")), - outcomeId = outcomeId, - cohortId = cohortId, - varImp = readRDS(file.path(dirPath, "varImp.rds")), - trainingTime = readRDS(file.path(dirPath, "trainingTime.rds")), - covariateMap =covariateMap, - predict = readRDS(file.path(dirPath, "transform.rds")), - index = readRDS(file.path(dirPath, "index.rds")), - dense = dense, - analysisId = analysisId) - - #attributes <- readRDS(file.path(dirPath, "attributes.rds")) - attributes <- readRDS(file.path(dirPath, "attributes.rds")) - attr(result, 'type') <- attributes$type - attr(result, 'predictionType') <- attributes$predictionType - 
class(result) <- "plpModel" - - # update the model location to the load dirPath - result <- updateModelLocation(result, dirPath) - - # make this backwrds compatible for ffdf: - result$predict <- createTransform(result) - - return(result) -} - -updateModelLocation <- function(plpModel, dirPath){ - type <- attr(plpModel, 'type') - # if python update the location - if( type %in% c('pythonOld','pythonReticulate', 'pythonAuto')){ - plpModel$model <- file.path(dirPath,'python_model') - plpModel$predict <- createTransform(plpModel) - } - if( type =='sagemaker'){ - plpModel$model$loc <- file.path(dirPath,'sagemaker_model') - plpModel$predict <- createTransform(plpModel) - } - # if knn update the locaiton - TODO !!!!!!!!!!!!!! - if( type =='knn'){ - plpModel$model <- file.path(dirPath,'knn_model') - plpModel$predict <- createTransform(plpModel) - } - if( type =='deep' ){ - plpModel$predict <- createTransform(plpModel) - } - if( type =='deepEnsemble' ){ - plpModel$predict <- createTransform(plpModel) - } - if( type =='deepMulti'){ - attr(plpModel, 'inputs') <- tryCatch(readRDS(file.path(dirPath, "inputs_attr.rds")), - error=function(e) NULL) - plpModel$predict <- createTransform(plpModel) - - } - - return(plpModel) -} - - -#' Saves the prediction dataframe to RDS -#' -#' @details -#' Saves the prediction data frame returned by predict.R to an RDS file and returns the fileLocation where the prediction is saved -#' -#' @param prediction The prediciton data.frame -#' @param dirPath The directory to save the prediction RDS -#' @param fileName The name of the RDS file that will be saved in dirPath -#' -#' @export -savePrediction <- function(prediction, dirPath, fileName='prediction.rds'){ - #TODO check inupts - saveRDS(prediction, file=file.path(dirPath,fileName)) - - return(file.path(dirPath,fileName)) -} - -#' Loads the prediciton dataframe to csv -#' -#' @details -#' Loads the prediciton RDS file -#' -#' @param fileLocation The location with the saved prediction -#' -#' @export -loadPrediction <- function(fileLocation){ - #TODO check inupts - prediction <- readRDS(file=fileLocation) - return(prediction) -} - -#' Saves the result from runPlp into the location directory -#' -#' @details -#' Saves the result from runPlp into the location directory -#' -#' @param result The result of running runPlp() -#' @param dirPath The directory to save the csv -#' -#' @export -savePlpResult <- function(result, dirPath){ - if (missing(result)) - stop("Must specify runPlp output") - if (missing(dirPath)) - stop("Must specify directory location") - #if (class(plpModel) != "plpModel") - # stop("Not a plpModel") - - if(!dir.exists(dirPath)) dir.create(dirPath, recursive = T) - - savePlpModel(result$model, dirPath=file.path(dirPath,'model') ) - saveRDS(result$analysisRef, file = file.path(dirPath, "analysisRef.rds")) - saveRDS(result$inputSetting, file = file.path(dirPath, "inputSetting.rds")) - saveRDS(result$executionSummary, file = file.path(dirPath, "executionSummary.rds")) - saveRDS(result$prediction, file = file.path(dirPath, "prediction.rds")) - saveRDS(result$performanceEvaluation, file = file.path(dirPath, "performanceEvaluation.rds")) - #saveRDS(result$performanceEvaluationTrain, file = file.path(dirPath, "performanceEvaluationTrain.rds")) - saveRDS(result$covariateSummary, file = file.path(dirPath, "covariateSummary.rds")) - - -} - -#' Loads the evalaution dataframe -#' -#' @details -#' Loads the evaluation -#' -#' @param dirPath The directory where the evaluation was saved -#' -#' @export -loadPlpResult <- 
function(dirPath){ - if (!file.exists(dirPath)) - stop(paste("Cannot find folder", dirPath)) - if (!file.info(dirPath)$isdir) - stop(paste("Not a folder", dirPath)) - - - result <- list(model = loadPlpModel(file.path(dirPath, "model")), - analysisRef = readRDS(file.path(dirPath, "analysisRef.rds")), - inputSetting = readRDS(file.path(dirPath, "inputSetting.rds")), - executionSummary = readRDS(file.path(dirPath, "executionSummary.rds")), - prediction = readRDS(file.path(dirPath, "prediction.rds")), - performanceEvaluation = readRDS(file.path(dirPath, "performanceEvaluation.rds")), - #performanceEvaluationTrain= readRDS(file.path(dirPath, "performanceEvaluationTrain.rds")), - covariateSummary = readRDS(file.path(dirPath, "covariateSummary.rds")) - ) - class(result) <- "runPlp" - - return(result) - -} - - -#result$inputSetting$dataExtrractionSettings$covariateSettings -formatCovariateSettings <- function(covariateSettings){ - - if(class(covariateSettings) == "covariateSettings"){ - return(list(cvs = data.frame(X = names(unlist(covariateSettings)), x= unlist(covariateSettings)), - fun = attr(covariateSettings,'fun'))) - - } else{ - return(list(cvs = do.call(rbind, lapply(1:length(covariateSettings), function(i){ - inds <- which(lapply(covariateSettings[[i]], class) == "function") - if(length(inds)>0){ - for(j in inds){ - covariateSettings[[i]][[j]] <- paste0(deparse(covariateSettings[[i]][[j]]), collapse = " ") - } - } - tempResult <- data.frame(names = names(unlist(covariateSettings[[i]])), - values = unlist(covariateSettings[[i]])) - tempResult$settingsId <- i - return(tempResult) - })), - fun = unlist(lapply(covariateSettings, function(x) attr(x,'fun'))) - ) - ) - } - -} - -reformatCovariateSettings <- function(covariateSettingsLocation){ - # adding this to stop warnings when files does not exist - if(!file.exists(covariateSettingsLocation)){ - return(NULL) - } - cs <- utils::read.csv(covariateSettingsLocation, stringsAsFactors=FALSE) - fun <- utils::read.csv(gsub('.csv','_fun.csv',covariateSettingsLocation), stringsAsFactors=FALSE) - - if(sum(colnames(cs)%in%c('X','x'))==2){ - covariateSettings <- cs$x - covariateSettings <- as.list(covariateSettings) - names(covariateSettings) <- cs$X - attr(covariateSettings,'fun') <- fun$x - class(covariateSettings) <- 'covariateSettings' - } else { - - covariateSettings <- list() - length(covariateSettings) <- max(cs$settingsId) - - for(i in 1:max(cs$settingsId)){ - covariateSettings[[i]] <- cs$values[cs$settingsId==i] - covariateSettings[[i]] <- as.list(covariateSettings[[i]]) - names(covariateSettings[[i]]) <- cs$names[cs$settingsId==i] - attr(covariateSettings[[i]],'fun') <- fun$x[i] - } - - } - -return(covariateSettings) -} - - -#' Save parts of the plp result as a csv for transparent sharing -#' -#' @details -#' Saves the main results as a csv (these files can be read by the shiny app) -#' -#' @param result An object of class runPlp with development or validation results -#' @param dirPath The directory the save the results as csv files -#' -#' @export -savePlpToCsv <- function(result, dirPath){ - - #inputSetting - if(!dir.exists(file.path(dirPath, 'inputSetting'))){dir.create(file.path(dirPath, 'inputSetting'), recursive = T)} - utils::write.csv(result$inputSetting$modelSettings$model, file = file.path(dirPath, 'inputSetting','modelSettings_model.csv'), row.names = F) - - if(!is.null(result$inputSetting$modelSettings$param)){ - utils::write.csv(as.data.frame(t(unlist(result$inputSetting$modelSettings$param))), file = file.path(dirPath, 
'inputSetting','modelSettings_param.csv'), row.names = F) - }else{ - utils::write.csv(NULL, file = file.path(dirPath, 'inputSetting','modelSettings_param.csv'), row.names = F) - } - utils::write.csv(result$inputSetting$modelSettings$name, file = file.path(dirPath, 'inputSetting','modelSettings_name.csv'), row.names = F) - if(!is.null(result$inputSetting$dataExtrractionSettings$covariateSettings)){ - utils::write.csv(formatCovariateSettings(result$inputSetting$dataExtrractionSettings$covariateSettings)$cvs, file = file.path(dirPath, 'inputSetting','dataExtrractionSettings_covariateSettings.csv'), row.names = F) - utils::write.csv(formatCovariateSettings(result$inputSetting$dataExtrractionSettings$covariateSettings)$fun, file = file.path(dirPath, 'inputSetting','dataExtrractionSettings_covariateSettings_fun.csv'), row.names = F) - } - utils::write.csv(result$inputSetting$populationSettings$attrition, file = file.path(dirPath, 'inputSetting','populationSettings_attrition.csv'), row.names = F) - result$inputSetting$populationSettings$attrition <- NULL - utils::write.csv(result$inputSetting$populationSettings, file = file.path(dirPath, 'inputSetting','populationSettings.csv'), row.names = F) - - #executionSummary - if(!dir.exists(file.path(dirPath, 'executionSummary'))){dir.create(file.path(dirPath, 'executionSummary'), recursive = T)} - utils::write.csv(result$executionSummary$PackageVersion, file = file.path(dirPath, 'executionSummary','PackageVersion.csv'), row.names = F) - utils::write.csv(unlist(result$executionSummary$PlatformDetails), file = file.path(dirPath, 'executionSummary','PlatformDetails.csv')) - utils::write.csv(result$executionSummary$TotalExecutionElapsedTime, file = file.path(dirPath, 'executionSummary','TotalExecutionElapsedTime.csv'), row.names = F) - utils::write.csv(result$executionSummary$ExecutionDateTime, file = file.path(dirPath, 'executionSummary','ExecutionDateTime.csv'), row.names = F) - - #performanceEvaluation - if(!dir.exists(file.path(dirPath, 'performanceEvaluation'))){dir.create(file.path(dirPath, 'performanceEvaluation'), recursive = T)} - utils::write.csv(result$performanceEvaluation$evaluationStatistics, file = file.path(dirPath, 'performanceEvaluation','evaluationStatistics.csv'), row.names = F) - utils::write.csv(result$performanceEvaluation$thresholdSummary, file = file.path(dirPath, 'performanceEvaluation','thresholdSummary.csv'), row.names = F) - utils::write.csv(result$performanceEvaluation$demographicSummary, file = file.path(dirPath, 'performanceEvaluation','demographicSummary.csv'), row.names = F) - utils::write.csv(result$performanceEvaluation$calibrationSummary, file = file.path(dirPath, 'performanceEvaluation','calibrationSummary.csv'), row.names = F) - utils::write.csv(result$performanceEvaluation$predictionDistribution, file = file.path(dirPath, 'performanceEvaluation','predictionDistribution.csv'), row.names = F) - - #covariateSummary - utils::write.csv(result$covariateSummary, file = file.path(dirPath,'covariateSummary.csv'), row.names = F) -} - -#' Loads parts of the plp result saved as csv files for transparent sharing -#' -#' @details -#' Load the main results from csv files into a runPlp object -#' -#' @param dirPath The directory with the results as csv files -#' -#' @export -loadPlpFromCsv <- function(dirPath){ - - result <- list() - objects <- gsub('.csv','',dir(dirPath)) - if(sum(!c('covariateSummary','executionSummary','inputSetting','performanceEvaluation')%in%objects)>0){ - stop('Incorrect csv results file') - } - - 
length(result) <- length(objects) - names(result) <- objects - - #covariateSummary - result$covariateSummary <- utils::read.csv(file = file.path(dirPath,'covariateSummary.csv')) - - #executionSummary - result$executionSummary <- list() - result$executionSummary$PackageVersion <- tryCatch({as.list(utils::read.csv(file = file.path(dirPath, 'executionSummary','PackageVersion.csv')))}, error = function(e){return(NULL)}) - result$executionSummary$PlatformDetails <- tryCatch({as.list(utils::read.csv(file = file.path(dirPath, 'executionSummary','PlatformDetails.csv'))$x)}, error = function(e){return(NULL)}) - names(result$executionSummary$PlatformDetails) <- tryCatch({utils::read.csv(file = file.path(dirPath, 'executionSummary','PlatformDetails.csv'))$X}, error = function(e){return(NULL)}) - result$executionSummary$TotalExecutionElapsedTime <- tryCatch({utils::read.csv(file = file.path(dirPath, 'executionSummary','TotalExecutionElapsedTime.csv'))$x}, error = function(e){return(NULL)}) - result$executionSummary$ExecutionDateTime <- tryCatch({utils::read.csv(file = file.path(dirPath, 'executionSummary','ExecutionDateTime.csv'))$x}, error = function(e){return(NULL)}) - - #inputSetting - result$inputSetting <- list() - result$inputSetting$modelSettings$model <- tryCatch({utils::read.csv(file = file.path(dirPath, 'inputSetting','modelSettings_model.csv'))$x}, error = function(e){return(NULL)}) - result$inputSetting$modelSettings$param <- tryCatch({as.list(utils::read.csv(file = file.path(dirPath, 'inputSetting','modelSettings_param.csv')))}, error = function(e){return(NULL)}) - result$inputSetting$modelSettings$name <- tryCatch({utils::read.csv(file = file.path(dirPath, 'inputSetting','modelSettings_name.csv'))$x}, error = function(e){return(NULL)}) - - result$inputSetting$dataExtrractionSettings$covariateSettings <- tryCatch({reformatCovariateSettings(file.path(dirPath, 'inputSetting','dataExtrractionSettings_covariateSettings.csv'))}, error = function(e){return(NULL)}) - - result$inputSetting$populationSettings <- tryCatch({as.list(utils::read.csv(file = file.path(dirPath, 'inputSetting','populationSettings.csv')))}, error = function(e){return(NULL)}) - result$inputSetting$populationSettings$attrition <- tryCatch({utils::read.csv(file = file.path(dirPath, 'inputSetting','populationSettings_attrition.csv'))}, error = function(e){return(NULL)}) - - #performanceEvaluation - result$performanceEvaluation <- list() - result$performanceEvaluation$evaluationStatistics <- tryCatch({utils::read.csv(file = file.path(dirPath, 'performanceEvaluation','evaluationStatistics.csv'))}, error = function(e){return(NULL)}) - result$performanceEvaluation$thresholdSummary <- tryCatch({utils::read.csv(file = file.path(dirPath, 'performanceEvaluation','thresholdSummary.csv'))}, error = function(e){return(NULL)}) - result$performanceEvaluation$demographicSummary <- tryCatch({utils::read.csv(file = file.path(dirPath, 'performanceEvaluation','demographicSummary.csv'))}, error = function(e){return(NULL)}) - result$performanceEvaluation$calibrationSummary <- tryCatch({utils::read.csv(file = file.path(dirPath, 'performanceEvaluation','calibrationSummary.csv'))}, error = function(e){return(NULL)}) - result$performanceEvaluation$predictionDistribution <- tryCatch({utils::read.csv(file = file.path(dirPath, 'performanceEvaluation','predictionDistribution.csv'))}, error = function(e){return(NULL)}) - - result$model$modelSettings <- result$inputSetting$modelSettings - result$model$populationSettings <- 
result$inputSetting$populationSettings - result$model$metaData$call$covariateSettings <- result$inputSetting$dataExtrractionSettings$covariateSettings - - class(result) <- "runPlp" - return(result) -} diff --git a/R/helpers.R b/R/helpers.R index 2065f88..0d1a0ec 100644 --- a/R/helpers.R +++ b/R/helpers.R @@ -10,9 +10,9 @@ rowIdSets <- function(population, earlyStopRowIds <- trainRowIds[valSamp] trainRowIds <- trainRowIds[-valSamp] - datas <- list(testRowIds = testRowIds, - trainRowIds = trainRowIds, - earlyStopRowIds = earlyStopRowIds + datas <- list(testRowIds = sort(testRowIds), + trainRowIds = sort(trainRowIds), + earlyStopRowIds = sort(earlyStopRowIds) ) }else{ trainRowIds <- population$rowId @@ -22,8 +22,8 @@ rowIdSets <- function(population, earlyStopRowIds <- trainRowIds[valSamp] trainRowIds <- trainRowIds[-valSamp] - datas <- list(trainRowIds = trainRowIds, - earlyStopRowIds = earlyStopRowIds + datas <- list(trainRowIds = sort(trainRowIds), + earlyStopRowIds = sort(earlyStopRowIds) ) } @@ -31,9 +31,14 @@ rowIdSets <- function(population, return(datas) } -convertToTorchData <- function(data, label, rowIds){ +convertToTorchData <- function(data, population, rowIds){ x <- torch::torch_tensor(as.matrix(data[rowIds,]), dtype = torch::torch_float()) - y <- torch::torch_tensor(label, dtype = torch::torch_float()) + + #one-hot encoding + y <- population$outcomeCount[population$rowId%in%rowIds] + y[y>0] <- 1 + y <- torch::torch_tensor(matrix(y), dtype = torch::torch_float()) + return(list(x=x, y=y)) } @@ -43,13 +48,16 @@ batchPredict <- function(model, population, predictRowIds, batch_size ){ + ParallelLogger::logInfo('Predicting using batch') maxVal <- length(predictRowIds) batches <- lapply(1:ceiling(maxVal/batch_size), function(x) ((x-1)*batch_size+1):min((x*batch_size), maxVal)) - prediction <- population[predictRowIds,] + + ParallelLogger::logInfo('Pop') + prediction <- population[population$rowId%in%predictRowIds,] prediction$value <- 0 for(batch in batches){ - b <- torch::torch_tensor(as.matrix(plpData[predictRowIds,][batch,,drop = F]), dtype = torch::torch_float()) + b <- torch::torch_tensor(as.matrix(plpData[predictRowIds[batch],, drop = F]), dtype = torch::torch_float()) pred <- model(b) prediction$value[batch] <- as.array(pred$to())[,1] } @@ -57,11 +65,6 @@ batchPredict <- function(model, return(prediction) } -updatePredictionMat <- function(predictionMat,prediction){ - predictionMat$value[prediction$rowIds] <- prediction$value -} - - diff --git a/extras/example.R b/extras/example.R index 551465b..ac748c0 100644 --- a/extras/example.R +++ b/extras/example.R @@ -61,7 +61,6 @@ plpDataT <- PatientLevelPrediction::getPlpData(connectionDetails = connectionDet } - population <- PatientLevelPrediction::createStudyPopulation(plpData = plpData, outcomeId = 3, requireTimeAtRisk = F, @@ -82,7 +81,7 @@ x2 <- toSparseMDeep(plpDataT ,population, # code to train models deepset <- setDeepNNTorch(units=list(c(128, 64), 128), layer_dropout=c(0.2), lr =c(1e-4), decay=c(1e-5), outcome_weight = c(1.0), batch_size = c(100), - epochs= c(1), seed=NULL ) + epochs= c(5), seed=NULL ) #debug(fitDeepNNTorch) diff --git a/man/getPlpData.Rd b/man/getPlpData.Rd deleted file mode 100644 index 6bea686..0000000 --- a/man/getPlpData.Rd +++ /dev/null @@ -1,118 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/SaveLoadPlp.R -\name{getPlpData} -\alias{getPlpData} -\title{Get the patient level prediction data from the server} -\usage{ -getPlpData( - connectionDetails, - 
cdmDatabaseSchema, - oracleTempSchema = cdmDatabaseSchema, - cohortId, - outcomeIds, - studyStartDate = "", - studyEndDate = "", - cohortDatabaseSchema = cdmDatabaseSchema, - cohortTable = "cohort", - outcomeDatabaseSchema = cdmDatabaseSchema, - outcomeTable = "cohort", - cdmVersion = "5", - firstExposureOnly = FALSE, - washoutPeriod = 0, - sampleSize = NULL, - covariateSettings, - excludeDrugsFromCovariates = FALSE -) -} -\arguments{ -\item{connectionDetails}{An R object of type\cr\code{connectionDetails} created using the -function \code{createConnectionDetails} in the -\code{DatabaseConnector} package.} - -\item{cdmDatabaseSchema}{The name of the database schema that contains the OMOP CDM -instance. Requires read permissions to this database. On SQL -Server, this should specifiy both the database and the schema, -so for example 'cdm_instance.dbo'.} - -\item{oracleTempSchema}{For Oracle only: the name of the database schema where you want -all temporary tables to be managed. Requires create/insert -permissions to this database.} - -\item{cohortId}{A unique identifier to define the at risk cohort. CohortId is -used to select the cohort_concept_id in the cohort-like table.} - -\item{outcomeIds}{A list of cohort_definition_ids used to define outcomes (-999 mean no outcome gets downloaded).} - -\item{studyStartDate}{A calendar date specifying the minimum date that a cohort index -date can appear. Date format is 'yyyymmdd'.} - -\item{studyEndDate}{A calendar date specifying the maximum date that a cohort index -date can appear. Date format is 'yyyymmdd'. Important: the study -end data is also used to truncate risk windows, meaning no outcomes -beyond the study end date will be considered.} - -\item{cohortDatabaseSchema}{The name of the database schema that is the location where the -cohort data used to define the at risk cohort is available. -Requires read permissions to this database.} - -\item{cohortTable}{The tablename that contains the at risk cohort. cohortTable has -format of COHORT table: cohort_concept_id, SUBJECT_ID, -COHORT_START_DATE, COHORT_END_DATE.} - -\item{outcomeDatabaseSchema}{The name of the database schema that is the location where -the data used to define the outcome cohorts is available. -Requires read permissions to this database.} - -\item{outcomeTable}{The tablename that contains the outcome cohorts. Expectation is -outcomeTable has format of COHORT table: -COHORT_DEFINITION_ID, SUBJECT_ID, COHORT_START_DATE, -COHORT_END_DATE.} - -\item{cdmVersion}{Define the OMOP CDM version used: currently support "4", "5" and "6".} - -\item{firstExposureOnly}{Should only the first exposure per subject be included? Note that -this is typically done in the \code{createStudyPopulation} function, -but can already be done here for efficiency reasons.} - -\item{washoutPeriod}{The mininum required continuous observation time prior to index -date for a person to be included in the at risk cohort. 
Note that -this is typically done in the \code{createStudyPopulation} function, -but can already be done here for efficiency reasons.} - -\item{sampleSize}{If not NULL, only this number of people will be sampled from the target population (Default NULL)} - -\item{covariateSettings}{An object of type \code{covariateSettings} as created using the -\code{createCovariateSettings} function in the -\code{FeatureExtraction} package.} - -\item{excludeDrugsFromCovariates}{A redundant option} -} -\value{ -Returns an object of type \code{plpData}, containing information on the cohorts, their -outcomes, and baseline covariates. Information about multiple outcomes can be captured at once for -efficiency reasons. This object is a list with the following components: \describe{ -\item{outcomes}{A data frame listing the outcomes per person, including the time to event, and -the outcome id. Outcomes are not yet filtered based on risk window, since this is done at -a later stage.} \item{cohorts}{A data frame listing the persons in each cohort, listing their -exposure status as well as the time to the end of the observation period and time to the end of the -cohort (usually the end of the exposure era).} \item{covariates}{An ffdf object listing the -baseline covariates per person in the two cohorts. This is done using a sparse representation: -covariates with a value of 0 are omitted to save space.} \item{covariateRef}{An ffdf object describing the covariates that have been extracted.} -\item{metaData}{A list of objects with information on how the cohortMethodData object was -constructed.} } The generic \code{()} and \code{summary()} functions have been implemented for this object. -} -\description{ -This function executes a large set of SQL statements against the database in OMOP CDM format to -extract the data needed to perform the analysis. -} -\details{ -Based on the arguments, the at risk cohort data is retrieved, as well as outcomes -occurring in these subjects. The at risk cohort is identified through -user-defined cohorts in a cohort table either inside the CDM instance or in a separate schema. -Similarly, outcomes are identified -through user-defined cohorts in a cohort table either inside the CDM instance or in a separate -schema. Covariates are automatically extracted from the appropriate tables within the CDM. -If you wish to exclude concepts from covariates you will need to -manually add the concept_ids and descendants to the \code{excludedCovariateConceptIds} of the -\code{covariateSettings} argument. -} diff --git a/man/loadPlpData.Rd b/man/loadPlpData.Rd deleted file mode 100644 index 2240222..0000000 --- a/man/loadPlpData.Rd +++ /dev/null @@ -1,27 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/SaveLoadPlp.R -\name{loadPlpData} -\alias{loadPlpData} -\title{Load the cohort data from a folder} -\usage{ -loadPlpData(file, readOnly = TRUE) -} -\arguments{ -\item{file}{The name of the folder containing the data.} - -\item{readOnly}{If true, the data is opened read only.} -} -\value{ -An object of class plpData. -} -\description{ -\code{loadPlpData} loads an object of type plpData from a folder in the file -system. -} -\details{ -The data will be written to a set of files in the folder specified by the user. 
-} -\examples{ -# todo - -} diff --git a/man/loadPlpFromCsv.Rd b/man/loadPlpFromCsv.Rd deleted file mode 100644 index 1808d66..0000000 --- a/man/loadPlpFromCsv.Rd +++ /dev/null @@ -1,17 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/SaveLoadPlp.R -\name{loadPlpFromCsv} -\alias{loadPlpFromCsv} -\title{Loads parts of the plp result saved as csv files for transparent sharing} -\usage{ -loadPlpFromCsv(dirPath) -} -\arguments{ -\item{dirPath}{The directory with the results as csv files} -} -\description{ -Loads parts of the plp result saved as csv files for transparent sharing -} -\details{ -Load the main results from csv files into a runPlp object -} diff --git a/man/loadPlpModel.Rd b/man/loadPlpModel.Rd deleted file mode 100644 index c3ebe52..0000000 --- a/man/loadPlpModel.Rd +++ /dev/null @@ -1,17 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/SaveLoadPlp.R -\name{loadPlpModel} -\alias{loadPlpModel} -\title{loads the plp model} -\usage{ -loadPlpModel(dirPath) -} -\arguments{ -\item{dirPath}{The location of the model} -} -\description{ -loads the plp model -} -\details{ -Loads a plp model that was saved using \code{savePlpModel()} -} diff --git a/man/loadPlpResult.Rd b/man/loadPlpResult.Rd deleted file mode 100644 index 3e6fb69..0000000 --- a/man/loadPlpResult.Rd +++ /dev/null @@ -1,17 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/SaveLoadPlp.R -\name{loadPlpResult} -\alias{loadPlpResult} -\title{Loads the evalaution dataframe} -\usage{ -loadPlpResult(dirPath) -} -\arguments{ -\item{dirPath}{The directory where the evaluation was saved} -} -\description{ -Loads the evalaution dataframe -} -\details{ -Loads the evaluation -} diff --git a/man/loadPrediction.Rd b/man/loadPrediction.Rd deleted file mode 100644 index 06e51ca..0000000 --- a/man/loadPrediction.Rd +++ /dev/null @@ -1,17 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/SaveLoadPlp.R -\name{loadPrediction} -\alias{loadPrediction} -\title{Loads the prediciton dataframe to csv} -\usage{ -loadPrediction(fileLocation) -} -\arguments{ -\item{fileLocation}{The location with the saved prediction} -} -\description{ -Loads the prediciton dataframe to csv -} -\details{ -Loads the prediciton RDS file -} diff --git a/man/predictAndromeda.Rd b/man/predictAndromeda.Rd deleted file mode 100644 index 0903869..0000000 --- a/man/predictAndromeda.Rd +++ /dev/null @@ -1,36 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/Predict.R -\name{predictAndromeda} -\alias{predictAndromeda} -\title{Generated predictions from a regression model} -\usage{ -predictAndromeda( - coefficients, - population, - covariateData, - modelType = "logistic" -) -} -\arguments{ -\item{coefficients}{A names numeric vector where the names are the covariateIds, except for the -first value which is expected to be the intercept.} - -\item{population}{A data frame containing the population to do the prediction for} - -\item{covariateData}{An andromeda object containing the covariateData with predefined columns -(see below).} - -\item{modelType}{Current supported types are "logistic", "poisson", "cox" or "survival".} -} -\description{ -Generated predictions from a regression model -} -\details{ -These columns are expected in the outcome object: \tabular{lll}{ \verb{rowId} \tab(integer) \tab -Row ID is used to link multiple covariates (x) to a single outcome (y) \cr \verb{time} \tab(real) 
-\tab For models that use time (e.g. Poisson or Cox regression) this contains time \cr \tab -\tab(e.g. number of days) \cr } These columns are expected in the covariates object: \tabular{lll}{ -\verb{rowId} \tab(integer) \tab Row ID is used to link multiple covariates (x) to a single outcome -(y) \cr \verb{covariateId} \tab(integer) \tab A numeric identifier of a covariate \cr -\verb{covariateValue} \tab(real) \tab The value of the specified covariate \cr } -} diff --git a/man/predictPlp.Rd b/man/predictPlp.Rd deleted file mode 100644 index f1718a2..0000000 --- a/man/predictPlp.Rd +++ /dev/null @@ -1,27 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/Predict.R -\name{predictPlp} -\alias{predictPlp} -\title{predictPlp} -\usage{ -predictPlp(plpModel, population, plpData, index = NULL) -} -\arguments{ -\item{plpModel}{An object of type \code{plpModel} - a patient level prediction model} - -\item{population}{The population created using createStudyPopulation() who will have their risks predicted} - -\item{plpData}{An object of type \code{plpData} - the patient level prediction -data extracted from the CDM.} - -\item{index}{A data frame containing rowId: a vector of rowids and index: a vector of doubles the same length as the rowIds. If used, only the rowIds with a negative index value are used to calculate the prediction.} -} -\value{ -A dataframe containing the prediction for each person in the population with an attribute metaData containing prediction details. -} -\description{ -Predict the risk of the outcome using the input plpModel for the input plpData -} -\details{ -The function applied the trained model on the plpData to make predictions -} diff --git a/man/predictProbabilities.Rd b/man/predictProbabilities.Rd deleted file mode 100644 index affba63..0000000 --- a/man/predictProbabilities.Rd +++ /dev/null @@ -1,26 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/Predict.R -\name{predictProbabilities} -\alias{predictProbabilities} -\title{Create predictive probabilities} -\usage{ -predictProbabilities(predictiveModel, population, covariateData) -} -\arguments{ -\item{predictiveModel}{An object of type \code{predictiveModel} as generated using -\code{\link{fitPlp}}.} - -\item{population}{The population to calculate the prediction for} - -\item{covariateData}{The covariateData containing the covariates for the population} -} -\value{ -The value column in the result data.frame is: logistic: probabilities of the outcome, poisson: -Poisson rate (per day) of the outome, survival: hazard rate (per day) of the outcome. -} -\description{ -Create predictive probabilities -} -\details{ -Generates predictions for the population specified in plpData given the model. 
-} diff --git a/man/predict.deepEstimator.Rd b/man/predict_deepEstimator.Rd similarity index 79% rename from man/predict.deepEstimator.Rd rename to man/predict_deepEstimator.Rd index f5600f8..6246715 100644 --- a/man/predict.deepEstimator.Rd +++ b/man/predict_deepEstimator.Rd @@ -1,7 +1,7 @@ % Generated by roxygen2: do not edit by hand % Please edit documentation in R/Predict.R -\name{predict.deepEstimator} -\alias{predict.deepEstimator} +\name{predict_deepEstimator} +\alias{predict_deepEstimator} \title{predict.deepEstimator} \usage{ predict.deepEstimator(x) diff --git a/man/savePlpData.Rd b/man/savePlpData.Rd deleted file mode 100644 index 2562b0f..0000000 --- a/man/savePlpData.Rd +++ /dev/null @@ -1,29 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/SaveLoadPlp.R -\name{savePlpData} -\alias{savePlpData} -\title{Save the cohort data to folder} -\usage{ -savePlpData(plpData, file, envir = NULL, overwrite = F) -} -\arguments{ -\item{plpData}{An object of type \code{plpData} as generated using -\code{getPlpData}.} - -\item{file}{The name of the folder where the data will be written. The folder should -not yet exist.} - -\item{envir}{The environment for to evaluate variables when saving} - -\item{overwrite}{Whether to force overwrite an existing file} -} -\description{ -\code{savePlpData} saves an object of type plpData to folder. -} -\details{ -The data will be written to a set of files in the folder specified by the user. -} -\examples{ -# todo - -} diff --git a/man/savePlpModel.Rd b/man/savePlpModel.Rd deleted file mode 100644 index 3ef435a..0000000 --- a/man/savePlpModel.Rd +++ /dev/null @@ -1,19 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/SaveLoadPlp.R -\name{savePlpModel} -\alias{savePlpModel} -\title{Saves the plp model} -\usage{ -savePlpModel(plpModel, dirPath) -} -\arguments{ -\item{plpModel}{A trained classifier returned by running \code{runPlp()$model}} - -\item{dirPath}{A location to save the model to} -} -\description{ -Saves the plp model -} -\details{ -Saves the plp model to a user specificed folder -} diff --git a/man/savePlpResult.Rd b/man/savePlpResult.Rd deleted file mode 100644 index 133bd18..0000000 --- a/man/savePlpResult.Rd +++ /dev/null @@ -1,19 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/SaveLoadPlp.R -\name{savePlpResult} -\alias{savePlpResult} -\title{Saves the result from runPlp into the location directory} -\usage{ -savePlpResult(result, dirPath) -} -\arguments{ -\item{result}{The result of running runPlp()} - -\item{dirPath}{The directory to save the csv} -} -\description{ -Saves the result from runPlp into the location directory -} -\details{ -Saves the result from runPlp into the location directory -} diff --git a/man/savePlpToCsv.Rd b/man/savePlpToCsv.Rd deleted file mode 100644 index 018c0aa..0000000 --- a/man/savePlpToCsv.Rd +++ /dev/null @@ -1,19 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/SaveLoadPlp.R -\name{savePlpToCsv} -\alias{savePlpToCsv} -\title{Save parts of the plp result as a csv for transparent sharing} -\usage{ -savePlpToCsv(result, dirPath) -} -\arguments{ -\item{result}{An object of class runPlp with development or validation results} - -\item{dirPath}{The directory the save the results as csv files} -} -\description{ -Save parts of the plp result as a csv for transparent sharing -} -\details{ -Saves the main results as a csv (these files can be read by the shiny app) -} 
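For orientation, the save and load helpers documented in the manual pages above and below form simple round trips. A minimal usage sketch of that API, assuming a trained plpModel, a runPlp result object plpResult, a study population and plpData, plus an output folder outDir (all hypothetical names):

  prediction <- predictPlp(plpModel, population, plpData)       # risk estimate per person
  savePlpModel(plpModel, file.path(outDir, 'model'))
  plpModel <- loadPlpModel(file.path(outDir, 'model'))
  savePlpResult(plpResult, file.path(outDir, 'plpResult'))
  plpResult <- loadPlpResult(file.path(outDir, 'plpResult'))
  savePlpToCsv(plpResult, file.path(outDir, 'csvResults'))       # shareable csv files
  plpResult <- loadPlpFromCsv(file.path(outDir, 'csvResults'))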
diff --git a/man/savePrediction.Rd b/man/savePrediction.Rd deleted file mode 100644 index 2e7b4a2..0000000 --- a/man/savePrediction.Rd +++ /dev/null @@ -1,21 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/SaveLoadPlp.R -\name{savePrediction} -\alias{savePrediction} -\title{Saves the prediction dataframe to RDS} -\usage{ -savePrediction(prediction, dirPath, fileName = "prediction.rds") -} -\arguments{ -\item{prediction}{The prediciton data.frame} - -\item{dirPath}{The directory to save the prediction RDS} - -\item{fileName}{The name of the RDS file that will be saved in dirPath} -} -\description{ -Saves the prediction dataframe to RDS -} -\details{ -Saves the prediction data frame returned by predict.R to an RDS file and returns the fileLocation where the prediction is saved -} From f1130cf1f7d023f5412f008a8ff6106de9addf83 Mon Sep 17 00:00:00 2001 From: ted9219 Date: Wed, 13 Oct 2021 01:58:56 +0000 Subject: [PATCH 016/140] fix sparseRTorch and re- roxygenize --- NAMESPACE | 6 ++-- R/Formatting.R | 10 ++---- R/sparseRTorch.R | 2 -- ...pEstimator.Rd => predict.deepEstimator.Rd} | 4 +-- man/predictAndromeda.Rd | 36 +++++++++++++++++++ man/predictPlp.Rd | 27 ++++++++++++++ man/predictProbabilities.Rd | 26 ++++++++++++++ man/toSparseMDeep.Rd | 8 +++-- 8 files changed, 103 insertions(+), 16 deletions(-) rename man/{predict_deepEstimator.Rd => predict.deepEstimator.Rd} (79%) create mode 100644 man/predictAndromeda.Rd create mode 100644 man/predictPlp.Rd create mode 100644 man/predictProbabilities.Rd diff --git a/NAMESPACE b/NAMESPACE index f563447..c9a4bc0 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -2,8 +2,10 @@ export(fitDeepNNTorch) export(fitResNet) -export(predict_deepEstimator) -export(predict_deepNNTorch) +export(predict.deepEstimator) +export(predictAndromeda) +export(predictPlp) +export(predictProbabilities) export(setCIReNN) export(setCNNTorch) export(setCovNN) diff --git a/R/Formatting.R b/R/Formatting.R index d631edd..90515f4 100644 --- a/R/Formatting.R +++ b/R/Formatting.R @@ -1,8 +1,8 @@ -# @file formatting.R +# @file Formatting.R # # Copyright 2020 Observational Health Data Sciences and Informatics # -# This file is part of PatientLevelPrediction +# This file is part of DeepPatientLevelPrediction # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -17,11 +17,7 @@ # limitations under the License. 
#' Convert the plpData in COO format into a sparse R matrix -#' -#' @description #' Converts the standard plpData to a sparse matrix -#' -#' @details #' This function converts the covariate file from ffdf in COO format into a sparse matrix from #' the package Matrix #' @param plpData An object of type \code{plpData} with covariate in coo format - the patient level prediction @@ -132,7 +128,7 @@ toSparseMDeep <- function(plpData, } -# restricts to pop and saves/creates mapping + MapCovariates <- function(covariateData,population, mapping=NULL){ # to remove check notes diff --git a/R/sparseRTorch.R b/R/sparseRTorch.R index 3548234..890052f 100644 --- a/R/sparseRTorch.R +++ b/R/sparseRTorch.R @@ -1,5 +1,3 @@ -source('R/Formatting.R') - #' Convert the plpData in COO format into a sparse Torch tensor #' #' @description diff --git a/man/predict_deepEstimator.Rd b/man/predict.deepEstimator.Rd similarity index 79% rename from man/predict_deepEstimator.Rd rename to man/predict.deepEstimator.Rd index 6246715..f5600f8 100644 --- a/man/predict_deepEstimator.Rd +++ b/man/predict.deepEstimator.Rd @@ -1,7 +1,7 @@ % Generated by roxygen2: do not edit by hand % Please edit documentation in R/Predict.R -\name{predict_deepEstimator} -\alias{predict_deepEstimator} +\name{predict.deepEstimator} +\alias{predict.deepEstimator} \title{predict.deepEstimator} \usage{ predict.deepEstimator(x) diff --git a/man/predictAndromeda.Rd b/man/predictAndromeda.Rd new file mode 100644 index 0000000..0903869 --- /dev/null +++ b/man/predictAndromeda.Rd @@ -0,0 +1,36 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/Predict.R +\name{predictAndromeda} +\alias{predictAndromeda} +\title{Generated predictions from a regression model} +\usage{ +predictAndromeda( + coefficients, + population, + covariateData, + modelType = "logistic" +) +} +\arguments{ +\item{coefficients}{A names numeric vector where the names are the covariateIds, except for the +first value which is expected to be the intercept.} + +\item{population}{A data frame containing the population to do the prediction for} + +\item{covariateData}{An andromeda object containing the covariateData with predefined columns +(see below).} + +\item{modelType}{Current supported types are "logistic", "poisson", "cox" or "survival".} +} +\description{ +Generated predictions from a regression model +} +\details{ +These columns are expected in the outcome object: \tabular{lll}{ \verb{rowId} \tab(integer) \tab +Row ID is used to link multiple covariates (x) to a single outcome (y) \cr \verb{time} \tab(real) +\tab For models that use time (e.g. Poisson or Cox regression) this contains time \cr \tab +\tab(e.g. 
number of days) \cr } These columns are expected in the covariates object: \tabular{lll}{
+\verb{rowId} \tab(integer) \tab Row ID is used to link multiple covariates (x) to a single outcome
+(y) \cr \verb{covariateId} \tab(integer) \tab A numeric identifier of a covariate \cr
+\verb{covariateValue} \tab(real) \tab The value of the specified covariate \cr }
+}
diff --git a/man/predictPlp.Rd b/man/predictPlp.Rd
new file mode 100644
index 0000000..f1718a2
--- /dev/null
+++ b/man/predictPlp.Rd
@@ -0,0 +1,27 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Predict.R
+\name{predictPlp}
+\alias{predictPlp}
+\title{predictPlp}
+\usage{
+predictPlp(plpModel, population, plpData, index = NULL)
+}
+\arguments{
+\item{plpModel}{An object of type \code{plpModel} - a patient level prediction model}
+
+\item{population}{The population created using createStudyPopulation() who will have their risks predicted}
+
+\item{plpData}{An object of type \code{plpData} - the patient level prediction
+data extracted from the CDM.}
+
+\item{index}{A data frame containing rowId: a vector of rowIds and index: a vector of doubles the same length as the rowIds. If used, only the rowIds with a negative index value are used to calculate the prediction.}
+}
+\value{
+A dataframe containing the prediction for each person in the population with an attribute metaData containing prediction details.
+}
+\description{
+Predict the risk of the outcome using the input plpModel for the input plpData
+}
+\details{
+The function applies the trained model on the plpData to make predictions
+}
diff --git a/man/predictProbabilities.Rd b/man/predictProbabilities.Rd
new file mode 100644
index 0000000..affba63
--- /dev/null
+++ b/man/predictProbabilities.Rd
@@ -0,0 +1,26 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/Predict.R
+\name{predictProbabilities}
+\alias{predictProbabilities}
+\title{Create predictive probabilities}
+\usage{
+predictProbabilities(predictiveModel, population, covariateData)
+}
+\arguments{
+\item{predictiveModel}{An object of type \code{predictiveModel} as generated using
+\code{\link{fitPlp}}.}
+
+\item{population}{The population to calculate the prediction for}
+
+\item{covariateData}{The covariateData containing the covariates for the population}
+}
+\value{
+The value column in the result data.frame is: logistic: probabilities of the outcome, poisson:
+Poisson rate (per day) of the outcome, survival: hazard rate (per day) of the outcome.
+}
+\description{
+Create predictive probabilities
+}
+\details{
+Generates predictions for the population specified in plpData given the model.
+} diff --git a/man/toSparseMDeep.Rd b/man/toSparseMDeep.Rd index 4af9719..f6ef1e0 100644 --- a/man/toSparseMDeep.Rd +++ b/man/toSparseMDeep.Rd @@ -2,7 +2,10 @@ % Please edit documentation in R/Formatting.R \name{toSparseMDeep} \alias{toSparseMDeep} -\title{Convert the plpData in COO format into a sparse R matrix} +\title{Convert the plpData in COO format into a sparse R matrix +Converts the standard plpData to a sparse matrix +This function converts the covariate file from ffdf in COO format into a sparse matrix from +the package Matrix} \usage{ toSparseMDeep(plpData, population, map = NULL, temporal = F) } @@ -26,9 +29,8 @@ This object is a list with the following components: \describe{ } } \description{ +Convert the plpData in COO format into a sparse R matrix Converts the standard plpData to a sparse matrix -} -\details{ This function converts the covariate file from ffdf in COO format into a sparse matrix from the package Matrix } From 41c8f85e63c10491cf912272264a1c95070dec97 Mon Sep 17 00:00:00 2001 From: ted9219 Date: Mon, 18 Oct 2021 09:40:54 +0000 Subject: [PATCH 017/140] sync with 14_buildable --- DESCRIPTION | 2 +- NAMESPACE | 4 ++++ R/Formatting.R | 4 ++-- man/predict.deepEstimator.Rd | 11 +++++++++++ 4 files changed, 18 insertions(+), 3 deletions(-) create mode 100644 man/predict.deepEstimator.Rd diff --git a/DESCRIPTION b/DESCRIPTION index 2fdbaf0..d0f5e70 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -6,7 +6,7 @@ Date: 2021-06-07 Authors@R: c( person("Jenna", "Reps", email = "jreps@its.jnj.com", role = c("aut", "cre")), person("Seng", "Chan You", role = c("aut")), - person("Egill", "Fridgeirsson", role = c("aut")) + person("Egill", "Fridgeirsson", role = c("aut")), person("Chungsoo", "Kim", role = c("aut")) ) Maintainer: Jenna Reps diff --git a/NAMESPACE b/NAMESPACE index 062102d..7e09f9e 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -4,12 +4,14 @@ S3method(print,plpData) S3method(print,summary.plpData) S3method(summary,plpData) export(fitDeepNNTorch) +export(fitResNet) export(getPlpData) export(loadPlpData) export(loadPlpFromCsv) export(loadPlpModel) export(loadPlpResult) export(loadPrediction) +export(predict.deepEstimator) export(predictAndromeda) export(predictPlp) export(predictProbabilities) @@ -25,7 +27,9 @@ export(setCovNN2) export(setDeepNN) export(setDeepNNTorch) export(setRNNTorch) +export(setResNet) export(toSparseMDeep) export(toSparseRTorch) +export(trainResNet) export(transferLearning) importFrom(zeallot,"%<-%") diff --git a/R/Formatting.R b/R/Formatting.R index d631edd..a54bcf5 100644 --- a/R/Formatting.R +++ b/R/Formatting.R @@ -1,8 +1,8 @@ -# @file formatting.R +# @file Formatting.R # # Copyright 2020 Observational Health Data Sciences and Informatics # -# This file is part of PatientLevelPrediction +# This file is part of DeepPatientLevelPrediction # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/man/predict.deepEstimator.Rd b/man/predict.deepEstimator.Rd new file mode 100644 index 0000000..f5600f8 --- /dev/null +++ b/man/predict.deepEstimator.Rd @@ -0,0 +1,11 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/Predict.R +\name{predict.deepEstimator} +\alias{predict.deepEstimator} +\title{predict.deepEstimator} +\usage{ +predict.deepEstimator(x) +} +\description{ +prediction function for models using estimator class +} From 809c32fe6dbfa655273e2e50ca4b28fa7ddcec13 Mon Sep 17 00:00:00 2001 From: ted9219 Date: Mon, 18 Oct 2021 09:47:45 +0000 Subject: [PATCH 018/140] sync with 14_buildable, with missed commit --- R/sparseRTorch.R | 2 -- 1 file changed, 2 deletions(-) diff --git a/R/sparseRTorch.R b/R/sparseRTorch.R index 3548234..890052f 100644 --- a/R/sparseRTorch.R +++ b/R/sparseRTorch.R @@ -1,5 +1,3 @@ -source('R/Formatting.R') - #' Convert the plpData in COO format into a sparse Torch tensor #' #' @description From d2723302d125d46bc4554ee32b500ec4d90b36ca Mon Sep 17 00:00:00 2001 From: Egill Fridgeirsson Date: Thu, 11 Nov 2021 09:26:11 +0100 Subject: [PATCH 019/140] Refactored Estimator and Dataset out of ResNet file and fixed some bugs so resnet runs now --- R/Dataset.R | 26 ++ R/Estimator.R | 233 ++++++++++ R/Predict.R | 23 +- R/ResNet.R | 404 +++++------------- ...pEstimator.Rd => predict_deepEstimator.Rd} | 8 +- 5 files changed, 379 insertions(+), 315 deletions(-) create mode 100644 R/Dataset.R create mode 100644 R/Estimator.R rename man/{predict.deepEstimator.Rd => predict_deepEstimator.Rd} (59%) diff --git a/R/Dataset.R b/R/Dataset.R new file mode 100644 index 0000000..21445de --- /dev/null +++ b/R/Dataset.R @@ -0,0 +1,26 @@ +Dataset <- torch::dataset( + name = 'Dataset', + + initialize=function(data, labels, indices, numericalIndex) { + + # add labels + self$target <- torch::torch_tensor(labels[indices]) + + # add features + #print(dim(as.matrix(data[indices,]))) ## testing + # TODO should be torch sparse COO matrix + self$cat <- torch::torch_tensor(as.matrix(data[indices,-numericalIndex, drop = F]), dtype=torch::torch_float32()) + self$num <- torch::torch_tensor(as.matrix(data[indices,numericalIndex, drop = F]), dtype=torch::torch_float32()) + + }, + + .getitem = function(item) { + return(list(cat = self$cat[item,], + num = self$num[item,], + target = self$target[item])) + }, + + .length = function() { + self$target$size()[[1]] # shape[1] + } +) \ No newline at end of file diff --git a/R/Estimator.R b/R/Estimator.R new file mode 100644 index 0000000..1148f1f --- /dev/null +++ b/R/Estimator.R @@ -0,0 +1,233 @@ +Estimator <- R6::R6Class('Estimator', + public = list( + device = NULL, + mode = NULL, + modelParameters = NULL, + epochs = NULL, + learningRate = NULL, + l2Norm = NULL, + batchSize = NULL, + resultsDir = NULL, + prefix = NULL, + previousEpochs = NULL, + optimizer = NULL, + criterion = NULL, + bestScore = NULL, + bestEpoch = NULL, + model = NULL, + initialize = function(baseModel, modelParameters, fitParameters, + optimizer=torch::optim_adam, + criterion=torch::nn_bce_with_logits_loss, + device='cpu'){ + self$device <- device + self$model <- do.call(baseModel, modelParameters) + self$modelParameters <- modelParameters + + self$epochs <- self$itemOrDefaults(fitParameters, 'epochs', 10) + self$learningRate <- self$itemOrDefaults(fitParameters,'learningRate', 1e-3) + self$l2Norm <- self$itemOrDefaults(fitParameters, 'weightDecay', 1e-5) + self$batchSize <- self$itemOrDefaults(fitParameters, 'batchSize', 1024) + + # don´t save 
checkpoints unless you get a resultDir + self$resultsDir <- self$itemOrDefaults(fitParameters, 'resultsDir', NULL) + if (!is.null(self$resultsDir)) { + dir.create(self$resultsDir, recursive=TRUE, showWarnings=FALSE) + } + self$prefix <- self$itemOrDefaults(fitParameters, 'prefix', self$model$name) + + self$previousEpochs <- self$itemOrDefaults(fitParameters, 'previousEpochs', 0) + + self$optimizer <- optimizer(params=self$model$parameters, + lr=self$learningRate, + weight_decay=self$l2Norm) + self$criterion <- criterion() + self$model$to(device=self$device) + + self$bestScore <- NULL + self$bestEpoch <- NULL + }, + + # fits the estimator + fit = function(dataset, testDataset) { + valLosses <- c() + valAUCs <- c() + + dataloader <- torch::dataloader(dataset, + batch_size=self$batchSize, + shuffle=T) + testDataloader <- torch::dataloader(testDataset, + batch_size=self$batchSize, + shuffle=F) + + modelStateDict <- list() + epoch <- list() + + lr <- c() + for (epochI in 1:self$epochs) { + + # fit the model + self$fitEpoch(dataloader) + + # predict on test data + scores <- self$score(testDataloader) + + currentEpoch <- epochI + self$previousEpochs + lr <- c(lr, self$optimizer$param_groups[[1]]$lr) + ParallelLogger::logInfo('Epochs: ', currentEpoch, ' | Val AUC: ', + round(scores$auc,3), ' | Val Loss: ', + round(scores$loss,3), ' | LR: ', + self$optimizer$param_groups[[1]]$lr) + valLosses <- c(valLosses, scores$loss) + valAUCs <- c(valAUCs, scores$auc) + + # here it saves the results to lists rather than files + modelStateDict[[epochI]] <- self$model$state_dict() + epoch[[epochI]] <- currentEpoch + + } + + + #extract best epoch from the saved checkpoints + bestEpochInd <- which.min(valAUCs) # change this if a different metric is used + + bestModelStateDict <- modelStateDict[[bestEpochInd]] + self$model$load_state_dict(bestModelStateDict) + + bestEpoch <- epoch[[bestEpochInd]] + self$bestEpoch <- bestEpoch + self$bestScore <- list(loss= valLosses[bestEpochInd], auc=valAUCs[bestEpochInd]) + + ParallelLogger::logInfo(paste0('Loaded best model (based on loss) from epoch ', bestEpoch)) + ParallelLogger::logInfo(paste0('ValLoss: ', self$bestScore$loss)) + ParallelLogger::logInfo(paste0('valAUC: ', self$bestScore$auc)) + + invisible(self) + }, + + # Fits whole training set on a specific number of epochs + # TODO What happens when learning rate changes per epochs? + # Ideally I would copy the learning rate strategy from before + # and adjust for different sizes ie more iterations/updates??? 
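+    # In the overall flow (see trainResNet in ResNet.R) cross-validation first
+    # records the best epoch per fold; the final estimator is then created with
+    # that epoch count, and this method retrains on the full training set for
+    # self$epochs epochs before saving a checkpoint to resultsDir.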
+ fitWholeTrainingSet = function(dataset) { + dataloader <- torch::dataloader(dataset, + batch_size=self$batchSize, + shuffle=TRUE, + drop_last=FALSE) + for (epoch in 1:self$epochs) { + self$fitEpoch(dataloader) + } + torch::torch_save(list(modelStateDict=self$model$state_dict(), + modelParameters=self$modelParameters, + fitParameters=self$fitParameters, + epoch=self$epochs), + file.path(self$resultsDir, paste0( + self$prefix, '_epochs:', self$epochs) + )) + + }, + + # trains for one epoch + fitEpoch = function(dataloader){ + t = Sys.time() + batch_loss = 0 + i=1 + + self$model$train() + + coro::loop(for (b in dataloader) { + cat = b[[1]]$to(device=self$device) + num = b[[2]]$to(device=self$device) + target = b[[3]]$to(device=self$device) + out = self$model(num, cat) + + loss = self$criterion(out, target) + + batch_loss = batch_loss + loss + if (i %% 10 == 0) { + elapsed_time <- Sys.time() - t + ParallelLogger::logInfo('Loss: ', round((batch_loss/1)$item(), 3), ' | Time: ', + round(elapsed_time,digits = 2), units(elapsed_time)) + t = Sys.time() + batch_loss = 0 + } + + loss$backward() + self$optimizer$step() + self$optimizer$zero_grad() + i = i + 1 + }) + + }, + + # calculates loss and auc after training for one epoch + score = function(dataloader){ + torch::with_no_grad({ + loss = c() + predictions = c() + targets = c() + self$model$eval() + coro::loop(for (b in dataloader) { + b <- self$batchToDevice(b) + cat <- b$cat + num <- b$num + target <- b$target + + pred <- self$model(num, cat) + predictions <- c(predictions, as.array(pred$cpu())) + targets <- c(targets, as.array(target$cpu())) + loss <- c(loss, self$criterion(pred, target)$item()) + }) + mean_loss <- mean(loss) + predictionsClass <- data.frame(value=predictions, outcomeCount=targets) + attr(predictionsClass, 'metaData')$predictionType <-'binary' + auc <- computeAuc(predictionsClass) + }) + return(list(loss=mean_loss, auc=auc)) + }, + + # predicts and outputs the probabilities + predictProba = function(dataset) { + dataloader <- torch::dataloader(dataset, + batch_size = self$batchSize, + shuffle=F) + torch::with_no_grad({ + predictions <- c() + self$model$eval() + coro::loop(for (b in dataloader){ + b <- self$batchToDevice(b) + cat <- b$cat + num <- b$num + target <- b$target + pred <- self$model(num,cat) + predictions <- c(predictions, as.array(torch::torch_sigmoid(pred$cpu()))) + }) + }) + return(predictions) + }, + + + # predicts and outputs the class + predict = function(dataset){ + predictions <- self$predict_proba(dataset) + predicted_class <- torch::torch_argmax(torch::torch_unsqueeze(torch::torch_tensor(predictions), dim=2),dim=2) + return(predicted_class) + }, + + # sends a batch of data to device + ## TODO make agnostic of the form of batch + batchToDevice = function(batch) { + cat <- batch[[1]]$to(device=self$device) + num <- batch[[2]]$to(device=self$device) + target <- batch[[3]]$to(device=self$device) + + result <- list(cat=cat, num=num, target=target) + return(result) + }, + + # select item from list, and if it's null sets a default + itemOrDefaults = function (list, item, default = NULL) { + value <- list[[item]] + if (is.null(value)) default else value + } + ) +) \ No newline at end of file diff --git a/R/Predict.R b/R/Predict.R index c822c51..ec98f60 100644 --- a/R/Predict.R +++ b/R/Predict.R @@ -237,21 +237,24 @@ predict.knn <- function(plpData, population, plpModel, ...){ return(prediction) } -#' predict.deepEstimator +#' predict_deepEstimator +#' +#' #' @description prediction function for models using 
estimator class -#' @usage predict.deepEstimator(x) -#' @export predict.deepEstimator -predict.deepEstimator <- function(plpModel, population, plpData, ...) { - - sparseMatrix <- toSparseM(plpData, population) +#' +#' @param plpModel The plpModel object +#' @param population Population dataframe +#' @param plpData plpData object +#' @usage predict_deepEstimator(plpModel, population, plpData) +#' @export predict_deepEstimator +predict_deepEstimator <- function(plpModel, population, plpData, ...) { + + sparseMatrix <- toSparseMDeep(plpData, population) indices <- population$rowId numericalIndex <- sparseMatrix$map$newCovariateId[sparseMatrix$map$oldCovariateId==1002] dataset <- Dataset(sparseMatrix$data, population$outcomeCount,indices=indices, numericalIndex = numericalIndex) - dataloader <- torch::dataloader(dataset, - batch_size=plpModel$modelSettings$modelParameters$batch_size, - shuffle=FALSE, drop_last = FALSE) - prediction <- plpModel$model$predictProba(dataloader) + prediction <- plpModel$model$predictProba(dataset) prediction <- population %>% mutate(value=prediction) return(prediction) } diff --git a/R/ResNet.R b/R/ResNet.R index baebf13..84bcb64 100644 --- a/R/ResNet.R +++ b/R/ResNet.R @@ -1,26 +1,72 @@ +# @file ResNet.R +# +# Copyright 2020 Observational Health Data Sciences and Informatics +# +# This file is part of PatientLevelPrediction +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +#' setResNet +#' +#' @description +#' Creates settings for a ResNet model +#' +#' @details +#' Model architecture from by https://arxiv.org/abs/2106.11959 +#' +#' +#' @param numLayers Number of layers in network, default: 1:16 +#' @param sizeHidden Amount of neurons in each default layer, default: 2^(6:10) (64 to 1024) +#' @param hiddenFactor How much to grow the amount of neurons in each ResLayer, default: 1:4 +#' @param residualDropout How much dropout to apply after last linear layer in ResLayer, default: seq(0, 0.3, 0.05) +#' @param hiddenDropout How much dropout to apply after first linear layer in ResLayer, default: seq(0, 0.3, 0.05) +#' @param normalization Which type of normalization to use. Default: 'Batchnorm' +#' @param activation What kind of activation to use. Default: 'RelU' +#' @param sizeEmbedding Size of embedding layer, default: 2^(6:9) (64 to 512) +#' @param weightDecay Weight decay to apply, default: c(1e-6, 1e-3) +#' @param learningRate Learning rate to use. default: c(1e-2, 1e-5) +#' @param seed Seed to use for sampling hyperparameter space +#' @param hyperParamSearch Which kind of hyperparameter search to use random sampling or exhaustive grid search. 
default: 'random' +#' @param randomSample How many random samples from hyperparameter space to use +#' @param device Which device to run analysis on, either 'cpu' or 'cuda', default: 'cpu' +#' @param batch_size Size of batch, default: 1024 +#' @param epochs Number of epochs to run, default: 10 +#' #' @export setResNet <- function(numLayers=1:16, sizeHidden=2^(6:10), hiddenFactor=1:4, residualDropout=seq(0,0.3,0.05), hiddenDropout=seq(0,0.3,0.05), normalization='BatchNorm', activation='RelU', sizeEmbedding=2^(6:9), weightDecay=c(1e-6, 1e-3), - learningRate=c(1e-2,1e-5), seed=42, hyperParamSearch='random', - randomSample=100, device='cpu', batch_size=1024, epochs=10) { + learningRate=c(1e-2,1e-5), seed=NULL, hyperParamSearch='random', + randomSample=100, device='cpu', batchSize=1024, epochs=10) { if (!is.null(seed)) { seed <- as.integer(sample(1e5, 1)) } - + + set.seed(seed) + param <- expand.grid(numLayers=numLayers, sizeHidden=sizeHidden, - hiddenFactor=hiddenFactor, - residualDropout=residualDropout, - hiddenDropout=hiddenDropout, - sizeEmbedding=sizeEmbedding, weightDecay=weightDecay, - learningRate=learningRate) + hiddenFactor=hiddenFactor, + residualDropout=residualDropout, + hiddenDropout=hiddenDropout, + sizeEmbedding=sizeEmbedding, weightDecay=weightDecay, + learningRate=learningRate) if (hyperParamSearch=='random'){ param <- param[sample(nrow(param), randomSample),] } param$device <- device - param$batch_size <- batch_size + param$batch_size <- batchSize param$epochs <- epochs results <- list(model='fitResNet', param=param, name='ResNet') @@ -30,10 +76,18 @@ setResNet <- function(numLayers=1:16, sizeHidden=2^(6:10), hiddenFactor=1:4, return(results) } - +#' @description +#' fits a ResNet model to data +#' +#' @param population the study population dataframe +#' @param plpData plp data object +#' @param param parameters to use for model +#' @param outcomeId Id of the outcome +#' @param cohortId Id of the cohort +#' @param ... +#' #' @export -fitResNet <- function(population, plpData, param, - quiet=F, outcomeId, cohortId, ...) { +fitResNet <- function(population, plpData, param, outcomeId, cohortId, ...) { start <- Sys.time() #sparseMatrix <- toSparseM(plpData, population) @@ -41,42 +95,34 @@ fitResNet <- function(population, plpData, param, map=NULL, temporal=F) - # TODO where to save results? 
- outLoc <- tempfile(pattern = 'resNet') - dir.create(outLoc) - #do cross validation to find hyperParameters hyperParamSel <- list() for (i in 1:nrow(param)) { - outLocHP <- file.path(outLoc, paste0('Iteration_', i)) hyperParamSel[[i]] <- do.call(trainResNet, listAppend(param[i,], list(sparseMatrix =sparseMatrix, population = population, - train=TRUE, - modelOutput=outLocHP, - quiet = quiet))) + train=TRUE))) } hyperSummary <-as.data.frame(cbind(do.call(rbind, lapply(hyperParamSel, function(x) x$hyperSum)))) hyperSummary$auc <- unlist(lapply(hyperParamSel, function(x) x$auc)) scores <- unlist(lapply(hyperParamSel, function(x) x$auc)) - # now train the final model and return coef + # now train the final model bestInd <- which.max(abs(unlist(scores)-0.5))[1] + param.best <- param[bestInd,] uniqueEpochs <- unique(hyperSummary$bestEpochs[[bestInd]]) - param$epochs <- uniqueEpochs[which.max(tabulate(match(hyperSummary$bestEpochs[[bestInd]], uniqueEpochs)))] - outLoc <- file.path(outLoc, paste0('whole_training_set')) - finalModel <- do.call(trainResNet, listAppend(param[bestInd,], - list(sparseMatrix = sparseMatrix, - population = population, - train=FALSE, - modelOutput=outLoc))) - covariateRef <- as.data.frame(plpData$covariateData$covariateRef) - incs <- rep(1, nrow(covariateRef)) - covariateRef$included <- incs - covariateRef$covariateValue <- rep(0, nrow(covariateRef)) + param.best$epochs <- uniqueEpochs[which.max(tabulate(match(hyperSummary$bestEpochs[[bestInd]], uniqueEpochs)))] + outLoc <- tempfile(pattern = 'resNet') + outLoc <- file.path(outLoc, paste0('finalModel')) + param.best$resultsDir <- outLoc + dir.create(outLoc, recursive = TRUE) - modelTrained <- file.path(outLoc) - param.best <- param[bestInd,] + + + finalModel <- do.call(trainResNet, listAppend(param.best, list(sparseMatrix = sparseMatrix, + population = population, + train=FALSE))) + modelTrained <- file.path(outLoc, dir(outLoc)) comp <- Sys.time() - start # return model location @@ -88,7 +134,7 @@ fitResNet <- function(population, plpData, param, populationSettings = attr(population, 'metaData'), outcomeId=outcomeId, cohortId=cohortId, - varImp = covariateRef, + varImp = NULL, trainingTime =comp, covariateMap=sparseMatrix$map, # I think this is need for new data to map the same? predictionTrain = finalModel$prediction @@ -99,6 +145,12 @@ fitResNet <- function(population, plpData, param, return(result) } +#' @param sparseMatrix +#' +#' @param population +#' @param ... 
+#' @param train +#' #' @export trainResNet <- function(sparseMatrix, population,...,train=T) { @@ -108,7 +160,7 @@ trainResNet <- function(sparseMatrix, population,...,train=T) { "residualDropout", "hiddenDropout", "sizeEmbedding") modelParam <- param[modelParamNames] - fitParamNames <- c("weightDecay", "learningRate", "epochs") + fitParamNames <- c("weightDecay", "learningRate", "epochs", "batchSize") fitParams <- param[fitParamNames] n_features <- ncol(sparseMatrix$data) @@ -123,17 +175,11 @@ trainResNet <- function(sparseMatrix, population,...,train=T) { foldAuc <- c() foldEpochs <- c() for(index in 1:length(index_vect)){ - fitParams$resultsDir <- file.path(param$modelOutput, paste0('fold_', index)) - - if(!dir.exists(file.path(param$modelOutput, paste0('fold_', index)))){ - dir.create(file.path(param$modelOutput, paste0('fold_', index)), recursive = T) - } - ParallelLogger::logInfo(paste('Fold ',index, ' -- with ', sum(population$indexes!=index & population$indexes > 0),'train rows')) - estimator <- Estimator(baseModel=ResNet, - modelParameters=modelParam, - fitParameters=fitParams, - device=param$device) + estimator <- Estimator$new(baseModel=ResNet, + modelParameters=modelParam, + fitParameters=fitParams, + device=param$device) testIndices <- population$rowId[population$indexes==index] trainIndices <- population$rowId[(population$indexes!=index) & (population$indexes > 0)] trainDataset <- Dataset(sparseMatrix$data[population$rowId,], @@ -144,15 +190,8 @@ trainResNet <- function(sparseMatrix, population,...,train=T) { population$outcomeCount, indices = population$rowId%in%testIndices, numericalIndex = numericalIndex) - trainDataloader <- torch::dataloader(trainDataset, - batch_size=param$batch_size, - shuffle=T, - drop_last=TRUE) - testDataloader <- torch::dataloader(testDataset, - batch_size=param$batch_size, - shuffle=F) - - score <- estimator$fit(trainDataloader, testDataloader)$score(testDataloader) + estimator$fit(trainDataset, testDataset) + score <- estimator$bestScore bestEpoch <- estimator$bestEpoch auc <- score$auc foldAuc <- c(foldAuc, auc) @@ -164,11 +203,11 @@ trainResNet <- function(sparseMatrix, population,...,train=T) { } else { ParallelLogger::logInfo('Training deep neural network using Torch on whole training set') - fitParams$resultsDir <- param$modelOutput - estimator <- Estimator(baseModel = ResNet, - modelParameters = modelParam, - fitParameters = fitParams, - device=param$device) + fitParams$resultsDir <- param$resultsDir + estimator <- Estimator$new(baseModel = ResNet, + modelParameters = modelParam, + fitParameters = fitParams, + device=param$device) trainIndices <- population$rowId[population$indexes > 0] @@ -176,19 +215,12 @@ trainResNet <- function(sparseMatrix, population,...,train=T) { population$outcomeCount, indices=population$rowId%in%trainIndices, numericalIndex=numericalIndex) - trainDataloader <- torch::dataloader(trainDataset, - batch_size=param$batch_size, - shuffle=T, - drop_last=TRUE) - estimator$fitWholeTrainingSet(trainDataloader, - param$epochs) + + estimator$fitWholeTrainingSet(trainDataset) + # get predictions - dataloader <- torch::dataloader(trainDataset, - batch_size = param$batch_size, - shuffle=F, - drop_last=FALSE) prediction <- population[population$rowId%in%trainIndices, ] - prediction$value <- estimator$predictProba(dataloader) + prediction$value <- estimator$predictProba(trainDataset) #predictionsClass <- data.frame(value=predictions$value, # outcomeCount=as.array(trainDataset$labels)) @@ -288,236 +320,6 @@ ResNet <- 
torch::nn_module( } ) -Estimator <- torch::nn_module( - name = 'Estimator', - initialize = function(baseModel, modelParameters, fitParameters, - optimizer=torch::optim_adam, - criterion=torch::nn_bce_with_logits_loss, - device='cpu'){ - self$device <- device - self$model <- do.call(baseModel, modelParameters) - self$modelParameters <- modelParameters - - self$epochs <- self$itemOrDefaults(fitParameters, 'epochs', 10) - self$learningRate <- self$itemOrDefaults(fitParameters,'learningRate', 1e-3) - self$l2Norm <- self$itemOrDefaults(fitParameters, 'weightDecay', 1e-5) - - self$resultsDir <- self$itemOrDefaults(fitParameters, 'resultsDir', './results') - dir.create(self$resultsDir, recursive=TRUE, showWarnings=FALSE) - self$prefix <- self$itemOrDefaults(fitParameters, 'prefix', 'resnet') - - self$previousEpochs <- self$itemOrDefaults(fitParameters, 'previousEpochs', 0) - - self$optimizer <- optimizer(params=self$model$parameters, - lr=self$learningRate, - weight_decay=self$l2Norm) - self$criterion <- criterion() - self$model$to(device=self$device) - }, - - # fits the estimator - fit = function(dataloader, testDataloader) { - valLosses <- c() - valAUCs <- c() - - modelStateDict <- list() - modelHyperparameters <- list() - epoch <- list() - - lr <- c() - for (epochI in 1:self$epochs) { - - # fit the model - self$fitEpoch(dataloader) - - print(self$model$state_dict()$first_layer.weight[1,1:10]) # viewing - - # predict on test data - scores <- self$score(testDataloader) - - currentEpoch <- epochI + self$previousEpochs - lr <- c(lr, self$optimizer$param_groups[[1]]$lr) - ParallelLogger::logInfo('Epochs: ', currentEpoch, ' | Val AUC: ', - round(scores$auc,3), ' | Val Loss: ', - round(scores$loss,3), ' | LR: ', - self$optimizer$param_groups[[1]]$lr) - valLosses <- c(valLosses, scores$loss) - valAUCs <- c(valAUCs, scores$auc) - - # here it saves the results to lists rather than files - modelStateDict[[epochI]] <- self$model$state_dict() - modelHyperparameters[[epochI]] <- self$modelParameters - epoch[[epochI]] <- currentEpoch - - } - - - #extract best epoch from the saved checkpoints - bestEpochInd <- which.min(valLosses) # change this if a different metric is used - - bestModelStateDict <- modelStateDict[[bestEpochInd]] - self$model$load_state_dict(bestModelStateDict) - - bestEpoch <- epoch[[bestEpochInd]] - ParallelLogger::logInfo(paste0('Loaded best model (based on loss) from epoch ', bestEpoch)) - ParallelLogger::logInfo(paste0('ValLoss: ', valLosses[bestEpochInd])) - ParallelLogger::logInfo(paste0('valAUC: ', valAUCs[bestEpochInd])) - self$bestEpoch <- bestEpoch - - invisible(self) - }, - - # Fits whole training set on a specific number of epochs - # TODO What happens when learning rate changes per epochs? 
- # Ideally I would copy the learning rate strategy from before - fitWholeTrainingSet = function(dataloader, epochs) { - for (epoch in 1:epochs) { - self$fitEpoch(dataloader) - } - - }, - - # trains for one epoch - fitEpoch = function(dataloader){ - t = Sys.time() - batch_loss = 0 - i=1 - - self$model$train() - - print('testing') - - coro::loop(for (b in dataloader) { - cat = b[[1]]$to(device=self$device) - num = b[[2]]$to(device=self$device) - target = b[[3]]$to(device=self$device) - out = self$model(num, cat) - - loss = self$criterion(out, target) - - batch_loss = batch_loss + loss - if (i %% 10 == 0) { - elapsed_time <- Sys.time() - t - ParallelLogger::logInfo('Loss: ', round((batch_loss/1)$item(), 3), ' | Time: ', - round(elapsed_time,digits = 2), units(elapsed_time)) - t = Sys.time() - batch_loss = 0 - } - - loss$backward() - self$optimizer$step() - self$optimizer$zero_grad() - i = i + 1 - }) - - }, - - # calculates loss and auc after training for one epoch - score = function(dataloader){ - torch::with_no_grad({ - loss = c() - predictions = c() - targets = c() - self$model$eval() - coro::loop(for (b in dataloader) { - b <- self$batchToDevice(b) - cat <- b$cat - num <- b$num - target <- b$target - - pred <- self$model(num, cat) - predictions <- c(predictions, as.array(pred$cpu())) - targets <- c(targets, as.array(target$cpu())) - loss <- c(loss, self$criterion(pred, target)$item()) - }) - mean_loss <- mean(loss) - predictionsClass <- data.frame(value=predictions, outcomeCount=targets) - attr(predictionsClass, 'metaData')$predictionType <-'binary' - auc <- computeAuc(predictionsClass) - }) - return(list(loss=mean_loss, auc=auc)) - }, - - # predicts and outputs the probabilities - predictProba = function(dataloader) { - torch::with_no_grad({ - predictions <- c() - self$model$eval() - coro::loop(for (b in dataloader){ - b <- self$batchToDevice(b) - cat <- b$cat - num <- b$num - target <- b$target - pred <- self$model(num,cat) - predictions <- c(predictions, as.array(torch::torch_sigmoid(pred$cpu()))) - }) - }) - return(predictions) - }, - - - # predicts and outputs the class - predict = function(dataloader){ - predictions <- self$predict_proba(dataloader) - predicted_class <- torch::torch_argmax(torch::torch_unsqueeze(torch::torch_tensor(predictions), dim=2),dim=2) - return(predicted_class) - }, - - load_best_weight = function(){ - best_model_file <- self$extract_best_model(self$resultsDor) - best_model <- torch::torch_load(best_model_file) - state_dict <- best_model$model_state_dict - epoch <- best_model$epoch - self$model$load_state_dict(state_dict) - ParallelLogger::logInfo(paste('Loaded best model from epoch: ', epoch)) - }, - - - # sends a batch of data to device - ## TODO make agnostic of the form of batch - batchToDevice = function(batch) { - cat <- batch[[1]]$to(device=self$device) - num <- batch[[2]]$to(device=self$device) - target <- batch[[3]]$to(device=self$device) - - result <- list(cat=cat, num=num, target=target) - return(result) - }, - - # select item from list, and if it's null sets a default - itemOrDefaults = function (list, item, default = NULL) { - value = list[[item]] - if (is.null(value)) default else value - }, - -) - -Dataset <- torch::dataset( - name = 'Dataset', - - initialize=function(data, labels, indices, numericalIndex) { - - # add labels - self$target <- torch::torch_tensor(labels[indices]) - - # add features - #print(dim(as.matrix(data[indices,]))) ## testing - self$cat <- torch::torch_tensor(as.matrix(data[indices,-numericalIndex, drop = F]), 
dtype=torch::torch_float32()) - self$num <- torch::torch_tensor(as.matrix(data[indices,numericalIndex, drop = F]), dtype=torch::torch_float32()) - - }, - - .getitem = function(item) { - return(list(cat = self$cat[item,], - num = self$num[item,], - target = self$target[item])) - }, - - .length = function() { - self$target$size()[[1]] # shape[1] - } -) - diff --git a/man/predict.deepEstimator.Rd b/man/predict_deepEstimator.Rd similarity index 59% rename from man/predict.deepEstimator.Rd rename to man/predict_deepEstimator.Rd index f5600f8..1ddfbd2 100644 --- a/man/predict.deepEstimator.Rd +++ b/man/predict_deepEstimator.Rd @@ -1,10 +1,10 @@ % Generated by roxygen2: do not edit by hand % Please edit documentation in R/Predict.R -\name{predict.deepEstimator} -\alias{predict.deepEstimator} -\title{predict.deepEstimator} +\name{predict_deepEstimator} +\alias{predict_deepEstimator} +\title{predict_deepEstimator} \usage{ -predict.deepEstimator(x) +predict_deepEstimator(x) } \description{ prediction function for models using estimator class From c7ad8c23dccf840ad4746b9785668d702d2f82c7 Mon Sep 17 00:00:00 2001 From: Egill Fridgeirsson Date: Thu, 11 Nov 2021 09:31:21 +0100 Subject: [PATCH 020/140] Fixed name of predict_deepEstimator function --- NAMESPACE | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/NAMESPACE b/NAMESPACE index c9a4bc0..c9f2849 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -2,10 +2,10 @@ export(fitDeepNNTorch) export(fitResNet) -export(predict.deepEstimator) export(predictAndromeda) export(predictPlp) export(predictProbabilities) +export(predict_deepEstimator) export(setCIReNN) export(setCNNTorch) export(setCovNN) From ebc2ca5b22219008ba82d79e1d273cd7bb0d8ef3 Mon Sep 17 00:00:00 2001 From: Egill Fridgeirsson Date: Thu, 11 Nov 2021 16:47:23 +0100 Subject: [PATCH 021/140] changed dataset to use sparse COO matrix --- R/Dataset.R | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/R/Dataset.R b/R/Dataset.R index 21445de..7ab7cd4 100644 --- a/R/Dataset.R +++ b/R/Dataset.R @@ -7,15 +7,19 @@ Dataset <- torch::dataset( self$target <- torch::torch_tensor(labels[indices]) # add features - #print(dim(as.matrix(data[indices,]))) ## testing - # TODO should be torch sparse COO matrix - self$cat <- torch::torch_tensor(as.matrix(data[indices,-numericalIndex, drop = F]), dtype=torch::torch_float32()) + dataCat <- data[indices,-numericalIndex] + matrix <- as(dataCat, 'dgTMatrix') # convert to triplet sparse format + sparseIndices <- torch::torch_tensor(matrix(c(matrix@i + 1, matrix@j + 1), ncol=2), dtype = torch::torch_long()) + values <- torch::torch_tensor(matrix(c(matrix@x)), dtype = torch::torch_float32()) + self$cat <- torch::torch_sparse_coo_tensor(indices=sparseIndices$t(), + values=values$squeeze(), + dtype=torch::torch_float32())$coalesce() self$num <- torch::torch_tensor(as.matrix(data[indices,numericalIndex, drop = F]), dtype=torch::torch_float32()) }, .getitem = function(item) { - return(list(cat = self$cat[item,], + return(list(cat = self$cat[item]$to_dense(), num = self$num[item,], target = self$target[item])) }, From de84bd5f4ef5f98ce1a3238a5b83b4fb72e0c9f7 Mon Sep 17 00:00:00 2001 From: Egill Fridgeirsson Date: Thu, 11 Nov 2021 16:47:47 +0100 Subject: [PATCH 022/140] Added an earlyStopper class, which stops the fit if the AUCs on validation set is not improving --- R/Estimator.R | 94 +++++++++++++++++++++++++++++++++++++++++---------- 1 file changed, 76 insertions(+), 18 deletions(-) diff --git a/R/Estimator.R b/R/Estimator.R index 
1148f1f..71289a7 100644 --- a/R/Estimator.R +++ b/R/Estimator.R @@ -15,10 +15,14 @@ Estimator <- R6::R6Class('Estimator', bestScore = NULL, bestEpoch = NULL, model = NULL, - initialize = function(baseModel, modelParameters, fitParameters, - optimizer=torch::optim_adam, - criterion=torch::nn_bce_with_logits_loss, - device='cpu'){ + earlyStopper = NULL, + initialize = function(baseModel, + modelParameters, + fitParameters, + optimizer=torch::optim_adam, + criterion=torch::nn_bce_with_logits_loss, + device='cpu', + patience=3){ self$device <- device self$model <- do.call(baseModel, modelParameters) self$modelParameters <- modelParameters @@ -41,6 +45,8 @@ Estimator <- R6::R6Class('Estimator', lr=self$learningRate, weight_decay=self$l2Norm) self$criterion <- criterion() + self$earlyStopper <- EarlyStopping$new(patience=patience) + self$model$to(device=self$device) self$bestScore <- NULL @@ -79,16 +85,26 @@ Estimator <- R6::R6Class('Estimator', self$optimizer$param_groups[[1]]$lr) valLosses <- c(valLosses, scores$loss) valAUCs <- c(valAUCs, scores$auc) - - # here it saves the results to lists rather than files - modelStateDict[[epochI]] <- self$model$state_dict() - epoch[[epochI]] <- currentEpoch - + self$earlyStopper$call(scores$auc) + if (self$earlyStopper$improved) { + # here it saves the results to lists rather than files + modelStateDict[[epochI]] <- self$model$state_dict() + epoch[[epochI]] <- currentEpoch + } + if (self$earlyStopper$earlyStop) { + ParallelLogger::logInfo('Early stopping, validation AUC stopped improving') + self$finishFit(valAUCs, modelStateDict, valLosses, epoch) + invisible(self) + } } - - + self$finishFit(valAUCs, modelStateDict, valLosses, epoch) + invisible(self) + }, + + # operations that run when fitting is finished + finishFit = function(valAUCs, modelStateDict, valLosses, epoch) { #extract best epoch from the saved checkpoints - bestEpochInd <- which.min(valAUCs) # change this if a different metric is used + bestEpochInd <- which.max(valAUCs) # change this if a different metric is used bestModelStateDict <- modelStateDict[[bestEpochInd]] self$model$load_state_dict(bestModelStateDict) @@ -97,11 +113,9 @@ Estimator <- R6::R6Class('Estimator', self$bestEpoch <- bestEpoch self$bestScore <- list(loss= valLosses[bestEpochInd], auc=valAUCs[bestEpochInd]) - ParallelLogger::logInfo(paste0('Loaded best model (based on loss) from epoch ', bestEpoch)) - ParallelLogger::logInfo(paste0('ValLoss: ', self$bestScore$loss)) - ParallelLogger::logInfo(paste0('valAUC: ', self$bestScore$auc)) - - invisible(self) + ParallelLogger::logInfo('Loaded best model (based on AUC) from epoch ', bestEpoch) + ParallelLogger::logInfo('ValLoss: ', self$bestScore$loss) + ParallelLogger::logInfo('valAUC: ', self$bestScore$auc) }, # Fits whole training set on a specific number of epochs @@ -230,4 +244,48 @@ Estimator <- R6::R6Class('Estimator', if (is.null(value)) default else value } ) -) \ No newline at end of file +) + +EarlyStopping <- R6::R6Class('EarlyStopping', + public = list( + patience = NULL, + delta = NULL, + counter = NULL, + bestScore = NULL, + earlyStop = NULL, + improved = NULL, + previousScore = NULL, + initialize = function(patience=3, delta=0) { + self$patience <- patience + self$counter <- 0 + self$bestScore <- NULL + self$earlyStop <- FALSE + self$improved <- FALSE + self$delta <- delta + self$previousScore <- 0 + }, + call = function(metric){ + score <- metric + if (is.null(self$bestScore)) { + self$bestScore <- score + self$improved <- TRUE + } + else if (score < self$bestScore 
+ self$delta) { + self$counter <- self$counter + 1 + self$improved <- FALSE + ParallelLogger::logInfo('EarlyStopping counter: ', self$counter, + ' out of ', self$patience) + if (self$counter >= self$patience) { + self$earlyStop <- TRUE + } + } + else { + self$bestScore <- score + self$counter <- 0 + self$improved <- TRUE + } + self$previousScore <- score + } + ) +) + From 9c44d8c62f84e231d8802dae10f6a1d66ea00841 Mon Sep 17 00:00:00 2001 From: Egill Fridgeirsson Date: Sun, 14 Nov 2021 16:19:59 +0100 Subject: [PATCH 023/140] Added a collate fn, so instead of changing each sample to_dense and then stacking in batch, it first stacks the sparse tensors and then calls to_dense --- R/Dataset.R | 34 ++++++++++++++++++++++++++++++---- 1 file changed, 30 insertions(+), 4 deletions(-) diff --git a/R/Dataset.R b/R/Dataset.R index 7ab7cd4..c9cd47c 100644 --- a/R/Dataset.R +++ b/R/Dataset.R @@ -1,7 +1,7 @@ Dataset <- torch::dataset( name = 'Dataset', - initialize=function(data, labels, indices, numericalIndex) { + initialize = function(data, labels, indices, numericalIndex) { # add labels self$target <- torch::torch_tensor(labels[indices]) @@ -15,11 +15,10 @@ Dataset <- torch::dataset( values=values$squeeze(), dtype=torch::torch_float32())$coalesce() self$num <- torch::torch_tensor(as.matrix(data[indices,numericalIndex, drop = F]), dtype=torch::torch_float32()) - }, .getitem = function(item) { - return(list(cat = self$cat[item]$to_dense(), + return(list(cat = self$cat[item], num = self$num[item,], target = self$target[item])) }, @@ -27,4 +26,31 @@ Dataset <- torch::dataset( .length = function() { self$target$size()[[1]] # shape[1] } -) \ No newline at end of file +) + +# a function to speed up the collation so I dont' call to_dense() +# on the sparse tensors until they have been combined for the batch +sparseCollate <- function(batch) { + browser() + elem <- batch[[1]] + if (inherits(elem, "torch_tensor")) { + # temporary fix using a tryCatch until torch in R author adds + # an is_sparse method or exposes tensor&layout + tryCatch(return(torch::torch_stack(batch,dim = 1)$to_dense()), + error=function(e) return(torch::torch_stack(batch, dim = 1))) + + # if (Reduce("*", elem$size()) > elem$numel()) { + # return(torch::torch_stack(batch,dim = 1)$to_dense()) + # } + # + # return(torch::torch_stack(batch, dim = 1)) + } + else if (is.list(elem)) { + + # preserve names of elements + named_seq <- seq_along(elem) + names(named_seq) <- names(elem) + + lapply(named_seq, function(i) {sparseCollate(lapply(batch, function(x) x[[i]]))}) + } +} \ No newline at end of file From aebcbd9d899684f0ab69f9e4efd8e18961c891ac Mon Sep 17 00:00:00 2001 From: Egill Fridgeirsson Date: Sun, 14 Nov 2021 16:21:23 +0100 Subject: [PATCH 024/140] the custom collate_fn is now a property of the dataset class --- R/Dataset.R | 1 + 1 file changed, 1 insertion(+) diff --git a/R/Dataset.R b/R/Dataset.R index c9cd47c..e8fec48 100644 --- a/R/Dataset.R +++ b/R/Dataset.R @@ -3,6 +3,7 @@ Dataset <- torch::dataset( initialize = function(data, labels, indices, numericalIndex) { + self$collate_fn <- sparseCollate # add labels self$target <- torch::torch_tensor(labels[indices]) From 425696eeda872d3db9cf6935cc16621fa782774e Mon Sep 17 00:00:00 2001 From: Egill Fridgeirsson Date: Sun, 14 Nov 2021 16:22:09 +0100 Subject: [PATCH 025/140] added collate_fn to dataloaders, and moved fit_epoch method --- R/Estimator.R | 80 +++++++++++++++++++++++++++------------------------ 1 file changed, 42 insertions(+), 38 deletions(-) diff --git a/R/Estimator.R 
b/R/Estimator.R index 71289a7..1f9938a 100644 --- a/R/Estimator.R +++ b/R/Estimator.R @@ -59,11 +59,13 @@ Estimator <- R6::R6Class('Estimator', valAUCs <- c() dataloader <- torch::dataloader(dataset, - batch_size=self$batchSize, - shuffle=T) + batch_size = self$batchSize, + shuffle = T, + collate_fn = dataset$collate_fn) testDataloader <- torch::dataloader(testDataset, - batch_size=self$batchSize, - shuffle=F) + batch_size = self$batchSize, + shuffle = F, + collate_fn = dataset$collate_fn) modelStateDict <- list() epoch <- list() @@ -101,6 +103,39 @@ Estimator <- R6::R6Class('Estimator', invisible(self) }, + # trains for one epoch + fitEpoch = function(dataloader){ + t <- Sys.time() + batch_loss <- 0 + i <- 1 + + self$model$train() + + coro::loop(for (b in dataloader) { + cat <- b[[1]]$to(device=self$device) + num <- b[[2]]$to(device=self$device) + target <- b[[3]]$to(device=self$device) + out <- self$model(num, cat) + + loss <- self$criterion(out, target) + + batch_loss = batch_loss + loss + if (i %% 10 == 0) { + elapsed_time <- Sys.time() - t + ParallelLogger::logInfo('Loss: ', round((batch_loss/1)$item(), 3), ' | Time: ', + round(elapsed_time,digits = 2), units(elapsed_time)) + t <- Sys.time() + batch_loss = 0 + } + + loss$backward() + self$optimizer$step() + self$optimizer$zero_grad() + i <- i + 1 + }) + + }, + # operations that run when fitting is finished finishFit = function(valAUCs, modelStateDict, valLosses, epoch) { #extract best epoch from the saved checkpoints @@ -111,7 +146,8 @@ Estimator <- R6::R6Class('Estimator', bestEpoch <- epoch[[bestEpochInd]] self$bestEpoch <- bestEpoch - self$bestScore <- list(loss= valLosses[bestEpochInd], auc=valAUCs[bestEpochInd]) + self$bestScore <- list(loss = valLosses[bestEpochInd], + auc = valAUCs[bestEpochInd]) ParallelLogger::logInfo('Loaded best model (based on AUC) from epoch ', bestEpoch) ParallelLogger::logInfo('ValLoss: ', self$bestScore$loss) @@ -140,39 +176,6 @@ Estimator <- R6::R6Class('Estimator', }, - # trains for one epoch - fitEpoch = function(dataloader){ - t = Sys.time() - batch_loss = 0 - i=1 - - self$model$train() - - coro::loop(for (b in dataloader) { - cat = b[[1]]$to(device=self$device) - num = b[[2]]$to(device=self$device) - target = b[[3]]$to(device=self$device) - out = self$model(num, cat) - - loss = self$criterion(out, target) - - batch_loss = batch_loss + loss - if (i %% 10 == 0) { - elapsed_time <- Sys.time() - t - ParallelLogger::logInfo('Loss: ', round((batch_loss/1)$item(), 3), ' | Time: ', - round(elapsed_time,digits = 2), units(elapsed_time)) - t = Sys.time() - batch_loss = 0 - } - - loss$backward() - self$optimizer$step() - self$optimizer$zero_grad() - i = i + 1 - }) - - }, - # calculates loss and auc after training for one epoch score = function(dataloader){ torch::with_no_grad({ @@ -289,3 +292,4 @@ EarlyStopping <- R6::R6Class('EarlyStopping', ) ) + From 87dae54672d2d4ffc7ba1e17ca60b5f75bab8259 Mon Sep 17 00:00:00 2001 From: Egill Fridgeirsson Date: Sun, 14 Nov 2021 16:22:58 +0100 Subject: [PATCH 026/140] changed mapcovariates so deleted covariates won't be a part of the sparse matrix --- R/Formatting.R | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/R/Formatting.R b/R/Formatting.R index 90515f4..47feec8 100644 --- a/R/Formatting.R +++ b/R/Formatting.R @@ -141,8 +141,13 @@ MapCovariates <- function(covariateData,population, mapping=NULL){ # restrict to population for speed ParallelLogger::logTrace('restricting to population for speed and mapping') if(is.null(mapping)){ - mapping <- 
data.frame(oldCovariateId = as.data.frame(covariateData$covariateRef %>% dplyr::select(.data$covariateId)), - newCovariateId = 1:nrow(as.data.frame(covariateData$covariateRef))) + metaData <- attr(covariateData, 'metaData') + deletedCovariates <- c(metaData$deletedRedundantCovariateIds, metaData$deletedInfrequentCovariateIds) + mapping <- data.frame(oldCovariateId = as.data.frame(covariateData$covariateRef %>% + dplyr::filter(!(.data$covariateId %in% deletedCovariates)) %>% + dplyr::select(.data$covariateId)), + newCovariateId = 1:nrow(as.data.frame(covariateData$covariateRef %>% + filter(!(covariateId %in% deletedCovariates))))) } if(sum(colnames(mapping)%in%c('oldCovariateId','newCovariateId'))!=2){ colnames(mapping) <- c('oldCovariateId','newCovariateId') From 107ec5aee849e264ff4351fdafa869ed02afd5a9 Mon Sep 17 00:00:00 2001 From: Egill Fridgeirsson Date: Sun, 14 Nov 2021 16:23:20 +0100 Subject: [PATCH 027/140] Fixed batchSize parameter --- R/ResNet.R | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/R/ResNet.R b/R/ResNet.R index 84bcb64..6335c8a 100644 --- a/R/ResNet.R +++ b/R/ResNet.R @@ -66,7 +66,7 @@ setResNet <- function(numLayers=1:16, sizeHidden=2^(6:10), hiddenFactor=1:4, param <- param[sample(nrow(param), randomSample),] } param$device <- device - param$batch_size <- batchSize + param$batchSize <- batchSize param$epochs <- epochs results <- list(model='fitResNet', param=param, name='ResNet') From 4cfebcb4363e5d913f55ba27cf2b8399e6c65b9f Mon Sep 17 00:00:00 2001 From: Egill Fridgeirsson Date: Mon, 15 Nov 2021 10:35:51 +0100 Subject: [PATCH 028/140] Removed a stray browser call that shouldn't have been commited --- R/Dataset.R | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/R/Dataset.R b/R/Dataset.R index e8fec48..3b9ad15 100644 --- a/R/Dataset.R +++ b/R/Dataset.R @@ -32,11 +32,10 @@ Dataset <- torch::dataset( # a function to speed up the collation so I dont' call to_dense() # on the sparse tensors until they have been combined for the batch sparseCollate <- function(batch) { - browser() elem <- batch[[1]] if (inherits(elem, "torch_tensor")) { # temporary fix using a tryCatch until torch in R author adds - # an is_sparse method or exposes tensor&layout + # an is_sparse method or exposes tensor$layout tryCatch(return(torch::torch_stack(batch,dim = 1)$to_dense()), error=function(e) return(torch::torch_stack(batch, dim = 1))) From 9b90e6b4f1632075bf790a261670e0d95a4227cf Mon Sep 17 00:00:00 2001 From: Egill Fridgeirsson Date: Mon, 15 Nov 2021 10:36:31 +0100 Subject: [PATCH 029/140] Need to move model to gpu BEFORE i initialize the optimizer, otherwise the model won't train --- R/Estimator.R | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/R/Estimator.R b/R/Estimator.R index 1f9938a..908861f 100644 --- a/R/Estimator.R +++ b/R/Estimator.R @@ -40,6 +40,7 @@ Estimator <- R6::R6Class('Estimator', self$prefix <- self$itemOrDefaults(fitParameters, 'prefix', self$model$name) self$previousEpochs <- self$itemOrDefaults(fitParameters, 'previousEpochs', 0) + self$model$to(device=self$device) self$optimizer <- optimizer(params=self$model$parameters, lr=self$learningRate, @@ -47,7 +48,6 @@ Estimator <- R6::R6Class('Estimator', self$criterion <- criterion() self$earlyStopper <- EarlyStopping$new(patience=patience) - self$model$to(device=self$device) self$bestScore <- NULL self$bestEpoch <- NULL @@ -96,7 +96,7 @@ Estimator <- R6::R6Class('Estimator', if (self$earlyStopper$earlyStop) { ParallelLogger::logInfo('Early stopping, 
validation AUC stopped improving') self$finishFit(valAUCs, modelStateDict, valLosses, epoch) - invisible(self) + return(invisible(self)) } } self$finishFit(valAUCs, modelStateDict, valLosses, epoch) @@ -112,14 +112,17 @@ Estimator <- R6::R6Class('Estimator', self$model$train() coro::loop(for (b in dataloader) { + self$optimizer$zero_grad() cat <- b[[1]]$to(device=self$device) num <- b[[2]]$to(device=self$device) target <- b[[3]]$to(device=self$device) out <- self$model(num, cat) - loss <- self$criterion(out, target) + loss$backward() + self$optimizer$step() batch_loss = batch_loss + loss + if (i %% 10 == 0) { elapsed_time <- Sys.time() - t ParallelLogger::logInfo('Loss: ', round((batch_loss/1)$item(), 3), ' | Time: ', @@ -128,9 +131,7 @@ Estimator <- R6::R6Class('Estimator', batch_loss = 0 } - loss$backward() - self$optimizer$step() - self$optimizer$zero_grad() + i <- i + 1 }) @@ -206,7 +207,8 @@ Estimator <- R6::R6Class('Estimator', predictProba = function(dataset) { dataloader <- torch::dataloader(dataset, batch_size = self$batchSize, - shuffle=F) + shuffle=F, + collate_fn = dataset$collate_fn) torch::with_no_grad({ predictions <- c() self$model$eval() From ac086114e7437a1e8db1fc39a88d24449f6a930b Mon Sep 17 00:00:00 2001 From: Egill Fridgeirsson Date: Mon, 15 Nov 2021 10:38:27 +0100 Subject: [PATCH 030/140] Add preproccessor settings to covariateData so sparseMatrix conversion knows which covariates have been deleted by tidyCovariates --- R/Predict.R | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/R/Predict.R b/R/Predict.R index ec98f60..625e150 100644 --- a/R/Predict.R +++ b/R/Predict.R @@ -248,7 +248,7 @@ predict.knn <- function(plpData, population, plpModel, ...){ #' @usage predict_deepEstimator(plpModel, population, plpData) #' @export predict_deepEstimator predict_deepEstimator <- function(plpModel, population, plpData, ...) { - + attr(plpData$covariateData,'metaData') <- plpModel$metaData$preprocessSettings sparseMatrix <- toSparseMDeep(plpData, population) indices <- population$rowId numericalIndex <- sparseMatrix$map$newCovariateId[sparseMatrix$map$oldCovariateId==1002] From 3cddf9bce587bb19ca2fe6dd98c4a37657b37293 Mon Sep 17 00:00:00 2001 From: Egill Fridgeirsson Date: Mon, 15 Nov 2021 10:38:41 +0100 Subject: [PATCH 031/140] Fixed year of copyright --- R/ResNet.R | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/R/ResNet.R b/R/ResNet.R index 6335c8a..c32e233 100644 --- a/R/ResNet.R +++ b/R/ResNet.R @@ -1,6 +1,6 @@ # @file ResNet.R # -# Copyright 2020 Observational Health Data Sciences and Informatics +# Copyright 2021 Observational Health Data Sciences and Informatics # # This file is part of PatientLevelPrediction # From a13256aeab3f3755ee95c678334135e0f81b841c Mon Sep 17 00:00:00 2001 From: Egill Fridgeirsson Date: Mon, 15 Nov 2021 13:24:58 +0100 Subject: [PATCH 032/140] Added posWeight argument to loss function, removed sparse matrix generation from dataset for now. 
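
The positive-class weight is derived in the Dataset from the label imbalance (number of negatives divided by number of positives) and handed on to the loss, so that the rarer outcome class contributes more to the gradient. A minimal sketch of that weighting on toy data, assuming torch's nn_bce_with_logits_loss() and its pos_weight argument (variable names here are illustrative, not the estimator code itself):

    library(torch)
    target <- torch_tensor(c(rep(0, 90), rep(1, 10)))          # 90 negatives, 10 positives
    posWeight <- ((target == 0)$sum() / target$sum())$item()   # ratio of negatives to positives: 9
    criterion <- nn_bce_with_logits_loss(pos_weight = torch_tensor(posWeight))
    logits <- torch_randn(100)                                  # dummy model outputs
    loss <- criterion(logits, target)                           # positive samples weighted 9x in the loss
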
--- R/Dataset.R | 19 +++++++++++++------ R/Estimator.R | 11 ++++++----- R/ResNet.R | 9 +++++---- 3 files changed, 24 insertions(+), 15 deletions(-) diff --git a/R/Dataset.R b/R/Dataset.R index 3b9ad15..a8d9574 100644 --- a/R/Dataset.R +++ b/R/Dataset.R @@ -7,14 +7,21 @@ Dataset <- torch::dataset( # add labels self$target <- torch::torch_tensor(labels[indices]) + # Weight to add in loss function to positive class + self$posWeight <- ((self$target==0)$sum()/self$target$sum())$item() + # add features dataCat <- data[indices,-numericalIndex] - matrix <- as(dataCat, 'dgTMatrix') # convert to triplet sparse format - sparseIndices <- torch::torch_tensor(matrix(c(matrix@i + 1, matrix@j + 1), ncol=2), dtype = torch::torch_long()) - values <- torch::torch_tensor(matrix(c(matrix@x)), dtype = torch::torch_float32()) - self$cat <- torch::torch_sparse_coo_tensor(indices=sparseIndices$t(), - values=values$squeeze(), - dtype=torch::torch_float32())$coalesce() + self$cat <- torch::torch_tensor(as.matrix(dataCat), dtype=torch::torch_float32()) + + # comment out the sparse matrix for now, is really slow need to find + # a better solution for converting it to dense before feeding to model + # matrix <- as(dataCat, 'dgTMatrix') # convert to triplet sparse format + # sparseIndices <- torch::torch_tensor(matrix(c(matrix@i + 1, matrix@j + 1), ncol=2), dtype = torch::torch_long()) + # values <- torch::torch_tensor(matrix(c(matrix@x)), dtype = torch::torch_float32()) + # self$cat <- torch::torch_sparse_coo_tensor(indices=sparseIndices$t(), + # values=values$squeeze(), + # dtype=torch::torch_float32())$coalesce() self$num <- torch::torch_tensor(as.matrix(data[indices,numericalIndex, drop = F]), dtype=torch::torch_float32()) }, diff --git a/R/Estimator.R b/R/Estimator.R index 908861f..6e66200 100644 --- a/R/Estimator.R +++ b/R/Estimator.R @@ -16,6 +16,7 @@ Estimator <- R6::R6Class('Estimator', bestEpoch = NULL, model = NULL, earlyStopper = NULL, + posWeight = NULL, initialize = function(baseModel, modelParameters, fitParameters, @@ -31,6 +32,7 @@ Estimator <- R6::R6Class('Estimator', self$learningRate <- self$itemOrDefaults(fitParameters,'learningRate', 1e-3) self$l2Norm <- self$itemOrDefaults(fitParameters, 'weightDecay', 1e-5) self$batchSize <- self$itemOrDefaults(fitParameters, 'batchSize', 1024) + self$posWeight <- self$itemOrDefaults(fitParameters, 'posWeight', 1) # don´t save checkpoints unless you get a resultDir self$resultsDir <- self$itemOrDefaults(fitParameters, 'resultsDir', NULL) @@ -45,7 +47,8 @@ Estimator <- R6::R6Class('Estimator', self$optimizer <- optimizer(params=self$model$parameters, lr=self$learningRate, weight_decay=self$l2Norm) - self$criterion <- criterion() + self$criterion <- criterion(torch::torch_tensor(self$posWeight, + device=self$device)) self$earlyStopper <- EarlyStopping$new(patience=patience) @@ -60,12 +63,10 @@ Estimator <- R6::R6Class('Estimator', dataloader <- torch::dataloader(dataset, batch_size = self$batchSize, - shuffle = T, - collate_fn = dataset$collate_fn) + shuffle = T) testDataloader <- torch::dataloader(testDataset, batch_size = self$batchSize, - shuffle = F, - collate_fn = dataset$collate_fn) + shuffle = F) modelStateDict <- list() epoch <- list() diff --git a/R/ResNet.R b/R/ResNet.R index c32e233..ffdab97 100644 --- a/R/ResNet.R +++ b/R/ResNet.R @@ -176,10 +176,6 @@ trainResNet <- function(sparseMatrix, population,...,train=T) { foldEpochs <- c() for(index in 1:length(index_vect)){ ParallelLogger::logInfo(paste('Fold ',index, ' -- with ', 
sum(population$indexes!=index & population$indexes > 0),'train rows')) - estimator <- Estimator$new(baseModel=ResNet, - modelParameters=modelParam, - fitParameters=fitParams, - device=param$device) testIndices <- population$rowId[population$indexes==index] trainIndices <- population$rowId[(population$indexes!=index) & (population$indexes > 0)] trainDataset <- Dataset(sparseMatrix$data[population$rowId,], @@ -190,6 +186,11 @@ trainResNet <- function(sparseMatrix, population,...,train=T) { population$outcomeCount, indices = population$rowId%in%testIndices, numericalIndex = numericalIndex) + fitParams['posWeight'] <- trainDataset$posWeight + estimator <- Estimator$new(baseModel=ResNet, + modelParameters=modelParam, + fitParameters=fitParams, + device=param$device) estimator$fit(trainDataset, testDataset) score <- estimator$bestScore bestEpoch <- estimator$bestEpoch From d5407516b478743b747275cdbe516b5ef642c7cc Mon Sep 17 00:00:00 2001 From: Egill Fridgeirsson Date: Mon, 15 Nov 2021 14:05:16 +0100 Subject: [PATCH 033/140] Fixed example to work for resnet --- extras/example.R | 46 ++++++++++++++++++++-------------------------- 1 file changed, 20 insertions(+), 26 deletions(-) diff --git a/extras/example.R b/extras/example.R index ac748c0..5626dc9 100644 --- a/extras/example.R +++ b/extras/example.R @@ -85,34 +85,28 @@ deepset <- setDeepNNTorch(units=list(c(128, 64), 128), layer_dropout=c(0.2), #debug(fitDeepNNTorch) -res <- runPlp(population = population, - plpData = plpData, - nfold = 3, - modelSettings = deepset, - savePlpData = F, - savePlpResult = F, - savePlpPlots = F, - saveEvaluation = F) - - - -resSet <- setResNet(numLayers=2, sizeHidden=c(2^3,2^4), hiddenFactor=1:4, - residualDropout=c(0,0,1), - hiddenDropout=c(0,0,1), - normalization='BatchNorm', activation= 'RelU', - sizeEmbedding=2^(6), weightDecay=c(1e-6), - learningRate=c(0.1), seed=42, hyperParamSearch='random', - randomSample=3, - device='cpu', - batch_size=1024, +# res <- runPlp(population = population, +# plpData = plpData, +# nfold = 3, +# modelSettings = deepset, +# savePlpData = F, +# savePlpResult = F, +# savePlpPlots = F, +# saveEvaluation = F) +# + + +resSet <- setResNet(numLayers=5, sizeHidden=256, hiddenFactor=2, + residualDropout=c(0.1), + hiddenDropout=c(0.1), + normalization='BatchNorm', activation= 'RelU', + sizeEmbedding=64, weightDecay=c(1e-6), + learningRate=c(3e-4), seed=42, hyperParamSearch='random', + randomSample=1, + device='cuda:0', + batchSize=128, epochs=10) -resSet <- setResNet(numLayers=1:16, sizeHidden=2^(6:10), hiddenFactor=1:4, - residualDropout=seq(0,0.3,0.05), hiddenDropout=seq(0,0.3,0.05), - normalization='BatchNorm', activation='RelU', - sizeEmbedding=2^(6:9), weightDecay=c(1e-6, 1e-3), - learningRate=c(1e-2,1e-5), seed=42, hyperParamSearch='random', - randomSample=10, device='cpu', batch_size=1024, epochs=5) res2 <- runPlp(population = population, plpData = plpData, From 84704b0ffa72b1623929f6187d6db572096b672a Mon Sep 17 00:00:00 2001 From: jreps Date: Mon, 24 Jan 2022 14:27:17 -0500 Subject: [PATCH 034/140] adding code for new PLP initial code to run with new PLP (>=v5.0.0) --- NAMESPACE | 3 + R/Dataset.R | 63 ++++ R/Estimator.R | 3 +- R/ResNet_plp5.R | 542 +++++++++++++++++++++++++++++++++++ extras/example_plp5.R | 142 +++++++++ man/fitResNet_plp5.Rd | 24 ++ man/predict.deepEstimator.Rd | 11 - man/predictDeepEstimator.Rd | 18 ++ man/predict_deepEstimator.Rd | 9 +- man/setResNet.Rd | 64 +++++ man/setResNet_plp5.Rd | 64 +++++ 11 files changed, 930 insertions(+), 13 deletions(-) create mode 
100644 R/ResNet_plp5.R create mode 100644 extras/example_plp5.R create mode 100644 man/fitResNet_plp5.Rd delete mode 100644 man/predict.deepEstimator.Rd create mode 100644 man/predictDeepEstimator.Rd create mode 100644 man/setResNet.Rd create mode 100644 man/setResNet_plp5.Rd diff --git a/NAMESPACE b/NAMESPACE index c9f2849..ff740a3 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -2,7 +2,9 @@ export(fitDeepNNTorch) export(fitResNet) +export(fitResNet_plp5) export(predictAndromeda) +export(predictDeepEstimator) export(predictPlp) export(predictProbabilities) export(predict_deepEstimator) @@ -14,6 +16,7 @@ export(setDeepNN) export(setDeepNNTorch) export(setRNNTorch) export(setResNet) +export(setResNet_plp5) export(toSparseMDeep) export(toSparseRTorch) export(trainResNet) diff --git a/R/Dataset.R b/R/Dataset.R index a8d9574..7e54532 100644 --- a/R/Dataset.R +++ b/R/Dataset.R @@ -1,3 +1,66 @@ +Dataset_plp5 <- torch::dataset( + name = 'Dataset', + + initialize = function(data, labels = NULL, numericalIndex = NULL) { + + # determine numeric + if(is.null(numericalIndex)){ + colBin <- apply(data, 2, function(x) sum(x==1 | x==0)) + colLen <- apply(data, 2, length) + numericalIndex <- colLen != colBin + } + + self$numericalIndex <- numericalIndex + + self$collate_fn <- sparseCollate + # add labels if training (make 0 vector for prediction) + if(!is.null(labels)){ + self$target <- torch::torch_tensor(labels) + } else{ + self$target <- torch::torch_tensor(rep(0, nrow(data))) + } + + + # Weight to add in loss function to positive class + self$posWeight <- ((self$target==0)$sum()/self$target$sum())$item() + + # add features + dataCat <- data[, !numericalIndex] + self$cat <- torch::torch_tensor(as.matrix(dataCat), dtype=torch::torch_float32()) + + # comment out the sparse matrix for now, is really slow need to find + # a better solution for converting it to dense before feeding to model + # matrix <- as(dataCat, 'dgTMatrix') # convert to triplet sparse format + # sparseIndices <- torch::torch_tensor(matrix(c(matrix@i + 1, matrix@j + 1), ncol=2), dtype = torch::torch_long()) + # values <- torch::torch_tensor(matrix(c(matrix@x)), dtype = torch::torch_float32()) + # self$cat <- torch::torch_sparse_coo_tensor(indices=sparseIndices$t(), + # values=values$squeeze(), + # dtype=torch::torch_float32())$coalesce() + self$num <- torch::torch_tensor(as.matrix(data[,numericalIndex, drop = F]), dtype=torch::torch_float32()) + }, + + .getNumericalIndex = function() { + return( + self$numericalIndex + ) + }, + + .getitem = function(item) { + return( + list( + cat = self$cat[item], + num = self$num[item,], + target = self$target[item] + ) + ) + }, + + .length = function() { + self$target$size()[[1]] # shape[1] + } +) + + Dataset <- torch::dataset( name = 'Dataset', diff --git a/R/Estimator.R b/R/Estimator.R index 6e66200..e2188d2 100644 --- a/R/Estimator.R +++ b/R/Estimator.R @@ -198,7 +198,8 @@ Estimator <- R6::R6Class('Estimator', }) mean_loss <- mean(loss) predictionsClass <- data.frame(value=predictions, outcomeCount=targets) - attr(predictionsClass, 'metaData')$predictionType <-'binary' + attr(predictionsClass, 'metaData')$predictionType <-'binary' #old can be remvoed + attr(predictionsClass, 'metaData')$modelType <-'binary' auc <- computeAuc(predictionsClass) }) return(list(loss=mean_loss, auc=auc)) diff --git a/R/ResNet_plp5.R b/R/ResNet_plp5.R new file mode 100644 index 0000000..94692cd --- /dev/null +++ b/R/ResNet_plp5.R @@ -0,0 +1,542 @@ +# @file ResNet.R +# +# Copyright 2021 Observational Health Data Sciences and 
Informatics +# +# This file is part of PatientLevelPrediction +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +#' setResNet_plp5 +#' +#' @description +#' Creates settings for a ResNet model +#' +#' @details +#' Model architecture from by https://arxiv.org/abs/2106.11959 +#' +#' +#' @param numLayers Number of layers in network, default: 1:16 +#' @param sizeHidden Amount of neurons in each default layer, default: 2^(6:10) (64 to 1024) +#' @param hiddenFactor How much to grow the amount of neurons in each ResLayer, default: 1:4 +#' @param residualDropout How much dropout to apply after last linear layer in ResLayer, default: seq(0, 0.3, 0.05) +#' @param hiddenDropout How much dropout to apply after first linear layer in ResLayer, default: seq(0, 0.3, 0.05) +#' @param normalization Which type of normalization to use. Default: 'Batchnorm' +#' @param activation What kind of activation to use. Default: 'RelU' +#' @param sizeEmbedding Size of embedding layer, default: 2^(6:9) (64 to 512) +#' @param weightDecay Weight decay to apply, default: c(1e-6, 1e-3) +#' @param learningRate Learning rate to use. default: c(1e-2, 1e-5) +#' @param seed Seed to use for sampling hyperparameter space +#' @param hyperParamSearch Which kind of hyperparameter search to use random sampling or exhaustive grid search. 
default: 'random' +#' @param randomSample How many random samples from hyperparameter space to use +#' @param device Which device to run analysis on, either 'cpu' or 'cuda', default: 'cpu' +#' @param batch_size Size of batch, default: 1024 +#' @param epochs Number of epochs to run, default: 10 +#' +#' @export +setResNet_plp5 <- function( + numLayers = list(1:16), + sizeHidden = list(2^(6:10)), + hiddenFactor = list(1:4), + residualDropout = list(seq(0,0.3,0.05)), + hiddenDropout = list(seq(0,0.3,0.05)), + normalization = list('BatchNorm'), + activation = list('RelU'), + sizeEmbedding = list(2^(6:9)), + weightDecay = list(c(1e-6, 1e-3)), + learningRate = list(c(1e-2,1e-5)), + seed = NULL, + hyperParamSearch = 'random', + randomSample = 100, + device = 'cpu', + batchSize = 1024, + epochs = 10 + ) { + + if (!is.null(seed)) { + seed <- as.integer(sample(1e5, 1)) + } + + paramGrid <- list( + numLayers = numLayers, + sizeHidden = sizeHidden, + hiddenFactor = hiddenFactor, + residualDropout = residualDropout, + hiddenDropout = hiddenDropout, + sizeEmbedding = sizeEmbedding, + weightDecay = weightDecay, + learningRate = learningRate, + seed = list(as.integer(seed[[1]])) + ) + + param <- listCartesian(paramGrid) + + if (hyperParamSearch=='random'){ + param <- param[sample(length(param), randomSample)] + } + + attr(param, 'settings') <- list( + seed = seed[1], + device = device, + batchSize = batchSize, + epochs = epochs, + name = "ResNet", + saveType = 'file' + ) + + results <- list( + fitFunction = 'fitResNet_plp5', + param = param + ) + + class(results) <- 'modelSettings' + + return(results) + +} + +#' fitResNet_plp5 +#' +#' @description +#' fits a ResNet model to data +#' +#' @param population the study population dataframe +#' @param plpData plp data object +#' @param param parameters to use for model +#' @param outcomeId Id of the outcome +#' @param cohortId Id of the cohort +#' @param ... +#' +#' @export +fitResNet_plp5 <- function( + trainData, + param, + search = 'grid', + analysisId, + ... 
+) { + + start <- Sys.time() + + # check covariate data + if(!FeatureExtraction::isCovariateData(trainData$covariateData)){stop("Needs correct covariateData")} + + # get the settings from the param + settings <- attr(param, 'settings') + + if(!is.null(trainData$folds)){ + trainData$labels <- merge(trainData$labels, trainData$fold, by = 'rowId') + } + + mappedData <- PatientLevelPrediction::toSparseM( + plpData = trainData, + map = NULL + ) + + matrixData <- mappedData$dataMatrix + labels <- mappedData$labels + covariateRef <- mappedData$covariateRef + + outLoc <- PatientLevelPrediction:::createTempModelLoc() # export + + cvResult <- do.call( + what = gridCvDeep, + args = list( + matrixData = matrixData, + labels = labels, + seed = settings$seed, + modelName = settings$name, + device = settings$device, + batchSize = settings$batchSize, + epochs = settings$epochs, + modelLocation = outLoc, + paramSearch = param + ) + ) + + hyperSummary <- do.call(rbind, lapply(cvResult$paramGridSearch, function(x) x$hyperSummary)) + + prediction <- cvResult$prediction + + incs <- rep(1, nrow(covariateRef)) + covariateRef$included <- incs + covariateRef$covariateValue <- 0 + + comp <- start - Sys.time() + + result <- list( + model = cvResult$estimator, #file.path(outLoc), + + prediction = prediction, + + settings = list( + plpDataSettings = attr(trainData, "metaData")$plpDataSettings, + covariateSettings = attr(trainData, "metaData")$covariateSettings, + populationSettings = attr(trainData, "metaData")$populationSettings, + featureEngineering = attr(trainData$covariateData, "metaData")$featureEngineering, + tidyCovariates = attr(trainData$covariateData, "metaData")$tidyCovariateDataSettings, + requireDenseMatrix = F, + modelSettings = list( + model = settings$name, + param = param, + finalModelParameters = cvResult$finalParam, + extraSettings = attr(param, 'settings') + ), + splitSettings = attr(trainData, "metaData")$splitSettings, + sampleSettings = attr(trainData, "metaData")$sampleSettings + ), + + trainDetails = list( + analysisId = analysisId, + cdmDatabaseSchema = attr(trainData, "metaData")$cdmDatabaseSchema, + outcomeId = attr(trainData, "metaData")$outcomeId, + cohortId = attr(trainData, "metaData")$cohortId, + attrition = attr(trainData, "metaData")$attrition, + trainingTime = comp, + trainingDate = Sys.Date(), + hyperParamSearch = hyperSummary + ), + + covariateImportance = covariateRef + ) + + class(result) <- "plpModel" + attr(result, "predictionFunction") <- "predictDeepEstimator" + attr(result, "modelType") <- "binary" + attr(result, "saveType") <- attr(param, 'saveType') + + return(result) +} + +#' predictDeepEstimator +#' +#' @description +#' the prediction function for the binary classification deep learning models +#' +#' @param plpModel the plpModel +#' @param data plp data object or a torch dataset +#' @param cohort a data.frame with the rowIds of the people to predict risk for +#' +#' @export +predictDeepEstimator <- function( + plpModel, + data, + cohort +){ + + if(!'plpModel' %in% class(plpModel)){ + plpModel <- list(model = plpModel) + attr(plpModel, 'modelType') <- 'binary' + } + + if("plpData" %in% class(data)){ + + dataMat <- PatientLevelPrediction::toSparseM( + plpData = data, + cohort = cohort, + map = plpModel$covariateImportance %>% + dplyr::select(.data$columnId, .data$covariateId) + ) + + data <- Dataset_plp5(dataMat$dataMatrix) # add numeric details.. 
+ } + + # get predictions + prediction <- cohort + prediction$value <- plpModel$model$predictProba(data) + + attr(prediction, "metaData")$modelType <- attr(plpModel, 'modelType') + + return(prediction) +} + + +gridCvDeep <- function( + matrixData, + labels, + seed, + modelName, + device, + batchSize, + epochs, + modelLocation, + paramSearch +){ + + + ParallelLogger::logInfo(paste0("Rnning CV for ",modelName," model")) + + ########################################################################### + + + n_features <- ncol(matrixData) + + gridSearchPredictons <- list() + length(gridSearchPredictons) <- length(paramSearch) + + for(gridId in 1:length(paramSearch)){ + + # get the params + modelParamNames <- c("numLayers", "sizeHidden", "hiddenFactor", + "residualDropout", "hiddenDropout", "sizeEmbedding") + modelParams <- paramSearch[[gridId]][modelParamNames] + modelParams$n_features <- n_features + + fitParams <- paramSearch[[gridId]][c("weightDecay", "learningRate")] + fitParams$epochs <- epochs + fitParams$batchSize <- batchSize + + + # initiate prediction + prediction <- c() + + fold <- labels$index + ParallelLogger::logInfo(paste0('Max fold: ', max(fold))) + + for( i in 1:max(fold)){ + + ParallelLogger::logInfo(paste0('Fold ',i)) + trainDataset <- Dataset_plp5( + matrixData[fold != i,], + labels$outcomeCount[fold != i] + ) + testDataset <- Dataset_plp5( + matrixData[fold == i,], + labels$outcomeCount[fold == i], + trainDataset$getNumericalIndex + ) + + fitParams['posWeight'] <- trainDataset$posWeight + + estimator <- Estimator$new( + baseModel = ResNet, + modelParameters = modelParams, + fitParameters = fitParams, + device = device + ) + + estimator$fit( + trainDataset, + testDataset + ) + + ParallelLogger::logInfo("Calculating predictions on left out fold set...") + + prediction <- rbind( + prediction, + predictDeepEstimator( + plpModel = estimator, + data = testDataset, + cohort = labels[fold == i,] + ) + ) + + } + + gridSearchPredictons[[gridId]] <- list( + prediction = prediction, + param = paramSearch[[gridId]] + ) + } + + # get best para (this could be modified to enable any metric instead of AUC, just need metric input in function) + + paramGridSearch <- lapply(gridSearchPredictons, function(x){do.call(computeGridPerformance, x)}) # cvAUCmean, cvAUC, param + + optimalParamInd <- which.max(unlist(lapply(paramGridSearch, function(x) x$cvPerformance))) + + finalParam <- paramGridSearch[[optimalParamInd]]$param + + cvPrediction <- gridSearchPredictons[[optimalParamInd]]$prediction + cvPrediction$evaluationType <- 'CV' + + ParallelLogger::logInfo('Training final model using optimal parameters') + + # get the params + modelParamNames <- c("numLayers", "sizeHidden", "hiddenFactor", + "residualDropout", "hiddenDropout", "sizeEmbedding") + modelParams <- finalParam[modelParamNames] + modelParams$n_features <- n_features + fitParams <- finalParam[c("weightDecay", "learningRate")] + fitParams$epochs <- epochs + fitParams$batchSize <- batchSize + fitParams$resultsDir <- modelLocation # remove this? 
+ # create the dir + if(!dir.exists(file.path(modelLocation))){ + dir.create(file.path(modelLocation), recursive = T) + } + + estimator <- Estimator$new( + baseModel = ResNet, + modelParameters = modelParams, + fitParameters = fitParams, + device = device + ) + + trainDataset <- Dataset_plp5( + matrixData, + labels$outcomeCount + ) + + numericalIndex <- trainDataset$getNumericalIndex + + estimator$fitWholeTrainingSet(trainDataset) + + ParallelLogger::logInfo("Calculating predictions on all train data...") + prediction <- predictDeepEstimator( + plpModel = estimator, + data = trainDataset, + cohort = labels + ) + prediction$evaluationType <- 'Train' + + prediction <- rbind( + prediction, + cvPrediction + ) + + # modify prediction + prediction <- prediction %>% + dplyr::select(-.data$rowId, -.data$index) %>% + dplyr::rename(rowId = .data$originalRowId) + + prediction$cohortStartDate <- as.Date(prediction$cohortStartDate, origin = '1970-01-01') + + + # save torch code here + + + return( + list( + estimator = estimator, + prediction = prediction, + finalParam = finalParam, + paramGridSearch = paramGridSearch, + numericalIndex = numericalIndex + ) + ) + +} + + + +ResLayer <- torch::nn_module( + name='ResLayer', + + initialize=function(sizeHidden, resHidden, normalization, + activation, hiddenDropout=NULL, residualDropout=NULL){ + self$norm <- normalization(sizeHidden) + self$linear0 <- torch::nn_linear(sizeHidden, resHidden) + self$linear1 <- torch::nn_linear(resHidden, sizeHidden) + + self$activation <- activation + if (!is.null(hiddenDropout)){ + self$hiddenDropout <- torch::nn_dropout(p=hiddenDropout) + } + if (!is.null(residualDropout)) + { + self$residualDropout <- torch::nn_dropout(p=residualDropout) + } + + self$activation <- activation() + + }, + + forward=function(x) { + z <- x + z <- self$norm(z) + z <- self$linear0(z) + z <- self$activation(z) + if (!is.null(self$hiddenDropout)) { + z <- self$hiddenDropout(z) + } + z <- self$linear1(z) + if (!is.null(self$residualDropout)) { + z <- self$residualDropout(z) + } + x <- z + x + return(x) + } +) + +ResNet <- torch::nn_module( + name='ResNet', + + initialize=function(n_features, sizeEmbedding, sizeHidden, numLayers, + hiddenFactor, activation=torch::nn_relu, + normalization=torch::nn_batch_norm1d, hiddenDropout=NULL, + residualDropout=NULL, d_out=1) { + # n_features - 1 because only binary features are embedded (not Age) + # ages is concatenated with the embedding output + # TODO need to extend to support other numerical features + self$embedding <- torch::nn_linear(n_features - 1, sizeEmbedding, bias=F) + self$first_layer <- torch::nn_linear(sizeEmbedding + 1, sizeHidden) + + resHidden <- sizeHidden * hiddenFactor + + self$layers <- torch::nn_module_list(lapply(1:numLayers, + function (x) ResLayer(sizeHidden, resHidden, + normalization, activation, + hiddenDropout, + residualDropout))) + self$lastNorm <- normalization(sizeHidden) + self$head <- torch::nn_linear(sizeHidden, d_out) + + self$lastAct <- activation() + + }, + + forward=function(x_num, x_cat) { + x_cat <- self$embedding(x_cat) + x <- torch::torch_cat(list(x_cat, x_num), dim=2L) + x <- self$first_layer(x) + + for (i in 1:length(self$layers)) { + x <- self$layers[[i]](x) + } + x <- self$lastNorm(x) + x <- self$lastAct(x) + x <- self$head(x) + x <- x$squeeze(-1) + return(x) + } +) + + + + + +listCartesian <- function(allList){ + + sizes <- lapply(allList, function(x) 1:length(x)) + combinations <- expand.grid(sizes) + + result <- list() + length(result) <- nrow(combinations) + + 
for(i in 1:nrow(combinations)){ + tempList <- list() + for(j in 1:ncol(combinations)){ + tempList <- c(tempList, list(allList[[j]][[combinations[i,j]]])) + } + names(tempList) <- names(allList) + result[[i]] <- tempList + } + + return(result) +} + + +# export this in PLP +computeGridPerformance <- PatientLevelPrediction:::computeGridPerformance + + diff --git a/extras/example_plp5.R b/extras/example_plp5.R new file mode 100644 index 0000000..0e65ecb --- /dev/null +++ b/extras/example_plp5.R @@ -0,0 +1,142 @@ +# testing code (requires sequential branch of FeatureExtraction): +rm(list = ls()) +library(FeatureExtraction) +library(PatientLevelPrediction) +library(DeepPatientLevelPrediction) +connectionDetails <- Eunomia::getEunomiaConnectionDetails() +Eunomia::createCohorts(connectionDetails) + +temp <- F + +covSet <- createCovariateSettings(useDemographicsGender = T, + useDemographicsAge = T, + useDemographicsRace = T, + useDemographicsEthnicity = T, + useDemographicsAgeGroup = T, + useConditionGroupEraLongTerm = T, + useDrugEraStartLongTerm = T, + endDays = -1 +) + +if(temp){ +covSetT <- createTemporalSequenceCovariateSettings(useDemographicsGender = T, + useDemographicsAge = T, + useDemographicsRace = T, + useDemographicsEthnicity = T, + useDemographicsAgeGroup = T, + useConditionEraGroupStart = T, + useDrugEraStart = T, + timePart = 'month', + timeInterval = 1, + sequenceEndDay = -1, + sequenceStartDay = -365*5) +} + + +databaseDetails <- PatientLevelPrediction::createDatabaseDetails( + connectionDetails = connectionDetails, + cdmDatabaseSchema = "main", + cohortDatabaseSchema = "main", + cohortTable = "cohort", + cohortId = 4, + outcomeIds = 3, + outcomeDatabaseSchema = "main", + outcomeTable = "cohort", + cdmDatabaseName = 'eunomia' +) + +restrictPlpDataSettings <- PatientLevelPrediction::createRestrictPlpDataSettings( + firstExposureOnly = T, + washoutPeriod = 365 +) + +plpData <- PatientLevelPrediction::getPlpData( + databaseDetails = databaseDetails, + restrictPlpDataSettings = restrictPlpDataSettings, + covariateSettings = covSet +) + +if(temp){ + plpDataT <- PatientLevelPrediction::getPlpData( + databaseDetails = databaseDetails, + restrictPlpDataSettings = restrictPlpDataSettings, + covariateSettings = covSetT + ) +} + + +populationSet <- PatientLevelPrediction::createStudyPopulationSettings( + requireTimeAtRisk = F, + riskWindowStart = 1, + riskWindowEnd = 365 + ) + +# code to train models +deepset <- setDeepNNTorch(units=list(c(128, 64), 128), layer_dropout=c(0.2), + lr =c(1e-4), decay=c(1e-5), outcome_weight = c(1.0), batch_size = c(100), + epochs= c(5), seed=NULL ) + + +#debug(fitDeepNNTorch) +# res <- runPlp(population = population, +# plpData = plpData, +# nfold = 3, +# modelSettings = deepset, +# savePlpData = F, +# savePlpResult = F, +# savePlpPlots = F, +# saveEvaluation = F) +# + + +resSet <- setResNet_plp5( + numLayers = list(5), + sizeHidden = list(256), + hiddenFactor = list(2), + residualDropout = list(0.1), + hiddenDropout = list(0.1), + normalization = list('BatchNorm'), + activation = list('RelU'), + sizeEmbedding = list(64), + weightDecay = list(1e-6), + learningRate = list(3e-4), + seed = 42, + hyperParamSearch = 'random', + randomSample = 1, + #device='cuda:0', + batchSize = 128, + epochs = 10 + ) + + +res2 <- runPlp( + plpData = plpData, + outcomeId = 3, + modelSettings = resSet, + analysisId = 'ResNet', + analysisName = 'Testing Deep Learning', + populationSettings = populationSet, + splitSettings = PatientLevelPrediction::createDefaultSplitSetting(), + 
sampleSettings = PatientLevelPrediction::createSampleSettings(), # none + featureEngineeringSettings = PatientLevelPrediction::createFeatureEngineeringSettings(), # none + preprocessSettings = PatientLevelPrediction::createPreprocessSettings(), + executeSettings = PatientLevelPrediction::createExecuteSettings( + runSplitData = T, + runSampleData = F, + runfeatureEngineering = F, + runPreprocessData = T, + runModelDevelopment = T, + runCovariateSummary = F + ), + saveDirectory = 'D:/testing/Deep' + ) + + +##predict.customLibrary(libraryName, predictionFunction, inputList){ +## libraryName <- 'PatientLevelPrediction' +## predictionFunction <- "createStudyPopulation" +## predictFun <- get(predictionFunction, envir = rlang::search_envs()[grep(paste0('package:', libraryName), search())][[1]]) +## +## prediction <- do.call(predictFun, inputList) +## return(prediction) +##} diff --git a/man/fitResNet_plp5.Rd b/man/fitResNet_plp5.Rd new file mode 100644 index 0000000..d18af69 --- /dev/null +++ b/man/fitResNet_plp5.Rd @@ -0,0 +1,24 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/ResNet_plp5.R +\name{fitResNet_plp5} +\alias{fitResNet_plp5} +\title{fitResNet_plp5} +\usage{ +fitResNet_plp5(trainData, param, search = "grid", analysisId, ...) +} +\arguments{ +\item{param}{parameters to use for model} + +\item{...}{} + +\item{population}{the study population dataframe} + +\item{plpData}{plp data object} + +\item{outcomeId}{Id of the outcome} + +\item{cohortId}{Id of the cohort} +} +\description{ +fits a ResNet model to data +} diff --git a/man/predict.deepEstimator.Rd b/man/predict.deepEstimator.Rd deleted file mode 100644 index f5600f8..0000000 --- a/man/predict.deepEstimator.Rd +++ /dev/null @@ -1,11 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/Predict.R -\name{predict.deepEstimator} -\alias{predict.deepEstimator} -\title{predict.deepEstimator} -\usage{ -predict.deepEstimator(x) -} -\description{ -prediction function for models using estimator class -} diff --git a/man/predictDeepEstimator.Rd b/man/predictDeepEstimator.Rd new file mode 100644 index 0000000..4e8667e --- /dev/null +++ b/man/predictDeepEstimator.Rd @@ -0,0 +1,18 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/ResNet_plp5.R +\name{predictDeepEstimator} +\alias{predictDeepEstimator} +\title{predictDeepEstimator} +\usage{ +predictDeepEstimator(plpModel, data, cohort) +} +\arguments{ +\item{plpModel}{the plpModel} + +\item{data}{plp data object or a torch dataset} + +\item{cohort}{a data.frame with the rowIds of the people to predict risk for} +} +\description{ +the prediction function for the binary classification deep learning models +} diff --git a/man/predict_deepEstimator.Rd b/man/predict_deepEstimator.Rd index 1ddfbd2..bbacc78 100644 --- a/man/predict_deepEstimator.Rd +++ b/man/predict_deepEstimator.Rd @@ -4,7 +4,14 @@ \alias{predict_deepEstimator} \title{predict_deepEstimator} \usage{ -predict_deepEstimator(x) +predict_deepEstimator(plpModel, population, plpData) +} +\arguments{ +\item{plpModel}{The plpModel object} + +\item{population}{Population dataframe} + +\item{plpData}{plpData object} } \description{ prediction function for models using estimator class diff --git a/man/setResNet.Rd b/man/setResNet.Rd new file mode 100644 index 0000000..35cb424 --- /dev/null +++ b/man/setResNet.Rd @@ -0,0 +1,64 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/ResNet.R +\name{setResNet} 
+\alias{setResNet} +\title{setResNet} +\usage{ +setResNet( + numLayers = 1:16, + sizeHidden = 2^(6:10), + hiddenFactor = 1:4, + residualDropout = seq(0, 0.3, 0.05), + hiddenDropout = seq(0, 0.3, 0.05), + normalization = "BatchNorm", + activation = "RelU", + sizeEmbedding = 2^(6:9), + weightDecay = c(1e-06, 0.001), + learningRate = c(0.01, 1e-05), + seed = NULL, + hyperParamSearch = "random", + randomSample = 100, + device = "cpu", + batchSize = 1024, + epochs = 10 +) +} +\arguments{ +\item{numLayers}{Number of layers in network, default: 1:16} + +\item{sizeHidden}{Amount of neurons in each default layer, default: 2^(6:10) (64 to 1024)} + +\item{hiddenFactor}{How much to grow the amount of neurons in each ResLayer, default: 1:4} + +\item{residualDropout}{How much dropout to apply after last linear layer in ResLayer, default: seq(0, 0.3, 0.05)} + +\item{hiddenDropout}{How much dropout to apply after first linear layer in ResLayer, default: seq(0, 0.3, 0.05)} + +\item{normalization}{Which type of normalization to use. Default: 'Batchnorm'} + +\item{activation}{What kind of activation to use. Default: 'RelU'} + +\item{sizeEmbedding}{Size of embedding layer, default: 2^(6:9) (64 to 512)} + +\item{weightDecay}{Weight decay to apply, default: c(1e-6, 1e-3)} + +\item{learningRate}{Learning rate to use. default: c(1e-2, 1e-5)} + +\item{seed}{Seed to use for sampling hyperparameter space} + +\item{hyperParamSearch}{Which kind of hyperparameter search to use random sampling or exhaustive grid search. default: 'random'} + +\item{randomSample}{How many random samples from hyperparameter space to use} + +\item{device}{Which device to run analysis on, either 'cpu' or 'cuda', default: 'cpu'} + +\item{epochs}{Number of epochs to run, default: 10} + +\item{batch_size}{Size of batch, default: 1024} +} +\description{ +Creates settings for a ResNet model +} +\details{ +Model architecture from by https://arxiv.org/abs/2106.11959 +} diff --git a/man/setResNet_plp5.Rd b/man/setResNet_plp5.Rd new file mode 100644 index 0000000..a916b75 --- /dev/null +++ b/man/setResNet_plp5.Rd @@ -0,0 +1,64 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/ResNet_plp5.R +\name{setResNet_plp5} +\alias{setResNet_plp5} +\title{setResNet_plp5} +\usage{ +setResNet_plp5( + numLayers = list(1:16), + sizeHidden = list(2^(6:10)), + hiddenFactor = list(1:4), + residualDropout = list(seq(0, 0.3, 0.05)), + hiddenDropout = list(seq(0, 0.3, 0.05)), + normalization = list("BatchNorm"), + activation = list("RelU"), + sizeEmbedding = list(2^(6:9)), + weightDecay = list(c(1e-06, 0.001)), + learningRate = list(c(0.01, 1e-05)), + seed = NULL, + hyperParamSearch = "random", + randomSample = 100, + device = "cpu", + batchSize = 1024, + epochs = 10 +) +} +\arguments{ +\item{numLayers}{Number of layers in network, default: 1:16} + +\item{sizeHidden}{Amount of neurons in each default layer, default: 2^(6:10) (64 to 1024)} + +\item{hiddenFactor}{How much to grow the amount of neurons in each ResLayer, default: 1:4} + +\item{residualDropout}{How much dropout to apply after last linear layer in ResLayer, default: seq(0, 0.3, 0.05)} + +\item{hiddenDropout}{How much dropout to apply after first linear layer in ResLayer, default: seq(0, 0.3, 0.05)} + +\item{normalization}{Which type of normalization to use. Default: 'Batchnorm'} + +\item{activation}{What kind of activation to use. 
Default: 'RelU'} + +\item{sizeEmbedding}{Size of embedding layer, default: 2^(6:9) (64 to 512)} + +\item{weightDecay}{Weight decay to apply, default: c(1e-6, 1e-3)} + +\item{learningRate}{Learning rate to use. default: c(1e-2, 1e-5)} + +\item{seed}{Seed to use for sampling hyperparameter space} + +\item{hyperParamSearch}{Which kind of hyperparameter search to use random sampling or exhaustive grid search. default: 'random'} + +\item{randomSample}{How many random samples from hyperparameter space to use} + +\item{device}{Which device to run analysis on, either 'cpu' or 'cuda', default: 'cpu'} + +\item{epochs}{Number of epochs to run, default: 10} + +\item{batch_size}{Size of batch, default: 1024} +} +\description{ +Creates settings for a ResNet model +} +\details{ +Model architecture from by https://arxiv.org/abs/2106.11959 +} From f569fb74670e66b39717e57e3f056a3d8ee03998 Mon Sep 17 00:00:00 2001 From: Egill Fridgeirsson Date: Wed, 26 Jan 2022 11:21:25 +0100 Subject: [PATCH 035/140] dataset no longer used indices since train-test split is handled by subsets --- R/Dataset.R | 50 ++++++++++++++++++++++++++------------------------ 1 file changed, 26 insertions(+), 24 deletions(-) diff --git a/R/Dataset.R b/R/Dataset.R index a8d9574..0190879 100644 --- a/R/Dataset.R +++ b/R/Dataset.R @@ -1,38 +1,38 @@ +#' @export Dataset <- torch::dataset( name = 'Dataset', - initialize = function(data, labels, indices, numericalIndex) { + initialize = function(data, labels, numericalIndex) { - self$collate_fn <- sparseCollate + # self$collate_fn <- sparseCollate # add labels - self$target <- torch::torch_tensor(labels[indices]) + self$target <- torch::torch_tensor(labels) # Weight to add in loss function to positive class self$posWeight <- ((self$target==0)$sum()/self$target$sum())$item() # add features - dataCat <- data[indices,-numericalIndex] + dataCat <- data[,-numericalIndex] self$cat <- torch::torch_tensor(as.matrix(dataCat), dtype=torch::torch_float32()) - + + # comment out the sparse matrix for now, is really slow need to find # a better solution for converting it to dense before feeding to model # matrix <- as(dataCat, 'dgTMatrix') # convert to triplet sparse format # sparseIndices <- torch::torch_tensor(matrix(c(matrix@i + 1, matrix@j + 1), ncol=2), dtype = torch::torch_long()) # values <- torch::torch_tensor(matrix(c(matrix@x)), dtype = torch::torch_float32()) - # self$cat <- torch::torch_sparse_coo_tensor(indices=sparseIndices$t(), - # values=values$squeeze(), + # self$cat <- torch::torch_sparse_coo_tensor(indices=sparseIndices$t(), + # values=values$squeeze(), # dtype=torch::torch_float32())$coalesce() - self$num <- torch::torch_tensor(as.matrix(data[indices,numericalIndex, drop = F]), dtype=torch::torch_float32()) + self$num <- torch::torch_tensor(as.matrix(data[,numericalIndex, drop = F]), dtype=torch::torch_float32()) }, - - .getitem = function(item) { - return(list(cat = self$cat[item], - num = self$num[item,], + .getbatch = function(item) { + return(list(cat = self$cat[item], + num = self$num[item], target = self$target[item])) }, - .length = function() { - self$target$size()[[1]] # shape[1] + length(self$target) # shape[1] } ) @@ -43,21 +43,23 @@ sparseCollate <- function(batch) { if (inherits(elem, "torch_tensor")) { # temporary fix using a tryCatch until torch in R author adds # an is_sparse method or exposes tensor$layout - tryCatch(return(torch::torch_stack(batch,dim = 1)$to_dense()), - error=function(e) return(torch::torch_stack(batch, dim = 1))) - - # if (Reduce("*", elem$size()) > 
elem$numel()) { - # return(torch::torch_stack(batch,dim = 1)$to_dense()) - # } - # - # return(torch::torch_stack(batch, dim = 1)) + return (torch::torch_stack(batch, dim = 1)) + # tryCatch(return(torch::torch_stack(batch,dim = 1)$to_dense()), + # error=function(e) return(torch::torch_stack(batch, dim = 1))) } else if (is.list(elem)) { - # preserve names of elements named_seq <- seq_along(elem) names(named_seq) <- names(elem) lapply(named_seq, function(i) {sparseCollate(lapply(batch, function(x) x[[i]]))}) } -} \ No newline at end of file +} + + + + + + + + From 55f6ec82e965ea3ad9b18747a448be5aaf9c8d58 Mon Sep 17 00:00:00 2001 From: Egill Fridgeirsson Date: Thu, 27 Jan 2022 11:55:58 +0100 Subject: [PATCH 036/140] add earlystop --- R/Estimator.R | 129 +++++++++++++++++++++----------------------------- 1 file changed, 54 insertions(+), 75 deletions(-) diff --git a/R/Estimator.R b/R/Estimator.R index 6e66200..ac2544f 100644 --- a/R/Estimator.R +++ b/R/Estimator.R @@ -1,33 +1,19 @@ -Estimator <- R6::R6Class('Estimator', +#' @export +Estimator <- R6::R6Class( + classname = 'Estimator', + lock_objects = FALSE, public = list( - device = NULL, - mode = NULL, - modelParameters = NULL, - epochs = NULL, - learningRate = NULL, - l2Norm = NULL, - batchSize = NULL, - resultsDir = NULL, - prefix = NULL, - previousEpochs = NULL, - optimizer = NULL, - criterion = NULL, - bestScore = NULL, - bestEpoch = NULL, - model = NULL, - earlyStopper = NULL, - posWeight = NULL, initialize = function(baseModel, modelParameters, fitParameters, optimizer=torch::optim_adam, criterion=torch::nn_bce_with_logits_loss, device='cpu', - patience=3){ + patience=NULL){ self$device <- device self$model <- do.call(baseModel, modelParameters) self$modelParameters <- modelParameters - + self$epochs <- self$itemOrDefaults(fitParameters, 'epochs', 10) self$learningRate <- self$itemOrDefaults(fitParameters,'learningRate', 1e-3) self$l2Norm <- self$itemOrDefaults(fitParameters, 'weightDecay', 1e-5) @@ -49,7 +35,12 @@ Estimator <- R6::R6Class('Estimator', weight_decay=self$l2Norm) self$criterion <- criterion(torch::torch_tensor(self$posWeight, device=self$device)) - self$earlyStopper <- EarlyStopping$new(patience=patience) + + if (!is.null(patience)) { + self$earlyStopper <- EarlyStopping$new(patience=patience) + } else { + self$earlyStopper <- FALSE + } self$bestScore <- NULL @@ -68,50 +59,58 @@ Estimator <- R6::R6Class('Estimator', batch_size = self$batchSize, shuffle = F) - modelStateDict <- list() - epoch <- list() + # modelStateDict <- list() + # epoch <- list() + times <- list() - lr <- c() + # lr <- c() for (epochI in 1:self$epochs) { # fit the model + startTime <- Sys.time() self$fitEpoch(dataloader) + endTime <- Sys.time() # predict on test data scores <- self$score(testDataloader) - + delta <- endTime - startTime currentEpoch <- epochI + self$previousEpochs - lr <- c(lr, self$optimizer$param_groups[[1]]$lr) - ParallelLogger::logInfo('Epochs: ', currentEpoch, ' | Val AUC: ', - round(scores$auc,3), ' | Val Loss: ', - round(scores$loss,3), ' | LR: ', - self$optimizer$param_groups[[1]]$lr) - valLosses <- c(valLosses, scores$loss) - valAUCs <- c(valAUCs, scores$auc) - self$earlyStopper$call(scores$auc) - if (self$earlyStopper$improved) { - # here it saves the results to lists rather than files - modelStateDict[[epochI]] <- self$model$state_dict() - epoch[[epochI]] <- currentEpoch - } - if (self$earlyStopper$earlyStop) { - ParallelLogger::logInfo('Early stopping, validation AUC stopped improving') - self$finishFit(valAUCs, 
modelStateDict, valLosses, epoch) - return(invisible(self)) - } + # lr <- c(lr, self$optimizer$param_groups[[1]]$lr) + ParallelLogger::logInfo('Epochs: ', currentEpoch, + ' | Val AUC: ', round(scores$auc,3), + ' | Val Loss: ', round(scores$loss,3), + ' | Time: ', round(delta, 3), ' ', + units(delta)) + + # valLosses <- c(valLosses, scores$loss) + # valAUCs <- c(valAUCs, scores$auc) + times <- c(times, round(delta, 3)) + # if (self$earlyStopper){ + # self$earlyStopper$call(scores$auc) + # if (self$earlyStopper$improved) { + # # here it saves the results to lists rather than files + # modelStateDict[[epochI]] <- self$model$state_dict() + # epoch[[epochI]] <- currentEpoch + # } + # if (self$earlyStopper$earlyStop) { + # ParallelLogger::logInfo('Early stopping, validation AUC stopped improving') + # ParallelLogger::logInfo('Average time per epoch was: ', mean(as.numeric(times)), ' ' , units(delta)) + # self$finishFit(valAUCs, modelStateDict, valLosses, epoch) + # return(invisible(self)) + # } + # } else { + # modelStateDict[[epochI]] <- self$model$state_dict() + # epoch[[epochI]] <- currentEpoch + # } } - self$finishFit(valAUCs, modelStateDict, valLosses, epoch) + ParallelLogger::logInfo('Average time per epoch was: ', mean(as.numeric(times)), ' ' , units(delta)) + # self$finishFit(valAUCs, modelStateDict, valLosses, epoch) invisible(self) }, # trains for one epoch fitEpoch = function(dataloader){ - t <- Sys.time() - batch_loss <- 0 - i <- 1 - self$model$train() - coro::loop(for (b in dataloader) { self$optimizer$zero_grad() cat <- b[[1]]$to(device=self$device) @@ -121,20 +120,7 @@ Estimator <- R6::R6Class('Estimator', loss <- self$criterion(out, target) loss$backward() self$optimizer$step() - - batch_loss = batch_loss + loss - - if (i %% 10 == 0) { - elapsed_time <- Sys.time() - t - ParallelLogger::logInfo('Loss: ', round((batch_loss/1)$item(), 3), ' | Time: ', - round(elapsed_time,digits = 2), units(elapsed_time)) - t <- Sys.time() - batch_loss = 0 - } - - - i <- i + 1 - }) + }) }, @@ -163,8 +149,7 @@ Estimator <- R6::R6Class('Estimator', fitWholeTrainingSet = function(dataset) { dataloader <- torch::dataloader(dataset, batch_size=self$batchSize, - shuffle=TRUE, - drop_last=FALSE) + shuffle=TRUE) for (epoch in 1:self$epochs) { self$fitEpoch(dataloader) } @@ -208,8 +193,7 @@ Estimator <- R6::R6Class('Estimator', predictProba = function(dataset) { dataloader <- torch::dataloader(dataset, batch_size = self$batchSize, - shuffle=F, - collate_fn = dataset$collate_fn) + shuffle=F) torch::with_no_grad({ predictions <- c() self$model$eval() @@ -252,15 +236,10 @@ Estimator <- R6::R6Class('Estimator', ) ) -EarlyStopping <- R6::R6Class('EarlyStopping', +EarlyStopping <- R6::R6Class( + classname = 'EarlyStopping', + lock_objects = FALSE, public = list( - patience = NULL, - delta = NULL, - counter = NULL, - bestScore = NULL, - earlyStop = NULL, - improved = NULL, - previousScore = NULL, initialize = function(patience=3, delta=0) { self$patience <- patience self$counter <- 0 From 33a9e42e6c380796362e654fd49fc41d9e1af86e Mon Sep 17 00:00:00 2001 From: Egill Fridgeirsson Date: Thu, 27 Jan 2022 11:56:16 +0100 Subject: [PATCH 037/140] add early stop --- R/Estimator.R | 52 ++++++++++++++++++++++----------------------------- 1 file changed, 22 insertions(+), 30 deletions(-) diff --git a/R/Estimator.R b/R/Estimator.R index ac2544f..84aafbc 100644 --- a/R/Estimator.R +++ b/R/Estimator.R @@ -20,11 +20,6 @@ Estimator <- R6::R6Class( self$batchSize <- self$itemOrDefaults(fitParameters, 'batchSize', 1024) 
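      # itemOrDefaults is assumed to return fitParameters[[name]] when that
      # entry is present and the supplied default otherwise, so every fit
      # parameter read here has a sensible fallback value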
self$posWeight <- self$itemOrDefaults(fitParameters, 'posWeight', 1) - # don´t save checkpoints unless you get a resultDir - self$resultsDir <- self$itemOrDefaults(fitParameters, 'resultsDir', NULL) - if (!is.null(self$resultsDir)) { - dir.create(self$resultsDir, recursive=TRUE, showWarnings=FALSE) - } self$prefix <- self$itemOrDefaults(fitParameters, 'prefix', self$model$name) self$previousEpochs <- self$itemOrDefaults(fitParameters, 'previousEpochs', 0) @@ -42,7 +37,6 @@ Estimator <- R6::R6Class( self$earlyStopper <- FALSE } - self$bestScore <- NULL self$bestEpoch <- NULL }, @@ -59,11 +53,10 @@ Estimator <- R6::R6Class( batch_size = self$batchSize, shuffle = F) - # modelStateDict <- list() - # epoch <- list() + modelStateDict <- list() + epoch <- list() times <- list() - # lr <- c() for (epochI in 1:self$epochs) { # fit the model @@ -75,36 +68,35 @@ Estimator <- R6::R6Class( scores <- self$score(testDataloader) delta <- endTime - startTime currentEpoch <- epochI + self$previousEpochs - # lr <- c(lr, self$optimizer$param_groups[[1]]$lr) ParallelLogger::logInfo('Epochs: ', currentEpoch, ' | Val AUC: ', round(scores$auc,3), ' | Val Loss: ', round(scores$loss,3), ' | Time: ', round(delta, 3), ' ', units(delta)) - # valLosses <- c(valLosses, scores$loss) - # valAUCs <- c(valAUCs, scores$auc) + valLosses <- c(valLosses, scores$loss) + valAUCs <- c(valAUCs, scores$auc) times <- c(times, round(delta, 3)) - # if (self$earlyStopper){ - # self$earlyStopper$call(scores$auc) - # if (self$earlyStopper$improved) { - # # here it saves the results to lists rather than files - # modelStateDict[[epochI]] <- self$model$state_dict() - # epoch[[epochI]] <- currentEpoch - # } - # if (self$earlyStopper$earlyStop) { - # ParallelLogger::logInfo('Early stopping, validation AUC stopped improving') - # ParallelLogger::logInfo('Average time per epoch was: ', mean(as.numeric(times)), ' ' , units(delta)) - # self$finishFit(valAUCs, modelStateDict, valLosses, epoch) - # return(invisible(self)) - # } - # } else { - # modelStateDict[[epochI]] <- self$model$state_dict() - # epoch[[epochI]] <- currentEpoch - # } + if (self$earlyStopper){ + self$earlyStopper$call(scores$auc) + if (self$earlyStopper$improved) { + # here it saves the results to lists rather than files + modelStateDict[[epochI]] <- self$model$state_dict() + epoch[[epochI]] <- currentEpoch + } + if (self$earlyStopper$earlyStop) { + ParallelLogger::logInfo('Early stopping, validation AUC stopped improving') + ParallelLogger::logInfo('Average time per epoch was: ', mean(as.numeric(times)), ' ' , units(delta)) + self$finishFit(valAUCs, modelStateDict, valLosses, epoch) + return(invisible(self)) + } + } else { + modelStateDict[[epochI]] <- self$model$state_dict() + epoch[[epochI]] <- currentEpoch + } } ParallelLogger::logInfo('Average time per epoch was: ', mean(as.numeric(times)), ' ' , units(delta)) - # self$finishFit(valAUCs, modelStateDict, valLosses, epoch) + self$finishFit(valAUCs, modelStateDict, valLosses, epoch) invisible(self) }, From c40e903d9587593ed83e6215e40885ed8c2ba847 Mon Sep 17 00:00:00 2001 From: Egill Fridgeirsson Date: Thu, 27 Jan 2022 11:57:06 +0100 Subject: [PATCH 038/140] use subset instead of dataset --- R/Predict.R | 5 +++-- R/ResNet.R | 39 +++++++++++++++++---------------------- 2 files changed, 20 insertions(+), 24 deletions(-) diff --git a/R/Predict.R b/R/Predict.R index 625e150..e703627 100644 --- a/R/Predict.R +++ b/R/Predict.R @@ -252,8 +252,9 @@ predict_deepEstimator <- function(plpModel, population, plpData, ...) 
{ sparseMatrix <- toSparseMDeep(plpData, population) indices <- population$rowId numericalIndex <- sparseMatrix$map$newCovariateId[sparseMatrix$map$oldCovariateId==1002] - dataset <- Dataset(sparseMatrix$data, population$outcomeCount,indices=indices, - numericalIndex = numericalIndex) + dataset <- torch::dataset_subset(Dataset(sparseMatrix$data, population$outcomeCount, + numericalIndex = numericalIndex), indices) + prediction <- plpModel$model$predictProba(dataset) prediction <- population %>% mutate(value=prediction) return(prediction) diff --git a/R/ResNet.R b/R/ResNet.R index ffdab97..3e21307 100644 --- a/R/ResNet.R +++ b/R/ResNet.R @@ -110,8 +110,8 @@ fitResNet <- function(population, plpData, param, outcomeId, cohortId, ...) { # now train the final model bestInd <- which.max(abs(unlist(scores)-0.5))[1] param.best <- param[bestInd,] - uniqueEpochs <- unique(hyperSummary$bestEpochs[[bestInd]]) - param.best$epochs <- uniqueEpochs[which.max(tabulate(match(hyperSummary$bestEpochs[[bestInd]], uniqueEpochs)))] + param.best$epochs <- floor(mean(hyperSummary$bestEpochs[[bestInd]]) + 0.5) + outLoc <- tempfile(pattern = 'resNet') outLoc <- file.path(outLoc, paste0('finalModel')) param.best$resultsDir <- outLoc @@ -174,19 +174,15 @@ trainResNet <- function(sparseMatrix, population,...,train=T) { ParallelLogger::logInfo(paste('Training deep neural network using Torch with ',length(index_vect),' fold CV')) foldAuc <- c() foldEpochs <- c() - for(index in 1:length(index_vect)){ + dataset <- Dataset(sparseMatrix$data, population$outcomeCount, + numericalIndex = numericalIndex) + fitParams['posWeight'] <- dataset$posWeight + for(index in 1:length(index_vect)) { ParallelLogger::logInfo(paste('Fold ',index, ' -- with ', sum(population$indexes!=index & population$indexes > 0),'train rows')) testIndices <- population$rowId[population$indexes==index] trainIndices <- population$rowId[(population$indexes!=index) & (population$indexes > 0)] - trainDataset <- Dataset(sparseMatrix$data[population$rowId,], - population$outcomeCount, - indices= population$rowId%in%trainIndices, - numericalIndex=numericalIndex) - testDataset <- Dataset(sparseMatrix$data[population$rowId,], - population$outcomeCount, - indices = population$rowId%in%testIndices, - numericalIndex = numericalIndex) - fitParams['posWeight'] <- trainDataset$posWeight + trainDataset <- torch::dataset_subset(dataset, trainIndices) + testDataset <- torch::dataset_subset(dataset, testIndices) estimator <- Estimator$new(baseModel=ResNet, modelParameters=modelParam, fitParameters=fitParams, @@ -204,18 +200,18 @@ trainResNet <- function(sparseMatrix, population,...,train=T) { } else { ParallelLogger::logInfo('Training deep neural network using Torch on whole training set') - fitParams$resultsDir <- param$resultsDir - estimator <- Estimator$new(baseModel = ResNet, - modelParameters = modelParam, - fitParameters = fitParams, - device=param$device) - trainIndices <- population$rowId[population$indexes > 0] - trainDataset <- Dataset(sparseMatrix$data[population$rowId,], + dataset <- Dataset(sparseMatrix$data[population$rowId,], population$outcomeCount, - indices=population$rowId%in%trainIndices, numericalIndex=numericalIndex) + trainDataset <- torch::dataset_subset(dataset, trainIndices) + + fitParams['posWeight'] <- trainDataset$posWeight + estimator <- Estimator$new(baseModel = ResNet, + modelParameters = modelParam, + fitParameters = fitParams, + device=param$device) estimator$fitWholeTrainingSet(trainDataset) @@ -241,14 +237,12 @@ trainResNet <- 
function(sparseMatrix, population,...,train=T) { ResLayer <- torch::nn_module( name='ResLayer', - initialize=function(sizeHidden, resHidden, normalization, activation, hiddenDropout=NULL, residualDropout=NULL){ self$norm <- normalization(sizeHidden) self$linear0 <- torch::nn_linear(sizeHidden, resHidden) self$linear1 <- torch::nn_linear(resHidden, sizeHidden) - self$activation <- activation if (!is.null(hiddenDropout)){ self$hiddenDropout <- torch::nn_dropout(p=hiddenDropout) } @@ -278,6 +272,7 @@ ResLayer <- torch::nn_module( } ) +#' @export ResNet <- torch::nn_module( name='ResNet', From 344e5693bc59cfe030b218c14837f5bc91decb3b Mon Sep 17 00:00:00 2001 From: Egill Fridgeirsson Date: Thu, 27 Jan 2022 11:58:04 +0100 Subject: [PATCH 039/140] example --- extras/example.R | 123 +++++++++-------------------------------------- 1 file changed, 23 insertions(+), 100 deletions(-) diff --git a/extras/example.R b/extras/example.R index 5626dc9..fb193de 100644 --- a/extras/example.R +++ b/extras/example.R @@ -3,98 +3,30 @@ rm(list = ls()) library(FeatureExtraction) library(PatientLevelPrediction) library(DeepPatientLevelPrediction) -connectionDetails <- Eunomia::getEunomiaConnectionDetails() -Eunomia::createCohorts(connectionDetails) temp <- F -covSet <- createCovariateSettings(useDemographicsGender = T, - useDemographicsAge = T, - useDemographicsRace = T, - useDemographicsEthnicity = T, - useDemographicsAgeGroup = T, - useConditionGroupEraLongTerm = T, - useDrugEraStartLongTerm = T, - endDays = -1 +data(plpDataSimulationProfile) +sampleSize <- 2000 +plpData <- simulatePlpData( + plpDataSimulationProfile, + n = sampleSize ) - -if(temp){ -covSetT <- createTemporalSequenceCovariateSettings(useDemographicsGender = T, - useDemographicsAge = T, - useDemographicsRace = T, - useDemographicsEthnicity = T, - useDemographicsAgeGroup = T, - useConditionEraGroupStart = T, - useDrugEraStart = T, - timePart = 'month', - timeInterval = 1, - sequenceEndDay = -1, - sequenceStartDay = -365*5) -} - -plpData <- PatientLevelPrediction::getPlpData(connectionDetails = connectionDetails, - cdmDatabaseSchema = "main", - cohortId = 1, - outcomeIds = 3, - cohortDatabaseSchema = "main", - cohortTable = "cohort", - outcomeDatabaseSchema = "main", - outcomeTable = "cohort", - firstExposureOnly = T, - washoutPeriod = 365, - covariateSettings = covSet +population <- createStudyPopulation( + plpData, + outcomeId = 2, + binary = TRUE, + firstExposureOnly = FALSE, + washoutPeriod = 0, + removeSubjectsWithPriorOutcome = FALSE, + priorOutcomeLookback = 99999, + requireTimeAtRisk = FALSE, + minTimeAtRisk = 0, + riskWindowStart = 0, + riskWindowEnd = 365, + verbosity = "INFO" ) -if(temp){ -plpDataT <- PatientLevelPrediction::getPlpData(connectionDetails = connectionDetails, - cdmDatabaseSchema = "main", - cohortId = 1, - outcomeIds = 3, - cohortDatabaseSchema = "main", - cohortTable = "cohort", - outcomeDatabaseSchema = "main", - outcomeTable = "cohort", - firstExposureOnly = T, - washoutPeriod = 365, - covariateSettings = covSetT - ) -} - - -population <- PatientLevelPrediction::createStudyPopulation(plpData = plpData, - outcomeId = 3, - requireTimeAtRisk = F, - riskWindowStart = 1, - riskWindowEnd = 365) - -##sparseMat <- toSparseRTorch(plpData, population, map=NULL, temporal=T) -if(F){ -x <- toSparseMDeep(plpData ,population, - map=NULL, - temporal=F) - -x2 <- toSparseMDeep(plpDataT ,population, - map=NULL, - temporal=T) -} - -# code to train models -deepset <- setDeepNNTorch(units=list(c(128, 64), 128), layer_dropout=c(0.2), - lr 
=c(1e-4), decay=c(1e-5), outcome_weight = c(1.0), batch_size = c(100), - epochs= c(5), seed=NULL ) - - -#debug(fitDeepNNTorch) -# res <- runPlp(population = population, -# plpData = plpData, -# nfold = 3, -# modelSettings = deepset, -# savePlpData = F, -# savePlpResult = F, -# savePlpPlots = F, -# saveEvaluation = F) -# - resSet <- setResNet(numLayers=5, sizeHidden=256, hiddenFactor=2, residualDropout=c(0.1), @@ -104,24 +36,15 @@ resSet <- setResNet(numLayers=5, sizeHidden=256, hiddenFactor=2, learningRate=c(3e-4), seed=42, hyperParamSearch='random', randomSample=1, device='cuda:0', - batchSize=128, - epochs=10) - + batchSize=512, + epochs=1) +debug(trainResNet) res2 <- runPlp(population = population, plpData = plpData, nfold = 3, - modelSettings = resSet, + modelSettings = resSet, savePlpData = F, savePlpResult = F, savePlpPlots = F, - saveEvaluation = F) - -##predict.customLibrary(libraryName, predictionFunction, inputList){ -## libraryName <- 'PatientLevelPrediction' -## predictionFunction <- "createStudyPopulation" -## predictFun <- get(predictionFunction, envir = rlang::search_envs()[grep(paste0('package:', libraryName), search())][[1]]) -## -## prediction <- do.call(predictFun, inputList) -## return(prediction) -##} + saveEvaluation = F) \ No newline at end of file From 70aae25c2e5a3466552516560ede01faf71cd0e2 Mon Sep 17 00:00:00 2001 From: Egill Fridgeirsson Date: Thu, 27 Jan 2022 12:01:41 +0100 Subject: [PATCH 040/140] deleted man files --- man/predict.deepEstimator.Rd | 11 ----------- man/predict_deepEstimator.Rd | 11 ----------- 2 files changed, 22 deletions(-) delete mode 100644 man/predict.deepEstimator.Rd delete mode 100644 man/predict_deepEstimator.Rd diff --git a/man/predict.deepEstimator.Rd b/man/predict.deepEstimator.Rd deleted file mode 100644 index f5600f8..0000000 --- a/man/predict.deepEstimator.Rd +++ /dev/null @@ -1,11 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/Predict.R -\name{predict.deepEstimator} -\alias{predict.deepEstimator} -\title{predict.deepEstimator} -\usage{ -predict.deepEstimator(x) -} -\description{ -prediction function for models using estimator class -} diff --git a/man/predict_deepEstimator.Rd b/man/predict_deepEstimator.Rd deleted file mode 100644 index 1ddfbd2..0000000 --- a/man/predict_deepEstimator.Rd +++ /dev/null @@ -1,11 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/Predict.R -\name{predict_deepEstimator} -\alias{predict_deepEstimator} -\title{predict_deepEstimator} -\usage{ -predict_deepEstimator(x) -} -\description{ -prediction function for models using estimator class -} From ac54b867034eb5c489a082d8d076ba69c364ae7f Mon Sep 17 00:00:00 2001 From: Egill Fridgeirsson Date: Fri, 4 Feb 2022 17:06:07 +0100 Subject: [PATCH 041/140] Speed improvements dataset --- R/Dataset.R | 15 ++++----------- R/ResNet_plp5.R | 43 +++++++++++++++++++------------------------ 2 files changed, 23 insertions(+), 35 deletions(-) diff --git a/R/Dataset.R b/R/Dataset.R index a82557d..723c87a 100644 --- a/R/Dataset.R +++ b/R/Dataset.R @@ -1,9 +1,7 @@ #' @export Dataset_plp5 <- torch::dataset( name = 'Dataset', - initialize = function(data, labels = NULL, numericalIndex = NULL) { - git # determine numeric if(is.null(numericalIndex)){ colBin <- apply(data, 2, function(x) sum(x==1 | x==0)) @@ -21,7 +19,6 @@ Dataset_plp5 <- torch::dataset( self$target <- torch::torch_tensor(rep(0, nrow(data))) } - # Weight to add in loss function to positive class self$posWeight <- 
((self$target==0)$sum()/self$target$sum())$item() @@ -46,14 +43,10 @@ Dataset_plp5 <- torch::dataset( ) }, - .getitem = function(item) { - return( - list( - cat = self$cat[item], - num = self$num[item,], - target = self$target[item] - ) - ) + .getbatch = function(item) { + return(list(cat = self$cat[item], + num = self$num[item], + target = self$target[item])) }, .length = function() { diff --git a/R/ResNet_plp5.R b/R/ResNet_plp5.R index 94692cd..05fa737 100644 --- a/R/ResNet_plp5.R +++ b/R/ResNet_plp5.R @@ -302,19 +302,15 @@ gridCvDeep <- function( fold <- labels$index ParallelLogger::logInfo(paste0('Max fold: ', max(fold))) + dataset <- Dataset_plp5(matrixData, labels$outcomeCount) + modelParams$cat_features <- dataset$cat$shape[2] + modelParams$num_features <- dataset$num$shape[2] + for( i in 1:max(fold)){ ParallelLogger::logInfo(paste0('Fold ',i)) - trainDataset <- Dataset_plp5( - matrixData[fold != i,], - labels$outcomeCount[fold != i] - ) - testDataset <- Dataset_plp5( - matrixData[fold == i,], - labels$outcomeCount[fold == i], - trainDataset$getNumericalIndex - ) - + trainDataset <- torch::dataset_subset(dataset, indices=which(fold!=i)) + testDataset <- torch::dataset_subset(dataset, indices=which(fold==i)) fitParams['posWeight'] <- trainDataset$posWeight estimator <- Estimator$new( @@ -365,32 +361,34 @@ gridCvDeep <- function( modelParamNames <- c("numLayers", "sizeHidden", "hiddenFactor", "residualDropout", "hiddenDropout", "sizeEmbedding") modelParams <- finalParam[modelParamNames] - modelParams$n_features <- n_features fitParams <- finalParam[c("weightDecay", "learningRate")] fitParams$epochs <- epochs fitParams$batchSize <- batchSize - fitParams$resultsDir <- modelLocation # remove this? # create the dir if(!dir.exists(file.path(modelLocation))){ dir.create(file.path(modelLocation), recursive = T) } + trainDataset <- Dataset_plp5( + matrixData, + labels$outcomeCount + ) + modelParams$cat_features <- trainDataset$cat$shape[2] + modelParams$num_features <- trainDataset$num$shape[2] + + estimator <- Estimator$new( baseModel = ResNet, modelParameters = modelParams, fitParameters = fitParams, device = device - ) - - trainDataset <- Dataset_plp5( - matrixData, - labels$outcomeCount - ) + ) numericalIndex <- trainDataset$getNumericalIndex estimator$fitWholeTrainingSet(trainDataset) + ParallelLogger::logInfo("Calculating predictions on all train data...") prediction <- predictDeepEstimator( plpModel = estimator, @@ -471,15 +469,12 @@ ResLayer <- torch::nn_module( ResNet <- torch::nn_module( name='ResNet', - initialize=function(n_features, sizeEmbedding, sizeHidden, numLayers, + initialize=function(cat_features, num_features, sizeEmbedding, sizeHidden, numLayers, hiddenFactor, activation=torch::nn_relu, normalization=torch::nn_batch_norm1d, hiddenDropout=NULL, residualDropout=NULL, d_out=1) { - # n_features - 1 because only binary features are embedded (not Age) - # ages is concatenated with the embedding output - # TODO need to extend to support other numerical features - self$embedding <- torch::nn_linear(n_features - 1, sizeEmbedding, bias=F) - self$first_layer <- torch::nn_linear(sizeEmbedding + 1, sizeHidden) + self$embedding <- torch::nn_linear(cat_features, sizeEmbedding, bias=F) + self$first_layer <- torch::nn_linear(sizeEmbedding + num_features, sizeHidden) resHidden <- sizeHidden * hiddenFactor From 3aec4772e51f587b4bcb37f43c0619c365227171 Mon Sep 17 00:00:00 2001 From: Egill Fridgeirsson Date: Fri, 4 Feb 2022 17:06:41 +0100 Subject: [PATCH 042/140] save deepEstimatorModel --- 
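Note: this patch persists the fitted estimator with torch::torch_save() as a list holding the model state dict, the model parameters, the fit parameters and the epoch count, and predictDeepEstimator() later rebuilds an Estimator from that file. A minimal sketch of the round trip, assuming a ResNet base model and the DeepEstimatorModel.pt file name used below (the weights still need to be copied back into the rebuilt network, which the loading code is assumed to handle):

    savedPath <- estimator$save(modelLocation, 'DeepEstimatorModel.pt')
    loaded <- torch::torch_load(savedPath, device = 'cpu')
    restored <- Estimator$new(
      baseModel = ResNet,
      modelParameters = loaded$modelParameters,
      fitParameters = loaded$fitParameters,
      device = 'cpu'
    )
    # copy the trained weights into the freshly constructed network
    restored$model$load_state_dict(loaded$modelStateDict)
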
R/Estimator.R | 15 +++++++++++---- R/ResNet_plp5.R | 26 ++++++++++++++++++-------- 2 files changed, 29 insertions(+), 12 deletions(-) diff --git a/R/Estimator.R b/R/Estimator.R index c411369..35565c2 100644 --- a/R/Estimator.R +++ b/R/Estimator.R @@ -13,6 +13,7 @@ Estimator <- R6::R6Class( self$device <- device self$model <- do.call(baseModel, modelParameters) self$modelParameters <- modelParameters + self$fitParameters <- fitParameters self$epochs <- self$itemOrDefaults(fitParameters, 'epochs', 10) self$learningRate <- self$itemOrDefaults(fitParameters,'learningRate', 1e-3) @@ -145,15 +146,21 @@ Estimator <- R6::R6Class( for (epoch in 1:self$epochs) { self$fitEpoch(dataloader) } + + }, + + # save model and those parameters needed to reconstruct it + save = function(path, name) { + savePath <- file.path(path, name) torch::torch_save(list(modelStateDict=self$model$state_dict(), modelParameters=self$modelParameters, fitParameters=self$fitParameters, epoch=self$epochs), - file.path(self$resultsDir, paste0( - self$prefix, '_epochs:', self$epochs) - )) + savePath + ) + return(savePath) - }, + }, # calculates loss and auc after training for one epoch score = function(dataloader){ diff --git a/R/ResNet_plp5.R b/R/ResNet_plp5.R index 05fa737..a7bd562 100644 --- a/R/ResNet_plp5.R +++ b/R/ResNet_plp5.R @@ -212,7 +212,7 @@ fitResNet_plp5 <- function( class(result) <- "plpModel" attr(result, "predictionFunction") <- "predictDeepEstimator" attr(result, "modelType") <- "binary" - attr(result, "saveType") <- attr(param, 'saveType') + attr(result, "saveType") <- attr(param, 'settings')$saveType return(result) } @@ -252,8 +252,21 @@ predictDeepEstimator <- function( # get predictions prediction <- cohort - prediction$value <- plpModel$model$predictProba(data) + if(is.character(plpModel$model)){ + model <- torch::torch_load(file.path(plpModel$model, 'DeepEstimatorModel.pt'), device='cpu') + estimator <- Estimator$new( + baseModel = plpModel$settings$modelSettings$model, + modelParameters = model$modelParameters, + fitParameters = model$fitParameters, + device = plpModel$settings$modelSettings$extraSettings$device + ) + prediction$value <- estimator$predictProba(data) + } else { + prediction$value <- plpModel$model$predictProba(data) + } + + attr(prediction, "metaData")$modelType <- attr(plpModel, 'modelType') return(prediction) @@ -273,13 +286,11 @@ gridCvDeep <- function( ){ - ParallelLogger::logInfo(paste0("Rnning CV for ",modelName," model")) + ParallelLogger::logInfo(paste0("Running CV for ",modelName," model")) ########################################################################### - n_features <- ncol(matrixData) - gridSearchPredictons <- list() length(gridSearchPredictons) <- length(paramSearch) @@ -289,7 +300,6 @@ gridCvDeep <- function( modelParamNames <- c("numLayers", "sizeHidden", "hiddenFactor", "residualDropout", "hiddenDropout", "sizeEmbedding") modelParams <- paramSearch[[gridId]][modelParamNames] - modelParams$n_features <- n_features fitParams <- paramSearch[[gridId]][c("weightDecay", "learningRate")] fitParams$epochs <- epochs @@ -411,11 +421,11 @@ gridCvDeep <- function( # save torch code here - + estimatorFile <- estimator$save(modelLocation, 'DeepEstimatorModel.pt') return( list( - estimator = estimator, + estimator = modelLocation, prediction = prediction, finalParam = finalParam, paramGridSearch = paramGridSearch, From c989dd408c0fcd0b541f54a0e9354bd5732bb3ab Mon Sep 17 00:00:00 2001 From: ted9219 Date: Mon, 7 Feb 2022 22:02:29 +0900 Subject: [PATCH 043/140] modify DeepNNTorch 
for PLP v5 --- NAMESPACE | 3 + R/Dataset.R | 3 + R/DeepNNTorch_plp5.R | 365 ++++++++++++++++++++++++++++++++++++++++++ man/setDeepNNTorch.Rd | 3 +- 4 files changed, 373 insertions(+), 1 deletion(-) create mode 100644 R/DeepNNTorch_plp5.R diff --git a/NAMESPACE b/NAMESPACE index ff740a3..73edb48 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -1,5 +1,8 @@ # Generated by roxygen2: do not edit by hand +export(Dataset_plp5) +export(Estimator) +export(ResNet) export(fitDeepNNTorch) export(fitResNet) export(fitResNet_plp5) diff --git a/R/Dataset.R b/R/Dataset.R index 723c87a..d1ebb06 100644 --- a/R/Dataset.R +++ b/R/Dataset.R @@ -22,6 +22,9 @@ Dataset_plp5 <- torch::dataset( # Weight to add in loss function to positive class self$posWeight <- ((self$target==0)$sum()/self$target$sum())$item() + # for DeepNNTorch + self$all <- torch::torch_tensor(as.matrix(data), dtype = torch::torch_float32()) + # add features dataCat <- data[, !numericalIndex] self$cat <- torch::torch_tensor(as.matrix(dataCat), dtype=torch::torch_float32()) diff --git a/R/DeepNNTorch_plp5.R b/R/DeepNNTorch_plp5.R new file mode 100644 index 0000000..3c8f6e3 --- /dev/null +++ b/R/DeepNNTorch_plp5.R @@ -0,0 +1,365 @@ +setDeepNNTorch <- function( + units=list(c(128, 64), 128), + layer_dropout=c(0.2), + lr =c(1e-4), + decay=c(1e-5), + outcome_weight = c(1.0), + batch_size = c(10000), + epochs= c(100), + device = 'cpu', + seed=NULL){ + + # ensure_installed("torch") + + param <- expand.grid(units=units, + layer_dropout=layer_dropout, + lr =lr, decay=decay, outcome_weight=outcome_weight,epochs= epochs, + seed=ifelse(is.null(seed),'NULL', seed)) + + param$units1=unlist(lapply(param$units, function(x) x[1])) + param$units2=unlist(lapply(param$units, function(x) x[2])) + param$units3=unlist(lapply(param$units, function(x) x[3])) + + attr(param, 'settings') <- list( + selectorType = "byPid", # is this correct? 
+ crossValidationInPrior = T, + modelType = 'DeepNN', + seed = seed[1], + name = "DeepNNTorch", + units = units, + layer_dropout = layer_dropout, + lr = lr, + decay = decay, + outcome_weight = outcome_weight, + batch_size = batch_size, + device = device, + epochs = epochs + ) + + attr(param, 'modelType') <- 'binary' + attr(param, 'saveType') <- 'RtoJson' + + result <- list(fitFunction='fitDeepNNTorch', + param=param) + + class(result) <- 'modelSettings' + + return(result) + +} + +#' @export +fitDeepNNTorch <- function( + trainData, + param, + search='grid', + analysisId, + ...){ + + start <- Sys.time() + + # check covariateData + if (!FeatureExtraction::isCovariateData(plpData$covariateData)){ + stop('DeepNNTorch requires correct covariateData') + } + + # get the settings from the param + settings <- attr(param, 'settings') + + if(!is.null(trainData$folds)){ + trainData$labels <- merge(trainData$labels, trainData$folds, by = 'rowId') + } + + mappedData <- PatientLevelPrediction::toSparseM( + plpData = trainData, + map = NULL + ) + + matrixData <- mappedData$dataMatrix + labels <- mappedData$labels + covariateRef <- mappedData$covariateRef + + outLoc <- PatientLevelPrediction:::createTempModelLoc() #export + + cvResult <- do.call( + what = gridCvDeepNN, + args = list( + matrixData = matrixData, + labels = labels, + seed = settings$seed, + modelName = settings$name, + device = settings$device, + batchSize = settings$batchSize, + epochs = settings$epochs, + modelLocation = outLoc, + paramSearch = param + ) + ) + + hyperSummary <- do.call(rbind, lapply(cvResult$paramGridSearch, function(x) x$hyperSummary)) + + prediction <- cvResult$prediction + + incs <- rep(1, nrow(covariateRef)) + covariateRef$included <- incs + covariateRef$covariateValue <- 0 + + comp <- start - Sys.time() + + result <- list( + model = cvResult$estimator, #file.path(outLoc), + + prediction = prediction, + + settings = list( + plpDataSettings = attr(trainData, "metaData")$plpDataSettings, + covariateSettings = attr(trainData, "metaData")$covariateSettings, + populationSettings = attr(trainData, "metaData")$populationSettings, + featureEngineering = attr(trainData$covariateData, "metaData")$featureEngineering, + tidyCovariates = attr(trainData$covariateData, "metaData")$tidyCovariateDataSettings, + requireDenseMatrix = F, + modelSettings = list( + model = settings$name, + param = param, + finalModelParameters = cvResult$finalParam, + extraSettings = attr(param, 'settings') + ), + splitSettings = attr(trainData, "metaData")$splitSettings, + sampleSettings = attr(trainData, "metaData")$sampleSettings + ), + + trainDetails = list( + analysisId = analysisId, + cdmDatabaseSchema = attr(trainData, "metaData")$cdmDatabaseSchema, + outcomeId = attr(trainData, "metaData")$outcomeId, + cohortId = attr(trainData, "metaData")$cohortId, + attrition = attr(trainData, "metaData")$attrition, + trainingTime = comp, + trainingDate = Sys.Date(), + hyperParamSearch = hyperSummary + ), + + covariateImportance = covariateRef + ) + + class(result) <- "plpModel" + attr(result, "predictionFunction") <- "predictDeepNN" + attr(result, "modelType") <- "binary" + attr(result, "saveType") <- attr(param, 'settings')$saveType + + return(result) +} + + +gridCvDeepNN <- function( + matrixData, + labels, + seed, + modelName, + device, + batchSize, + epochs, + modelLocation, + paramSearch +){ + + + ParallelLogger::logInfo(paste0("Running CV for ",modelName," model")) + + ########################################################################### + + + 
gridSearchPredictons <- list() + length(gridSearchPredictons) <- nrow(paramSearch) + + for(gridId in 1:nrow(paramSearch)){ + + # get the params + modelParamNames <- c("layer_dropout", "lr", "decay", "outcome_weight", "epochs", "units1", "units2", "units3") + modelParams <- paramSearch[gridId, modelParamNames] + + fitParams <- paramSearch[gridId, c("lr", "decay")] + fitParams$epochs <- epochs + fitParams$batchSize <- batchSize + + + # initiate prediction + prediction <- c() + + fold <- labels$index + ParallelLogger::logInfo(paste0('Max fold: ', max(fold))) + + dataset <- Dataset_plp5(matrixData, labels$outcomeCount) + # modelParams$cat_features <- dataset$cat$shape[2] + # modelParams$num_features <- dataset$num$shape[2] + + for(i in 1:max(fold)){ + + if(is.na(modelParams$units2)){ + model <- singleLayerNN(inputN = ncol(matrixData), + layer1 = modelParams$units1, + outputN = 2, + layer_dropout = modelParams$layer_dropout) + + } else if(is.na(modelParams$units3)){ + model <- doubleLayerNN(inputN = ncol(matrixData), + layer1 = modelParams$units1, + layer2 = modelParams$units2, + outputN = 2, + layer_dropout = modelParams$layer_dropout) + } else{ + model <- tripleLayerNN(inputN = ncol(matrixData), + layer1 = modelParams$units1, + layer2 = modelParams$units2, + layer3 = modelParams$units3, + outputN = 2, + layer_dropout = modelParams$layer_dropout) + } + + criterion = torch::nn_bce_loss() #Binary crossentropy only + optimizer = torch::optim_adam(model$parameters, lr = fitParams$lr) + + # Need earlyStopping + # Need setting decay + + ParallelLogger::logInfo(paste0('Fold ',i)) + trainDataset <- torch::dataset_subset(dataset, indices=which(fold!=i)) + testDataset <- torch::dataset_subset(dataset, indices=which(fold==i)) + + # batches <- split(trainDataset, ceiling(seq_along(trainDataset)/batch_size)) + + for(j in 1:epochs){ + # for(batchRowIds in batches){ + optimizer$zero_grad() + y_pred = model(trainDataset$dataset$all[trainDataset$indices]) + loss = criterion(y_pred[,1], trainDataset$dataset$target[trainDataset$indices]) + loss$backward() + optimizer$step() + + if(j%%1 == 0){ + cat("Epoch:", j, "out of ", epochs , ": Loss:", loss$item(), "\n") + } + # } + } + model$eval() + + ParallelLogger::logInfo("Calculating predictions on left out fold set...") + + pred <- model(testDataset$dataset$all[testDataset$indices]) + predictionTable <- labels[labels$index == i,] + predictionTable$value <- as.array(pred$to())[,1] + + if(!'plpModel' %in% class(model)){ + model <- list(model = model) + attr(model, 'modelType') <- 'binary' + } + attr(predictionTable, "metaData")$modelType <- attr(model, 'modelType') + + prediction <- rbind(prediction, predictionTable) + + } + gridSearchPredictons[[gridId]] <- list( + prediction = prediction, + param = paramSearch[gridId,] + ) + } + + + # get best para (this could be modified to enable any metric instead of AUC, just need metric input in function) + + paramGridSearch <- lapply(gridSearchPredictons, function(x){do.call(PatientLevelPrediction:::computeGridPerformance, x)}) # cvAUCmean, cvAUC, param + + optimalParamInd <- which.max(unlist(lapply(paramGridSearch, function(x) x$cvPerformance))) + + finalParam <- paramGridSearch[[optimalParamInd]]$param + + cvPrediction <- gridSearchPredictons[[optimalParamInd]]$prediction + cvPrediction$evaluationType <- 'CV' + + ParallelLogger::logInfo('Training final model using optimal parameters') + + # get the params + modelParamNames <- c("layer_dropout", "lr", "decay", "outcome_weight", "epochs", "units1", "units2", "units3") + 
modelParams <- finalParam[modelParamNames] + fitParams <- finalParam[c("lr", "decay")] + fitParams$epochs <- epochs + fitParams$batchSize <- batchSize + # create the dir + if(!dir.exists(file.path(modelLocation))){ + dir.create(file.path(modelLocation), recursive = T) + } + + trainDataset <- Dataset_plp5( + matrixData, + labels$outcomeCount + ) + + # modelParams$cat_features <- trainDataset$cat$shape[2] + # modelParams$num_features <- trainDataset$num$shape[2] + + # trainDataset <- torch::dataset_subset(dataset, indices=which(fold!=i)) + + if(is.na(modelParams$units2)){ + model <- singleLayerNN(inputN = ncol(matrixData), + layer1 = modelParams$units1, + outputN = 2, + layer_dropout = modelParams$layer_dropout) + + } else if(is.na(modelParams$units3)){ + model <- doubleLayerNN(inputN = ncol(matrixData), + layer1 = modelParams$units1, + layer2 = modelParams$units2, + outputN = 2, + layer_dropout = modelParams$layer_dropout) + } else{ + model <- tripleLayerNN(inputN = ncol(matrixData), + layer1 = modelParams$units1, + layer2 = modelParams$units2, + layer3 = modelParams$units3, + outputN = 2, + layer_dropout = modelParams$layer_dropout) + } + + criterion = torch::nn_bce_loss() #Binary crossentropy only + optimizer = torch::optim_adam(model$parameters, lr = fitParams$lr) + optimizer$zero_grad() + y_pred = model(trainDataset$all) + loss = criterion(y_pred[,1], trainDataset$target) + loss$backward() + optimizer$step() + model$eval() + + ParallelLogger::logInfo("Calculating predictions on all train data...") + + prediction <- labels + prediction$value <- as.array(y_pred$to())[,1] + prediction$evaluationType <- 'Train' + + prediction <- rbind( + prediction, + cvPrediction + ) + + # modify prediction + prediction <- prediction %>% + dplyr::select(-.data$rowId, -.data$index) %>% + dplyr::rename(rowId = .data$originalRowId) + + prediction$cohortStartDate <- as.Date(prediction$cohortStartDate, origin = '1970-01-01') + + + # save torch code here + torch_save(model, file.path(modelLocation, 'DeepNNTorchModel.rds')) + + return( + list( + estimator = modelLocation, + prediction = prediction, + finalParam = finalParam, + paramGridSearch = paramGridSearch + ) + ) + + } + + diff --git a/man/setDeepNNTorch.Rd b/man/setDeepNNTorch.Rd index 9a6a3a1..6de3f9a 100644 --- a/man/setDeepNNTorch.Rd +++ b/man/setDeepNNTorch.Rd @@ -10,8 +10,9 @@ setDeepNNTorch( lr = c(1e-04), decay = c(1e-05), outcome_weight = c(1), - batch_size = c(100), + batch_size = c(10000), epochs = c(100), + device = "cpu", seed = NULL ) } From 7d4be162d6d028ce666f3c72361cc2671c1f06a6 Mon Sep 17 00:00:00 2001 From: ted9219 Date: Wed, 9 Feb 2022 20:31:47 +0900 Subject: [PATCH 044/140] add prediction function for DeepNNtorch_plp5 --- R/DeepNNTorch_plp5.R | 40 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 39 insertions(+), 1 deletion(-) diff --git a/R/DeepNNTorch_plp5.R b/R/DeepNNTorch_plp5.R index 3c8f6e3..1d79cbf 100644 --- a/R/DeepNNTorch_plp5.R +++ b/R/DeepNNTorch_plp5.R @@ -1,3 +1,4 @@ +#' @export setDeepNNTorch <- function( units=list(c(128, 64), 128), layer_dropout=c(0.2), @@ -13,7 +14,7 @@ setDeepNNTorch <- function( param <- expand.grid(units=units, layer_dropout=layer_dropout, - lr =lr, decay=decay, outcome_weight=outcome_weight,epochs= epochs, + lr =lr, decay=decay, outcome_weight=outcome_weight, epochs= epochs, seed=ifelse(is.null(seed),'NULL', seed)) param$units1=unlist(lapply(param$units, function(x) x[1])) @@ -150,6 +151,43 @@ fitDeepNNTorch <- function( return(result) } +#' @export +predictDeepNN <- function( + plpModel, + 
data, + cohort +){ + + if(!'plpModel' %in% class(plpModel)){ + plpModel <- list(model = plpModel) + attr(plpModel, 'modelType') <- 'binary' + } + + if("plpData" %in% class(data)){ + + dataMat <- PatientLevelPrediction::toSparseM( + plpData = data, + cohort = cohort, + map = plpModel$covariateImportance %>% + dplyr::select(.data$columnId, .data$covariateId) + ) + + data <- Dataset_plp5(dataMat$dataMatrix) # add numeric details.. + } + + # get predictions + prediction <- cohort + + if(is.character(plpModel$model)) model <- torch::torch_load(file.path(plpModel$model, 'DeepNNTorchModel.rds'), device='cpu') + + y_pred = model(data$all) + prediction$value <- as.array(y_pred$to())[,1] + + attr(prediction, "metaData")$modelType <- attr(plpModel, 'modelType') + + return(prediction) +} + gridCvDeepNN <- function( matrixData, From 31c1ce0f6922b626224d9c92e4399ac3a0f839fb Mon Sep 17 00:00:00 2001 From: ted9219 Date: Wed, 9 Feb 2022 20:33:20 +0900 Subject: [PATCH 045/140] export functions --- R/Topologies.R | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/R/Topologies.R b/R/Topologies.R index 7ce98b5..bc01383 100644 --- a/R/Topologies.R +++ b/R/Topologies.R @@ -1,3 +1,4 @@ +#' @export singleLayerNN <- function(inputN, layer1, outputN = 2, layer_dropout){ net <- torch::nn_module( "classic_net", @@ -20,7 +21,7 @@ singleLayerNN <- function(inputN, layer1, outputN = 2, layer_dropout){ return(net()) } - +#' @export doubleLayerNN <- function(inputN, layer1, layer2, outputN, layer_dropout){ @@ -47,7 +48,7 @@ doubleLayerNN <- function(inputN, layer1, return(net()) } - +#' @export tripleLayerNN <- function(inputN, layer1, layer2, layer3, outputN, layer_dropout){ From 04573b9ccc15dae2a18b42a129521d80d008a72a Mon Sep 17 00:00:00 2001 From: ted9219 Date: Mon, 21 Mar 2022 20:22:05 +0900 Subject: [PATCH 046/140] fix DeepNN according to updated PLPv5 --- R/DeepNNTorch_plp5.R | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/R/DeepNNTorch_plp5.R b/R/DeepNNTorch_plp5.R index 1d79cbf..757bff8 100644 --- a/R/DeepNNTorch_plp5.R +++ b/R/DeepNNTorch_plp5.R @@ -180,8 +180,8 @@ predictDeepNN <- function( if(is.character(plpModel$model)) model <- torch::torch_load(file.path(plpModel$model, 'DeepNNTorchModel.rds'), device='cpu') - y_pred = model(data$all) - prediction$value <- as.array(y_pred$to())[,1] + y_pred = model(data$all) + prediction$value <- as.array(y_pred$to())[,1] attr(prediction, "metaData")$modelType <- attr(plpModel, 'modelType') From 16e6bf486f82e487e650e4a81fe883e723f10db5 Mon Sep 17 00:00:00 2001 From: ted9219 Date: Mon, 21 Mar 2022 20:23:05 +0900 Subject: [PATCH 047/140] add TabNet --- R/TabNet.R | 432 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 432 insertions(+) create mode 100644 R/TabNet.R diff --git a/R/TabNet.R b/R/TabNet.R new file mode 100644 index 0000000..1894a52 --- /dev/null +++ b/R/TabNet.R @@ -0,0 +1,432 @@ +#' @export +setTabNetTorch <- function( + batch_size = 256, + penalty = 1e-3, + clip_value = NULL, + loss = "auto", + epochs = 5, + drop_last = FALSE, + decision_width = 8, + attention_width = 8, + num_steps = 3, + feature_reusage = 1.3, + mask_type = "sparsemax", + virtual_batch_size = 128, + valid_split = 0, + learn_rate = 2e-2, + optimizer = "adam", + lr_scheduler = NULL, + lr_decay = 0.1, + step_size = 30, + checkpoint_epochs = 10, + cat_emb_dim = 1, + num_independent = 2, + num_shared = 2, + momentum = 0.02, + pretraining_ratio = 0.5, + verbose = FALSE, + device = "auto", + importance_sample_size = 1e5, + seed=NULL, + 
hyperParamSearch = 'random', + randomSample = 100){ + + # ensure_installed("torch") + + if (is.null(seed)) { + seed <- as.integer(sample(1e5, 1)) + } + + paramGrid <- list( + penalty = penalty, + decision_width = decision_width, + attention_width = attention_width, + num_steps = num_steps, + feature_reusage = feature_reusage, + virtual_batch_size = virtual_batch_size, + valid_split = valid_split, + learn_rate = learn_rate, + lr_decay = lr_decay, + step_size = step_size, + checkpoint_epochs = checkpoint_epochs, + cat_emb_dim = cat_emb_dim, + num_independent = num_independent, + num_shared = num_shared, + momentum = momentum, + pretraining_ratio = pretraining_ratio, + importance_sample_size = importance_sample_size + ) + + param <- listCartesian(paramGrid) + + # if (hyperParamSearch=='random'){ + # param <- param[sample(length(param), randomSample)] + # } + + attr(param, 'settings') <- list( + modelType = 'DeepNNTorch', + seed = seed, + name = "TabNetTorch", + batch_size = batch_size, + epochs = epochs, + drop_last = drop_last, + clip_value = clip_value, + loss = loss, + mask_type = mask_type, + optimizer = optimizer, + lr_scheduler = lr_scheduler, + verbose = verbose, + device = device + ) + + attr(param, 'modelType') <- 'binary' + attr(param, 'saveType') <- 'file' + + result <- list(fitFunction='fitTabNetTorch', + param=param) + + class(result) <- 'modelSettings' + + return(result) + +} + +#' @export +fitTabNetTorch <- function( + trainData, + param, + search='grid', + analysisId, + ...){ + + start <- Sys.time() + + # check covariateData + if (!FeatureExtraction::isCovariateData(plpData$covariateData)){ + stop('TabNetTorch requires correct covariateData') + } + + # get the settings from the param + settings <- attr(param, 'settings') + + if(!is.null(trainData$folds)){ + trainData$labels <- merge(trainData$labels, trainData$folds, by = 'rowId') + } + + mappedData <- PatientLevelPrediction::toSparseM( + plpData = trainData, + map = NULL + ) + + matrixData <- mappedData$dataMatrix + labels <- mappedData$labels + covariateRef <- mappedData$covariateRef + + outLoc <- PatientLevelPrediction:::createTempModelLoc() #export + + cvResult <- do.call( + what = gridCvTabNetTorch, + args = list( + matrixData = matrixData, + labels = labels, + seed = settings$seed, + modelName = settings$name, + device = settings$device, + batch_size = settings$batch_size, + epochs = settings$epochs, + modelLocation = outLoc, + paramSearch = param + ) + ) + + hyperSummary <- do.call(rbind, lapply(cvResult$paramGridSearch, function(x) x$hyperSummary)) + + prediction <- cvResult$prediction + + incs <- rep(1, nrow(covariateRef)) + covariateRef$included <- incs + covariateRef$covariateValue <- 0 + + comp <- start - Sys.time() + + result <- list( + model = cvResult$estimator, #file.path(outLoc), + + prediction = prediction, + + settings = list( + plpDataSettings = attr(trainData, "metaData")$plpDataSettings, + covariateSettings = attr(trainData, "metaData")$covariateSettings, + populationSettings = attr(trainData, "metaData")$populationSettings, + featureEngineering = attr(trainData$covariateData, "metaData")$featureEngineering, + tidyCovariates = attr(trainData$covariateData, "metaData")$tidyCovariateDataSettings, + requireDenseMatrix = F, + modelSettings = list( + model = settings$name, + param = param, + finalModelParameters = cvResult$finalParam, + extraSettings = attr(param, 'settings') + ), + splitSettings = attr(trainData, "metaData")$splitSettings, + sampleSettings = attr(trainData, "metaData")$sampleSettings + ), + + 
trainDetails = list( + analysisId = analysisId, + cdmDatabaseSchema = attr(trainData, "metaData")$cdmDatabaseSchema, + outcomeId = attr(trainData, "metaData")$outcomeId, + cohortId = attr(trainData, "metaData")$cohortId, + attrition = attr(trainData, "metaData")$attrition, + trainingTime = comp, + trainingDate = Sys.Date(), + hyperParamSearch = hyperSummary + ), + + covariateImportance = covariateRef + ) + + class(result) <- "plpModel" + attr(result, "predictionFunction") <- "predictDeepNN" + attr(result, "modelType") <- "binary" + attr(result, "saveType") <- attr(param, 'settings')$saveType + + return(result) +} + +gridCvTabNetTorch <- function( + matrixData, + labels, + seed, + batch_size, + epochs, + drop_last, + clip_value, + loss, + mask_type, + optimizer, + lr_scheduler, + verbose, + device, + paramSearch +){ + + + ParallelLogger::logInfo(paste0("Running CV for ",modelName," model")) + + ########################################################################### + + + gridSearchPredictons <- list() + length(gridSearchPredictons) <- length(paramSearch) + + for(gridId in 1:length(paramSearch)){ + + # get the params + + config <- tabnet_config(batch_size = batch_size, + penalty = paramSearch$penalty, + clip_value = clip_value, + loss = loss, + epochs = epochs, + drop_last = drop_last, + decision_width = paramSearch$decision_width, + attention_width = paramSearch$attention_width, + num_steps = paramSearch$num_steps, + feature_reusage = paramSearch$feature_reusage, + mask_type = mask_type, + virtual_batch_size = paramSearch$virtual_batch_size, + valid_split = paramSearch$valid_split, + learn_rate = paramSearch$learn_rate, + optimizer = optimizer, + lr_scheduler = lr_scheduler, + lr_decay = paramSearch$lr_decay, + step_size = paramSearch$step_size, + checkpoint_epochs = paramSearch$checkpoint_epochs, + cat_emb_dim = paramSearch$cat_emb_dim, + num_independent = paramSearch$num_independent, + num_shared = paramSearch$num_shared, + momentum = paramSearch$momentum, + pretraining_ratio = paramSearch$pretraining_ratio, + verbose = verbose, + device = device, + importance_sample_size = paramSearch$importance_sample_size, + seed = seed) + + + # initiate prediction + prediction <- c() + + fold <- labels$index + ParallelLogger::logInfo(paste0('Max fold: ', max(fold))) + + # dataset <- Dataset_plp5(matrixData, labels$outcomeCount) + # modelParams$cat_features <- dataset$cat$shape[2] + # modelParams$num_features <- dataset$num$shape[2] + + # rec <- recipes::recipe(dataset$target ~ ., data = dataset$all) + # fit <- tabnet_fit(x = as.data.frame(as.matrix(matrixData)), y =labels$outcomeCount , epoch = epochs) + + for(i in 1:max(fold)){ + + ParallelLogger::logInfo(paste0('Fold ',i)) + trainDataset <- as.data.frame(as.matrix(matrixData)[fold != i,]) + trainLabel <- labels[fold != i,] + + testDataset <-as.data.frame(as.matrix(matrixData)[fold == i,]) + testLabel <- labels[fold == i,] + + model <- tabnet_fit(x = trainDataset, y = trainLabel$outcomeCount, config = config) + + ParallelLogger::logInfo("Calculating predictions on left out fold set...") + + pred <- predict(model, testDataset) + predictionTable <- testLabel + predictionTable$value <- pred$.pred + + if(!'plpModel' %in% class(model)){ + model <- list(model = model) + attr(model, 'modelType') <- 'binary' + } + attr(predictionTable, "metaData")$modelType <- attr(model, 'modelType') + + prediction <- rbind(prediction, predictionTable) + + } + + gridSearchPredictons[[gridId]] <- list( + prediction = prediction, + param = paramSearch[[gridId]] + ) + } + 
+ + # get best para (this could be modified to enable any metric instead of AUC, just need metric input in function) + + paramGridSearch <- lapply(gridSearchPredictons, function(x){do.call(PatientLevelPrediction:::computeGridPerformance, x)}) # cvAUCmean, cvAUC, param + + optimalParamInd <- which.max(unlist(lapply(paramGridSearch, function(x) x$cvPerformance))) + + finalParam <- paramGridSearch[[optimalParamInd]]$param + + cvPrediction <- gridSearchPredictons[[optimalParamInd]]$prediction + cvPrediction$evaluationType <- 'CV' + + ParallelLogger::logInfo('Training final model using optimal parameters') + + # get the params + + finalParam$batch_size = batch_size + finalParam$epochs = epochs + finalParam$drop_last = drop_last + finalParam$clip_value = clip_value + finalParam$loss = loss + finalParam$mask_type = mask_type + finalParam$ optimizer = optimizer + finalParam$lr_scheduler = lr_scheduler + finalParam$verbose = verbose + finalParam$device = device + + config <- tabnet_config(finalParam) + + # create the dir + if(!dir.exists(file.path(modelLocation))){ + dir.create(file.path(modelLocation), recursive = T) + } + + trainDataset <- as.data.frame(as.matrix(matrixData)) + trainLabel <- labels + + model <- tabnet_fit(x = trainDataset, y = trainLabel$outcomeCount, config = config) + + ParallelLogger::logInfo("Calculating predictions on all train data...") + + pred <- predict(model, trainDataset) + prediction <- trainLabel + predictionTable$value <- pred$.pred + prediction$evaluationType <- 'Train' + + prediction <- rbind( + prediction, + cvPrediction + ) + + # modify prediction + prediction <- prediction %>% + dplyr::select(-.data$rowId, -.data$index) %>% + dplyr::rename(rowId = .data$originalRowId) + + prediction$cohortStartDate <- as.Date(prediction$cohortStartDate, origin = '1970-01-01') + + + # save torch code here + torch_save(model, file.path(modelLocation, 'TabNetTorchModel.rds')) + + return( + list( + estimator = modelLocation, + prediction = prediction, + finalParam = finalParam, + paramGridSearch = paramGridSearch + ) + ) + +} + +#' @export +predictTabNetTorch <- function( + plpModel, + data, + cohort +){ + + if(!'plpModel' %in% class(plpModel)){ + plpModel <- list(model = plpModel) + attr(plpModel, 'modelType') <- 'binary' + } + + if("plpData" %in% class(data)){ + + dataMat <- PatientLevelPrediction::toSparseM( + plpData = data, + cohort = cohort, + map = plpModel$covariateImportance %>% + dplyr::select(.data$columnId, .data$covariateId) + ) + + data <- as.data.frame(as.matrix(dataMat$dataMatrix)) + } + + # get predictions + prediction <- cohort + + if(is.character(plpModel$model)) model <- torch::torch_load(file.path(plpModel$model, 'TabNetTorchModel.rds'), device='cpu') + + pred <- predict(model, data) + prediction$value <- pred$.pred + + attr(prediction, "metaData")$modelType <- attr(plpModel, 'modelType') + + return(prediction) +} + +listCartesian <- function(allList){ + + sizes <- lapply(allList, function(x) 1:length(x)) + combinations <- expand.grid(sizes) + + result <- list() + length(result) <- nrow(combinations) + + for(i in 1:nrow(combinations)){ + tempList <- list() + for(j in 1:ncol(combinations)){ + tempList <- c(tempList, list(allList[[j]][[combinations[i,j]]])) + } + names(tempList) <- names(allList) + result[[i]] <- tempList + } + + return(result) +} + + From b0e512c4110de1f295297cdcb943a282ba8aab49 Mon Sep 17 00:00:00 2001 From: Egill Fridgeirsson Date: Mon, 28 Mar 2022 14:13:35 +0200 Subject: [PATCH 048/140] updated TabNet to get it working. 
Added feature importance --- R/TabNet.R | 154 ++++++++++++++++------------------------------------- 1 file changed, 45 insertions(+), 109 deletions(-) diff --git a/R/TabNet.R b/R/TabNet.R index 1894a52..761c9ff 100644 --- a/R/TabNet.R +++ b/R/TabNet.R @@ -64,9 +64,6 @@ setTabNetTorch <- function( # } attr(param, 'settings') <- list( - modelType = 'DeepNNTorch', - seed = seed, - name = "TabNetTorch", batch_size = batch_size, epochs = epochs, drop_last = drop_last, @@ -81,6 +78,7 @@ setTabNetTorch <- function( attr(param, 'modelType') <- 'binary' attr(param, 'saveType') <- 'file' + attr(param, 'name') <- "TabNetTorch" result <- list(fitFunction='fitTabNetTorch', param=param) @@ -88,7 +86,6 @@ setTabNetTorch <- function( class(result) <- 'modelSettings' return(result) - } #' @export @@ -106,8 +103,6 @@ fitTabNetTorch <- function( stop('TabNetTorch requires correct covariateData') } - # get the settings from the param - settings <- attr(param, 'settings') if(!is.null(trainData$folds)){ trainData$labels <- merge(trainData$labels, trainData$folds, by = 'rowId') @@ -129,26 +124,29 @@ fitTabNetTorch <- function( args = list( matrixData = matrixData, labels = labels, - seed = settings$seed, - modelName = settings$name, - device = settings$device, - batch_size = settings$batch_size, - epochs = settings$epochs, modelLocation = outLoc, paramSearch = param ) ) - hyperSummary <- do.call(rbind, lapply(cvResult$paramGridSearch, function(x) x$hyperSummary)) prediction <- cvResult$prediction - incs <- rep(1, nrow(covariateRef)) - covariateRef$included <- incs - covariateRef$covariateValue <- 0 + variableImportance <- cvResult$variableImportance + incs <- seq_len(nrow(variableImportance)) + variableImportance$columnId <- incs + + browser() + covariateRef <- covariateRef %>% merge(variableImportance, by = 'columnId', + all.x = TRUE) %>% + dplyr::mutate(included=1) %>% + dplyr::rename(covariateValue=importance) %>% + dplyr::select(!variables) + covariateRef$covariateValue[is.na(covariateRef$covariateValue)] <- 0 + covariateRef$included[is.na(covariateRef$included)] <- 0 + comp <- start - Sys.time() - result <- list( model = cvResult$estimator, #file.path(outLoc), @@ -162,7 +160,7 @@ fitTabNetTorch <- function( tidyCovariates = attr(trainData$covariateData, "metaData")$tidyCovariateDataSettings, requireDenseMatrix = F, modelSettings = list( - model = settings$name, + model = attr(param, 'name'), param = param, finalModelParameters = cvResult$finalParam, extraSettings = attr(param, 'settings') @@ -186,9 +184,9 @@ fitTabNetTorch <- function( ) class(result) <- "plpModel" - attr(result, "predictionFunction") <- "predictDeepNN" + attr(result, "predictionFunction") <- "predictTabNetTorch" attr(result, "modelType") <- "binary" - attr(result, "saveType") <- attr(param, 'settings')$saveType + attr(result, "saveType") <- attr(param, 'saveType') return(result) } @@ -196,22 +194,13 @@ fitTabNetTorch <- function( gridCvTabNetTorch <- function( matrixData, labels, - seed, - batch_size, - epochs, - drop_last, - clip_value, - loss, - mask_type, - optimizer, - lr_scheduler, - verbose, - device, - paramSearch + paramSearch, + modelLocation ){ + fitSettings <- attr(paramSearch, 'settings') - ParallelLogger::logInfo(paste0("Running CV for ",modelName," model")) + ParallelLogger::logInfo(paste0("Running CV for ",attr(paramSearch, 'name')," model")) ########################################################################### @@ -220,38 +209,9 @@ gridCvTabNetTorch <- function( length(gridSearchPredictons) <- length(paramSearch) 
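  # each element of paramSearch is one sampled hyperparameter combination;
  # in the loop below it is merged with the fixed fit settings and passed to
  # tabnet::tabnet_config() via do.call() before cross-validation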
for(gridId in 1:length(paramSearch)){ - + # get the params - - config <- tabnet_config(batch_size = batch_size, - penalty = paramSearch$penalty, - clip_value = clip_value, - loss = loss, - epochs = epochs, - drop_last = drop_last, - decision_width = paramSearch$decision_width, - attention_width = paramSearch$attention_width, - num_steps = paramSearch$num_steps, - feature_reusage = paramSearch$feature_reusage, - mask_type = mask_type, - virtual_batch_size = paramSearch$virtual_batch_size, - valid_split = paramSearch$valid_split, - learn_rate = paramSearch$learn_rate, - optimizer = optimizer, - lr_scheduler = lr_scheduler, - lr_decay = paramSearch$lr_decay, - step_size = paramSearch$step_size, - checkpoint_epochs = paramSearch$checkpoint_epochs, - cat_emb_dim = paramSearch$cat_emb_dim, - num_independent = paramSearch$num_independent, - num_shared = paramSearch$num_shared, - momentum = paramSearch$momentum, - pretraining_ratio = paramSearch$pretraining_ratio, - verbose = verbose, - device = device, - importance_sample_size = paramSearch$importance_sample_size, - seed = seed) - + config <- do.call(tabnet::tabnet_config, args=c(paramSearch[[gridId]], fitSettings)) # initiate prediction prediction <- c() @@ -273,23 +233,14 @@ gridCvTabNetTorch <- function( trainLabel <- labels[fold != i,] testDataset <-as.data.frame(as.matrix(matrixData)[fold == i,]) - testLabel <- labels[fold == i,] - - model <- tabnet_fit(x = trainDataset, y = trainLabel$outcomeCount, config = config) + + model <- tabnet::tabnet_fit(x = trainDataset, y = trainLabel$outcomeCount, config = config) ParallelLogger::logInfo("Calculating predictions on left out fold set...") - pred <- predict(model, testDataset) - predictionTable <- testLabel - predictionTable$value <- pred$.pred - - if(!'plpModel' %in% class(model)){ - model <- list(model = model) - attr(model, 'modelType') <- 'binary' - } - attr(predictionTable, "metaData")$modelType <- attr(model, 'modelType') - - prediction <- rbind(prediction, predictionTable) + prediction <- rbind(prediction, predictTabNetTorch(plpModel = model, + data = testDataset, + cohort = labels[fold == i,])) } @@ -312,21 +263,10 @@ gridCvTabNetTorch <- function( cvPrediction$evaluationType <- 'CV' ParallelLogger::logInfo('Training final model using optimal parameters') - # get the params + finalParam <- c(finalParam, fitSettings) - finalParam$batch_size = batch_size - finalParam$epochs = epochs - finalParam$drop_last = drop_last - finalParam$clip_value = clip_value - finalParam$loss = loss - finalParam$mask_type = mask_type - finalParam$ optimizer = optimizer - finalParam$lr_scheduler = lr_scheduler - finalParam$verbose = verbose - finalParam$device = device - - config <- tabnet_config(finalParam) + config <- do.call(tabnet::tabnet_config, finalParam) # create the dir if(!dir.exists(file.path(modelLocation))){ @@ -334,15 +274,14 @@ gridCvTabNetTorch <- function( } trainDataset <- as.data.frame(as.matrix(matrixData)) - trainLabel <- labels - model <- tabnet_fit(x = trainDataset, y = trainLabel$outcomeCount, config = config) + model <- tabnet::tabnet_fit(x = trainDataset, y = labels$outcomeCount, config = config) ParallelLogger::logInfo("Calculating predictions on all train data...") - pred <- predict(model, trainDataset) - prediction <- trainLabel - predictionTable$value <- pred$.pred + prediction <- predictTabNetTorch(plpModel = model, + data = trainDataset, + cohort = labels) prediction$evaluationType <- 'Train' prediction <- rbind( @@ -359,14 +298,14 @@ gridCvTabNetTorch <- function( # save torch code 
here - torch_save(model, file.path(modelLocation, 'TabNetTorchModel.rds')) - + saveRDS(model, file.path(modelLocation, 'TabNetTorchModel.Rds')) return( list( estimator = modelLocation, prediction = prediction, finalParam = finalParam, - paramGridSearch = paramGridSearch + paramGridSearch = paramGridSearch, + variableImportance = model$fit$importances ) ) @@ -378,12 +317,7 @@ predictTabNetTorch <- function( data, cohort ){ - - if(!'plpModel' %in% class(plpModel)){ - plpModel <- list(model = plpModel) - attr(plpModel, 'modelType') <- 'binary' - } - + if("plpData" %in% class(data)){ dataMat <- PatientLevelPrediction::toSparseM( @@ -398,13 +332,15 @@ predictTabNetTorch <- function( # get predictions prediction <- cohort + if(is.character(plpModel$model)) { + plpModel <- readRDS(file.path(plpModel$model, 'TabNetTorchModel.Rds')) + } + - if(is.character(plpModel$model)) model <- torch::torch_load(file.path(plpModel$model, 'TabNetTorchModel.rds'), device='cpu') - - pred <- predict(model, data) - prediction$value <- pred$.pred + pred <- predict(plpModel, data) + prediction$value <- as.vector(as.matrix(torch::torch_sigmoid(pred$.pred))) - attr(prediction, "metaData")$modelType <- attr(plpModel, 'modelType') + attr(prediction, "metaData")$modelType <- 'binary' return(prediction) } From 4a022ef058d4feba39118a954ced6caffa962b68 Mon Sep 17 00:00:00 2001 From: Jenna Reps Date: Sun, 3 Apr 2022 20:58:28 -0400 Subject: [PATCH 049/140] vignette - added code to create docs/website - added initial vignette --- extras/PackageMaintenance.R | 38 +++++ inst/doc/BuildingDeepModels.tex | 260 +++++++++++++++++++++++++++++++ vignettes/BuildingDeepModels.Rmd | 73 +++++++++ 3 files changed, 371 insertions(+) create mode 100644 extras/PackageMaintenance.R create mode 100644 inst/doc/BuildingDeepModels.tex create mode 100644 vignettes/BuildingDeepModels.Rmd diff --git a/extras/PackageMaintenance.R b/extras/PackageMaintenance.R new file mode 100644 index 0000000..c76ed7a --- /dev/null +++ b/extras/PackageMaintenance.R @@ -0,0 +1,38 @@ +# @file PackageMaintenance +# +# Copyright 2022 Observational Health Data Sciences and Informatics +# +# This file is part of DeepPatientLevelPrediction +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# recreate the html index when new documentation +pkgdown::build_site() +OhdsiRTools::fixHadesLogo() + +# Format and check code +OhdsiRTools::formatRFolder() +OhdsiRTools::checkUsagePackage("DeepPatientLevelPrediction") +OhdsiRTools::updateCopyrightYearFolder() +devtools::spell_check() + +# Create manual and vignettes +unlink("extras/DeepPatientLevelPrediction.pdf") +system("R CMD Rd2pdf ./ --output=extras/DeepPatientLevelPrediction.pdf") + +rmarkdown::render("vignettes/BuildingDeepModels.Rmd", + output_file = "../inst/doc/BuildingDeepModels.pdf", + rmarkdown::pdf_document(latex_engine = "pdflatex", + toc = TRUE, + toc_depth = 3, + number_sections = TRUE)) diff --git a/inst/doc/BuildingDeepModels.tex b/inst/doc/BuildingDeepModels.tex new file mode 100644 index 0000000..5e0ab09 --- /dev/null +++ b/inst/doc/BuildingDeepModels.tex @@ -0,0 +1,260 @@ +% Options for packages loaded elsewhere +\PassOptionsToPackage{unicode}{hyperref} +\PassOptionsToPackage{hyphens}{url} +% +\documentclass[ +]{article} +\title{Building Deep Learning Models} +\author{Jenna Reps, Egill Fridgeirsson, Chungsoo Kim, Henrik John, Seng +Chan You, Xiaoyong Pan} +\date{2022-04-03} + +\usepackage{amsmath,amssymb} +\usepackage{lmodern} +\usepackage{iftex} +\ifPDFTeX + \usepackage[T1]{fontenc} + \usepackage[utf8]{inputenc} + \usepackage{textcomp} % provide euro and other symbols +\else % if luatex or xetex + \usepackage{unicode-math} + \defaultfontfeatures{Scale=MatchLowercase} + \defaultfontfeatures[\rmfamily]{Ligatures=TeX,Scale=1} +\fi +% Use upquote if available, for straight quotes in verbatim environments +\IfFileExists{upquote.sty}{\usepackage{upquote}}{} +\IfFileExists{microtype.sty}{% use microtype if available + \usepackage[]{microtype} + \UseMicrotypeSet[protrusion]{basicmath} % disable protrusion for tt fonts +}{} +\makeatletter +\@ifundefined{KOMAClassName}{% if non-KOMA class + \IfFileExists{parskip.sty}{% + \usepackage{parskip} + }{% else + \setlength{\parindent}{0pt} + \setlength{\parskip}{6pt plus 2pt minus 1pt}} +}{% if KOMA class + \KOMAoptions{parskip=half}} +\makeatother +\usepackage{xcolor} +\IfFileExists{xurl.sty}{\usepackage{xurl}}{} % add URL line breaks if available +\IfFileExists{bookmark.sty}{\usepackage{bookmark}}{\usepackage{hyperref}} +\hypersetup{ + pdftitle={Building Deep Learning Models}, + pdfauthor={Jenna Reps, Egill Fridgeirsson, Chungsoo Kim, Henrik John, Seng Chan You, Xiaoyong Pan}, + hidelinks, + pdfcreator={LaTeX via pandoc}} +\urlstyle{same} % disable monospaced font for URLs +\usepackage[margin=1in]{geometry} +\usepackage{color} +\usepackage{fancyvrb} +\newcommand{\VerbBar}{|} +\newcommand{\VERB}{\Verb[commandchars=\\\{\}]} +\DefineVerbatimEnvironment{Highlighting}{Verbatim}{commandchars=\\\{\}} +% Add ',fontsize=\small' for more characters per line +\usepackage{framed} +\definecolor{shadecolor}{RGB}{248,248,248} +\newenvironment{Shaded}{\begin{snugshade}}{\end{snugshade}} +\newcommand{\AlertTok}[1]{\textcolor[rgb]{0.94,0.16,0.16}{#1}} +\newcommand{\AnnotationTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{#1}}}} +\newcommand{\AttributeTok}[1]{\textcolor[rgb]{0.77,0.63,0.00}{#1}} +\newcommand{\BaseNTok}[1]{\textcolor[rgb]{0.00,0.00,0.81}{#1}} +\newcommand{\BuiltInTok}[1]{#1} +\newcommand{\CharTok}[1]{\textcolor[rgb]{0.31,0.60,0.02}{#1}} +\newcommand{\CommentTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textit{#1}}} +\newcommand{\CommentVarTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{#1}}}} +\newcommand{\ConstantTok}[1]{\textcolor[rgb]{0.00,0.00,0.00}{#1}} 
+\newcommand{\ControlFlowTok}[1]{\textcolor[rgb]{0.13,0.29,0.53}{\textbf{#1}}} +\newcommand{\DataTypeTok}[1]{\textcolor[rgb]{0.13,0.29,0.53}{#1}} +\newcommand{\DecValTok}[1]{\textcolor[rgb]{0.00,0.00,0.81}{#1}} +\newcommand{\DocumentationTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{#1}}}} +\newcommand{\ErrorTok}[1]{\textcolor[rgb]{0.64,0.00,0.00}{\textbf{#1}}} +\newcommand{\ExtensionTok}[1]{#1} +\newcommand{\FloatTok}[1]{\textcolor[rgb]{0.00,0.00,0.81}{#1}} +\newcommand{\FunctionTok}[1]{\textcolor[rgb]{0.00,0.00,0.00}{#1}} +\newcommand{\ImportTok}[1]{#1} +\newcommand{\InformationTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{#1}}}} +\newcommand{\KeywordTok}[1]{\textcolor[rgb]{0.13,0.29,0.53}{\textbf{#1}}} +\newcommand{\NormalTok}[1]{#1} +\newcommand{\OperatorTok}[1]{\textcolor[rgb]{0.81,0.36,0.00}{\textbf{#1}}} +\newcommand{\OtherTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{#1}} +\newcommand{\PreprocessorTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textit{#1}}} +\newcommand{\RegionMarkerTok}[1]{#1} +\newcommand{\SpecialCharTok}[1]{\textcolor[rgb]{0.00,0.00,0.00}{#1}} +\newcommand{\SpecialStringTok}[1]{\textcolor[rgb]{0.31,0.60,0.02}{#1}} +\newcommand{\StringTok}[1]{\textcolor[rgb]{0.31,0.60,0.02}{#1}} +\newcommand{\VariableTok}[1]{\textcolor[rgb]{0.00,0.00,0.00}{#1}} +\newcommand{\VerbatimStringTok}[1]{\textcolor[rgb]{0.31,0.60,0.02}{#1}} +\newcommand{\WarningTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{#1}}}} +\usepackage{graphicx} +\makeatletter +\def\maxwidth{\ifdim\Gin@nat@width>\linewidth\linewidth\else\Gin@nat@width\fi} +\def\maxheight{\ifdim\Gin@nat@height>\textheight\textheight\else\Gin@nat@height\fi} +\makeatother +% Scale images if necessary, so that they will not overflow the page +% margins by default, and it is still possible to overwrite the defaults +% using explicit options in \includegraphics[width, height, ...]{} +\setkeys{Gin}{width=\maxwidth,height=\maxheight,keepaspectratio} +% Set default figure placement to htbp +\makeatletter +\def\fps@figure{htbp} +\makeatother +\setlength{\emergencystretch}{3em} % prevent overfull lines +\providecommand{\tightlist}{% + \setlength{\itemsep}{0pt}\setlength{\parskip}{0pt}} +\setcounter{secnumdepth}{5} +\usepackage{fancyhdr} +\pagestyle{fancy} +\fancyhead{} +\fancyfoot[CO,CE]{PatientLevelPrediction Package Version 5.0.2} +\fancyfoot[CO,CE]{DeepPatientLevelPrediction Package Version 0.0.1} +\fancyfoot[LE,RO]{\thepage} +\renewcommand{\headrulewidth}{0.4pt} +\renewcommand{\footrulewidth}{0.4pt} +\ifLuaTeX + \usepackage{selnolig} % disable illegal ligatures +\fi + +\begin{document} +\maketitle + +{ +\setcounter{tocdepth}{3} +\tableofcontents +} +\hypertarget{introduction}{% +\section{Introduction}\label{introduction}} + +Patient level prediction aims to use historic data to learn a function +between an input (a patient's features such as age/gender/comorbidities +at index) and an output (whether the patient experienced an outcome +during some time-at-risk). Deep learning is example of the the current +state-of-the-art classifiers that can be implemented to learn the +function between inputs and outputs. + +Deep Learning models are widely used to automatically learn high-level +feature representations from the data, and have achieved remarkable +results in image processing, speech recognition and computational +biology. 
Recently, interesting results have been shown using large +observational healthcare data (e.g., electronic healthcare data or +claims data), but more extensive research is needed to assess the power +of Deep Learning in this domain. + +This vignette describes how you can use the Observational Health Data +Sciences and Informatics (OHDSI) +\href{http://github.com/OHDSI/PatientLevelPrediction}{\texttt{PatientLevelPrediction}} +package and +\href{http://github.com/OHDSI/DeepPatientLevelPrediction}{\texttt{DeepPatientLevelPrediction}} +package to build Deep Learning models. This vignette assumes you have +read and are comfortable with building patient level prediction models +as described in the +\href{https://github.com/OHDSI/PatientLevelPrediction/blob/main/inst/doc/BuildingPredictiveModels.pdf}{\texttt{BuildingPredictiveModels} +vignette}. Furthermore, this vignette assumes you are familiar with Deep +Learning methods. + +\hypertarget{background}{% +\section{Background}\label{background}} + +Deep Learning models are build by stacking an often large number of +neural network layers that perform feature engineering steps, e.g +embedding, and are collapsed in a final softmax layer (basically a +logistic regression layer). These algorithms need a lot of data to +converge to a good representation, but currently the sizes of the large +observational healthcare databases are growing fast which would make +Deep Learning an interesting approach to test within OHDSI's +\href{https://academic.oup.com/jamia/article/25/8/969/4989437}{Patient-Level +Prediction Framework}. The current implementation allows us to perform +research at scale on the value and limitations of Deep Learning using +observational healthcare data. + +In the package we have used +\href{https://cran.r-project.org/web/packages/torch/index.html}{torch} +and +\href{https://cran.r-project.org/web/packages/tabnet/index.html}{tabnet} +but we invite the community to add other backends. + +Many network architectures have recently been proposed and we have +implemented a number of them, however, this list will grow in the near +future. It is important to understand that some of these architectures +require a 2D data matrix, +i.e.~\textbar patient\textbar x\textbar feature\textbar, and others use +a 3D data matrix +\textbar patient\textbar x\textbar feature\textbar x\textbar time\textbar. +The \href{www.github.com/ohdsi/FeatureExtraction}{FeatureExtraction +Package} has been extended to enable the extraction of both data formats +as will be described with examples below. + +Note that training Deep Learning models is computationally intensive, +our implementation therefore supports both GPU and CPU. It will +automatically check whether there is GPU or not in your computer. A GPU +is highly recommended for Deep Learning! + +\hypertarget{non-temporal-architectures}{% +\section{Non-Temporal Architectures}\label{non-temporal-architectures}} + +We implemented the following non-temporal (2D data matrix) +architectures: + +\begin{verbatim} +1) ... +\end{verbatim} + +For the above two methods, we implemented support for a stacked +autoencoder and a variational autoencoder to reduce the feature +dimension as a first step. These autoencoders learn efficient data +encodings in an unsupervised manner by stacking multiple layers in a +neural network. Compared to the standard implementations of LR and MLP +these implementations can use the GPU power to speed up the gradient +descent approach in the back propagation to optimize the weights of the +classifier. 
+ +\#\#Example + +\hypertarget{acknowledgments}{% +\section{Acknowledgments}\label{acknowledgments}} + +Considerable work has been dedicated to provide the +\texttt{DeepPatientLevelPrediction} package. + +\begin{Shaded} +\begin{Highlighting}[] +\FunctionTok{citation}\NormalTok{(}\StringTok{"PatientLevelPrediction"}\NormalTok{)} +\end{Highlighting} +\end{Shaded} + +\begin{verbatim} +## +## To cite PatientLevelPrediction in publications use: +## +## Reps JM, Schuemie MJ, Suchard MA, Ryan PB, Rijnbeek P (2018). +## "Design and implementation of a standardized framework to generate +## and evaluate patient-level prediction models using observational +## healthcare data." _Journal of the American Medical Informatics +## Association_, *25*(8), 969-975. . +## +## A BibTeX entry for LaTeX users is +## +## @Article{, +## author = {J. M. Reps and M. J. Schuemie and M. A. Suchard and P. B. Ryan and P. Rijnbeek}, +## title = {Design and implementation of a standardized framework to generate and evaluate patient-level prediction models using observational healthcare data}, +## journal = {Journal of the American Medical Informatics Association}, +## volume = {25}, +## number = {8}, +## pages = {969-975}, +## year = {2018}, +## url = {https://doi.org/10.1093/jamia/ocy032}, +## } +\end{verbatim} + +\textbf{Please reference this paper if you use the PLP Package in your +work:} + +\href{http://dx.doi.org/10.1093/jamia/ocy032}{Reps JM, Schuemie MJ, +Suchard MA, Ryan PB, Rijnbeek PR. Design and implementation of a +standardized framework to generate and evaluate patient-level prediction +models using observational healthcare data. J Am Med Inform Assoc. +2018;25(8):969-975.} + +\end{document} diff --git a/vignettes/BuildingDeepModels.Rmd b/vignettes/BuildingDeepModels.Rmd new file mode 100644 index 0000000..49edcc1 --- /dev/null +++ b/vignettes/BuildingDeepModels.Rmd @@ -0,0 +1,73 @@ +--- +title: "Building Deep Learning Models" +author: "Jenna Reps, Egill Fridgeirsson, Chungsoo Kim, Henrik John, Seng Chan You, Xiaoyong Pan" +date: '`r Sys.Date()`' +header-includes: + - \usepackage{fancyhdr} + - \pagestyle{fancy} + - \fancyhead{} + - \fancyfoot[CO,CE]{PatientLevelPrediction Package Version `r utils::packageVersion("PatientLevelPrediction")`} + - \fancyfoot[CO,CE]{DeepPatientLevelPrediction Package Version `r utils::packageVersion("DeepPatientLevelPrediction")`} + - \fancyfoot[LE,RO]{\thepage} + - \renewcommand{\headrulewidth}{0.4pt} + - \renewcommand{\footrulewidth}{0.4pt} +output: + pdf_document: + includes: + in_header: preamble.tex + number_sections: yes + toc: yes + word_document: + toc: yes + html_document: + number_sections: yes + toc: yes +--- + + +```{r setup, include=FALSE} +knitr::opts_chunk$set(echo = TRUE) +``` + +# Introduction + +Patient level prediction aims to use historic data to learn a function between an input (a patient's features such as age/gender/comorbidities at index) and an output (whether the patient experienced an outcome during some time-at-risk). Deep learning is example of the the current state-of-the-art classifiers that can be implemented to learn the function between inputs and outputs. + +Deep Learning models are widely used to automatically learn high-level feature representations from the data, and have achieved remarkable results in image processing, speech recognition and computational biology. 
Recently, interesting results have been shown using large observational healthcare data (e.g., electronic healthcare data or claims data), but more extensive research is needed to assess the power of Deep Learning in this domain. + +This vignette describes how you can use the Observational Health Data Sciences and Informatics (OHDSI) [`PatientLevelPrediction`](http://github.com/OHDSI/PatientLevelPrediction) package and [`DeepPatientLevelPrediction`](http://github.com/OHDSI/DeepPatientLevelPrediction) package to build Deep Learning models. This vignette assumes you have read and are comfortable with building patient level prediction models as described in the [`BuildingPredictiveModels` vignette](https://github.com/OHDSI/PatientLevelPrediction/blob/main/inst/doc/BuildingPredictiveModels.pdf). Furthermore, this vignette assumes you are familiar with Deep Learning methods. + +# Background + +Deep Learning models are build by stacking an often large number of neural network layers that perform feature engineering steps, e.g embedding, and are collapsed in a final softmax layer (basically a logistic regression layer). These algorithms need a lot of data to converge to a good representation, but currently the sizes of the large observational healthcare databases are growing fast which would make Deep Learning an interesting approach to test within OHDSI's [Patient-Level Prediction Framework](https://academic.oup.com/jamia/article/25/8/969/4989437). The current implementation allows us to perform research at scale on the value and limitations of Deep Learning using observational healthcare data. + +In the package we have used [torch](https://cran.r-project.org/web/packages/torch/index.html) and [tabnet](https://cran.r-project.org/web/packages/tabnet/index.html) but we invite the community to add other backends. + +Many network architectures have recently been proposed and we have implemented a number of them, however, this list will grow in the near future. It is important to understand that some of these architectures require a 2D data matrix, i.e. |patient|x|feature|, and others use a 3D data matrix |patient|x|feature|x|time|. The [FeatureExtraction Package](www.github.com\ohdsi\FeatureExtraction) has been extended to enable the extraction of both data formats as will be described with examples below. + +Note that training Deep Learning models is computationally intensive, our implementation therefore supports both GPU and CPU. It will automatically check whether there is GPU or not in your computer. A GPU is highly recommended for Deep Learning! + +# Non-Temporal Architectures +We implemented the following non-temporal (2D data matrix) architectures: + + 1) ... + +For the above two methods, we implemented support for a stacked autoencoder and a variational autoencoder to reduce the feature dimension as a first step. These autoencoders learn efficient data encodings in an unsupervised manner by stacking multiple layers in a neural network. Compared to the standard implementations of LR and MLP these implementations can use the GPU power to speed up the gradient descent approach in the back propagation to optimize the weights of the classifier. + +##Example + + +# Acknowledgments + +Considerable work has been dedicated to provide the `DeepPatientLevelPrediction` package. + +```{r tidy=TRUE,eval=TRUE} +citation("PatientLevelPrediction") +``` + +**Please reference this paper if you use the PLP Package in your work:** + +[Reps JM, Schuemie MJ, Suchard MA, Ryan PB, Rijnbeek PR. 
Design and implementation of a standardized framework to generate and evaluate patient-level prediction models using observational healthcare data. J Am Med Inform Assoc. 2018;25(8):969-975.](http://dx.doi.org/10.1093/jamia/ocy032) \ No newline at end of file From 8f0cdf435089e75cb2b256ab3244ad662366500b Mon Sep 17 00:00:00 2001 From: jreps Date: Sun, 3 Apr 2022 21:05:28 -0400 Subject: [PATCH 050/140] adding website --- docs/404.html | 157 ++++++++ docs/articles/BuildingDeepModels.html | 178 +++++++++ .../header-attrs-2.11/header-attrs.js | 12 + docs/articles/index.html | 156 ++++++++ docs/authors.html | 168 ++++++++ docs/bootstrap-toc.css | 60 +++ docs/bootstrap-toc.js | 159 ++++++++ docs/docsearch.css | 148 +++++++ docs/docsearch.js | 85 ++++ docs/index.html | 203 ++++++++++ docs/link.svg | 12 + docs/pkgdown.css | 367 ++++++++++++++++++ docs/pkgdown.js | 108 ++++++ docs/pkgdown.yml | 7 + docs/reference/Rplot001.png | Bin 0 -> 1011 bytes docs/reference/fitResNet_plp5.html | 191 +++++++++ docs/reference/index.html | 281 ++++++++++++++ docs/reference/predictAndromeda.html | 205 ++++++++++ docs/reference/predictDeepEstimator.html | 179 +++++++++ docs/reference/predictPlp.html | 190 +++++++++ docs/reference/predictProbabilities.html | 187 +++++++++ docs/reference/setCIReNN.html | 298 ++++++++++++++ docs/reference/setCNNTorch.html | 201 ++++++++++ docs/reference/setCovNN.html | 223 +++++++++++ docs/reference/setCovNN2.html | 223 +++++++++++ docs/reference/setDeepNN.html | 213 ++++++++++ docs/reference/setDeepNNTorch.html | 214 ++++++++++ docs/reference/setRNNTorch.html | 200 ++++++++++ docs/reference/setResNet.html | 251 ++++++++++++ docs/reference/setResNet_plp5.html | 251 ++++++++++++ docs/reference/toSparseMDeep.html | 214 ++++++++++ docs/reference/toSparseRTorch.html | 202 ++++++++++ docs/reference/transferLearning.html | 231 +++++++++++ inst/doc/BuildingDeepModels.pdf | Bin 0 -> 182193 bytes inst/doc/BuildingDeepModels.tex | 260 ------------- 35 files changed, 5774 insertions(+), 260 deletions(-) create mode 100644 docs/404.html create mode 100644 docs/articles/BuildingDeepModels.html create mode 100644 docs/articles/BuildingDeepModels_files/header-attrs-2.11/header-attrs.js create mode 100644 docs/articles/index.html create mode 100644 docs/authors.html create mode 100644 docs/bootstrap-toc.css create mode 100644 docs/bootstrap-toc.js create mode 100644 docs/docsearch.css create mode 100644 docs/docsearch.js create mode 100644 docs/index.html create mode 100644 docs/link.svg create mode 100644 docs/pkgdown.css create mode 100644 docs/pkgdown.js create mode 100644 docs/pkgdown.yml create mode 100644 docs/reference/Rplot001.png create mode 100644 docs/reference/fitResNet_plp5.html create mode 100644 docs/reference/index.html create mode 100644 docs/reference/predictAndromeda.html create mode 100644 docs/reference/predictDeepEstimator.html create mode 100644 docs/reference/predictPlp.html create mode 100644 docs/reference/predictProbabilities.html create mode 100644 docs/reference/setCIReNN.html create mode 100644 docs/reference/setCNNTorch.html create mode 100644 docs/reference/setCovNN.html create mode 100644 docs/reference/setCovNN2.html create mode 100644 docs/reference/setDeepNN.html create mode 100644 docs/reference/setDeepNNTorch.html create mode 100644 docs/reference/setRNNTorch.html create mode 100644 docs/reference/setResNet.html create mode 100644 docs/reference/setResNet_plp5.html create mode 100644 docs/reference/toSparseMDeep.html create mode 100644 
docs/reference/toSparseRTorch.html create mode 100644 docs/reference/transferLearning.html create mode 100644 inst/doc/BuildingDeepModels.pdf delete mode 100644 inst/doc/BuildingDeepModels.tex diff --git a/docs/404.html b/docs/404.html new file mode 100644 index 0000000..be8d9d9 --- /dev/null +++ b/docs/404.html @@ -0,0 +1,157 @@ + + + + + + + + +Page not found (404) • DeepPatientLevelPrediction + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + +
+ +
+
+ + +Content not found. Please use links in the navbar. + +
+ + + +
+ + + +
+ + +
+

Site built with pkgdown 1.6.1.

+
+ +
+
+ + + + + + + + diff --git a/docs/articles/BuildingDeepModels.html b/docs/articles/BuildingDeepModels.html new file mode 100644 index 0000000..b33ab41 --- /dev/null +++ b/docs/articles/BuildingDeepModels.html @@ -0,0 +1,178 @@ + + + + + + + +Building Deep Learning Models • DeepPatientLevelPrediction + + + + + + + + + + +
+
+ + + + +
+
+ + + + + +
+

+Introduction

+

Patient level prediction aims to use historic data to learn a function between an input (a patient’s features such as age/gender/comorbidities at index) and an output (whether the patient experienced an outcome during some time-at-risk). Deep learning is an example of the current state-of-the-art classifiers that can be implemented to learn the function between inputs and outputs.

+

Deep Learning models are widely used to automatically learn high-level feature representations from the data, and have achieved remarkable results in image processing, speech recognition and computational biology. Recently, interesting results have been shown using large observational healthcare data (e.g., electronic healthcare data or claims data), but more extensive research is needed to assess the power of Deep Learning in this domain.

+

This vignette describes how you can use the Observational Health Data Sciences and Informatics (OHDSI) PatientLevelPrediction package and DeepPatientLevelPrediction package to build Deep Learning models. This vignette assumes you have read and are comfortable with building patient level prediction models as described in the BuildingPredictiveModels vignette. Furthermore, this vignette assumes you are familiar with Deep Learning methods.

+
+
+

+Background

+

Deep Learning models are built by stacking an often large number of neural network layers that perform feature engineering steps, e.g. embedding, and are collapsed in a final softmax layer (basically a logistic regression layer). These algorithms need a lot of data to converge to a good representation, but currently the sizes of the large observational healthcare databases are growing fast, which would make Deep Learning an interesting approach to test within OHDSI’s Patient-Level Prediction Framework. The current implementation allows us to perform research at scale on the value and limitations of Deep Learning using observational healthcare data.

+

In the package we have used torch and tabnet but we invite the community to add other backends.
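To give a flavour of the torch backend, the sketch below builds a small fully connected network directly with the torch R package and runs a forward pass on random data. This is purely illustrative and is not the internal code of DeepPatientLevelPrediction; the layer sizes and data are made up.

```r
library(torch)

# a tiny two-layer network; sizes are illustrative only
net <- nn_module(
  initialize = function(n_features, n_hidden) {
    self$fc1 <- nn_linear(n_features, n_hidden)
    self$fc2 <- nn_linear(n_hidden, 1)
  },
  forward = function(x) {
    h <- nnf_relu(self$fc1(x))
    torch_sigmoid(self$fc2(h))
  }
)

model <- net(n_features = 10, n_hidden = 32)
x <- torch_randn(5, 10)   # 5 fake patients with 10 covariates each
model(x)                  # predicted probabilities, a 5 x 1 tensor
```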

+

Many network architectures have recently been proposed and we have implemented a number of them; however, this list will grow in the near future. It is important to understand that some of these architectures require a 2D data matrix, i.e. |patient|x|feature|, and others use a 3D data matrix |patient|x|feature|x|time|. The FeatureExtraction Package has been extended to enable the extraction of both data formats as will be described with examples below.
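As a sketch of the difference, the settings below show how non-temporal and temporal covariates could be requested with FeatureExtraction. The argument names are taken from the FeatureExtraction documentation and may differ between versions, so treat this as illustrative rather than definitive.

```r
library(FeatureExtraction)

# non-temporal (2D) covariates: one value per patient and covariate
covariateSettings2D <- createCovariateSettings(
  useDemographicsGender = TRUE,
  useDemographicsAge = TRUE,
  useConditionOccurrenceLongTerm = TRUE
)

# temporal (3D) covariates: one value per patient, covariate and time window
covariateSettings3D <- createTemporalCovariateSettings(
  useConditionOccurrence = TRUE,
  useDrugExposure = TRUE,
  temporalStartDays = -365:-1,
  temporalEndDays = -365:-1
)
```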

+

Note that training Deep Learning models is computationally intensive; our implementation therefore supports both GPU and CPU. It will automatically check whether a GPU is available on your computer. A GPU is highly recommended for Deep Learning!
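A rough equivalent of this check, assuming the torch backend, is sketched below; the package performs something along these lines internally, but the exact code may differ.

```r
# pick a device depending on GPU availability (illustrative sketch)
device <- if (torch::cuda_is_available()) {
  torch::torch_device("cuda")
} else {
  torch::torch_device("cpu")
}
device
```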

+
+
+

+Non-Temporal Architectures

+

We implemented the following non-temporal (2D data matrix) architectures:

+
1) ...
+

For the above methods, we implemented support for a stacked autoencoder and a variational autoencoder to reduce the feature dimension as a first step. These autoencoders learn efficient data encodings in an unsupervised manner by stacking multiple layers in a neural network. Compared to the standard implementations of LR and MLP, these implementations can use GPU power to speed up the gradient descent in backpropagation to optimize the weights of the classifier.
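The sketch below shows the general shape of such an autoencoder using the torch R package; it is a simplified illustration, not the implementation used in the package, and the layer sizes are arbitrary.

```r
library(torch)

# stacked autoencoder: compress n_features covariates into a small encoding
autoencoder <- nn_module(
  initialize = function(n_features, n_encoding) {
    self$encoder <- nn_sequential(
      nn_linear(n_features, 128),
      nn_relu(),
      nn_linear(128, n_encoding)
    )
    self$decoder <- nn_sequential(
      nn_linear(n_encoding, 128),
      nn_relu(),
      nn_linear(128, n_features)
    )
  },
  forward = function(x) {
    self$decoder(self$encoder(x))  # trained to reconstruct its input
  }
)
```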

+

Example
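This section is still to be completed. As a placeholder, the sketch below shows roughly how one of the implemented models could be selected; it assumes the setResNet() function listed in the package reference index and that its default hyperparameters are acceptable, so check ?setResNet before copying it.

```r
library(DeepPatientLevelPrediction)

# hypothetical sketch: create ResNet model settings with package defaults
modelSettings <- setResNet()

# the settings object is then passed to PatientLevelPrediction::runPlp()
# as the modelSettings argument, together with plpData and population settings
```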

+
+
+

+Acknowledgments

+

Considerable work has been dedicated to providing the DeepPatientLevelPrediction package.

+
+citation("PatientLevelPrediction")
+
## 
+## To cite PatientLevelPrediction in publications use:
+## 
+## Reps JM, Schuemie MJ, Suchard MA, Ryan PB, Rijnbeek P (2018). "Design
+## and implementation of a standardized framework to generate and evaluate
+## patient-level prediction models using observational healthcare data."
+## _Journal of the American Medical Informatics Association_, *25*(8),
+## 969-975. <URL: https://doi.org/10.1093/jamia/ocy032>.
+## 
+## A BibTeX entry for LaTeX users is
+## 
+##   @Article{,
+##     author = {J. M. Reps and M. J. Schuemie and M. A. Suchard and P. B. Ryan and P. Rijnbeek},
+##     title = {Design and implementation of a standardized framework to generate and evaluate patient-level prediction models using observational healthcare data},
+##     journal = {Journal of the American Medical Informatics Association},
+##     volume = {25},
+##     number = {8},
+##     pages = {969-975},
+##     year = {2018},
+##     url = {https://doi.org/10.1093/jamia/ocy032},
+##   }
+

Please reference this paper if you use the PLP Package in your work:

+

Reps JM, Schuemie MJ, Suchard MA, Ryan PB, Rijnbeek PR. Design and implementation of a standardized framework to generate and evaluate patient-level prediction models using observational healthcare data. J Am Med Inform Assoc. 2018;25(8):969-975.

+
+
+ + + +
+ + + +
+ +
+

Site built with pkgdown 1.6.1.

+
+ +
+
+ + + + + + diff --git a/docs/articles/BuildingDeepModels_files/header-attrs-2.11/header-attrs.js b/docs/articles/BuildingDeepModels_files/header-attrs-2.11/header-attrs.js new file mode 100644 index 0000000..dd57d92 --- /dev/null +++ b/docs/articles/BuildingDeepModels_files/header-attrs-2.11/header-attrs.js @@ -0,0 +1,12 @@ +// Pandoc 2.9 adds attributes on both header and div. We remove the former (to +// be compatible with the behavior of Pandoc < 2.8). +document.addEventListener('DOMContentLoaded', function(e) { + var hs = document.querySelectorAll("div.section[class*='level'] > :first-child"); + var i, h, a; + for (i = 0; i < hs.length; i++) { + h = hs[i]; + if (!/^h[1-6]$/i.test(h.tagName)) continue; // it should be a header h1-h6 + a = h.attributes; + while (a.length > 0) h.removeAttribute(a[0].name); + } +}); diff --git a/docs/articles/index.html b/docs/articles/index.html new file mode 100644 index 0000000..82787ec --- /dev/null +++ b/docs/articles/index.html @@ -0,0 +1,156 @@ + + + + + + + + +Articles • DeepPatientLevelPrediction + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + +
+ +
+
+ + +
+

All vignettes

+

+ +
+
Building Deep Learning Models
+
+
+
+
+
+ + +
+ + +
+

Site built with pkgdown 1.6.1.

+
+ +
+
+ + + + + + + + diff --git a/docs/authors.html b/docs/authors.html new file mode 100644 index 0000000..43aa926 --- /dev/null +++ b/docs/authors.html @@ -0,0 +1,168 @@ + + + + + + + + +Authors • DeepPatientLevelPrediction + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+ + + + +
+ +
+
+ + +
    +
  • +

    Jenna Reps. Author, maintainer. +

    +
  • +
  • +

    Seng Chan You. Author. +

    +
  • +
  • +

    Egill Fridgeirsson. Author. +

    +
  • +
  • +

    Chungsoo Kim. Author. +

    +
  • +
+ +
+ +
+ + + +
+ + +
+

Site built with pkgdown 1.6.1.

+
+ +
+
+ + + + + + + + diff --git a/docs/bootstrap-toc.css b/docs/bootstrap-toc.css new file mode 100644 index 0000000..5a85941 --- /dev/null +++ b/docs/bootstrap-toc.css @@ -0,0 +1,60 @@ +/*! + * Bootstrap Table of Contents v0.4.1 (http://afeld.github.io/bootstrap-toc/) + * Copyright 2015 Aidan Feldman + * Licensed under MIT (https://github.com/afeld/bootstrap-toc/blob/gh-pages/LICENSE.md) */ + +/* modified from https://github.com/twbs/bootstrap/blob/94b4076dd2efba9af71f0b18d4ee4b163aa9e0dd/docs/assets/css/src/docs.css#L548-L601 */ + +/* All levels of nav */ +nav[data-toggle='toc'] .nav > li > a { + display: block; + padding: 4px 20px; + font-size: 13px; + font-weight: 500; + color: #767676; +} +nav[data-toggle='toc'] .nav > li > a:hover, +nav[data-toggle='toc'] .nav > li > a:focus { + padding-left: 19px; + color: #563d7c; + text-decoration: none; + background-color: transparent; + border-left: 1px solid #563d7c; +} +nav[data-toggle='toc'] .nav > .active > a, +nav[data-toggle='toc'] .nav > .active:hover > a, +nav[data-toggle='toc'] .nav > .active:focus > a { + padding-left: 18px; + font-weight: bold; + color: #563d7c; + background-color: transparent; + border-left: 2px solid #563d7c; +} + +/* Nav: second level (shown on .active) */ +nav[data-toggle='toc'] .nav .nav { + display: none; /* Hide by default, but at >768px, show it */ + padding-bottom: 10px; +} +nav[data-toggle='toc'] .nav .nav > li > a { + padding-top: 1px; + padding-bottom: 1px; + padding-left: 30px; + font-size: 12px; + font-weight: normal; +} +nav[data-toggle='toc'] .nav .nav > li > a:hover, +nav[data-toggle='toc'] .nav .nav > li > a:focus { + padding-left: 29px; +} +nav[data-toggle='toc'] .nav .nav > .active > a, +nav[data-toggle='toc'] .nav .nav > .active:hover > a, +nav[data-toggle='toc'] .nav .nav > .active:focus > a { + padding-left: 28px; + font-weight: 500; +} + +/* from https://github.com/twbs/bootstrap/blob/e38f066d8c203c3e032da0ff23cd2d6098ee2dd6/docs/assets/css/src/docs.css#L631-L634 */ +nav[data-toggle='toc'] .nav > .active > ul { + display: block; +} diff --git a/docs/bootstrap-toc.js b/docs/bootstrap-toc.js new file mode 100644 index 0000000..1cdd573 --- /dev/null +++ b/docs/bootstrap-toc.js @@ -0,0 +1,159 @@ +/*! 
+ * Bootstrap Table of Contents v0.4.1 (http://afeld.github.io/bootstrap-toc/) + * Copyright 2015 Aidan Feldman + * Licensed under MIT (https://github.com/afeld/bootstrap-toc/blob/gh-pages/LICENSE.md) */ +(function() { + 'use strict'; + + window.Toc = { + helpers: { + // return all matching elements in the set, or their descendants + findOrFilter: function($el, selector) { + // http://danielnouri.org/notes/2011/03/14/a-jquery-find-that-also-finds-the-root-element/ + // http://stackoverflow.com/a/12731439/358804 + var $descendants = $el.find(selector); + return $el.filter(selector).add($descendants).filter(':not([data-toc-skip])'); + }, + + generateUniqueIdBase: function(el) { + var text = $(el).text(); + var anchor = text.trim().toLowerCase().replace(/[^A-Za-z0-9]+/g, '-'); + return anchor || el.tagName.toLowerCase(); + }, + + generateUniqueId: function(el) { + var anchorBase = this.generateUniqueIdBase(el); + for (var i = 0; ; i++) { + var anchor = anchorBase; + if (i > 0) { + // add suffix + anchor += '-' + i; + } + // check if ID already exists + if (!document.getElementById(anchor)) { + return anchor; + } + } + }, + + generateAnchor: function(el) { + if (el.id) { + return el.id; + } else { + var anchor = this.generateUniqueId(el); + el.id = anchor; + return anchor; + } + }, + + createNavList: function() { + return $(''); + }, + + createChildNavList: function($parent) { + var $childList = this.createNavList(); + $parent.append($childList); + return $childList; + }, + + generateNavEl: function(anchor, text) { + var $a = $(''); + $a.attr('href', '#' + anchor); + $a.text(text); + var $li = $('
  • '); + $li.append($a); + return $li; + }, + + generateNavItem: function(headingEl) { + var anchor = this.generateAnchor(headingEl); + var $heading = $(headingEl); + var text = $heading.data('toc-text') || $heading.text(); + return this.generateNavEl(anchor, text); + }, + + // Find the first heading level (`

    `, then `

    `, etc.) that has more than one element. Defaults to 1 (for `

    `). + getTopLevel: function($scope) { + for (var i = 1; i <= 6; i++) { + var $headings = this.findOrFilter($scope, 'h' + i); + if ($headings.length > 1) { + return i; + } + } + + return 1; + }, + + // returns the elements for the top level, and the next below it + getHeadings: function($scope, topLevel) { + var topSelector = 'h' + topLevel; + + var secondaryLevel = topLevel + 1; + var secondarySelector = 'h' + secondaryLevel; + + return this.findOrFilter($scope, topSelector + ',' + secondarySelector); + }, + + getNavLevel: function(el) { + return parseInt(el.tagName.charAt(1), 10); + }, + + populateNav: function($topContext, topLevel, $headings) { + var $context = $topContext; + var $prevNav; + + var helpers = this; + $headings.each(function(i, el) { + var $newNav = helpers.generateNavItem(el); + var navLevel = helpers.getNavLevel(el); + + // determine the proper $context + if (navLevel === topLevel) { + // use top level + $context = $topContext; + } else if ($prevNav && $context === $topContext) { + // create a new level of the tree and switch to it + $context = helpers.createChildNavList($prevNav); + } // else use the current $context + + $context.append($newNav); + + $prevNav = $newNav; + }); + }, + + parseOps: function(arg) { + var opts; + if (arg.jquery) { + opts = { + $nav: arg + }; + } else { + opts = arg; + } + opts.$scope = opts.$scope || $(document.body); + return opts; + } + }, + + // accepts a jQuery object, or an options object + init: function(opts) { + opts = this.helpers.parseOps(opts); + + // ensure that the data attribute is in place for styling + opts.$nav.attr('data-toggle', 'toc'); + + var $topContext = this.helpers.createChildNavList(opts.$nav); + var topLevel = this.helpers.getTopLevel(opts.$scope); + var $headings = this.helpers.getHeadings(opts.$scope, topLevel); + this.helpers.populateNav($topContext, topLevel, $headings); + } + }; + + $(function() { + $('nav[data-toggle="toc"]').each(function(i, el) { + var $nav = $(el); + Toc.init($nav); + }); + }); +})(); diff --git a/docs/docsearch.css b/docs/docsearch.css new file mode 100644 index 0000000..e5f1fe1 --- /dev/null +++ b/docs/docsearch.css @@ -0,0 +1,148 @@ +/* Docsearch -------------------------------------------------------------- */ +/* + Source: https://github.com/algolia/docsearch/ + License: MIT +*/ + +.algolia-autocomplete { + display: block; + -webkit-box-flex: 1; + -ms-flex: 1; + flex: 1 +} + +.algolia-autocomplete .ds-dropdown-menu { + width: 100%; + min-width: none; + max-width: none; + padding: .75rem 0; + background-color: #fff; + background-clip: padding-box; + border: 1px solid rgba(0, 0, 0, .1); + box-shadow: 0 .5rem 1rem rgba(0, 0, 0, .175); +} + +@media (min-width:768px) { + .algolia-autocomplete .ds-dropdown-menu { + width: 175% + } +} + +.algolia-autocomplete .ds-dropdown-menu::before { + display: none +} + +.algolia-autocomplete .ds-dropdown-menu [class^=ds-dataset-] { + padding: 0; + background-color: rgb(255,255,255); + border: 0; + max-height: 80vh; +} + +.algolia-autocomplete .ds-dropdown-menu .ds-suggestions { + margin-top: 0 +} + +.algolia-autocomplete .algolia-docsearch-suggestion { + padding: 0; + overflow: visible +} + +.algolia-autocomplete .algolia-docsearch-suggestion--category-header { + padding: .125rem 1rem; + margin-top: 0; + font-size: 1.3em; + font-weight: 500; + color: #00008B; + border-bottom: 0 +} + +.algolia-autocomplete .algolia-docsearch-suggestion--wrapper { + float: none; + padding-top: 0 +} + +.algolia-autocomplete 
.algolia-docsearch-suggestion--subcategory-column { + float: none; + width: auto; + padding: 0; + text-align: left +} + +.algolia-autocomplete .algolia-docsearch-suggestion--content { + float: none; + width: auto; + padding: 0 +} + +.algolia-autocomplete .algolia-docsearch-suggestion--content::before { + display: none +} + +.algolia-autocomplete .ds-suggestion:not(:first-child) .algolia-docsearch-suggestion--category-header { + padding-top: .75rem; + margin-top: .75rem; + border-top: 1px solid rgba(0, 0, 0, .1) +} + +.algolia-autocomplete .ds-suggestion .algolia-docsearch-suggestion--subcategory-column { + display: block; + padding: .1rem 1rem; + margin-bottom: 0.1; + font-size: 1.0em; + font-weight: 400 + /* display: none */ +} + +.algolia-autocomplete .algolia-docsearch-suggestion--title { + display: block; + padding: .25rem 1rem; + margin-bottom: 0; + font-size: 0.9em; + font-weight: 400 +} + +.algolia-autocomplete .algolia-docsearch-suggestion--text { + padding: 0 1rem .5rem; + margin-top: -.25rem; + font-size: 0.8em; + font-weight: 400; + line-height: 1.25 +} + +.algolia-autocomplete .algolia-docsearch-footer { + width: 110px; + height: 20px; + z-index: 3; + margin-top: 10.66667px; + float: right; + font-size: 0; + line-height: 0; +} + +.algolia-autocomplete .algolia-docsearch-footer--logo { + background-image: url("data:image/svg+xml;utf8,"); + background-repeat: no-repeat; + background-position: 50%; + background-size: 100%; + overflow: hidden; + text-indent: -9000px; + width: 100%; + height: 100%; + display: block; + transform: translate(-8px); +} + +.algolia-autocomplete .algolia-docsearch-suggestion--highlight { + color: #FF8C00; + background: rgba(232, 189, 54, 0.1) +} + + +.algolia-autocomplete .algolia-docsearch-suggestion--text .algolia-docsearch-suggestion--highlight { + box-shadow: inset 0 -2px 0 0 rgba(105, 105, 105, .5) +} + +.algolia-autocomplete .ds-suggestion.ds-cursor .algolia-docsearch-suggestion--content { + background-color: rgba(192, 192, 192, .15) +} diff --git a/docs/docsearch.js b/docs/docsearch.js new file mode 100644 index 0000000..b35504c --- /dev/null +++ b/docs/docsearch.js @@ -0,0 +1,85 @@ +$(function() { + + // register a handler to move the focus to the search bar + // upon pressing shift + "/" (i.e. "?") + $(document).on('keydown', function(e) { + if (e.shiftKey && e.keyCode == 191) { + e.preventDefault(); + $("#search-input").focus(); + } + }); + + $(document).ready(function() { + // do keyword highlighting + /* modified from https://jsfiddle.net/julmot/bL6bb5oo/ */ + var mark = function() { + + var referrer = document.URL ; + var paramKey = "q" ; + + if (referrer.indexOf("?") !== -1) { + var qs = referrer.substr(referrer.indexOf('?') + 1); + var qs_noanchor = qs.split('#')[0]; + var qsa = qs_noanchor.split('&'); + var keyword = ""; + + for (var i = 0; i < qsa.length; i++) { + var currentParam = qsa[i].split('='); + + if (currentParam.length !== 2) { + continue; + } + + if (currentParam[0] == paramKey) { + keyword = decodeURIComponent(currentParam[1].replace(/\+/g, "%20")); + } + } + + if (keyword !== "") { + $(".contents").unmark({ + done: function() { + $(".contents").mark(keyword); + } + }); + } + } + }; + + mark(); + }); +}); + +/* Search term highlighting ------------------------------*/ + +function matchedWords(hit) { + var words = []; + + var hierarchy = hit._highlightResult.hierarchy; + // loop to fetch from lvl0, lvl1, etc. 
+ for (var idx in hierarchy) { + words = words.concat(hierarchy[idx].matchedWords); + } + + var content = hit._highlightResult.content; + if (content) { + words = words.concat(content.matchedWords); + } + + // return unique words + var words_uniq = [...new Set(words)]; + return words_uniq; +} + +function updateHitURL(hit) { + + var words = matchedWords(hit); + var url = ""; + + if (hit.anchor) { + url = hit.url_without_anchor + '?q=' + escape(words.join(" ")) + '#' + hit.anchor; + } else { + url = hit.url + '?q=' + escape(words.join(" ")); + } + + return url; +} diff --git a/docs/index.html b/docs/index.html new file mode 100644 index 0000000..4be64ab --- /dev/null +++ b/docs/index.html @@ -0,0 +1,203 @@ + + + + + + + +Deep learning function for patient level prediction using data in the OMOP Common Data Model • DeepPatientLevelPrediction + + + + + + + + + + +
    +
    + + + + +
    +
    + +
    + + +
    +
    +

    +Introduction

    +

DeepPatientLevelPrediction is an R package for building and validating deep learning patient-level predictive models using data in the OMOP Common Data Model format and the OHDSI PatientLevelPrediction framework.

    +

    Reps JM, Schuemie MJ, Suchard MA, Ryan PB, Rijnbeek PR. Design and implementation of a standardized framework to generate and evaluate patient-level prediction models using observational healthcare data. J Am Med Inform Assoc. 2018;25(8):969-975.

    +
    +
    +

    +Features

    +
      +
    • add
    • +
    +
    +
    +

    +Technology

    +

DeepPatientLevelPrediction is an R package, with some functions implemented in C++ and Python.

    +
    +
    +

    +System Requirements

    +

    Requires R (version 3.3.0 or higher). Installation on Windows requires RTools. Libraries used in DeepPatientLevelPrediction require Java and Python.
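One possible way to install the development version from GitHub, assuming the system requirements above are in place and the remotes package is available, is:

```r
install.packages("remotes")
remotes::install_github("OHDSI/DeepPatientLevelPrediction")
```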

    +

The Python installation is required for some of the machine learning algorithms. We advise installing Python 3.7 using Anaconda (https://www.continuum.io/downloads).
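If you manage the Python dependency from R via reticulate, a set-up along the following lines may work; the environment name "DeepPLP" is only an example and the exact steps depend on your system.

```r
library(reticulate)
install_miniconda()                              # one-time installation
conda_create("DeepPLP", packages = "python=3.7") # example environment name
use_condaenv("DeepPLP", required = TRUE)
```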

    +
    +
    +

    +Getting Started

    +
      +
    • add
    • +
    +
    +
    +

    +User Documentation

    +

    Documentation can be found on the package website.

    +

    PDF versions of the documentation are also available, as mentioned above.

    +
    +
    +

    +Support

    + +
    +
    +

    +Contributing

    +

    Read here how you can contribute to this package.

    +
    +
    +

    +License

    +

    DeepPatientLevelPrediction is licensed under Apache License 2.0

    +
    +
    +

    +Development

    +

DeepPatientLevelPrediction is being developed in RStudio.

    +
    + +
    + + +
    + + +
    + +
    +

    Site built with pkgdown 1.6.1.

    +
    + +
    +
    + + + + + + diff --git a/docs/link.svg b/docs/link.svg new file mode 100644 index 0000000..88ad827 --- /dev/null +++ b/docs/link.svg @@ -0,0 +1,12 @@ + + + + + + diff --git a/docs/pkgdown.css b/docs/pkgdown.css new file mode 100644 index 0000000..1273238 --- /dev/null +++ b/docs/pkgdown.css @@ -0,0 +1,367 @@ +/* Sticky footer */ + +/** + * Basic idea: https://philipwalton.github.io/solved-by-flexbox/demos/sticky-footer/ + * Details: https://github.com/philipwalton/solved-by-flexbox/blob/master/assets/css/components/site.css + * + * .Site -> body > .container + * .Site-content -> body > .container .row + * .footer -> footer + * + * Key idea seems to be to ensure that .container and __all its parents__ + * have height set to 100% + * + */ + +html, body { + height: 100%; +} + +body { + position: relative; +} + +body > .container { + display: flex; + height: 100%; + flex-direction: column; +} + +body > .container .row { + flex: 1 0 auto; +} + +footer { + margin-top: 45px; + padding: 35px 0 36px; + border-top: 1px solid #e5e5e5; + color: #666; + display: flex; + flex-shrink: 0; +} +footer p { + margin-bottom: 0; +} +footer div { + flex: 1; +} +footer .pkgdown { + text-align: right; +} +footer p { + margin-bottom: 0; +} + +img.icon { + float: right; +} + +img { + max-width: 100%; +} + +/* Fix bug in bootstrap (only seen in firefox) */ +summary { + display: list-item; +} + +/* Typographic tweaking ---------------------------------*/ + +.contents .page-header { + margin-top: calc(-60px + 1em); +} + +dd { + margin-left: 3em; +} + +/* Section anchors ---------------------------------*/ + +a.anchor { + margin-left: -30px; + display:inline-block; + width: 30px; + height: 30px; + visibility: hidden; + + background-image: url(./link.svg); + background-repeat: no-repeat; + background-size: 20px 20px; + background-position: center center; +} + +.hasAnchor:hover a.anchor { + visibility: visible; +} + +@media (max-width: 767px) { + .hasAnchor:hover a.anchor { + visibility: hidden; + } +} + + +/* Fixes for fixed navbar --------------------------*/ + +.contents h1, .contents h2, .contents h3, .contents h4 { + padding-top: 60px; + margin-top: -40px; +} + +/* Navbar submenu --------------------------*/ + +.dropdown-submenu { + position: relative; +} + +.dropdown-submenu>.dropdown-menu { + top: 0; + left: 100%; + margin-top: -6px; + margin-left: -1px; + border-radius: 0 6px 6px 6px; +} + +.dropdown-submenu:hover>.dropdown-menu { + display: block; +} + +.dropdown-submenu>a:after { + display: block; + content: " "; + float: right; + width: 0; + height: 0; + border-color: transparent; + border-style: solid; + border-width: 5px 0 5px 5px; + border-left-color: #cccccc; + margin-top: 5px; + margin-right: -10px; +} + +.dropdown-submenu:hover>a:after { + border-left-color: #ffffff; +} + +.dropdown-submenu.pull-left { + float: none; +} + +.dropdown-submenu.pull-left>.dropdown-menu { + left: -100%; + margin-left: 10px; + border-radius: 6px 0 6px 6px; +} + +/* Sidebar --------------------------*/ + +#pkgdown-sidebar { + margin-top: 30px; + position: -webkit-sticky; + position: sticky; + top: 70px; +} + +#pkgdown-sidebar h2 { + font-size: 1.5em; + margin-top: 1em; +} + +#pkgdown-sidebar h2:first-child { + margin-top: 0; +} + +#pkgdown-sidebar .list-unstyled li { + margin-bottom: 0.5em; +} + +/* bootstrap-toc tweaks ------------------------------------------------------*/ + +/* All levels of nav */ + +nav[data-toggle='toc'] .nav > li > a { + padding: 4px 20px 4px 6px; + font-size: 1.5rem; + font-weight: 400; + color: 
inherit; +} + +nav[data-toggle='toc'] .nav > li > a:hover, +nav[data-toggle='toc'] .nav > li > a:focus { + padding-left: 5px; + color: inherit; + border-left: 1px solid #878787; +} + +nav[data-toggle='toc'] .nav > .active > a, +nav[data-toggle='toc'] .nav > .active:hover > a, +nav[data-toggle='toc'] .nav > .active:focus > a { + padding-left: 5px; + font-size: 1.5rem; + font-weight: 400; + color: inherit; + border-left: 2px solid #878787; +} + +/* Nav: second level (shown on .active) */ + +nav[data-toggle='toc'] .nav .nav { + display: none; /* Hide by default, but at >768px, show it */ + padding-bottom: 10px; +} + +nav[data-toggle='toc'] .nav .nav > li > a { + padding-left: 16px; + font-size: 1.35rem; +} + +nav[data-toggle='toc'] .nav .nav > li > a:hover, +nav[data-toggle='toc'] .nav .nav > li > a:focus { + padding-left: 15px; +} + +nav[data-toggle='toc'] .nav .nav > .active > a, +nav[data-toggle='toc'] .nav .nav > .active:hover > a, +nav[data-toggle='toc'] .nav .nav > .active:focus > a { + padding-left: 15px; + font-weight: 500; + font-size: 1.35rem; +} + +/* orcid ------------------------------------------------------------------- */ + +.orcid { + font-size: 16px; + color: #A6CE39; + /* margins are required by official ORCID trademark and display guidelines */ + margin-left:4px; + margin-right:4px; + vertical-align: middle; +} + +/* Reference index & topics ----------------------------------------------- */ + +.ref-index th {font-weight: normal;} + +.ref-index td {vertical-align: top; min-width: 100px} +.ref-index .icon {width: 40px;} +.ref-index .alias {width: 40%;} +.ref-index-icons .alias {width: calc(40% - 40px);} +.ref-index .title {width: 60%;} + +.ref-arguments th {text-align: right; padding-right: 10px;} +.ref-arguments th, .ref-arguments td {vertical-align: top; min-width: 100px} +.ref-arguments .name {width: 20%;} +.ref-arguments .desc {width: 80%;} + +/* Nice scrolling for wide elements --------------------------------------- */ + +table { + display: block; + overflow: auto; +} + +/* Syntax highlighting ---------------------------------------------------- */ + +pre { + word-wrap: normal; + word-break: normal; + border: 1px solid #eee; +} + +pre, code { + background-color: #f8f8f8; + color: #333; +} + +pre code { + overflow: auto; + word-wrap: normal; + white-space: pre; +} + +pre .img { + margin: 5px 0; +} + +pre .img img { + background-color: #fff; + display: block; + height: auto; +} + +code a, pre a { + color: #375f84; +} + +a.sourceLine:hover { + text-decoration: none; +} + +.fl {color: #1514b5;} +.fu {color: #000000;} /* function */ +.ch,.st {color: #036a07;} /* string */ +.kw {color: #264D66;} /* keyword */ +.co {color: #888888;} /* comment */ + +.message { color: black; font-weight: bolder;} +.error { color: orange; font-weight: bolder;} +.warning { color: #6A0366; font-weight: bolder;} + +/* Clipboard --------------------------*/ + +.hasCopyButton { + position: relative; +} + +.btn-copy-ex { + position: absolute; + right: 0; + top: 0; + visibility: hidden; +} + +.hasCopyButton:hover button.btn-copy-ex { + visibility: visible; +} + +/* headroom.js ------------------------ */ + +.headroom { + will-change: transform; + transition: transform 200ms linear; +} +.headroom--pinned { + transform: translateY(0%); +} +.headroom--unpinned { + transform: translateY(-100%); +} + +/* mark.js ----------------------------*/ + +mark { + background-color: rgba(255, 255, 51, 0.5); + border-bottom: 2px solid rgba(255, 153, 51, 0.3); + padding: 1px; +} + +/* vertical spacing after 
htmlwidgets */ +.html-widget { + margin-bottom: 10px; +} + +/* fontawesome ------------------------ */ + +.fab { + font-family: "Font Awesome 5 Brands" !important; +} + +/* don't display links in code chunks when printing */ +/* source: https://stackoverflow.com/a/10781533 */ +@media print { + code a:link:after, code a:visited:after { + content: ""; + } +} diff --git a/docs/pkgdown.js b/docs/pkgdown.js new file mode 100644 index 0000000..7e7048f --- /dev/null +++ b/docs/pkgdown.js @@ -0,0 +1,108 @@ +/* http://gregfranko.com/blog/jquery-best-practices/ */ +(function($) { + $(function() { + + $('.navbar-fixed-top').headroom(); + + $('body').css('padding-top', $('.navbar').height() + 10); + $(window).resize(function(){ + $('body').css('padding-top', $('.navbar').height() + 10); + }); + + $('[data-toggle="tooltip"]').tooltip(); + + var cur_path = paths(location.pathname); + var links = $("#navbar ul li a"); + var max_length = -1; + var pos = -1; + for (var i = 0; i < links.length; i++) { + if (links[i].getAttribute("href") === "#") + continue; + // Ignore external links + if (links[i].host !== location.host) + continue; + + var nav_path = paths(links[i].pathname); + + var length = prefix_length(nav_path, cur_path); + if (length > max_length) { + max_length = length; + pos = i; + } + } + + // Add class to parent
  • , and enclosing
  • if in dropdown + if (pos >= 0) { + var menu_anchor = $(links[pos]); + menu_anchor.parent().addClass("active"); + menu_anchor.closest("li.dropdown").addClass("active"); + } + }); + + function paths(pathname) { + var pieces = pathname.split("/"); + pieces.shift(); // always starts with / + + var end = pieces[pieces.length - 1]; + if (end === "index.html" || end === "") + pieces.pop(); + return(pieces); + } + + // Returns -1 if not found + function prefix_length(needle, haystack) { + if (needle.length > haystack.length) + return(-1); + + // Special case for length-0 haystack, since for loop won't run + if (haystack.length === 0) { + return(needle.length === 0 ? 0 : -1); + } + + for (var i = 0; i < haystack.length; i++) { + if (needle[i] != haystack[i]) + return(i); + } + + return(haystack.length); + } + + /* Clipboard --------------------------*/ + + function changeTooltipMessage(element, msg) { + var tooltipOriginalTitle=element.getAttribute('data-original-title'); + element.setAttribute('data-original-title', msg); + $(element).tooltip('show'); + element.setAttribute('data-original-title', tooltipOriginalTitle); + } + + if(ClipboardJS.isSupported()) { + $(document).ready(function() { + var copyButton = ""; + + $(".examples, div.sourceCode").addClass("hasCopyButton"); + + // Insert copy buttons: + $(copyButton).prependTo(".hasCopyButton"); + + // Initialize tooltips: + $('.btn-copy-ex').tooltip({container: 'body'}); + + // Initialize clipboard: + var clipboardBtnCopies = new ClipboardJS('[data-clipboard-copy]', { + text: function(trigger) { + return trigger.parentNode.textContent; + } + }); + + clipboardBtnCopies.on('success', function(e) { + changeTooltipMessage(e.trigger, 'Copied!'); + e.clearSelection(); + }); + + clipboardBtnCopies.on('error', function() { + changeTooltipMessage(e.trigger,'Press Ctrl+C or Command+C to copy'); + }); + }); + } +})(window.jQuery || window.$) diff --git a/docs/pkgdown.yml b/docs/pkgdown.yml new file mode 100644 index 0000000..eede402 --- /dev/null +++ b/docs/pkgdown.yml @@ -0,0 +1,7 @@ +pandoc: 2.11.4 +pkgdown: 1.6.1 +pkgdown_sha: ~ +articles: + BuildingDeepModels: BuildingDeepModels.html +last_built: 2022-04-04T01:03Z + diff --git a/docs/reference/Rplot001.png b/docs/reference/Rplot001.png new file mode 100644 index 0000000000000000000000000000000000000000..17a358060aed2a86950757bbd25c6f92c08c458f GIT binary patch literal 1011 zcmeAS@N?(olHy`uVBq!ia0y~yV0-|=9Be?5+AI5}0x7m6Z+90U4Fo@(ch>_c&H|6f zVg?3oArNM~bhqvg0|WD9PZ!6KiaBo&GBN^{G%5UFpXcEKVvd5*5Eu=C0SJK)8A6*F U7`aXvEC5;V>FVdQ&MBb@00SN#Z2$lO literal 0 HcmV?d00001 diff --git a/docs/reference/fitResNet_plp5.html b/docs/reference/fitResNet_plp5.html new file mode 100644 index 0000000..4cf6071 --- /dev/null +++ b/docs/reference/fitResNet_plp5.html @@ -0,0 +1,191 @@ + + + + + + + + +fitResNet_plp5 — fitResNet_plp5 • DeepPatientLevelPrediction + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    + + + + +
    + +
    +
    + + +
    +

Fits a ResNet model to data

    +
    + +
    fitResNet_plp5(trainData, param, search = "grid", analysisId, ...)
    + +

    Arguments

    + + + + + + + + + + + + + + + + + + + + + + + + + + +
    param

    parameters to use for model

    ...
    population

    the study population dataframe

    plpData

    plp data object

    outcomeId

    Id of the outcome

    cohortId

    Id of the cohort
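As a hypothetical usage sketch (not taken from the package docs): trainData is assumed to come from a train/test split of the plpData, and param from a ResNet settings function such as setResNet_plp5(); the exact objects may differ in a real workflow.

if (FALSE) {
  # Hypothetical objects: trainData from a train/test split of plpData,
  # param from a settings function such as setResNet_plp5()
  model <- fitResNet_plp5(trainData = trainData,
                          param = param,
                          search = "grid",
                          analysisId = "Analysis_1")
}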

    + + +
    + +
    + + +
    + + +
    +

    Site built with pkgdown 1.6.1.

    +
    + +
    +
    + + + + + + + + diff --git a/docs/reference/index.html b/docs/reference/index.html new file mode 100644 index 0000000..4306b80 --- /dev/null +++ b/docs/reference/index.html @@ -0,0 +1,281 @@ + + + + + + + + +Function reference • DeepPatientLevelPrediction + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    + + + + +
    + +
    +
    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +

    All functions

    +

    +
    +

    fitResNet_plp5()

    +

    fitResNet_plp5

    +

    predictAndromeda()

    +

    Generated predictions from a regression model

    +

    predictDeepEstimator()

    +

    predictDeepEstimator

    +

    predictPlp()

    +

    predictPlp

    +

    predictProbabilities()

    +

    Create predictive probabilities

    +

    setCIReNN()

    +

    Create setting for CIReNN model

    +

    setCNNTorch()

    +

    Create setting for CNN model with python

    +

    setCovNN()

    +

Create setting for multi-resolution CovNN model (structure based on https://arxiv.org/pdf/1608.00647.pdf CNN1)

    +

    setCovNN2()

    +

    Create setting for CovNN2 model - convolution across input and time - https://arxiv.org/pdf/1608.00647.pdf

    +

    setDeepNN()

    +

    Create setting for DeepNN model

    +

    setDeepNNTorch()

    +

    Create setting for DeepNN model using Torch for R

    +

    setResNet()

    +

    setResNet

    +

    setResNet_plp5()

    +

    setResNet_plp5

    +

    setRNNTorch()

    +

    Create setting for RNN model with python

    +

    toSparseMDeep()

    +

    Convert the plpData in COO format into a sparse R matrix +Converts the standard plpData to a sparse matrix +This function converts the covariate file from ffdf in COO format into a sparse matrix from +the package Matrix

    +

    toSparseRTorch()

    +

    Convert the plpData in COO format into a sparse Torch tensor

    +

    transferLearning()

    +

    [Under development] Transfer learning

    +
    + + +
    + + +
    + + +
    +

    Site built with pkgdown 1.6.1.

    +
    + +
    +
    + + + + + + + + diff --git a/docs/reference/predictAndromeda.html b/docs/reference/predictAndromeda.html new file mode 100644 index 0000000..4e6e742 --- /dev/null +++ b/docs/reference/predictAndromeda.html @@ -0,0 +1,205 @@ + + + + + + + + +Generated predictions from a regression model — predictAndromeda • DeepPatientLevelPrediction + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    + + + + +
    + +
    +
    + + +
    +

    Generated predictions from a regression model

    +
    + +
    predictAndromeda(
    +  coefficients,
    +  population,
    +  covariateData,
    +  modelType = "logistic"
    +)
    + +

    Arguments

    + + + + + + + + + + + + + + + + + + +
    coefficients

A named numeric vector where the names are the covariateIds, except for the +first value, which is expected to be the intercept.

    population

    A data frame containing the population to do the prediction for

    covariateData

An Andromeda object containing the covariateData with predefined columns +(see below).

    modelType

    Current supported types are "logistic", "poisson", "cox" or "survival".

    + +

    Details

    + +

    These columns are expected in the outcome object:

    + + + +
    rowId(integer)Row ID is used to link multiple covariates (x) to a single outcome (y)
    time(real)For models that use time (e.g. Poisson or Cox regression) this contains time
    (e.g. number of days)
    +

    These columns are expected in the covariates object:

    + + + +
    rowId(integer)Row ID is used to link multiple covariates (x) to a single outcome +(y)
    covariateId(integer)A numeric identifier of a covariate
    covariateValue(real)The value of the specified covariate
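A minimal sketch of a call, assuming coefficients, population, and an Andromeda covariateData object with the columns listed above are already available (the object names are placeholders):

if (FALSE) {
  # coefficients:  named numeric vector (names are covariateIds, first value the intercept)
  # population:    data.frame with at least a rowId column
  # covariateData: Andromeda object with rowId, covariateId, covariateValue
  prediction <- predictAndromeda(coefficients = coefficients,
                                 population = population,
                                 covariateData = covariateData,
                                 modelType = "logistic")
}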
    + + + +
    + +
    + + +
    + + +
    +

    Site built with pkgdown 1.6.1.

    +
    + +
    +
    + + + + + + + + diff --git a/docs/reference/predictDeepEstimator.html b/docs/reference/predictDeepEstimator.html new file mode 100644 index 0000000..6a8c24f --- /dev/null +++ b/docs/reference/predictDeepEstimator.html @@ -0,0 +1,179 @@ + + + + + + + + +predictDeepEstimator — predictDeepEstimator • DeepPatientLevelPrediction + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    + + + + +
    + +
    +
    + + +
    +

    the prediction function for the binary classification deep learning models

    +
    + +
    predictDeepEstimator(plpModel, data, cohort)
    + +

    Arguments

    + + + + + + + + + + + + + + +
    plpModel

    the plpModel

    data

    plp data object or a torch dataset

    cohort

    a data.frame with the rowIds of the people to predict risk for
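A minimal sketch, assuming a trained plpModel, a plpData object (or torch dataset), and a cohort data.frame already exist:

if (FALSE) {
  # plpModel, plpData and population are assumed to exist from an earlier run
  prediction <- predictDeepEstimator(plpModel = plpModel,
                                     data = plpData,
                                     cohort = population)
}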

    + + +
    + +
    + + +
    + + +
    +

    Site built with pkgdown 1.6.1.

    +
    + +
    +
    + + + + + + + + diff --git a/docs/reference/predictPlp.html b/docs/reference/predictPlp.html new file mode 100644 index 0000000..300423e --- /dev/null +++ b/docs/reference/predictPlp.html @@ -0,0 +1,190 @@ + + + + + + + + +predictPlp — predictPlp • DeepPatientLevelPrediction + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    + + + + +
    + +
    +
    + + +
    +

    Predict the risk of the outcome using the input plpModel for the input plpData

    +
    + +
    predictPlp(plpModel, population, plpData, index = NULL)
    + +

    Arguments

    + + + + + + + + + + + + + + + + + + +
    plpModel

    An object of type plpModel - a patient level prediction model

    population

    The population created using createStudyPopulation() who will have their risks predicted

    plpData

    An object of type plpData - the patient level prediction +data extracted from the CDM.

    index

A data frame containing rowId (a vector of rowIds) and index (a vector of doubles the same length as the rowIds). If used, only the rowIds with a negative index value are used to calculate the prediction.

    + +

    Value

    + +

    A dataframe containing the prediction for each person in the population with an attribute metaData containing prediction details.

    +

    Details

    + +

The function applies the trained model to the plpData to make predictions.
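A minimal sketch, assuming plpModel, population (from createStudyPopulation()), and plpData are already available:

if (FALSE) {
  prediction <- predictPlp(plpModel = plpModel,
                           population = population,
                           plpData = plpData)
  head(prediction)               # one row per person with the predicted risk
  attr(prediction, "metaData")   # prediction details
}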

    + +
    + +
    + + +
    + + +
    +

    Site built with pkgdown 1.6.1.

    +
    + +
    +
    + + + + + + + + diff --git a/docs/reference/predictProbabilities.html b/docs/reference/predictProbabilities.html new file mode 100644 index 0000000..27b8233 --- /dev/null +++ b/docs/reference/predictProbabilities.html @@ -0,0 +1,187 @@ + + + + + + + + +Create predictive probabilities — predictProbabilities • DeepPatientLevelPrediction + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    + + + + +
    + +
    +
    + + +
    +

    Create predictive probabilities

    +
    + +
    predictProbabilities(predictiveModel, population, covariateData)
    + +

    Arguments

    + + + + + + + + + + + + + + +
    predictiveModel

    An object of type predictiveModel as generated using +fitPlp.

    population

    The population to calculate the prediction for

    covariateData

    The covariateData containing the covariates for the population

    + +

    Value

    + +

The value column in the result data.frame is: logistic: probabilities of the outcome, poisson: +Poisson rate (per day) of the outcome, survival: hazard rate (per day) of the outcome.

    +

    Details

    + +

    Generates predictions for the population specified in plpData given the model.
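A minimal sketch, assuming a predictiveModel from fitPlp() and the matching population and covariateData objects already exist:

if (FALSE) {
  prediction <- predictProbabilities(predictiveModel = predictiveModel,
                                     population = population,
                                     covariateData = plpData$covariateData)
}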

    + +
    + +
    + + +
    + + +
    +

    Site built with pkgdown 1.6.1.

    +
    + +
    +
    + + + + + + + + diff --git a/docs/reference/setCIReNN.html b/docs/reference/setCIReNN.html new file mode 100644 index 0000000..514c026 --- /dev/null +++ b/docs/reference/setCIReNN.html @@ -0,0 +1,298 @@ + + + + + + + + +Create setting for CIReNN model — setCIReNN • DeepPatientLevelPrediction + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    + + + + +
    + +
    +
    + + +
    +

    Create setting for CIReNN model

    +
    + +
    setCIReNN(
    +  numberOfRNNLayer = c(1),
    +  units = c(128, 64),
    +  recurrentDropout = c(0.2),
    +  layerDropout = c(0.2),
    +  lr = c(1e-04),
    +  decay = c(1e-05),
    +  outcomeWeight = c(0),
    +  batchSize = c(100),
    +  epochs = c(100),
    +  earlyStoppingMinDelta = c(1e-04),
    +  earlyStoppingPatience = c(10),
    +  bayes = T,
    +  useDeepEnsemble = F,
    +  numberOfEnsembleNetwork = 5,
    +  useVae = T,
    +  vaeDataSamplingProportion = 0.1,
    +  vaeValidationSplit = 0.2,
    +  vaeBatchSize = 100L,
    +  vaeLatentDim = 10L,
    +  vaeIntermediateDim = 256L,
    +  vaeEpoch = 100L,
    +  vaeEpislonStd = 1,
    +  useGPU = FALSE,
    +  maxGPUs = 2,
    +  seed = 1234
    +)
    + +

    Arguments

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    numberOfRNNLayer

The number of RNN layers; only 1, 2, or 3 layers are available now, e.g. 1, c(1,2), c(1,2,3)

    units

    The number of units of RNN layer - as a list of vectors

    recurrentDropout

The recurrent dropout rate (regularisation)

    layerDropout

    The layer dropout rate (regularisation)

    lr

    Learning rate

    decay

    Learning rate decay over each update.

    outcomeWeight

    The weight of the outcome class in the loss function. Default is 0, which will be replaced by balanced weight.

    batchSize

    The number of data points to use per training batch

    epochs

    Number of times to iterate over dataset

    earlyStoppingMinDelta

Minimum change in the monitored quantity to qualify as an improvement for early stopping, i.e. an absolute change in validation loss of less than min_delta counts as no improvement.

    earlyStoppingPatience

    Number of epochs with no improvement after which training will be stopped.

    bayes

    logical (either TRUE or FALSE) value for using Bayesian Drop Out Layer to measure uncertainty. If it is TRUE, both Epistemic and Aleatoric uncertainty will be measured through Bayesian Drop Out layer

    useDeepEnsemble

logical (either TRUE or FALSE) value for using Deep Ensemble (Lakshminarayanan et al., 2017) to measure uncertainty. It cannot be used together with Bayesian deep learning.

    numberOfEnsembleNetwork

Integer. Number of networks used for Deep Ensemble (Lakshminarayanan et al. recommended 5).

    useVae

    logical (either TRUE or FALSE) value for using Variational AutoEncoder before RNN

    vaeDataSamplingProportion

    Data sampling proportion for VAE

    vaeValidationSplit

    Validation split proportion for VAE

    vaeBatchSize

    batch size for VAE

    vaeLatentDim

Number of latent dimensions for VAE

    vaeIntermediateDim

Number of intermediate dimensions for VAE

    vaeEpoch

Number of times to iterate over the dataset for VAE

    vaeEpislonStd

    Epsilon

    useGPU

    logical (either TRUE or FALSE) value. If you have GPUs in your machine, and want to use multiple GPU for deep learning, set this value as TRUE

    maxGPUs

Integer. If you use GPUs, how many GPUs should be used for deep learning in the VAE? GPU parallelisation for deep learning is activated only when the parallel VAE option is true. Integer >= 2, or a list of integers: the number of GPUs, or a list of GPU IDs on which to create model replicas.

    seed

    Random seed used by deep learning model
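As a fuller, hypothetical illustration of these arguments (all values are placeholders; the Examples section below shows the default call):

if (FALSE) {
  ciRennSettings <- setCIReNN(numberOfRNNLayer = c(1, 2),
                              units = c(64, 128),
                              lr = c(1e-4),
                              batchSize = c(200),
                              epochs = c(50),
                              useVae = TRUE,
                              useGPU = FALSE,
                              seed = 42)
}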

    + + +

    Examples

    +
    if (FALSE) { +model.CIReNN <- setCIReNN() +} +
    +
    + +
    + + +
    + + +
    +

    Site built with pkgdown 1.6.1.

    +
    + +
    +
    + + + + + + + + diff --git a/docs/reference/setCNNTorch.html b/docs/reference/setCNNTorch.html new file mode 100644 index 0000000..2cec5d7 --- /dev/null +++ b/docs/reference/setCNNTorch.html @@ -0,0 +1,201 @@ + + + + + + + + +Create setting for CNN model with python — setCNNTorch • DeepPatientLevelPrediction + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    + + + + +
    + +
    +
    + + +
    +

    Create setting for CNN model with python

    +
    + +
    setCNNTorch(
    +  nbfilters = c(16, 32),
    +  epochs = c(20, 50),
    +  seed = 0,
    +  class_weight = 0,
    +  type = "CNN"
    +)
    + +

    Arguments

    + + + + + + + + + + + + + + + + + + + + + + +
    nbfilters

    The number of filters

    epochs

    The number of epochs

    seed

    A seed for the model

    class_weight

    The class weight used for imbalanced data: + 0: Inverse ratio between positives and negatives +-1: Focal loss

    type

It can be normal 'CNN', 'CNN_LSTM', 'CNN_MLF' with multiple kernels with different kernel sizes, +'CNN_MIX', 'ResNet' and 'CNN_MULTI'

    + + +

    Examples

    +
    if (FALSE) { +model.cnnTorch <- setCNNTorch() +} +
    +
    + +
    + + +
    + + +
    +

    Site built with pkgdown 1.6.1.

    +
    + +
    +
    + + + + + + + + diff --git a/docs/reference/setCovNN.html b/docs/reference/setCovNN.html new file mode 100644 index 0000000..4293b5c --- /dev/null +++ b/docs/reference/setCovNN.html @@ -0,0 +1,223 @@ + + + + + + + + +Create setting for multi-resolution CovNN model (stucture based on https://arxiv.org/pdf/1608.00647.pdf CNN1) — setCovNN • DeepPatientLevelPrediction + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    + + + + +
    + +
    +
    + + +
    +

Create setting for multi-resolution CovNN model (structure based on https://arxiv.org/pdf/1608.00647.pdf CNN1)

    +
    + +
    setCovNN(
    +  batchSize = 1000,
    +  outcomeWeight = 1,
    +  lr = 1e-05,
    +  decay = 1e-06,
    +  dropout = 0,
    +  epochs = 10,
    +  filters = 3,
    +  kernelSize = 10,
    +  loss = "binary_crossentropy",
    +  seed = NULL
    +)
    + +

    Arguments

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    batchSize

    The number of samples to used in each batch during model training

    outcomeWeight

The weight assigned to the outcome (make it greater than 1 to reduce the unbalanced label issue)

    lr

    The learning rate

    decay

    The decay of the learning rate

    dropout

    [currently not used] the dropout rate for regularisation

    epochs

The number of times the data is used to train the model (e.g., epochs=1 means the data is only used once to train)

    filters

    The number of columns output by each convolution

    kernelSize

    The number of time dimensions used for each convolution

    loss

    The loss function implemented

    seed

    The random seed

    + + +

    Examples

    +
    if (FALSE) { +model.CovNN <- setCovNN() +} +
    +
    + +
    + + +
    + + +
    +

    Site built with pkgdown 1.6.1.

    +
    + +
    +
    + + + + + + + + diff --git a/docs/reference/setCovNN2.html b/docs/reference/setCovNN2.html new file mode 100644 index 0000000..357ffee --- /dev/null +++ b/docs/reference/setCovNN2.html @@ -0,0 +1,223 @@ + + + + + + + + +Create setting for CovNN2 model - convolution across input and time - https://arxiv.org/pdf/1608.00647.pdf — setCovNN2 • DeepPatientLevelPrediction + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    + + + + +
    + +
    +
    + + +
    +

    Create setting for CovNN2 model - convolution across input and time - https://arxiv.org/pdf/1608.00647.pdf

    +
    + +
    setCovNN2(
    +  batchSize = 1000,
    +  outcomeWeight = 1,
    +  lr = 1e-05,
    +  decay = 1e-06,
    +  dropout = 0,
    +  epochs = 10,
    +  filters = 3,
    +  kernelSize = 10,
    +  loss = "binary_crossentropy",
    +  seed = NULL
    +)
    + +

    Arguments

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    batchSize

    The number of samples to used in each batch during model training

    outcomeWeight

The weight assigned to the outcome (make it greater than 1 to reduce the unbalanced label issue)

    lr

    The learning rate

    decay

    The decay of the learning rate

    dropout

    [currently not used] the dropout rate for regularisation

    epochs

The number of times the data is used to train the model (e.g., epochs=1 means the data is only used once to train)

    filters

    The number of columns output by each convolution

    kernelSize

    The number of time dimensions used for each convolution

    loss

    The loss function implemented

    seed

    The random seed

    + + +

    Examples

    +
    if (FALSE) { +model.CovNN <- setCovNN() +} +
    +
    + +
    + + +
    + + +
    +

    Site built with pkgdown 1.6.1.

    +
    + +
    +
    + + + + + + + + diff --git a/docs/reference/setDeepNN.html b/docs/reference/setDeepNN.html new file mode 100644 index 0000000..ba7fc46 --- /dev/null +++ b/docs/reference/setDeepNN.html @@ -0,0 +1,213 @@ + + + + + + + + +Create setting for DeepNN model — setDeepNN • DeepPatientLevelPrediction + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    + + + + +
    + +
    +
    + + +
    +

    Create setting for DeepNN model

    +
    + +
    setDeepNN(
    +  units = list(c(128, 64), 128),
    +  layer_dropout = c(0.2),
    +  lr = c(1e-04),
    +  decay = c(1e-05),
    +  outcome_weight = c(1),
    +  batch_size = c(100),
    +  epochs = c(100),
    +  seed = NULL
    +)
    + +

    Arguments

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    units

    The number of units of the deep network - as a list of vectors

    layer_dropout

    The layer dropout rate (regularisation)

    lr

    Learning rate

    decay

    Learning rate decay over each update.

    outcome_weight

    The weight of the outcome class in the loss function

    batch_size

    The number of data points to use per training batch

    epochs

    Number of times to iterate over dataset

    seed

    Random seed used by deep learning model

    + + +

    Examples

    +
    if (FALSE) { +model <- setDeepNN() +} +
    +
    + +
    + + +
    + + +
    +

    Site built with pkgdown 1.6.1.

    +
    + +
    +
    + + + + + + + + diff --git a/docs/reference/setDeepNNTorch.html b/docs/reference/setDeepNNTorch.html new file mode 100644 index 0000000..1f9c6e0 --- /dev/null +++ b/docs/reference/setDeepNNTorch.html @@ -0,0 +1,214 @@ + + + + + + + + +Create setting for DeepNN model using Torch for R — setDeepNNTorch • DeepPatientLevelPrediction + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    + + + + +
    + +
    +
    + + +
    +

    Create setting for DeepNN model using Torch for R

    +
    + +
    setDeepNNTorch(
    +  units = list(c(128, 64), 128),
    +  layer_dropout = c(0.2),
    +  lr = c(1e-04),
    +  decay = c(1e-05),
    +  outcome_weight = c(1),
    +  batch_size = c(10000),
    +  epochs = c(100),
    +  device = "cpu",
    +  seed = NULL
    +)
    + +

    Arguments

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    units

    The number of units of the deep network - as a list of vectors

    layer_dropout

    The layer dropout rate (regularisation)

    lr

    Learning rate

    decay

    Learning rate decay over each update.

    outcome_weight

    The weight of the outcome class in the loss function

    batch_size

    The number of data points to use per training batch

    epochs

    Number of times to iterate over dataset

    seed

    Random seed used by deep learning model

    + + +

    Examples

    +
    if (FALSE) { +model <- setDeepNN() +} +
    +
    + +
    + + +
    + + +
    +

    Site built with pkgdown 1.6.1.

    +
    + +
    +
    + + + + + + + + diff --git a/docs/reference/setRNNTorch.html b/docs/reference/setRNNTorch.html new file mode 100644 index 0000000..09e070f --- /dev/null +++ b/docs/reference/setRNNTorch.html @@ -0,0 +1,200 @@ + + + + + + + + +Create setting for RNN model with python — setRNNTorch • DeepPatientLevelPrediction + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    + + + + +
    + +
    +
    + + +
    +

    Create setting for RNN model with python

    +
    + +
    setRNNTorch(
    +  hidden_size = c(50, 100),
    +  epochs = c(20, 50),
    +  seed = 0,
    +  class_weight = 0,
    +  type = "RNN"
    +)
    + +

    Arguments

    + + + + + + + + + + + + + + + + + + + + + + +
    hidden_size

    The hidden size

    epochs

    The number of epochs

    seed

    A seed for the model

    class_weight

    The class weight used for imbalanced data: + 0: Inverse ratio between positives and negatives +-1: Focal loss

    type

    It can be normal 'RNN', 'BiRNN' (bidirectional RNN) and 'GRU'

    + + +

    Examples

    +
    if (FALSE) { +model.rnnTorch <- setRNNTorch() +} +
    +
    + +
    + + +
    + + +
    +

    Site built with pkgdown 1.6.1.

    +
    + +
    +
    + + + + + + + + diff --git a/docs/reference/setResNet.html b/docs/reference/setResNet.html new file mode 100644 index 0000000..e615d28 --- /dev/null +++ b/docs/reference/setResNet.html @@ -0,0 +1,251 @@ + + + + + + + + +setResNet — setResNet • DeepPatientLevelPrediction + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    + + + + +
    + +
    +
    + + +
    +

    Creates settings for a ResNet model

    +
    + +
    setResNet(
    +  numLayers = 1:16,
    +  sizeHidden = 2^(6:10),
    +  hiddenFactor = 1:4,
    +  residualDropout = seq(0, 0.3, 0.05),
    +  hiddenDropout = seq(0, 0.3, 0.05),
    +  normalization = "BatchNorm",
    +  activation = "RelU",
    +  sizeEmbedding = 2^(6:9),
    +  weightDecay = c(1e-06, 0.001),
    +  learningRate = c(0.01, 1e-05),
    +  seed = NULL,
    +  hyperParamSearch = "random",
    +  randomSample = 100,
    +  device = "cpu",
    +  batchSize = 1024,
    +  epochs = 10
    +)
    + +

    Arguments

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    numLayers

    Number of layers in network, default: 1:16

    sizeHidden

    Amount of neurons in each default layer, default: 2^(6:10) (64 to 1024)

    hiddenFactor

    How much to grow the amount of neurons in each ResLayer, default: 1:4

    residualDropout

    How much dropout to apply after last linear layer in ResLayer, default: seq(0, 0.3, 0.05)

    hiddenDropout

    How much dropout to apply after first linear layer in ResLayer, default: seq(0, 0.3, 0.05)

    normalization

Which type of normalization to use. Default: 'BatchNorm'

    activation

    What kind of activation to use. Default: 'RelU'

    sizeEmbedding

    Size of embedding layer, default: 2^(6:9) (64 to 512)

    weightDecay

    Weight decay to apply, default: c(1e-6, 1e-3)

    learningRate

    Learning rate to use. default: c(1e-2, 1e-5)

    seed

    Seed to use for sampling hyperparameter space

    hyperParamSearch

Which kind of hyperparameter search to use, random sampling or exhaustive grid search. Default: 'random'

    randomSample

    How many random samples from hyperparameter space to use

    device

    Which device to run analysis on, either 'cpu' or 'cuda', default: 'cpu'

    epochs

    Number of epochs to run, default: 10

batchSize

    Size of batch, default: 1024

    + +

    Details

    + +

Model architecture from https://arxiv.org/abs/2106.11959
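Since this page has no Examples section, here is a hypothetical sketch of a narrower random search over the hyperparameter space (all values are placeholders, not recommendations):

if (FALSE) {
  resNetSettings <- setResNet(numLayers = c(2, 4),
                              sizeHidden = c(128, 256),
                              hiddenFactor = 2,
                              residualDropout = 0.1,
                              hiddenDropout = 0.1,
                              sizeEmbedding = 128,
                              weightDecay = 1e-5,
                              learningRate = 3e-4,
                              hyperParamSearch = "random",
                              randomSample = 10,
                              device = "cpu",
                              batchSize = 512,
                              epochs = 5,
                              seed = 42)
}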

    + +
    + +
    + + +
    + + +
    +

    Site built with pkgdown 1.6.1.

    +
    + +
    +
    + + + + + + + + diff --git a/docs/reference/setResNet_plp5.html b/docs/reference/setResNet_plp5.html new file mode 100644 index 0000000..5ebc5b3 --- /dev/null +++ b/docs/reference/setResNet_plp5.html @@ -0,0 +1,251 @@ + + + + + + + + +setResNet_plp5 — setResNet_plp5 • DeepPatientLevelPrediction + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    + + + + +
    + +
    +
    + + +
    +

    Creates settings for a ResNet model

    +
    + +
    setResNet_plp5(
    +  numLayers = list(1:16),
    +  sizeHidden = list(2^(6:10)),
    +  hiddenFactor = list(1:4),
    +  residualDropout = list(seq(0, 0.3, 0.05)),
    +  hiddenDropout = list(seq(0, 0.3, 0.05)),
    +  normalization = list("BatchNorm"),
    +  activation = list("RelU"),
    +  sizeEmbedding = list(2^(6:9)),
    +  weightDecay = list(c(1e-06, 0.001)),
    +  learningRate = list(c(0.01, 1e-05)),
    +  seed = NULL,
    +  hyperParamSearch = "random",
    +  randomSample = 100,
    +  device = "cpu",
    +  batchSize = 1024,
    +  epochs = 10
    +)
    + +

    Arguments

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    numLayers

    Number of layers in network, default: 1:16

    sizeHidden

    Amount of neurons in each default layer, default: 2^(6:10) (64 to 1024)

    hiddenFactor

    How much to grow the amount of neurons in each ResLayer, default: 1:4

    residualDropout

    How much dropout to apply after last linear layer in ResLayer, default: seq(0, 0.3, 0.05)

    hiddenDropout

    How much dropout to apply after first linear layer in ResLayer, default: seq(0, 0.3, 0.05)

    normalization

Which type of normalization to use. Default: 'BatchNorm'

    activation

    What kind of activation to use. Default: 'RelU'

    sizeEmbedding

    Size of embedding layer, default: 2^(6:9) (64 to 512)

    weightDecay

    Weight decay to apply, default: c(1e-6, 1e-3)

    learningRate

    Learning rate to use. default: c(1e-2, 1e-5)

    seed

    Seed to use for sampling hyperparameter space

    hyperParamSearch

Which kind of hyperparameter search to use, random sampling or exhaustive grid search. Default: 'random'

    randomSample

    How many random samples from hyperparameter space to use

    device

    Which device to run analysis on, either 'cpu' or 'cuda', default: 'cpu'

    epochs

    Number of epochs to run, default: 10

batchSize

    Size of batch, default: 1024

    + +

    Details

    + +

Model architecture from https://arxiv.org/abs/2106.11959
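A hypothetical sketch mirroring the usage above, with each hyperparameter wrapped in a list as the signature requires (all values are placeholders):

if (FALSE) {
  resNetSettings <- setResNet_plp5(numLayers = list(2, 4),
                                   sizeHidden = list(128, 256),
                                   hiddenFactor = list(2),
                                   residualDropout = list(0.1),
                                   hiddenDropout = list(0.1),
                                   sizeEmbedding = list(128),
                                   weightDecay = list(1e-5),
                                   learningRate = list(3e-4),
                                   hyperParamSearch = "random",
                                   randomSample = 10,
                                   batchSize = 512,
                                   epochs = 5)
}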

    + +
    + +
    + + +
    + + +
    +

    Site built with pkgdown 1.6.1.

    +
    + +
    +
    + + + + + + + + diff --git a/docs/reference/toSparseMDeep.html b/docs/reference/toSparseMDeep.html new file mode 100644 index 0000000..4e1ca42 --- /dev/null +++ b/docs/reference/toSparseMDeep.html @@ -0,0 +1,214 @@ + + + + + + + + +Convert the plpData in COO format into a sparse R matrix +Converts the standard plpData to a sparse matrix +This function converts the covariate file from ffdf in COO format into a sparse matrix from +the package Matrix — toSparseMDeep • DeepPatientLevelPrediction + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    + + + + +
    + +
    +
    + + +
    +

    Convert the plpData in COO format into a sparse R matrix +Converts the standard plpData to a sparse matrix +This function converts the covariate file from ffdf in COO format into a sparse matrix from +the package Matrix

    +
    + +
    toSparseMDeep(plpData, population, map = NULL, temporal = F)
    + +

    Arguments

    + + + + + + + + + + + + + + + + + + +
    plpData

    An object of type plpData with covariate in coo format - the patient level prediction +data extracted from the CDM.

    population

    The population to include in the matrix

    map

    A covariate map (telling us the column number for covariates)

    temporal

    Whether you want to convert temporal data

    + +

    Value

    + +

    Returns a list, containing the data as a sparse matrix, the plpData covariateRef +and a data.frame named map that tells us what covariate corresponds to each column +This object is a list with the following components:

    +
    data

    A sparse matrix with the rows corresponding to each person in the plpData and the columns corresponding to the covariates.

    +
    covariateRef

    The plpData covariateRef.

    +
    map

    A data.frame containing the data column ids and the corresponding covariateId from covariateRef.

    + +
    + + +

    Examples

    +
    #TODO + +
    +
    + +
    + + +
    + + +
    +

    Site built with pkgdown 1.6.1.

    +
    + +
    +
    + + + + + + + + diff --git a/docs/reference/toSparseRTorch.html b/docs/reference/toSparseRTorch.html new file mode 100644 index 0000000..2505e4d --- /dev/null +++ b/docs/reference/toSparseRTorch.html @@ -0,0 +1,202 @@ + + + + + + + + +Convert the plpData in COO format into a sparse Torch tensor — toSparseRTorch • DeepPatientLevelPrediction + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    + + + + +
    + +
    +
    + + +
    +

    Converts the standard plpData to a sparse tensor for Torch

    +
    + +
    toSparseRTorch(plpData, population, map = NULL, temporal = T)
    + +

    Arguments

    + + + + + + + + + + + + + + + + + + +
    plpData

    An object of type plpData with covariate in coo format - the patient level prediction +data extracted from the CDM.

    population

    The population to include in the matrix

    map

    A covariate map (telling us the column number for covariates)

    temporal

    Whether you want to convert temporal data

    + +

    Value

    + +

    Returns a list, containing the data as a sparse matrix, the plpData covariateRef +and a data.frame named map that tells us what covariate corresponds to each column +This object is a list with the following components:

    +
    data

    A sparse matrix with the rows corresponding to each person in the plpData and the columns corresponding to the covariates.

    +
    covariateRef

    The plpData covariateRef.

    +
    map

    A data.frame containing the data column ids and the corresponding covariateId from covariateRef.

    + +
    + +

    Details

    + +

    This function converts the covariate file from COO format into a sparse Torch tensor

    + +

    Examples

    +
    #TODO + +
    +
    + +
    + + +
    + + +
    +

    Site built with pkgdown 1.6.1.

    +
    + +
    +
    + + + + + + + + diff --git a/docs/reference/transferLearning.html b/docs/reference/transferLearning.html new file mode 100644 index 0000000..53a64e2 --- /dev/null +++ b/docs/reference/transferLearning.html @@ -0,0 +1,231 @@ + + + + + + + + +[Under development] Transfer learning — transferLearning • DeepPatientLevelPrediction + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    + + + + +
    + +
    +
    + + +
    +

    [Under development] Transfer learning

    +
    + +
    transferLearning(
    +  plpResult,
    +  plpData,
    +  population,
    +  fixLayers = T,
    +  includeTop = F,
    +  addLayers = c(100, 10),
    +  layerDropout = c(T, T),
    +  layerActivation = c("relu", "softmax"),
    +  outcomeWeight = 1,
    +  batchSize = 10000,
    +  epochs = 20
    +)
    + +

    Arguments

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    plpResult

The plp result from training a keras deep learning model on big data

    plpData

    The new data to fine tune the model on

    population

    The population for the new data

    fixLayers

Boolean specifying whether to fix the weights of the model being transferred

    includeTop

    If TRUE the final layer of the model being transferred is removed

    addLayers

Vector specifying the nodes in each layer to add, e.g. c(100,10) will add another layer with 100 nodes and then a final layer with 10

    layerDropout

    Add dropout to each new layer (binary vector length of addLayers)

    layerActivation

    Activation function for each new layer (string vector length of addLayers)

    outcomeWeight

    The weight to assign the class 1 when training the model

    batchSize

    Size of each batch for updating layers

    epochs

Number of epochs to run

    + + +

    Examples

    +
    if (FALSE) { +modelSet <- setDeepNN() +plpResult <- runPlp(plpData, population, modelSettings = modelSet, ...) + +transferLearning(...) +} +
    +
    + +
    + + +
    + + +
    +

    Site built with pkgdown 1.6.1.

    +
    + +
    +
    + + + + + + + + diff --git a/inst/doc/BuildingDeepModels.pdf b/inst/doc/BuildingDeepModels.pdf new file mode 100644 index 0000000000000000000000000000000000000000..1cb0114223d1b6c3b51d3810bc76051e9fd9ff67 GIT binary patch literal 182193 zcmeFYbzIfYwlEAxNeCh!&6XBqC*2JK5&~k-jdXW6NP`lBfV6=7vw9+Asv~<7U z4SIfY&U5cM&%N*a*X!rA7qe&9nprh7YpvNbDoIGO!@!(4jD16GBRDV+6lA6M97jL^ z1W^G&q^(q~KoAMgGdR%y5CoAjwbuj*3E>!88X&P?oc}<_fyf(L8rz$I;G7&Bpv!+a z0J^!op)EjWu4`{7VW@9qV2A@y+u7S1>RRA9kEJ}3iyg1J*|7P(k6krh^4g5}yp6@? zC&zD3hFx#mH?(V#iSnlqQU&qqFAe8tr{!*USH*;3b9iG?oE=jX|tHGTL`! zS$K>5c<4R1QSzGRwKCna&E&|YMlI8nZ<4xE7gDRN?6UP^1};zIIjy$(9a1nzv*pjN zm%fv5Rdzz8r#a-$95fRd4$u0Jpm-k4h^f?X*|OOiJTsrv26omOkM>C_r)t56{HdqK z!VY5Rvod5R^@5`jK^Gy;5y(4~{pz+Tv@s?`kXo-XQ;K!tI zUySCuzlFbS?0CedSXyUr6wW$W!YX>sT8Q&xr(*M4qJmUAZ8dM7Ov<;cyEf^*S=QmM zU2MCqD#>(5D7f-)Od^3%#oGP@cdii#=d)|820}#K(Vy#Bd0{)Q`c55d8)V0M;}`Gc z;YL3X8pbCIR^c}5i9JJDVSEK8^GhP9ik|khHaM0d$(`T1@Qc-kPi+?u5a9224uYOE z6qUji5eqb_d|56(rsc`MzWfEBOT+x-xU` z-Fl}REX=H*>1+IVGxBU|OAj;}$<)X5YQ!OeHN(p$En_afLl}6pq(({osbAaV-mWDdThcM?5HUu#I!Eh=<>(KzF6sd@CEzRHrp`-^($&U@m+U7@UB z@?T>Z8F#ME8}wsr-bIJDm8iJ3OC;XbFl$Mvx6oK4P!Y#rA!hLlX~0-ABy!c786U6K zeXK9StB1z^8QsnJTfunxBgHzVDowAox?I{wBE{?Hm`}6J(&A8Ktvsf8+pQ83?~g`i zed*%h&X^LOP!O{c$DN_#9VP6i>VQk-qM`nvqjW5SunhB2FpgQZ+XdcA_rY3GxJFLM zMM)V^QqV}PQKhX93b7^hGQN2;JcoLeG=?WbA6NgSTx1^A2xk-8C&~lD|W+ffr|2W!?CRloQb{cvG1!bSn8q4lM*b* zp60Q<6oj5O_YGmJRVo+vtZ2Y>vmHAS<# z+Ai=_S6}C*9>+qPNz`fTZs)pFhFpsKsB%rlSsV{Rl(2n}kP-_8_~`WDV6(wJZN4)& z*Y)_Pyu|=~5KW&&F`}{BZP>k`DgO4g$RY=vN6u|a-FFrvd?mF$yb<=oQ%T@}&~A&c z9eVY|etm-TVFL?GjPi>dHvI9Vb?lcLSg(u@SbikPW-UBgdz->WBO{%R7UlO?{t1{p zn?XI{>A+#Q#pM2V%Nz^PsQZHe%KwTwo|ps=rK|{FHQ+Q^)iwe{iviS-Lhk6og~NkAh^85ceVX42;7RoY?y|QO`rb}`@tAo&cX+zfvNxr0;$BU zRNQ0O3&eIXU!}CVb|9gKqO0fK^->4jm0d@@nf~Uq#%;JPYl2?UWUg=e>;p(xSB7WmXTPLF4XH6)r?DA*ps|bB$qEwQ7bw@?_L(MGdFj zsD3m*ifnv(VTE}LSc$^J^x7=!6G6W;4fVBpo))Q!+6L3LL~jPiel?TV!q~m!95a^I zriEvpQ`qj#&5KQr-SO?kplp&?5){G(kf>VcZ7 zMF*-ZB9z`nFk_Nnh@z0+q&ag=8G)oRi1}+O9=>XMlkf>mWNV41aWltj&Ye> zRm|GR%?A=|xuV2V8$Asabs}B&9c9+Uor_kBn$||M2(d2TzpB#IH%Sir#d;-{iBPa-GF! 
z+*>?K9J>>Zl#~=(sdfUQIKyn0i=__I-13Qj9t{W%7NWBZpYClF((T?#B<#efE$uOa zbFGWq0+MycCIsS~8Z}lw9u?bvRf#V-dc0<^0)L`LK%QVycJ95g;@*=<><N@6LaID~fYZg}sT0^JJsc1>(4Use7TLZn9o}Nt6^R9W9yjo7Mpg0*U zPjf;gHGHOK^VG!eavJX+INjK|hYUZ&(thygd2(n=AUJR1lXXGZ(|xcXh-j`ThcBQy zuZP+L(hh$fOjUSKskY$#^}%)m(^Vy@cnu~dFd5F4%gYU^o@5E=5m!T z0}^M(br}hSA&zBYa>$(nwF6CBc5Z-T;DPzdSxHPaq}jWY@&eN8^z2o;&TS0aHJgm| zk%H&D06!YbslEN1)?4HxACaD^yAKZ&}naeL=H)*G>Pk(l8)t~GY5&<)aL`GkHe!V}Buw$SLmoGI+Cb~Vg#Vn2@*uQu2 zlg~8SMa&DCz>RusS1ntDzM{*;atH1{oQo~ccZ-Zqy^)yxC*C^q(|hq}UaO+CJDj@% zyqCj3gKR13Y7-(o0-0ql~d#uvdk-N$CJSMhfu^9o8JA z3+@EA0!A{3Otys*rz$6Tdt7%s-R8O^=}=SIfg7l>5Z|R_H%nxojG1Q(5duc;>v}e} zB@8P`>P~Z8g?xGkeym+duR2Ot>5n~EzJmL}tRF^!3evz~LVzr`XTH9^H1_#TWg;Yo z*2?mrXg^=$QhtzXSZ9eZ%QEboZkci@y_uzzzaewvHMcIDLr~V~+@UwQWBeiTyu=qw zC!i_y%47l7mv5SNrQMFCO!~M>PpMd533`>?nuLtlUbNTn-VYH{SJn7O)G?Oy!e!k{ zdbms9ekyPVcdBO>sXwks>kUEu$1_JDp<620YGL)zitf|uZYpy^DV9(12#Fv$P@M#W zfXxF>S2`LpeeF6H1q}tVuOFv2ix>i2w84&{vpb@jMT+O#fj-WcQ(2{xr`y)CWX^)- z8Ng`q%&K$GU`!Esh*e8c&}Dn|FC8q{x{+es0hwlt=~gGd za;mR!IWHJA4vwb?tca}^C}+V+%;+OqJ~!I*UYm|xs}POgy34&*PyRRd0H($`@H&CQl?2Co(*E^No+_Rk&)69XJj4fA(>hg)-#q-tM96)o3 zut8*Mm8jt3(W!M@4WgQEC}ka5;&SnZQ#U1*T<&7QVLt)ujre6Go3O&3@EBkG1I{1m5Q zRP#c4t~}5Qdbacifu_bd9?~i*q1>80iLnw#h*}=m2rYUZ zHB!C&Dm1rM`u;aWVTwPxo(R@k<_YOajccPAshhdQ^fFx$+w`X(+(2(cDxsz-^Uuj$ zn^e&Keoz4y>9TgSx2@@6b2&)W1DR0XZWP0~;7hUo64uIa84v0-q$!^a!v@?F^}BA@ zs8rNu7z$D6UWJp4VD&}Wb(b=uFvxcvr54can0G4o#blTSI`t_zIb%MfEwfD=5C&3; zFhH$&+P?es_AEA3v9SCruMiY=$SBs(7uCjDM1dUrw~+FSouV68F1~A>A zs09cn2{~?61!?pX#m~&MXPVJl?3^U(`PcJ@NM;hbJXM3eSArYojsYSTnsC{ODCjbf z#5+Jx=9B?0W16l$lejeZ_IRY4@#6B-fyCfQMa*!Pbbeonc+SrA}RMDFmem9P(7^Eur%q)WYI{3?B~)B z4v%**DOw?Zx2fCL=mAPErcsfzHH*%AtNfN!iH|KyZX*iRE`2SG)5l@BvaR31Kb~WYqAuZctggib$u_%d9n@-@)C%8Wq}FN! znKUHLGPm|BHLz4x-5r?;*Olo2-m`R?ji{f1AGeBdGoiW0e+3Rrj^h-%m#6BOhB6_W z)&-*d=J*{sDst$n*g4}%YP74@(eXK5(*|ruRg24i{Co=XaQE~IXQt{pj(4iBStjnbRikPZx`0a(>$SxBA_ zJIyo_mx-9lXdLuH3{u|B{*;m zQ0Q4+45CPTbz83E-a4e}?vb-{Gt3+^#~@DPw}ooUj%mfpRSs!8VWq@fuB3S@9I`ed z@UBn^Y#Wa{u9)^EH!514S!epp_d;`%K$kAU37P@3M~p?|KDfgnUA|=>OvMpF2D|7uE) zn=s_<9U3+`i&?OOPlcYSxDl>|p@#cxiNN>*$U9Ai(POxSV~9)fH&}H5cTORBNU)ng z%n@S2JXK`;D$*hWrfM#jX&^oTi|q1q(|)c~R}gB5H8Z@J-Vxs`XFel_b|IiEhS_?> ze|38I4+|6gUN3Q;wTObHaY&nQ>ozws-2NdOZ0Qv-Kp^Edk;R7uUIm zxWkgv0`j%?#=lAdBMG%PX)P0}FV|gGf<@`Amioh-0QdCbn9%9TR4;qDl7C(;p$|_T z4Dkq`q>=_3-_|@~xvl=9qAQ{jk%gr_mqIenlpdMhC{8;EGhhr)=fJ{HwZ(4MTeVf? 
zuVx}T*R+7gSP01sB*m8xq>+fD-KMv19!I%U522mn*@t1DL#BJsfx zjamjs-JWMyfae$KUiJV?PjT@Q+A)hP9Xsl(S#CMO!n|jeqqyNNIW)8aI~2(t%pXF%n)A-dDP=&A4jKhJ`_>-_Q)2A>E}7z-WiQ?Lg#lwaAer2HZI6m zb%0vK2(QE0vW;Y{4xUQ(PBRaO@HgQCCskR8MYWuc)WT5VwCIW`@H9(}dt7;)Wa1l4 z7jg!F>-I3}cC^|yc|}r@xfi^Me!euUOId?oX957A+V7kUDLr+BekR&E?p-UW3J7zmcGoN_s7kS0!o z3mUE;K|9dJ03MroT`k&H)72M+jBK4DiIW=7d(W0}UHXy}RG{d$J z=AWsbT_;Bq!m*5=YtIdcrWl|FZ93;V{eiCPV`tgaUa8tdPRN6*sxLp% zw1$vJyiBnzd7uXYlh^-K9O3sSVGNg51;~&7*jf41A8f|%oa9wFyV%jl8r?}l90pN6 zz_+=m5Noy=Mt8^VB-Chk!x){~RTRnyX6Tyq{R*jI_BzVmj z`_Y%96n&ql=ckD)$LtL_<->rIo2`bqQMz85*-TAE7Lgh{&7=zXTZFkGpwBqAB5D3A z>IvBt(u>KAd3FWwb0yPnxPigim8&aS%{O<*Kz_Mt8sFEW9Gbnd&v)O=u89s-0v>(t zQBdx?MeLTLnIA0+2#^W?AR&?H2udf3SRXG2^Cujm$=bvToo_z>)zoob`Ule$@Pw{} z6Z>Dqd}^UAnS+}NpD|-$*ui&ihE+pZd%kilBT^bg;0zB$fdR}>AVv9Iw7mf7e_ zU)PAANN?d8ba-?m5$Nh=Xf$2rb9;?jeP>F-KOEEhvk-6E@~iET1n`;Q_9Ef!>>^@x z$zy@vBA;D?BAxK_Z3S`;xXGV#4+)Ll8Akj6mM7G;mA0^*V`KU6NMV7dTSk)@2xq=V zk3iy0wRaz8d(|1X41q!d9P`s0VrC|$HYo9`o&WQ4 zUe&fFW>!UwEax{NI#_XF1ggF)WV@pPm>BP*)U5wg9|V(Ywvd${HKcR02cDh2CBj#K zY(NFlRx7O+z=1ND(W}_F8Z!H96~iB^cz7qXGD630@|LhIRQol;Tnq8HgY0C7U^aSJ zU0TZ0s6TC&)^l~{Og)xjF4|4Os4%mm{mD;|Zf0veMrWzM_@2U|_0G=?Fb5b2D~8aV zKW!|rL%<@J@;QDR(E4^MO_Qcvh#%b{kwc;;sRV~%7m=moMD40=QS=S94A?EI3g4r= zMWMFdU{Hf}-Jzk@sadM?TG|{fg@!Ow+-v3w0UI@+)KME;zH1FroM}ctxFAS&VI1$v9U5 z=+5okPbEDqN8#i`_Q#wvkir7V9Pci2u zMkFb!0vv>G(+9)?!S$+`44zk}7Aa)-26RU}%8%X(T}{>;XMZ%Wh?)y^`U-w)B2SU`Z_4y7TkAIDVxH%zp`AllR3imbMZ7J6tcJp1~U5+C;Uv9!WI z)pvBN>m2IFe~+HT1vhvl3~C|HOwqd5CO@s~PQfDCz_df}FzGS(f`E#hL9kO*nN8}V zf;kNTPt**2jIjEC^Q0-}pd}4;c$slD^CJxUC0_S(NgIS;^6Kb{x&mc#k&;JFI!%+Y z_mouDN~+y=JN-?!K82DACAaL|#;$+P;jVtVx?M2FP9vyAm3Y(MzWoiJemr4TY0RTZzk9EThsn+ZCklA&Lem;y9j;t*%DgMjI+e z$4S{P?*NA@IM~g036l-zS{$Y(Ri$jW6G#v)P5=u9 zGW4gW9EREU@LWlh^SpXGNlih=n>s|Z(c~~#;58C_iRJb6Yk;sSqcdaf>rruXANnLvz z|FNA64GLNryNKrEH*hCPW()F5?W*WZQl^ z^Huie7g*n@wvpOX@94$4pWzO3Jzv_eI+@D4_JWV3EBY^kkEgMME+Xd;xUT#O?Jx>Oq4mk!h*P?iJgox>K%36e%+ZC_0D8AndUX;JofLTK9!w9^waIm1a++ zWqz}=Ezva-PpFG14^W5jD0PAK!CI@fib%ON^7vH)2cDdC#mcZ??-FkoPP-fXFZMQ*$iH@ z2bDr)Y(ANiJ;r&F;(P0x;V+!jMJ>_PxD8clb;8*js?rlqWwX8yg@@NPx zPA%(&5)ZJ1zLuHg~#({gINK z;U^LyeuRXta@Uh@f}H?+sMy_<#z^;# zne9JH>Rxe)lX&!-w?;}gZaj+6)5OeCxP9!K))m?MfN)H!;JZw&9(i$~;_w!no^Mhk z=yCMQ8SXfRzmVnH4`h&DV3ga6m0<68@~zx z34%1RIpZyzsIYC-eh^5*?ywh$k~F)n)&w>-0#U!QCPWNLka%paMu9sSM7_#hGtT-U zEqYo8qC68%+4&IqJXsmzq*PQTfjCvWyJ3|LLgG0eChMgE6a5;0z6u;dQZ4lNfDtVA z0n4NMhbRPLl)|VW$S(h`^;0bQ{`~?Ykx{fSj1%eT4~Q-ti}S{zY~Q^t@F({G+18Hy zGw-WeT2~SwB8G$}A%UmtK9J2dSS)ATf0=oLkp7u~D(*y2(;B>UFn=&leBcVO4pFpY z0k^#>pWTF&cA{7a-cAT2e#jfICuWt3Pn0~3^Eip4Nhfq z6LrHfOmvrsr#R(aAWRJ2ZK(|gRpp?}N`s9np`d~lH19)|3L{#Fq&lG%5(+fHhKWV7 zZTtS2hB-cKU?8#Tg=aD?+1-WMMePJGow9fbj3RADo1dO*2Er}U#jQBCKs^C$Ku*HF z6bitqMX^jLe)Vv<#@9Z~3jldphK#^h*b_uC(pACB5#nq@DPzcRnR+Wtw<*v|-VQKK zT_*RbzX2i!*@(1eAr7VUY{ zH|+U&6~xg5Ke$KwOz*>kF|Zs>1R+#HkS!ne6{~s;9!#Tgz;>6dmbC+su{#?}%Gfn*mbg&X5F9X{cf9kjiIV~BQd(Ro*{Ts>ZcVq4W7 z1%dly-MyiVwd+@wd*-*T4d`5%YAhq^gqWFdNbfc{tud7D8oQA>Y0;djy?_cz7&)bI zK`XsU<+zb@Mn+Z=n0b9`t8pf>EiYYZkB839VYL1qeY4U`#xj(!CHhy zo1WskCujBr$6PGl>l+DIacNz*I7Bc`EkJ3ADp%SF<#ZHPv{ZPUffVP4v{2rX{*8yb zP(H2#0Prn~)u55D-W{y=8JQk@E=Y~<+2)DPtkNq1Qs%10QM$giUD=4tC{c&OeG~r6 zNpQwnXAD@~Ko$XNFK#KHa}CCRt^geK0cKK}Ar&4UFE@7?2ML2kme+}q^zda-A&60? 
zcWA1F&9^l2AcHM{Un=O!vGj&k;cv}+Q=6(B{wr8fUzUO7N<(;MU}e2r!ECL|_hC|< zyFB~beP2NJP45A)S~G936E9CRH|*&`rrV+3JA-Pvzs6H#m_Nf(TqPFmWO%*uv2tlE z2zYuHzEBJ;6^NfW$JY+ zq^t$5=u;4ja5rHhR3qb@!dV~B>m67bCP~p!DKO;kW!^I>r-T~OHWHKWK#sjQ)ygp$ zmuTX8#zD>bTt5283+!>p^8Qh`T~tOw2nsPIi6kEwd-v~@m-UEK!m>HSYi52s?Ak;j zXgP-%3tamT^D>0TZz zF1M^2#h)R0Dxmgp>xNk2Axkcv1a4G8geqlI~i8sxEB8o!_L0P<#b7HJ5# z%)G%{2`?IWih}(?P~Q6|vn2=td2oEQ<2G3`)eY!1eI|KpWI^X9Y^_Va=P>tRwA@Sth0IaTv13 z%m@XCfq23?VcQlZ+5a>#<*b2HPz7mPFe|rUPF#M^*1H%YWh5NBlaJ>4jm*HX5WgQb zzl=gqy_M$W_a{$I(Cgrz2=@TZ6gIiVeFco0Q;QOkX4D{u{Sa3mF<2%LF8Ymitat{@ zYJr6?=}5Rx&hmE$q_3yLjeml8$*ZW0?(^QP${2L}MkH97H z`x%kBM)X&?kl?Smsmu~&N`%&$9L4e@+_mp0ep^T3xu)uwan?xOizkWu=j$U514{mghfM!LcKTiOTfWB1(m+oh=X$hcL8zBsyB@`(Dc`b>RJ(71_ z|FfVTpj&t%pV<>%v(Z{j!J%416Li@z9Bqx3MB5FTLn0}nUKx0?MRam;w?6cb{&%^` z9xtDJyz0?-YT`?@kj>_VfVZ!|4}YsWVP-s3Ou*mS+QtLSq#R0e1!fl&kRElQ%pk7t z`tq*HK_|yedCIs+6YHkMi>#uN`Y0|HStOhzdN&bMrEfgDSL%XxRVYScis6wXdsOQJ zAQDcd%;6cp0&6%vmQ-LShWQdqg6B}nd}GV#nM>v9KUYtuG7d3HlXZ?f8gf`pm(t%* zBm}*mf!QPQg|Z$PIWGcdSU%+uAr1DD?6jedD|o#cr9j#^&3U{Her-mf?wFz6G$%K# z#Jdpgp8lHT_HQa4bZQV2Hy!)(^O_%~iSZLQt2d7fxI13|Tp$Su?@J)dhSnX<==yF& z_9zt--cW45fXWkZmxJzgCfif%eJ8z90s}hh0y#1N4Gggi4(VkKXvjrV*Gf?aV(f{v zZ{R6;x`3%8wEf~%eX^z@{Lzew3=o3}bEb4Ci%4l0F`ttU0N&i@+Dvsp4z?7qVZ9m5 zCpd8~J*a%fMT%YEAq=8U3|7&We!`#hxLZ=0yBl?j>hnDYhWsL&)VlH(_#Y{bova1U z(9DXYzlT?AY2k|3!0&+dd61SfVM_+a11(7#d3BY%d*H&dd=}I#zUOGS`IpK1u&W-$ zT9Q}R!B896#5422DDxm1XZfMW^g|1{CX{osDu|b?kWq$vo3MCqYp~q=dPX|56KB6S z3580l%C{9iw*D$Bc3|6vHFY!)D-NHNiHaHFvUM(L$7J$#%pNn5H(9+bsJ^SqU)#j~ zTq-tVZ``)zYZ$>V1017)K5`v>AAS))?ezWSju-k_Cw>Az^AN;oX;I@0O&l z7|rD-wbiYkuz6|(XzXju;#y>+e5DZzNNjxv-E?38o(T_<#={g;!+_w;nBZlD(Ny|B zkHtDRC?ly@C-e}{<)(6-P0eMDjh?tITc|;TK}m0>lTcZv(L7%5r!8%(sq#*G0ZJ!* zCS6(59bAI&zz5bV5W`qM=IquB-eXT;lm5&noKs)Tc6EI69v#)$HW#a7n)BwT!8b-6*M z40(3O-Lg$+U*xR^{{Ld&GACDNVv1Q9AAn1|VdYmPA(gS~ew(M^m+1*SAu=_$*F$M1 zzHj;aDV!-h1%y!$4yu#im)G?*>Lq52O;$K8wNNflLjSz?}U} zsL}}|_JI+@S`wTJ!SO?RTAml3iSL7~5wtUVlox|7Bu0doWrW@`lPRG6u)SuNhP^74 zs;|Cvt;{Bo7xQ>N_9Hu3#pw9chqP-6+>0fj2&R6;!R>{`)O8l(Kth

    ~9?MoUQp>fDe+#?LHw2Z@ zfbtQwqGc#nysTGife53sb#2JM#{s$xTm2?LYZ7E`=KRG>ywoD;ep@cdLdj;Lrno|T zV6B7@{n4N}aMf);qWKjZ6oPM3ZouRywTb3zeLVTxdFh@|8Mc;-Ndtac+ue_aG?!+4 zS_GsxDB=%EAExq5VdVwR@r;5#=bW(N64KGCa7{YXPu7XG8_K32Jq0;;|F}wW^kIl1 zlj95`sh?iSjm1ogxex@I?VT@cb?*9iD^k7OmPUcDvi-X%K3v`S;PBo2fb#jFUz!<` zELR+&o}>cz@+(fC;LT&lJ0|rU^hylM!i}2tj*-O0&?#q=c;*KAGQADE1jHoGl$8&r zTE#rTpe&+I92ycq)&{JLjRnGmTc#iL!FY%-GfjD(DUZ5LPRkA=tT)T+mhtmc#P1P-lNYt2@&$xq8!;Ki!?b((^Kxzz<7LR#3`&}4T)!EYg z26&>>_iywQ8SAIBB?XUB@f?3&R6L7 zXdYg1$8N_63wmF>7`Hor{hElCNR3^x@}yc%wjUG6EYXMCBdfdI1*i5TSj9KX`0a?~ z!~S};e8D`?HNs7aR|{wP*x1U}+Dvf9`5s2-iz#QKN#K6F)-_RbYGLbA*IO(``b-8k z9pZh{YR-$#!CuS~sIlXx!5FwDKOIHJlZ)t6cEdv-O7q@(qfH-8qab^WIIV&akS4iz zYCLs#5RkK@!k+LYQG=uGSDz$KQAZ>K1-Y%ol{brSsf6xnb3X1q)AfxQ{v^Wbu%)0p z(@9~QH$!m1he->sCtx|gp(uzj%-@=cMCjf}5%w5GFC?FJ3l?F)h)kF;9+yOl`WTmW zxJIEkVk+CqYGT*Pc%YBWC1*OE+yvZLL?Kn1`MBHChBpxt9{}U>PA`tzmn>RUUfQL< z2ka*u(2#Zz1Q;9nSqi<<_w0S9rYVF;K}kCEBs^WHUvel~WXrytY!KXv^^k}w%kQxW zFI~UpkfDmOrWP{2)cSe1LS!=DQr(3kuOn^u=$YSVTtoij*m~o$!yY*_e|r|KiY#OE zkJTaC?e%#$v;*7u`T#Y+tTLIaK0&gvEcPp>^ElF^@c5;A_#Un8HMz@2j}oDZV|U9l zXd#(2XjG@{cvA3*E;eFYlVxHf@L|Q%hsXa=sq`gBlS?Jp-*UASO06t{qaeutt5eQV zlYD8FV0~A_^)sSLE~gB48+N|^6uusro^)S^1%G&4^!5O8^nt-vX&si!6syoxy%uzL z3|?P79!#Zd#1gq*kK#t>YI=6IY16<&`<48JvGlxujrz|x>M)Tu7=Gv)9bl{DuekQ$ zHWNvLo1{CizN|zd7pzS?W>T&8>o2nv%4EsAV(~Skt^rsl(P5#iT9FIFgE*+Z!WObO zQ#%aqXwBp}1kP^ZuMM8l8}n!a(9n5OdM|HXT3U%bk)aQEq}^XVMtQ4BY#zCgRzuz) zxnnmnw=P3SS>GD3=0jT=%a_Nqy@x&gnzV>i>%Mx^>fOLM8Sv@{eM6y?_`0_D86QIn z9{OzwprMSf-H!rMF2cb`Y(~^ePNZ*q%!bo9qVb3Tu)ovrtKY{a=9c>`@!m&C`NAnh z-FINZ9yNOBt+;n$x%IC6^)xRdj11{4$~Xf)j zF-rGG(gTPYOs$(xVA=E7SSSXat5E}AxjD2rUzfPbS%ZSfdd(bCQmt?|+c2P@B^=o! z_IKq+$|-O(wrKAKt#i<4a=IWkoK0m77sk00p-Oy43?gE_+eRd8;$WjIsApmYt+W-` z#&1+jnnopJ&z;_Nl?ynpUpu&8x9D3|Yta>G&%xAAkK111i^Eps=$rCx}NR4kVz~uU^Bn0Qt6BOs{f?$N zX$f}`!~Il)>zg5_zVob#Lf@0iSfcwo+r4}XD)kO`u7}B=2T#xX4UEnzt^wgm9$&lI zxfJu~%UTQjhN$0);MB8(Vq#eP?l|O4SCuDLAH-~SbJ8T?TJL+EaekSjaTb|weQBf`w{L>X_dp(ldBXl6~b`Ieq`0yzHz>&Aw+ls z?yj0z<^4#z3QH~iW;k%;M=#f$nTFl3w__lMVr1YjZ5oBD8&Agm78*K zwIw&JRVYqUIWM1g9cnn0;yzq;Ir!YCz-qt`h;FE?$)m->jnezAYGe344Dzhhvbr3-xO@iG&*1GH*S^;Axno(*or7M1%9Czcx z2%f-mcs)(KBqZ=m=raO*$LxYzHl_YF?Bj5CcrL*~shhI1WK7kF2=whH8RgE|kJI{s zSp$0s)ztJkLAOKpivqXN*S_zXgJX}&w-hStbRhot#@#ayLsNWxbEFc_1iQN+3ZD;5 zr8&%J++X|{eoU2#x$V#n06%ak(TFx!~P7s^vx9C-DM{s+V`V%>A}3hfjlkks#$vzk*)h?$ioNN5TAX zn$;2TfG+9`iWrlpk9|_N5_Zm~#>z}VYuBp+w!K z#qV(rU7sUWQFV8Hrpw-bZx@DfLn$vFQIF)Qg0YV6=AAghp355|#)QA5Aqsj&r`Rru zP<#q#dw1}GiG+ocOvrpnc49CkOSjO0ssW$wlP=TRkM5DgA8EIl7(w~TT~qORgFHV- z$=-7Id`~ojdf_tUaNa}WIymJC>kY0Dw?a{N|5!#&`~TWFhh0$s1c_eTwr$(CZQHhO z>s{NnZQHhOOwXb-oB4=Js*-ccSAzUC^6;=*`RHHdrbmpvWBlD2#HWtDt!ANCjx6p< za5aWT_;Z7KMW=KDJ2S91gVS)t-5?As3>!0)czB1dR|jA#-AZcA`o~e*a5LV!{AT+2 zXJ9r@MZy5w&nBTkIoZO8s&n0uF?$X5-*%^cH#GsyJcO*+bYdgAF#^C9fx~(yWzb#x zDCG|(@ie2S;mjZzLrmG=1s{|Ai;8Xx_-849*@&_b*>V&VFK#}MQDrpNZ4&bvy*bcI z_EQ0^sr+}2?7~A67)FBFGkCQTLVqgl0O%+EzcF-@-Y*sIm>jrzKPAKSz;I7m-#+ss z5?D}jOu_rgC305g`%}_I-)K&due7;rx$@{SKU7sR@1{S@9_V`8&AW}CgmoVL3Fj4ti3^B*Y*_YEJH-=y@}u@$yDlD?C7Pg>mr`*0fGc&(&9x)M05X6@}xt zSE|d$jL5{2730Jxi)yO2T;{V*As9+`3T2F%QR){TX<>L8MM@wmy^bpSd^3P&_CFAPb!m;t!#2amHPVlc=80xT@<@wx#BlI{ZNUDhyra+caym+nn|gzZl0OL zVL*w8;1obl%UShllQmaFqU#HQiNvX}UCx1!Tao@;0XLqiNg2qGwKj(RDI0H!GnXq= z#?|=CtO8M+!uFx$4Owd}x9UY)1SfLO(0f+s(}StV05#)iwgaD)j=W05!=}#FL|#bP z%g8-q)6EZP`RjiIN3=ZPG$b*hEZ?NbG+B+&y^%vge5bH(T%-_xV6QgKhT-{oK3K1z zTcIo}NLVPwqbb(F9BZC1D;4e)zKQ8mqAkLng3B%~EuQD4=y-0WPQrb{Y>6RU(7ntW$%TN3ua z6TfNSAfJ!%hPzOh$YcKCg$E0N=hITtKp!!9J?c+*DgBdz+6CdMqLL=c50B)mGLI2& 
zz(m$%vAn`{_SR-XgR?I5QNI|e=Da?pXrMB#i)*#OHI?{x(JJvESFAF{w^P7&T@cJk z6MDRBK#`GeV#RVnMUHDDIB)I@!QKvFexd__x|Nc6wkxi)L~Sdffx?ZRIJmr44Y@!F z=#DaU1(}9Gol(QHm`~H9NoG0+LNCl80^a+Ix`e`nqw(gG>ld86`G>L7JKbOKAe7f& zPPL`>n~8bP%0ES~*Rw~huS;IbA!u|HMd%7z;M}kf%_Z*8UZb+Z@uKx>q8pTfBrt|0 z7tqyJZ_Fw1@RECG+^Mj$vfBp;^gYs8IGw*b5@Z1?#(;Q}iiwo^yEHF!IM^~Kr)?Ch z0m9|@AMkePkc+TwB-3RoAhR!E^6%;QxCO`Id*1^_-2Gvto8tN%khZt|>KT~M%7HMf zj*oxfT?sd(Y-rg0k!rf?*7*S5$F}zakp!lgymXhPT2mwmKz!ToJUazJ42>=@=X=gG z=9nUK_^F?S#fPmY8?wXjZr|{Oo0?eH>6eN38TmaY@A#>6mc6hhU+0u*1~aFtFOk1L zC{-dEXHkc-P{egzCIS;r?yT+#Xi#ps5_+@2&-%4M<(gd=2v_oj+aK}E^fIfUKSDUF zg1>aFsfr&R_iIzf5!ZzFm{+7mO-L6^T%@t~X~RFV6CK6JjP4wb69_=y&MXWw8|H#o z^#;}OtSyTk{a#bc&l&H8=ZG?%N&OF86E*Wz318a~`rOwU1G>ZoOVh7-LTOwY7JW!9 zWVq9x0$BF1Aph<`fHu|UX0LgwZ%`s_4Omv7R74_hmdJDZpkM$Jn=0!aVf$eLxxa{O z)=t7vB?M@+(-1hKhXa4n%;d)!0F!2YRr5!wINEN^SnBp02PG|Y2}vOX-8e$@s}Sux z!&SeKCy4N!;bjhcCljAIhaxQj5E$J(Z0#YjaCO+XBj*@-7Bs{5uqQ;lQXN}hPm?JD zfTt7kt@^!#v4Y7r|%$fW*) znq@E36Zetkd>a+<<)F-o)s0Fz0K4v$$ycHDbiQ_iFXESaS6vJM%?gDpBfWrc&1yb$ zFk+4DOMOPCm<*zaKxfitJ#YObNDt}b@X7soRI1accZODs>J{t4%`EvrH@ z(c`A7owMzoo8OPf8*Hl$yrbCYmazlTnU&7Ahn4nkJAdTl!T3cJ8<&2nUh8VuO7NEE zyjzfWA^rX1%xS2Tt|)O(1$xnwnRDpw`yif1Y9)#R1g+JN!3E}r>Mqm%sTASV{64HG zwCM=4@kEjZh?-I7PwUv*OIiK)j&`w;CX|xU&pvSzdZq%V9obXd7ebvRh4!&vd{JRN zr6^22PwiF)LHsOgTyB_g%?0|($#r?gHvHS0W~6$@E(w{VeIzFp(+7%F4Uu{Bv3d6G zkI1lIGUxwXxw8dNi|+CnPRsm?KrdBKOBze%B!uq;MSX%Q-x@boyI1J|YEJcp7)-Xy&C8KT zgaul<+AD&}5fACK!1tyuPh&=vk)f80kW*#Q?odPLTTPpdrSC81lO5kSC0!5N73Sz{ z-Dc|ZSo{p2*JZZX>4Kk?3%^5pHrfuow95|WCF>c2!~gI?jNIk?QX^C2q$6NmkDw}W z0>X%iBMlOV-g4zbD5UuZ$mF9%bs|-u-&EbrlCie=q!w$@zCQ|*)j8nUJQnhdNeu%wH=ENU!F;gKIPmp<2 zapGD$c4^aR0RDzzE!@X9i#Prv#Xkil2`Y)$&`78~Q`eyBMply-wFgEF#Zs9KuW|XE ziH*9#Z08v3w4ARF&GBCPp&_8jK)^*H3fE_3u2NfYitbdA{P?f$i@q0_p013Msi>+T z$zh41s|L>gM7QUDf#%N$s4rY#`i6*%>e3shRur5 z_49LRkCVtI4Ion{vX)Mps$j0+Y=lzZ|HAjZtHG#xRioLgfNC)1D2FhO{%+VSO{)6T z?umpyn{o~pi-+T2W8_2uND^|@0Gq`615JNIAt>Q!Yy(Pl)y3rs!yN^A)oLM2?NnQ* zL8Hc5C0SZZYv_H^b?}>7k}d}8qWBY*fZUNuY6WMG`UB3(k;Srx#h&OQAHmk+nAme^ z;yEhY@YGdWA*2nUykGqoU#l%OOtu&aVf+zE8lAkUp0YS7$fJ$#-`{!=dfEiV(;hBq zP^$n4I5zfZdf6Ju5t5^s5oJ{&^I*!Q8v*A`slcmUgCFr*kH;6&bzgL|`>RIuAlhXB ziNz*;;`)S1uQhXAHG<9*%b?)ukm}4Spu_Y?>pwY4S?xy1=z|JCUBy?1vh9nrts^N> zGddOu-MuDP)%P4l0Zy+=`o?%5myBE{@DU})6G%4s_`t)p6ek><6Lj?@Uf&}BkNk_EhnxSwWzkf(Wmz-~eVRhwmy>Vwx2WsK=XoPtG2R`?b}C;nl2c=O{|e zwcr||qL_>aeC;x|{RIQ5P`CqVITc$sfUI@Xml(*qW!GOMo3stnRaBgfeqX%o+mtGt zm_2Df^L!$a5(-TiAm0YD5Qg=rmxhEhfJ*)1Ux(U~1!ur6f-U+G0UPc=^aS!aO2&oQ zXFy=w3@TzR z8b9L4lHFAa{Wj)JpFiIMi|}d7OH^(CuqHW|o|KEFb((60e^Sc=c;iTwEV1islMWbT z9#i}Bpian(5t0MxssV2Q5|2%KP<%Q`mAn=uy+>CNBVPxlN6+YLO5?3yx3S-WEKVSz zw@&ue`sjDP#A0Dv$;2nw1tZ2%>Fk0jX&E6V(yOg?x%{%;4Po8=IruLt00CsQ$(4P; zcNH6801vj0Q?XcxFZ5+JX@tgHWV5tnQ2oy^c7ZKvalS_RQ#Mz2A%Bg@_&(_b$v}O3 zXvFA^f6VfU5`Y^i)0FK=nKaxZY{-ys#dDgP%?%nU6w(8FM0+U(=VPvP&n)=00mLqBbO{Oy^d*_R@r##4cSVN!u`!#sUey^V!g*B~`gIbDXT zxR8CtPKM5|4|tw$qUJ{!HKu~b!Pj5$7%6EES%1{fed(AKE9+^dvk;2!)P6DmtwF}WEifvVzX=@e6NaAlsTBWxa4~TG`h{iift$T z#d|u;YAcv_lr{;xbScsfF^k2w0IVDIenHYMih}fwQqOZ$g6tD5J!V@obnM*fvG6l_ml|W@nK99&s0m4R_i0@ zZ>E(1b!*btE#yQOZo*=Fg()>b4QWent7ople06#m7V`^^%EzD!?t|F`lYdK_?$h-6 z-yEU#bk2x6VGZf%@q6F7WYO|jw;ldy6h|-<#Xl07QRId(?h#1Nj!1iP4DsDuWk--7 zl%pox_^$s%Z|1Zw*3SuWK*9WVRG&iA<+8J4!2}MP&wi)FD))X?{|DJrn zYE_&Y9l?QOdgVL7X|Re?N&sT;RTF8bwK{qlT|(<6(EV*POhm(g&3e%~2iP-oLU(t@g{-Pac_>0m1o<0G@*@ z$X65=8|g{H1g6nU+T}QCW0hNaDZr%gJJ1Kg%o+x^BJ*Xda83_sZ%-#+GUYJjsnxdM zu_9+S3|iMN@2yGu51l|a#8Gc6AqC*91Lk+>P$@_Q%Ff0n(G)w^eEo$|=={o%?`bAe zh{uTtb-c*5TCQOER$*l!S3TkZMMXeyE|ZsOg>Fpia6%ak(UieQ6j^#YPsIauN&)P- 
z8aoMQimt}%)#xj#WWSj-B=B*FC1y?EIo5Me_mQE{Hh$`lEujcOI;YLOW5ZH%0Whu3 zVb%}^;?EZ!;DqxhvKr2rgy3V4ZPBkwW(v?; zo{~;ZeEO~-$-j)&juI%wWNx5OkHvqSXFv{Q3E~)S;`*B6uY8qQ$X(*#>#I}-5I5lO z=&}#A!m0-C^F#2wq1e>3T2#D-1^gTl9C`Oz8Fdj%pyK6q`VIPv)LFLDxP@3JbMZL?g-RF`x9doB1M?!et0!FC!<+_D%LSq zBLRYM-(@KZQPtY=Q3)VeILdv8=6oEAc)$p-uf1o((_+~4u+z)} ziE$^t#-Ej$jX`z*q&C$7lo`*L?pMUCm~X%&MB zyi6IUh#dOTjYW2}{VEua0Ur=D>RNjE#dDV%8Krg0k+$}pdu(ul4B||8TCO&1oVFk{ zKZ~Ib+$Ne?Pt)QDK^?y}Z0nPoBUSWRTpVtD3-$9?b);Ll3@iKu7`0O9>blqF>o&-FFA zZPCSfZ;EIH@44LQ2k(KDhj>bj@Hw?ct#RiZeVUiaipKa)%7}a^IT8)e1RA^lp-}X^ z+JD0era~+RTdeu;x^TKv49z6W)1>_|g&HtpX^B?mB&wx;^kRUt7Bd)6!=3n~_e`F2 zerdhur6jSR_Zh*(KK>?>lBA|e@3*5#Cm&wn#N|>;3<*nD&$@(kyq}>^l#1edPer?z zUm8&Eic9qO=(9v4WD1R9h=|#E9bwW4;5A$eK{;X-j`0*PImbf+=$COX1d5sZpZk5K z_}e|Ru?34l5mxVF^!wzK*Z3*`xSZ=vE zv~j4X_=-fy)=Y=px+;lv43+A%uBz7tDHdZ6Mf-g(VNc>8UaZ^Rjp{5MViO2JR5Zmf zUS;xKG77&2A~Rz)m_V2+!HxTJJ!g_4O-kO9(Be!laM(NnQT$GL4n{{8iY#^AJIZ+x zE9!e9kx6joF7OlJ$PcYX>>+;zAQw$1+&^3C9q2#OkrfmXpPiIzI_Y|~2=Y>tw~GW` zn6IM#jYT7s$o@E?`Y{d_0Z(m&S(rrPfcSRpdIT&?6aSPVGZrt2q(d-owjSyG}}sAVkEm#&m`?=GTEtc(g4 z^1@2euB^`@>TZsPDo0txvhEay+Q-L;52rmva5k7 zbkwelD)%BqkCUk)kU#@QT*0^omdO`-X4$hj_IVRMrhkA?M%~+d-uLv%1jK~!DcqLI zoiG=!<~Bg%Pc_cb6BfS4gJ^Y-MbE62lO`7PD`0s3`%wT7&9&e8=D<=XP34x3MsgTM?^)%mJAM@Dq z^S!LbI=LUIB$$xMe(hsgELumE=fqjn_w?Zf!n~2xKZ5#SK);`vL!ts73bi0n>|{Bz zjdGq2**#W)uxYZNS@V^c{I`&#Ty4LNAuAmb#kRZ#(JH?VJdjKUdU2U`zQ(44m{BBO z{py<`^|v^4n<+r04}1ov05!wuqKM-AwcHjjFhxc^v(m%Jko|olbf*?j7mHr6UPt~s zE={{Ki&P6qIarIqEx?8B?#=eUpGlki#e#*LMe0cHBBnvb+3n$A8PsOs?I(Ch_ciR% zp)tG&yDvM-cduMs$*`j7Tz4>^z6*b3AWZW$RjHPBtcHy+m}Nd9imyR&Y4NskTRZRmnUuwc+JQ^e2N(?N=^vs8Rb?wG`&DrQ}DFhz1a z%DBW^L%>?8=6Omr{hqH<|AaWeTp`=D9WUAgRq;@vYxOF#*5>9%Wkr+R`e@@x9~jxk z9;QXJo9eak%6NQe^d*p+#JO8$h!%kve&f*$#cnC=a4{*)OtBe3u>)6P7t{yNFuY=$ z-JpuFg3Ye7joBXc5CSXCxIh^<*o?w@!z1%F(XJpT9?!m^=8IhTM^JXc#zzbsuT%sA` zv?A$zj+c+~*lYh)z$=`0ehBMzaDVIZ#tR`QcFRF;T}vF0ARI?Nve=3VBNveC`b!!6 zU3Z&+6QY^dqgfSRwk1cZ15??G_CUy*d8GQjav~~!aQ+tVaFtyNI8^Nyw^W3(wpnT{ zWhw6Li;z9ZmVHf@8OAch%*fJ$LL`zBQlk2Zh$LAnOG0Rol$4?@p(H6%{qMaf#Bhyl6E5n$ zm6F7}{JPuk=I}MD*Yu*4_J}U&+2d*OSGF>WYum38N$P)-vnZ_ANT(L*d>~{_4%v$8?Wz+?DFt*qFJVM=Vo! 
z(&Wp!SEUZOhJ|oGLS+?bA1rrm!enRib@p+8)sv--95i(Iew!Xv{OZ(Y$B zXe{OWJ)94-kZM;XOw#kA0{wn>%`pYs2Lf0~!tE*J}ZQQQ;>_N7wL<_c3KBe9;f3-3uFGAgh|9d+2BX7V) z#9;o9rn|Am@7q&f81z(&Z}1v%McnZg{b2b$>3I(KVU<3acZ_!pqf_Rsfk#fY|P;9BymA0+Yc3>~6EE3>#&RxS7J#bDd|vK0wr5R*ifQHv37@bZ zcJFHUA^3vD`G#MawO*bOwwi#y`Lfx%i?LqtI&xT_6@b5N#ULqg3Iy6#YS9M^r^G%d$K(;$ z+hHZ5Q&)#ix)zGkm$I()OLA*c+PFeTF6&f|Z{VTC?8v>e{jb*R9|SZv(>`~NT$TOs z-N?xQBDV0?7A@`_^&eIWbxmPZ980;+Dl6@>vwL^XJuyp5saCFtyqo$w>DdOu!RtY% z>rGijUiDS?nq91~l-jj?9IuOHeoatriO?N8wu#JenSjlk{fI(bQtR}&`I%Go{}wQ{ePo_%p$b5rKQ+l zjdC71ZyX!A_my&&VK`w6F5@^;a?JnKm-qLZ>0KUIF!CxQFB3)|%N{tFkPxV-8z_wy z@QbS?-iois(Vc&yjTyW{9 z>I@AXQFc~#H&jX+6vFV`$~7-ZBdY%HS8a(fYq>kW$e)~^k~$_c83 z-^S*GsVzDo=c1Fo{!nwB#_K8vbI6)L6fdWd&HVo36(EgPnPFNaP^i6KR(~E>O^jd%};wT-ZzJCUZL6A zXMeo2?Q9sAf65hpztu09E?#9n-KOkoCq^>^h8ejb7(M-^ll_q^(!Xn&Z2Q>j=y_wM z`6zo1lA(2YE|Ws2@SQ_a-sOqO2T zJ(kEP$gr}>*oSw#?$;K_mJVuaC`lP7-8v&h+IIL{ ziDuK6o&)IzKAvo8#qb`u{n-81s#aaEKK~ZSKoc(;y zuk0J!e(*@-QEOxSSW5Ex!@}69efv_gHC1>yJJffPwx1gwUiYSEEbzWGwv7H*YVRk@ z>80R*;w?h=cQ-^+);^=8Q=DoCTZg53mA2OE)K6(`9T!>J&&3^7#E>1K>=Vs)D#+V6 zw6amz_!BWidGpBDw|ffA`v-~@u&(35-`a%s-s+(Q1~xhy_&pS!7AzF&YQ`w-uZaKZR&fH)>4pHJg?94h_7$>MJ|B+)+;m z7j@)h(aJW3$;M!{$@(V2@a8Y-;u+@$wRa4}#}q~yN>duFR&2FVST1z-Apb3S0Y}jm zg(W@Q@m%)>uAbz3>PR?-M{6MKZyKlZi2icr@spZ7y4pLq(Dm8Y4~|CGZPfCzjh%%3 zXL*%gSQu^j7*Ey=r<ToU!xzR=6f*haYaQW=Uo4_$H|v{L;Tj2cAi(tIHPz${83@ z{GJauEgR;#_SIM?V`oqwAJ^d0dbjvYTv7L+A1gvFzo!^@7RQ_PH-GFzQ1)h>4) z%C}Rlb`~utI)XnHYIDR)l>7Mddx5{&<1@^EWDc$CQ*z=t*Z#1cWjS)uKs6KagDTPC%XTxw?Bn@tTBm7YTUObl zrarlazj4rue9@Pc_N8j=m3PJ`@4p@#US8>au+QuC9;+qYQfPw{h|h)VG3ZT?xz^@w zAsa__S;`)|?Pzk|$&M$KJI2PoN#GMkZ~W}?i%0c!)_ykiJV`9?H5G{1oD>o(@k>~q zJ0P{n-CRhw=2E)J!>p$-D!yM~e$c;(myFpP^x&M%Ing{tYcAzZ(}$R*?ynM0#p{|g zk2mH7z0x9x++NOlGBO^G`3UH7?lRISv{8*KEPh@0HEs8X!w0iuKUR%y4^LlSdF~m9 zO;6iK@w$&qm5rxvZ47>ylq^~C#g?l)K>$UHygj%64Dh_!k0imW#ZcHmmr)Ji(^P zEJG%7@S%cJ-HFUrgOapLQA5SLkS}$Kf=uCWu0+~uiyBD>*I(OqZB4njeAQqD^_#3h ze2G}wMka>{_X7^;=e_x3S~)srA`B;%{R&QA(LvSg;p$uV^h6v!B0K2kQ$fC|h$DJ& z=RfpvW_>IERQ)b$6)vl)Xt|*D_c$56dbsir#EW20Ov?j6X# ze&wr5XzmumK)8#4waC*IZEi=>tyOZqAj_8xivB3`NYHJx63+23d9p3p$^HEx>!*oE zhYTXc#CI_M6`}Wq%hqMhJeyrz3Z^={@4wJGB3yUteKAHL6q}38yg_mt<-cKTz;$R~ z+^L$|FPm^iXt({vb03_$h1^^@2MhLdw<*c^4|QM7)Ncx9c35$rR+oxwsqi1t+>(E@ z?E`;ks*TNGO^na&GAs4BRRGuNoh08?ZYE(`DpQVueYa{uvh@+b+k#C7iS2buqa~9k z5AJ&1t+%2!S*c9Q=7SkgbN%K7Z32%!;0s(iJarTQvj6;=8)M;J!Ea^uw-{x`R9AYM zeGuPOVtHX~@UIa?nVRGf9u9PDNTu%yr%vt?j=eg)OZ3$&BnB>fSz@Z9MpAL#w>+PG zQ67?kIm06$VA`-NAx7UP+$+Iw|B~cV#Iz;uopZ7LNkOj0Z&|8$&r3KIuC~#M7HBnT z4b9j7ko(Ql`YP$tr1dNERq$^;huZhJ>jZ6Z+beESQL9~b_jry==%dg(BiTBZ_k~ui zzcv}QqwbD}iS!c9&-xcka-MppO*$lQObThZ;=&vc?Rsp;qno&1ncJ(tQ0r@a2vx;V zbxE3a`1%5VXQbx!2cr=-Ii*=9WZot-AGsaeGB$}e=ZE~!$LMP*r$bVF+Ye@K-Z&)7 zk@D=Ohm+yb{sLkJN8ptpj#S%yQe(*-S>_^F_ls;b^FeEk09N1lymGFEj(st$Y_4z4 z?)LV=UE893QBB3}qvPj8X1DIGRy4X>V?vAe(29AI%$>bk!D^*>ibb|EkHEmDnjPzt zjw?D92|X5`s(jv3s_|?sW0-8IXU=!JA^$C9h_TsA)mWfdGQuuQASrIcz{`oCX+xWr zvP)cB5{rC>WQwB`hE?As+6nTvB2;3dS(=rXQcr%qocru`(opaBhcER~T1{e#_KVxp z72noQiF%JoFODb|lXZC4+_fRn@O?58@ip%Uji&6BQnCeA?#}U}lk#fU+6^j-h01T$ zO0(7sskg@rc7)T~-A=0x=A1zOwZuZ|J8KjZ&|L1Dbv$-T#Nj@oy3}2|$00a2rZO%$ z`g4Vmn(qOHgV(7+&w|q4Bh^gswNtW`)btzeH?7k$zw#U~4?TJHw@du0if^R+~h06yMmlfIROMhC`N$(L!C}~iE78Jgi0tu+rDE`tyz|u9s@Ff=7|#r{kFS4_+}&KMdoA;Nwx=F3 zlp}4^D*m2U^PQnp@BJHjkJ%X%R|$U`B?bz(893yXv}AHWdD9zT+Z^Q}_?nd$kBs9L z)hre%+@Bu7UmS9eR9YhxP-KUlbl!mk?!4znZQ{q79!NcGb!de&{iCs}nBK&Sv1>o9 z4_uk>^~jZfQz2LLF)%Ea-`bnlBU9&KTf^klxZaSg+C-6Zz~v`Rn;T4XB<_A`{UW7@ za^fl0xbCxpW@{!su+g$}qAGovIyEc1@8pi&(?Nxu&9ZMt*Pgc;=Pi}o&8?ZDWzpde 
zoB?b*40cG?$WHVP8JgBtn^m6H5P`w@_+Aa0w>bV=qiCe=8dhK_rCGdA|*kAp^Tu}6TlEN*{dN(fmw;&-e z;`pt7O2$RbxQ{^%EBJkV!>W=baY?y3gxJ0t82xU4({bZlcRmH{gfQc#+;c56yMl^r zOmdcsINGwjs(#3DHGRJShQw$}FYfs^fe=06EdiXAJ^fqo^+@;r-c|Kk44|#RN zaD-#*(Y^9MRAlGY*wt;3N}ZU3&PdspTL-D-kKAqZ04`rVY?gg1k}hU@$tK0E9dJ9CvW`}ELL0sPT2~8rn%0slXQ~No%ib4{ z_B8#4uzQo=Wm&Vtf?+6^?RArMhwzE^VYK>}YM-J=1ZP8Y_D4g`?dz83EV)(QkT7wM zb$;O4>OE)kA{v|zPYrZG9UmB3p(vUyi51>^c|%>p>U*YE90{uH?|j}?*79sHY zp}UL?w{PDC_6s$p)95JVf3yq5p%BU#6dpmqpb#h|3je2DD2_y$-z^mW_`6#u8jb#k zZlQQ^lfEf2a$SMP^W$-hc}Wa0Of$ zE^cHWG6lSc?if(NQos#xqx;Zl;MLc6rr{{u#}pv@2e|kE6o5kA3-uuNV31uvTYx9v z8SLjtrh!dKy#Q~pF{uxk$prU*8bG7c$N(+C*Okm*Qazx4rF2l>2l%-#$TZk1d^h9O z5A;rV2mAx*EV8?+55NGJ0F%5Iw9BLh0ZbnkrYFDxSe^_r8DIs{fdC+Y<_;Qmqch0B zULX(%210-kGJ`(987kcSbbd2b9N66yL3j1yQ&ZyuW+JRdbECUcX&!ulE|o$dgV5Z_ zOoRiP2u>7%K>`%$2L>Pqx%s&G{=xTWUI+|nMm#%_znk40?FcM(me*O*W~V(fKL|d| z5j4S>sZ!_xj2XsZ8el?Z!U3~86DcOympXF?b3YTP+4vyvSSZ#|PUwJHp$F&#+W`YO zJ9fhPVFH)}W(XqscbZrOHejmQAxOA+6eCCkFne6VSR(Le&0^68#S-p`82|lqhhe&!HszCrTWs zu`t2^i%TtAQyW9w-zeu-GLY-}bV9Zl07Zi>i|5ga{R4DX^`GfP{e5rdf1}gZ*nG#% zKS0s*N-i7FS(@g>9fURy=)Abkv`Ys51t_HP|1tV6t)$t2LL@9;6px<|6za-67y4P% ze+G(%QuzM^`pIHrHjq%jHMa0e*hBZZ>T0NzR=dG zpi6gCwqUt1Sc(V~JavSEi<>u%9_T}M_wWVDWGW)SuIW&86?uuX^@);SG z#$wRj1Ke0tIt`S<$PfxNI!zgDljJnI1)WO z%h{1C7|w(SIgf|d=3ViwfThutoI;a zghg?N2%h6-=HR5`0(P;dgXbnQX!Rn{v*|v!6n`(688!*CX$vVE)7{B{6_Y%t$_JSG zk!e~mxrmwUo0nnp>#rG{7i0!P!I%pE(b5*-=S~5CD5F%cNEI{+Jo{+{usrDu1xE}4 z4}KFIkw~N=bRk0*+V6`Cbmxe~g5OA_Idp*){GdA~blC!3<`ndx4<68k3SE4DU*;6_ zAqnHRf;)5v>%&&zLGV-9G<=~$Dlnb}o^&9wCv*w;tpFSPGYB3z=XfWi1>5(9?iScG zfFytB{Rf^m^kB{c%noc5cDopo#X>VMnz_M$;KGE$gXf;vLE~V<@KhE+vxT0)u;FpA zR`73q*vtYyX3(=47m9!-RInCASQ~l_d-)R|I0o>?#pZM&eV7^e0!Mugw$blrus@hJ z*up>9GKTcw3BVp344N_SpN126Eg>8FC_lM(wrEE zJ3|Jf0K0_&xG4fV$-#kihC5RMTneyHuwLnc?`a4HT~#y^jRrFwgGM3ISb{PVE009V zgL>d{{4W(thKrvc*&PAsyZA84;F7o0vsKX#@bPhVq0yk_Xz591BEWw$%Tjq}St8IX zBozV{LqaIbEl|aIyKfMKOyNT!2qZpm7PDUnJPwD!At;F7G89;fpf3b%PKL%47t4rv zutF`=!+`JEi)1J~xc9N?VNhVvVUuC;V4Y@@;h`e|n~VtU!V6_sBoW-Q*ktI%n6X$Q zSlbrr5eURZm~lw_A{i11z1%Dui^Y=`%?(R{&I|MPPz2(lu_(~(-;E_L8cV?8*mwrV zVn~az5EpZWB_Y|k0>=`d?VSxD8rmc0%aBMc3AczV91_JgUvO?1(jq+~9y&uVv_~W? 
zjvIjlZKCXYBye9_C?nvZ!^=V$4#mzD5=q3N7yHE#7TF`>iHl_@cAi0d80cuW&@YjM zWLpa$2ZY7BMkGOJ-GyUGNcMSvWATgmBO!^4=0QTS=N~k7@j4=*k!<;bM3OMFpc%TR%Jhq$!?co;J2oed&K3^0P%f1$nC?t`{mP1Gs3IqK{VgVKuim-@3 z6gV|D?m2&Z74IBx*utk>@T_RCS2T9V=)j<5WiW34nlQN;V8hGDA5s_FNj&I{e JJyU(Y{{i`K-39;v literal 0 HcmV?d00001 diff --git a/inst/doc/BuildingDeepModels.tex b/inst/doc/BuildingDeepModels.tex deleted file mode 100644 index 5e0ab09..0000000 --- a/inst/doc/BuildingDeepModels.tex +++ /dev/null @@ -1,260 +0,0 @@ -% Options for packages loaded elsewhere -\PassOptionsToPackage{unicode}{hyperref} -\PassOptionsToPackage{hyphens}{url} -% -\documentclass[ -]{article} -\title{Building Deep Learning Models} -\author{Jenna Reps, Egill Fridgeirsson, Chungsoo Kim, Henrik John, Seng -Chan You, Xiaoyong Pan} -\date{2022-04-03} - -\usepackage{amsmath,amssymb} -\usepackage{lmodern} -\usepackage{iftex} -\ifPDFTeX - \usepackage[T1]{fontenc} - \usepackage[utf8]{inputenc} - \usepackage{textcomp} % provide euro and other symbols -\else % if luatex or xetex - \usepackage{unicode-math} - \defaultfontfeatures{Scale=MatchLowercase} - \defaultfontfeatures[\rmfamily]{Ligatures=TeX,Scale=1} -\fi -% Use upquote if available, for straight quotes in verbatim environments -\IfFileExists{upquote.sty}{\usepackage{upquote}}{} -\IfFileExists{microtype.sty}{% use microtype if available - \usepackage[]{microtype} - \UseMicrotypeSet[protrusion]{basicmath} % disable protrusion for tt fonts -}{} -\makeatletter -\@ifundefined{KOMAClassName}{% if non-KOMA class - \IfFileExists{parskip.sty}{% - \usepackage{parskip} - }{% else - \setlength{\parindent}{0pt} - \setlength{\parskip}{6pt plus 2pt minus 1pt}} -}{% if KOMA class - \KOMAoptions{parskip=half}} -\makeatother -\usepackage{xcolor} -\IfFileExists{xurl.sty}{\usepackage{xurl}}{} % add URL line breaks if available -\IfFileExists{bookmark.sty}{\usepackage{bookmark}}{\usepackage{hyperref}} -\hypersetup{ - pdftitle={Building Deep Learning Models}, - pdfauthor={Jenna Reps, Egill Fridgeirsson, Chungsoo Kim, Henrik John, Seng Chan You, Xiaoyong Pan}, - hidelinks, - pdfcreator={LaTeX via pandoc}} -\urlstyle{same} % disable monospaced font for URLs -\usepackage[margin=1in]{geometry} -\usepackage{color} -\usepackage{fancyvrb} -\newcommand{\VerbBar}{|} -\newcommand{\VERB}{\Verb[commandchars=\\\{\}]} -\DefineVerbatimEnvironment{Highlighting}{Verbatim}{commandchars=\\\{\}} -% Add ',fontsize=\small' for more characters per line -\usepackage{framed} -\definecolor{shadecolor}{RGB}{248,248,248} -\newenvironment{Shaded}{\begin{snugshade}}{\end{snugshade}} -\newcommand{\AlertTok}[1]{\textcolor[rgb]{0.94,0.16,0.16}{#1}} -\newcommand{\AnnotationTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{#1}}}} -\newcommand{\AttributeTok}[1]{\textcolor[rgb]{0.77,0.63,0.00}{#1}} -\newcommand{\BaseNTok}[1]{\textcolor[rgb]{0.00,0.00,0.81}{#1}} -\newcommand{\BuiltInTok}[1]{#1} -\newcommand{\CharTok}[1]{\textcolor[rgb]{0.31,0.60,0.02}{#1}} -\newcommand{\CommentTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textit{#1}}} -\newcommand{\CommentVarTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{#1}}}} -\newcommand{\ConstantTok}[1]{\textcolor[rgb]{0.00,0.00,0.00}{#1}} -\newcommand{\ControlFlowTok}[1]{\textcolor[rgb]{0.13,0.29,0.53}{\textbf{#1}}} -\newcommand{\DataTypeTok}[1]{\textcolor[rgb]{0.13,0.29,0.53}{#1}} -\newcommand{\DecValTok}[1]{\textcolor[rgb]{0.00,0.00,0.81}{#1}} -\newcommand{\DocumentationTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{#1}}}} -\newcommand{\ErrorTok}[1]{\textcolor[rgb]{0.64,0.00,0.00}{\textbf{#1}}} 
-\newcommand{\ExtensionTok}[1]{#1} -\newcommand{\FloatTok}[1]{\textcolor[rgb]{0.00,0.00,0.81}{#1}} -\newcommand{\FunctionTok}[1]{\textcolor[rgb]{0.00,0.00,0.00}{#1}} -\newcommand{\ImportTok}[1]{#1} -\newcommand{\InformationTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{#1}}}} -\newcommand{\KeywordTok}[1]{\textcolor[rgb]{0.13,0.29,0.53}{\textbf{#1}}} -\newcommand{\NormalTok}[1]{#1} -\newcommand{\OperatorTok}[1]{\textcolor[rgb]{0.81,0.36,0.00}{\textbf{#1}}} -\newcommand{\OtherTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{#1}} -\newcommand{\PreprocessorTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textit{#1}}} -\newcommand{\RegionMarkerTok}[1]{#1} -\newcommand{\SpecialCharTok}[1]{\textcolor[rgb]{0.00,0.00,0.00}{#1}} -\newcommand{\SpecialStringTok}[1]{\textcolor[rgb]{0.31,0.60,0.02}{#1}} -\newcommand{\StringTok}[1]{\textcolor[rgb]{0.31,0.60,0.02}{#1}} -\newcommand{\VariableTok}[1]{\textcolor[rgb]{0.00,0.00,0.00}{#1}} -\newcommand{\VerbatimStringTok}[1]{\textcolor[rgb]{0.31,0.60,0.02}{#1}} -\newcommand{\WarningTok}[1]{\textcolor[rgb]{0.56,0.35,0.01}{\textbf{\textit{#1}}}} -\usepackage{graphicx} -\makeatletter -\def\maxwidth{\ifdim\Gin@nat@width>\linewidth\linewidth\else\Gin@nat@width\fi} -\def\maxheight{\ifdim\Gin@nat@height>\textheight\textheight\else\Gin@nat@height\fi} -\makeatother -% Scale images if necessary, so that they will not overflow the page -% margins by default, and it is still possible to overwrite the defaults -% using explicit options in \includegraphics[width, height, ...]{} -\setkeys{Gin}{width=\maxwidth,height=\maxheight,keepaspectratio} -% Set default figure placement to htbp -\makeatletter -\def\fps@figure{htbp} -\makeatother -\setlength{\emergencystretch}{3em} % prevent overfull lines -\providecommand{\tightlist}{% - \setlength{\itemsep}{0pt}\setlength{\parskip}{0pt}} -\setcounter{secnumdepth}{5} -\usepackage{fancyhdr} -\pagestyle{fancy} -\fancyhead{} -\fancyfoot[CO,CE]{PatientLevelPrediction Package Version 5.0.2} -\fancyfoot[CO,CE]{DeepPatientLevelPrediction Package Version 0.0.1} -\fancyfoot[LE,RO]{\thepage} -\renewcommand{\headrulewidth}{0.4pt} -\renewcommand{\footrulewidth}{0.4pt} -\ifLuaTeX - \usepackage{selnolig} % disable illegal ligatures -\fi - -\begin{document} -\maketitle - -{ -\setcounter{tocdepth}{3} -\tableofcontents -} -\hypertarget{introduction}{% -\section{Introduction}\label{introduction}} - -Patient level prediction aims to use historic data to learn a function -between an input (a patient's features such as age/gender/comorbidities -at index) and an output (whether the patient experienced an outcome -during some time-at-risk). Deep learning is example of the the current -state-of-the-art classifiers that can be implemented to learn the -function between inputs and outputs. - -Deep Learning models are widely used to automatically learn high-level -feature representations from the data, and have achieved remarkable -results in image processing, speech recognition and computational -biology. Recently, interesting results have been shown using large -observational healthcare data (e.g., electronic healthcare data or -claims data), but more extensive research is needed to assess the power -of Deep Learning in this domain. 
-
-This vignette describes how you can use the Observational Health Data
-Sciences and Informatics (OHDSI)
-\href{http://github.com/OHDSI/PatientLevelPrediction}{\texttt{PatientLevelPrediction}}
-package and
-\href{http://github.com/OHDSI/DeepPatientLevelPrediction}{\texttt{DeepPatientLevelPrediction}}
-package to build Deep Learning models. This vignette assumes you have
-read and are comfortable with building patient level prediction models
-as described in the
-\href{https://github.com/OHDSI/PatientLevelPrediction/blob/main/inst/doc/BuildingPredictiveModels.pdf}{\texttt{BuildingPredictiveModels}
-vignette}. Furthermore, this vignette assumes you are familiar with Deep
-Learning methods.
-
-\hypertarget{background}{%
-\section{Background}\label{background}}
-
-Deep Learning models are built by stacking an often large number of
-neural network layers that perform feature engineering steps, e.g.,
-embedding, and are collapsed into a final softmax layer (essentially a
-logistic regression layer). These algorithms need a lot of data to
-converge to a good representation, and the large observational
-healthcare databases are growing fast, which makes Deep Learning an
-interesting approach to test within OHDSI's
-\href{https://academic.oup.com/jamia/article/25/8/969/4989437}{Patient-Level
-Prediction Framework}. The current implementation allows us to perform
-research at scale on the value and limitations of Deep Learning using
-observational healthcare data.
-
-In the package we have used
-\href{https://cran.r-project.org/web/packages/torch/index.html}{torch}
-and
-\href{https://cran.r-project.org/web/packages/tabnet/index.html}{tabnet},
-but we invite the community to add other backends.
-
-Many network architectures have recently been proposed and we have
-implemented a number of them; this list will grow in the near
-future. It is important to understand that some of these architectures
-require a 2D data matrix,
-i.e.~\textbar patient\textbar x\textbar feature\textbar, and others use
-a 3D data matrix
-\textbar patient\textbar x\textbar feature\textbar x\textbar time\textbar.
-The \href{www.github.com/ohdsi/FeatureExtraction}{FeatureExtraction
-Package} has been extended to enable the extraction of both data formats,
-as will be described with examples below.
-
-Note that training Deep Learning models is computationally intensive;
-our implementation therefore supports both GPU and CPU. It will
-automatically check whether a GPU is available on your computer. A GPU
-is highly recommended for Deep Learning!
-
-\hypertarget{non-temporal-architectures}{%
-\section{Non-Temporal Architectures}\label{non-temporal-architectures}}
-
-We implemented the following non-temporal (2D data matrix)
-architectures:
-
-\begin{verbatim}
-1) ...
-\end{verbatim}
-
-For the above two methods, we implemented support for a stacked
-autoencoder and a variational autoencoder to reduce the feature
-dimension as a first step. These autoencoders learn efficient data
-encodings in an unsupervised manner by stacking multiple layers in a
-neural network. Compared to the standard implementations of LR and MLP,
-these implementations can use the GPU to speed up the gradient
-descent approach in the back propagation to optimize the weights of the
-classifier.
-
-\hypertarget{example}{%
-\subsection{Example}\label{example}}
-
-\hypertarget{acknowledgments}{%
-\section{Acknowledgments}\label{acknowledgments}}
-
-Considerable work has been dedicated to providing the
-\texttt{DeepPatientLevelPrediction} package.
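As an illustration of the 2D and 3D data formats discussed above, here is a minimal R sketch using the torch package. It is not code from DeepPatientLevelPrediction; the dimensions (100 patients, 50 features, 12 time points) are made-up example values.

# Illustrative sketch only (not part of the package); example dimensions are made up.
library(torch)

# 2D data matrix: |patient| x |feature| (non-temporal architectures)
nonTemporalData <- torch_randn(100, 50)

# 3D data matrix: |patient| x |feature| x |time| (temporal architectures)
temporalData <- torch_randn(100, 50, 12)

# Check for a GPU and move a tensor to it when one is available
device <- if (cuda_is_available()) torch_device("cuda") else torch_device("cpu")
nonTemporalData <- nonTemporalData$to(device = device)

The shape of the data matrix determines which architectures can be applied: non-temporal models consume the 2D form, while temporal models expect the additional time dimension.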
- -\begin{Shaded} -\begin{Highlighting}[] -\FunctionTok{citation}\NormalTok{(}\StringTok{"PatientLevelPrediction"}\NormalTok{)} -\end{Highlighting} -\end{Shaded} - -\begin{verbatim} -## -## To cite PatientLevelPrediction in publications use: -## -## Reps JM, Schuemie MJ, Suchard MA, Ryan PB, Rijnbeek P (2018). -## "Design and implementation of a standardized framework to generate -## and evaluate patient-level prediction models using observational -## healthcare data." _Journal of the American Medical Informatics -## Association_, *25*(8), 969-975. . -## -## A BibTeX entry for LaTeX users is -## -## @Article{, -## author = {J. M. Reps and M. J. Schuemie and M. A. Suchard and P. B. Ryan and P. Rijnbeek}, -## title = {Design and implementation of a standardized framework to generate and evaluate patient-level prediction models using observational healthcare data}, -## journal = {Journal of the American Medical Informatics Association}, -## volume = {25}, -## number = {8}, -## pages = {969-975}, -## year = {2018}, -## url = {https://doi.org/10.1093/jamia/ocy032}, -## } -\end{verbatim} - -\textbf{Please reference this paper if you use the PLP Package in your -work:} - -\href{http://dx.doi.org/10.1093/jamia/ocy032}{Reps JM, Schuemie MJ, -Suchard MA, Ryan PB, Rijnbeek PR. Design and implementation of a -standardized framework to generate and evaluate patient-level prediction -models using observational healthcare data. J Am Med Inform Assoc. -2018;25(8):969-975.} - -\end{document} From 58a80ca6706beac396ca92f9b2f7b6f9c368cc52 Mon Sep 17 00:00:00 2001 From: jreps Date: Sun, 3 Apr 2022 21:08:33 -0400 Subject: [PATCH 051/140] updating website --- DESCRIPTION | 3 ++- docs/404.html | 2 +- docs/articles/BuildingDeepModels.html | 2 +- docs/articles/index.html | 2 +- docs/authors.html | 6 +++++- docs/index.html | 3 ++- docs/pkgdown.yml | 2 +- docs/reference/fitResNet_plp5.html | 2 +- docs/reference/index.html | 2 +- docs/reference/predictAndromeda.html | 2 +- docs/reference/predictDeepEstimator.html | 2 +- docs/reference/predictPlp.html | 2 +- docs/reference/predictProbabilities.html | 2 +- docs/reference/setCIReNN.html | 2 +- docs/reference/setCNNTorch.html | 2 +- docs/reference/setCovNN.html | 2 +- docs/reference/setCovNN2.html | 2 +- docs/reference/setDeepNN.html | 2 +- docs/reference/setDeepNNTorch.html | 2 +- docs/reference/setRNNTorch.html | 2 +- docs/reference/setResNet.html | 2 +- docs/reference/setResNet_plp5.html | 2 +- docs/reference/toSparseMDeep.html | 2 +- docs/reference/toSparseRTorch.html | 2 +- docs/reference/transferLearning.html | 2 +- 25 files changed, 31 insertions(+), 25 deletions(-) diff --git a/DESCRIPTION b/DESCRIPTION index ef84d08..1b1cbfe 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -7,7 +7,8 @@ Authors@R: c( person("Jenna", "Reps", email = "jreps@its.jnj.com", role = c("aut", "cre")), person("Seng", "Chan You", role = c("aut")), person("Egill", "Fridgeirsson", role = c("aut")), - person("Chungsoo", "Kim", role = c("aut")) + person("Chungsoo", "Kim", role = c("aut")), + person("Henrik", "John", role = c("aut")) ) Maintainer: Jenna Reps Description: A package for creating deep learning patient level prediction models following the OHDSI PatientLevelPrediction framework. diff --git a/docs/404.html b/docs/404.html index be8d9d9..9263d20 100644 --- a/docs/404.html +++ b/docs/404.html @@ -138,7 +138,7 @@

    Contents

    diff --git a/docs/articles/BuildingDeepModels.html b/docs/articles/BuildingDeepModels.html index b33ab41..76652c3 100644 --- a/docs/articles/BuildingDeepModels.html +++ b/docs/articles/BuildingDeepModels.html @@ -161,7 +161,7 @@

    diff --git a/docs/articles/index.html b/docs/articles/index.html index 82787ec..0da0dc3 100644 --- a/docs/articles/index.html +++ b/docs/articles/index.html @@ -137,7 +137,7 @@

    All vignettes

    diff --git a/docs/authors.html b/docs/authors.html index 43aa926..d611cfb 100644 --- a/docs/authors.html +++ b/docs/authors.html @@ -139,6 +139,10 @@

    Authors

    Chungsoo Kim. Author.

zgG-e4!uquM_Uci}+?V9w@bPy`lqZNT+cg4iL)AOU`Jb!^r+`8+*Zp59HTEphnRYJc zoN0|%cV=PNZ71EIJqB#BqDpo&2rXu~4sXt+1+nI0sMn*hz`X&|ZxS}cZjFcYop*hI z1%>Th+H&>XRbGUAbT3g2<1A9Zz^KI$%F)fXgl`CRO02fuZ8U#;Ib#1ZGSS3gi~as+ zd9k6IYLvkmFg8ys!>y%~scBarcrjb7MUI6Vo^)o`OS{e3NHT<;ykBxI|&ILfaZHyD!ww~B_a$?)IabnxHZQHhO z+jicmx;3Bf{D`i-yQ|kq*Pc`KlJ4XXA2NDAM&TwP9s!H4@vQy$Vc_TGzjZv_4Q5gf74k#HR<>UDf!e$_O5 z>LZ65_-?!DR}{Wkx{pBRzcm-0DxfP_^4eVWmNP;U>03S1em1L%kJA_l-u5JwR91Py zkEt%?1H0QObVdgIJbDk*Tl{Wq@YKs@YLS%VW8)E^%*!8Rcu+7AGtN}QoexvC5u^Gb&PwUW%Sle4rUOYXYGVuF|7xs&wmi%RGfd|S~CwMI~%`WVBO!`4{@!X zKCe~{G~*!*IAQhkWm`Xs2Ao<`5lK~TVb0GG(|%!9-GXkq`FUPYa(SSNB=M!rS)#!8 zGPp1`#bhJ4VCup*qhAz*Q`0*JP9`_WS(a{emEAfpF~qoEwsJ%&}?t2SouB^bc9if3jN|DhTH7MW&5u&P z!2kF11@xGredL@hqttf$C@$vh(&WKQ!3Pyr5h$*WmPv(4&*(#=9suhg7sU0YHTQG9 z^@nCa>WcF&U%y{`>0%eOu@+`})kX)V2hxFHM@LelYR~BgWsU8aBI@oUj1^lVa+O|n z8i?waTjwiagYLLYG)+YO_IiV-s2eH)8EnEsXH5}d!uFrcbVGj z1?&wZM#E;9&MNozja@W63D)}F_+Is8y1@DN0h?x9G4__v@^sp^Bq_{{?QEGu2&1kf zreCI8&BC@LbnILl>A{NxZe;uJ6Y%z`yw%r(ChG9$~GX<}em!h3UzVynXGUv?tYD2wdW zKCe;p8sbi@nxQ(@nvKU9%PNX4!wW0fj-nu(wZYX`{`}vk8$-t17){J(mk$c=z=6%r zrdu*A2i0T+qTA&~lhq0zE+HI~J^YUxg=%C>?SK!MZi=b&1#64NH))Y;>EE;Pa&d^b z+P14NG!_W|<={04iw&eVoJV@Q8`w;4*E*_&1uAS?SOoR)NODOsjghRt* zZVE1AWv5IXi!nGdCvi^*<5wi;hqL-1Tu7Ov0SYI1Ql$uSbm1|}1&F2+K=S>o7|!(v z##);7ce0=Jys~21P>h*P&z&K6Q^DQXO7(9HW$Ogn#iEx$!3Jz`%YQ1?{`s$LIHq6( zJ}4e#sW`iA8{MC6*?TO(tPbk-nw<+!>T1l8^zHBFl&l{|&Gek=N8k=;_Xaf;E1e!w z{_WcSq?Mu9QK~A5QqFVAE!nH}XTmRLprLA8-U0@H+jva;w*uYcWUo;C-2Fufh^@Xy zsSm94?3?dEHGXbgl0!vXsf1_K$$G!Bhav9x+w^^sKtmys?VMv2TgbUZH}CQ!h)_IknOtFWk;+;g z2x3ay>FQ$TjdS>vh!c)I5EW}WpzbJF{ zGaiSlK0vcz9EleX!BTOAGkEy+@WWMR8Puw+JSfY^=B(TN4rorN>_($xNhQCg z(9E=>jttx4=~TCs3@_ZJy4B7@6H4vy+%ITa!M2Gj{l3lI0#1bPgOPEln+ktRd*N3< z8HEqhuK1<#m;(dM!3y3<_g+1ewpLf ztH>tok$`A;{vsf+>g}eQ)^&T30DpU@O0-;#9h_|nfAr=!go&!E`$H<(nSB4_0w2*A zxDr9hbWRrz6?9PB<7%pCaNFby#}JzTKI2)iN&^`da?+FP{8zZG^KfgJkrhzY(cQdr z`}?#3xNL>=$C=*05Wd+=Ryl<2B%L6XMiFf&BMs*ce;I)Ka`MGS_!L_j?Ji%WG0%@y zNf8qlaj=>0fA)_^y{T}pu3aK|cz0yZSI3a4s+gi#_ZfSgMAR9IdNH*0Y#L)zOTV@4 z7L}RAj~B~jJRD*Wiri@x3S@5Gt!(U7O+Z!Dq}QZxDb#7( zosMGpHL?|3n?q>ad&&^z<#4ZzDI4t;Z7{=$=Vd~5$kGuh`9k22e;>C7Zv4gQ0O1h` zzszE^B3{@p<`!*2_0oynVUYkvZ?I(<*@yJ3t3;SZGWhqhlmN(We(wsb_I)#wIM083xGJ7+m1-(UslG1Kz?~fim$DljE%Gk+wI# zma%1#^=E&ceRN3|MCWAXsCMJ|3QUO<6AhxZ*5l* z{&FLqikMNmiq6`nS4Y7JmFEa?32i@1&<0c7oZ>@J1VkfC>$(ag|7l)BZh1zl-@O;D ze6Vq;bh1ny5XZ`vS{~wUQ?hRv>Sw(JC#1flB!fVH6Wfj+H}6~}C=OvGe&LeHRxjUa z*j?QFxR_!ymTef+rf~c@fk5?%Z|PD2Qom2p6I{zm$+vn_eNupKNHz@Ji+C{TD$*$A1 z=}spT5d>*d`HjzhL`4FXblELez9F`Feksl^f0Hq^Mcj0OMm+Vvm1M{}fHw=i;CC`C zu<)dF*@`+C1V0NJPZ=OkpS&(z+)k~k+6to@m49n!IA?&n-^KXw`(aqUjOg49x^Xobct0_xYuzd?~=rmIh-Gbtlu_<|XX9qg~6}pRkn)G!#~QNfFi6i3m9vY8^TRa`e;MM z>>C8f?b;tQ;&4wa(n+>g1u@8W9&=(yfb}l0Rq=typwXfnFYTsz{3leSW^B|TA$W05 zRaCI9lw*fzqR5mj<{2)bgxQ;r0zBd1uo)an4~=@E;(oOWI+OU&1Y9g9S%f&Q*MnI; zXhbGQrSk%+miPZmsKuwCZ&7@`xv8wjsCcZ68F;m%It+9UA=)`QYqcIv(b5z_l^Wf& z!tfGE2pv@5W5CQBZkV$Wf34{8$!ekPSv*g;oa@vg5d8uU4)szZ=%lL7o~ z1T{qyUS`W}ZIZ~K=OxSOz{q0bjA**#wv2H_iLE9=FOv_cr?8H~2;O=z@}X%#Qn69> zNqDMGBt-L0C7VW=?O1+8l$e+%*7m``BrlK)PlU}ODq>`mtEvIADu56piKC4e`ge2L z0)1Pm;0=Swj9l-H4&`kI=Nl}eCh!70aBZpC731fcq3|+CXb|$I*6>L$z=<`&sdAv zKl5oLm@3f22;|mX9r)$jRui|teR7bTaqYeXJoxkEjDe>V1I)^N;2=XHhf#K4pKh|f z1yNn!W2Sd-=J@mqPf>8rz zNT~upU_b7hG2JE<6!5205y#fU1opfeh9_?_b}DL;oVk_Yf6XoG4M){4hU8MA57+fx z3YzQE%;Z-+0-pVtE(X@^^>snIwaB(#u#9)NZQF~0 zPtj(x9Vl#%oZ2{SB_cU_+tBIYGil}et3DgMQIL(uL|hw#85@D)u$-Ez3rf4V`E`4U zcjMUW)#M>cATgx`eU{O7{_Kzixo)Hg5IPoXP0L+_l~_A&5um(u*&7o8&tZ(j43$}l zoNvF?Bm9U;Yz`%CU2=@z 
zrlmXdu-@mo7*Be%vKXl8d$;)l8?YqLr`BM^MHJONx6jbBVCg#@{6!!kc`#2J(dRbu zrJb>#u@JP640X2QW38fXlg67Z-7B=ELH8F7R~cc1qiWZ6>-%=sg?0x29hOBx+_Or& z&VszM9r61U?R;@fCEpyp_XP#HMeGjdh&v_ipI)Edir~j*PgHK5rXw@T>bNuJ$5c|= zqM^UYx6H*dqH~|lmev!kE5Cl{SAV7yQPa$(Ua_7jm81H!RMXgWJWM|%~ML* zb)Xv2p~7Fr4OZ_*l0>DD1G9XaB_YV(o}2p;8&0GdmpqAV2RPsf1?EL^G=j!LZ#T{!?$m|A#rFm1ieYvU=CPKPz;6X(2}wt zH3#TP#JqRkevu`9NgqCNpnS|RsL7bklDs~wkUai3=CbnX9?)HHSlxNgs$zdiUkLMF zmU(A2#dLFl=u0z}6c47(*0n5^qTeka=@@Uw{$f3dio8w1Sf11T!3U~U+MoO4VH>O} zEwRLaAo#E-r$uXLw^sM_Nh0cCpcH?}p_$MNNclvgSrWwaJH$E|-p5@!8gZSNYuf#H z-OIH6OKJCpKN$=W!KSm&TdHzRP_N6I<3g9@>?psQ%PP2g=Y{v^w7!ux4T;1fWN~N5 z$yMK8nvjA88EIQwrKLB^MTAI{ugMgJhNqIY+@Hm{xMNt&=r4>pHQd;mN!O{H|1iqs zWEUMRl$pE=yo*W)@SGKkJYnp6gDg%>nD5dMO`w%1KVsOzv zD?2PhmDj%6aSx+ZQKP?4;T~@g*?X9Wysh&6XTPmK>4~TG(Qm4vOp$E&opCN8D=~QF z|FH{;u_Gg{T$v#*)DM3dwAl>gcdMETnGW1?#{8+41Hb=iE_{RUz^qoU4%6qEEUFpC zWod~}Z!YYyPRumqBU;a5N95@CsxEOxm0wdLfF5NDf2N#drMilfS|9IJ{MX6>Yxiu> zIU=%JB|F4xP_s-q3N3xYyXAlRSI3D^=8F<)K8Zf9uS6JeL~reZ^PH)eHkHR;I5gug z!G(H4Gg`UJs0mxpO6<7;)gX~i#4ZQ!ygz3O>`y|3>mZ*Kf>Ww{Tf6$qpV>P%s&iF{KGjMfoU z;%(x3gb>*9nForOjBPm$_lz(}flMXBXDO#$+`mt#)@SV??)!BQKpYw<>$%*DPfPwu z-5;vXnv?Ky{FH`;5s{|-+ED8(--nv1AO-0oIb)h1*U}E-=#a@OwT-hTapG}faXZR0 zPsY6X(wp~*(nOP?t-?7;tsY2SPFMUWhvmcfdGCPs#oo-km*FGIS5lQx!m~2a=H*WP zH~kJFF1;;K!J||q|H0C!2ccpfjU#(paThiTam$u>EL-}Zc_j;1h~MM{RL-NvS35L< zqf6%etfq3vmSl(v-23qI*!{1HCeL?ML$<4#5p-e-<{;w4BMqv>l@HwNeW^30u$+nZ z^9H!He7`rLycNt_i?UjTOw25Xg#}f$C#B_ApI1h4I4QwPJCVIoR{Y~P_GiqfkI$G1 z-&97|pDq`4K};^@)fF_g2F}akn8MZKtUoU4Lr_Ks{+*Lr;jxV2-BCYF3FoOmsD1$u zf2ss0E4qJae_Sc__6mkhS1}338VOZ8<22nQJn)kp5n77vn!W1e57SMKt#OW zeN@#Yv6gUKIBy{4$54@*$9t;1q+FWRVu6PU#In{)`+6h1!FC!Gb=sgQ>TwP6C1E5w zP*kqg@)y8MGJO+WN;V8T8wPlVhETb1P5=BtAL?4nqI0#i%6Ke(&-!|_XQKRJBP*x2 zF4mBxUBQZA)hl}Be;n{DgBYZUNC^N9P4hhfxIUAwkSNq8)>Jb2^&#Ils2Gq$3a);M zgIM0M7?fZ+seVi8f}XpL=zr4)!pi8=&98Z!fr7hAML`KonyW-VNhrAda-;6?R73P2 zP^9}Uelf9fsL&il^HbM_k&jS*Msn{=Dqd6cH8jZbUyJ)0ays_xjQu_uI`9_0SJoM1 zZGcGWj(dFiy-6H|f|INETO!Zc>Z@)PNzf07RtVh>rr>o76I^yHjYfXsfd6!@|Nk3n z`5k2DodqG_ZtvgyC?QxSCjbhBf8y zD!WTxouOUDF14e#d64p z;OikY^2DkG2~X0y!a2X4*j;FF1zGR0qsS1u!i|no)&9>Om?Aq94ojdbA{ovg(-7%g z4(2zEa9{Di|GwX2Dfhfp_THVIpOXd}aYr6P6SSgDdd;TMu#wodT#$hz_St+3Aa9Le z&HKnjqaRYA5Z_@-w*`luo{~tGI~S=&MgI^Ty?ezzQ&tbW`-ToDq#R2P1W!3`slfbf z+*AwVtz|GxMkMfvgBjR)B3LpkS~UcpCTo9(fQsJRB+3fRns!P9*`lI=bWWU_Jh(-% zgv(-4#Txoz0a*{z<%r@9pvr&qc49hcKHux_LXi`9%ks(BhZFOzYVfi-*R*^^pMxU@ zDM=Hsts7Q8@*bBMQ18;UCEP1cr+7eh?szt=Hp{v;>_U;B)6-MPaNc-{182=VwLX={;LUL)i> zQDW2mCyQ81ZnI7+8<0c`jf3g0$#Q?6uG`3pu~f&;ACpBsW}D>XZd8mGc`37cc+?|u>8gQc19L|D|{nM5{eg6m|qQD(u>RiLbW#MtndYO zM1k|!>J8z(9xK^=YtXcqREi(Fh+7Sd+c%#gAbD(VCiZ05X7mq%u!#6N;VM!)V9EDn zXMCtBRzISbcyERewvU^%7vft+o-tu8#+~cjrY1vkNpm2mz=j7|_g<4mHrMf>05FG%!oQ2GJFH6bT*pXthsb!T zW~?i%N*agg)-%VO&5x?f=Am56AgGBLwuZQWV#m)gx}oL${SU0s^+u4cMPBs~PP95q zy#sJ2P1FV&Zfx7e8{2lWvHixjZF6JWPByl=8yg$j#?ANt|Gjm)rl!uB)6=J?yJxC; z`t(tKArO4%dwp$9lt8xGyJ-c`_#H_9;17x6j9>7w4i~Jv@kHG(H}0Z z83AeACHOV6w)sT)rkelnV8+J}&x%_bdTzs_9uip}EUa6iN0H&zJ5kZ{RGFn|LG!C_ z2SI4%FKOb#3Mz04Rk$b{u(G`!KTGr^)KLH`r3*Tv$(kgg0|@9{D^o#r!rczh=^M5I zIVrgAVjbt1gFEQH#g_=9p1gR@drz$&JU$O5{ZM@?4~w%@+z+c_r$%w4^~Jk1>%+uO z0#vyEF6j{~#vj%Eca5rPCBM>d=GP-v?387<7K%)2M!zh@SA+ZqmlqPB*p@kUb;_y2 zF~vn_4LGS96K6|X_5Kn)diN5KWRQpFnDp_kY;jycf77@P2*G3; z@DC&Xc*tikt$7Ady(|uFX+?^7QJt`u9a_}pe1OVOr5Q%J!+_!oJZ};}xt2LR^F73) z!H~0F;Wx~-_w3e9E|GA+{IwX%n4bv6_~8+&D$UGGmRU=&xBn!UwO#NK9zhO@dKP{I zV}R#D7NMv}{0|al?`gBxtV=?#e>?fgUXc~0hu#IEwL8pG{hPR~unptVZ*rvkp^v#} zfGLNFS05xj&E%fS6%R)^>|w z$Wr=1ZL@s;l})5xh&XdX?=U%m#W?t;lg^9K`A;!BRl2n@^2eFr_~%nl4P3R$C~hQQ 
zV&nN$uVbprAI)|kPQ@M2J0R|_#IR)xv1pC^O&*AxMl}2FJ5ljlSWd+{FuAJ5A06!6DDb+G zTHegT3(wE~2E4F%SYZBEgW#?5zrTJ{>`Ty1cA^-vn=2h7&4(doQa(^u)?DH9Ol14M z%xs+nE(AUG#V3K@tSa5Y7Fqs5t6(AFEJiAZ-udCbsbEG)&z8Ot#P!U0I!3;`}2J}<%5-W-JEy}~L2|VcR{e0;j zf0xo{HTL@Q+LV<&9EW>ul@lmJk@hKz z%A;uNKdF8SQg>_(cI#ZH9N4u$a?HH4d)3R}{-gU*SC0Z_K=>$ay~EF;I{2q((X z)F4+Me%2RkMoMWNtp5mEDnyj>QVKnR`*?d)_)A`{0B=(!0(KF^Rs9{TeDfF4O1{;Q z6sDUqtKTeh^vibd5k*fRQ3Lf5#bY@)97Noy_f?Gu0WqO%G%4TCEEV6*UXvbnOt*Zo z$TOXx{zRxQ7DtBf^#tCgDvM0CW!fnqEqMBrJf|Ro4mrR)6<4`fQ;eVWEi2qOejbxQ z(Ya$5$=&W|KyyV7dzlfY_9Nzyllon0q(t5%g^m;}6%P4h`lJJm0of%n7y!Rw7k@3_ z+q<*ZK6RpIh+F1_Nln94snX#q=U1ViXCD_WE5ZwXS0B&jWo6CQ!E=*4_2@;f!{jco9qn?)JOxd*jPz~0#W76-Ks`!YRP3XJD z+tF-D(&tAh(G|?fUp!&*MK719|A09tJ&&$*( zej$9Vm~8x2(^weNR^L5i5^xWb)z3(Gy?cH9SGOPam2q|qYbS1<*x@yFTa&tMSV;$M+NqFUHocwh*iU2k>mkY4Sv~hsV7W#9;nvKzJUigAWI&k< z_r;e{`KLdCdHR;%F9!^7V1J?5Q1kJpG#T3$BC%uj`O}sJpM|#T7WjI>HwUg7RX^+k z1xf@;ia`E^HVf)Nbe_PtZ?hGL-c)yZ4NMS}40yscla_03p)dz!L|M@q%vr#_Sxy0` z%*?Y=vS7qOpUq+C(W4B+9Mcn`f3?#|pht+_1w2y*Ru;m6yZpBJ6X$t{6t{zx>Hg$U z+XMR|ihp12qMGb|X)7<~K2&v;ZC64Usr^V`aJh3Xq@FBR*#=>L4&FNw-!VdM?2Y(WPk=Uk53MqJ# zjZpo-h(|Nfg#**=hR2I=42>9eZ-y~Ioy(ZNI-lOR&kcwpeZoCHUabTqWV?SY{60X$m&#uFC+#5`{OQ`Acys;@ace!%1ye+a zIq?#t^VWla<$&s}Xu+g`?LHuhAcHL<;ZY2eOZ6_FL4=kjfhD84co&YrF5~LYFxj#Z zJ6tal>a++S9)MROA|cewhX2Z9b9O}~d#aM>B}zzg$ZnA#7Y@`*Z$?ySVab!q6LZtj z4-@}Ec!TZ`8<`N@7MnlY+TSpSHB@85qB8nNM`_&)_8=xR^wl7drC#P;Kyn!N#~ki|f1S*J*Xr&9kAUmiX83 z%W!(IcnFU*DTFJ9t*!?P;p7^fn)!@{UJJS%#n**ibk=j)kk=|+zSmxTs41c_vV~Bp zeuT&|1}i`3+V%}`Qq**>MeE;%Wj&Lj_ihZ6@D)!ykuFIZuEsb1T7 zXRT9eUwd8t%C5`rMblfDll-RYjk8(Ay0w0qB)lsFLv3=)f%dY{8fo@?D?R{({^)uD znVgGWq!S9U|Jm}olDP7o)gpSGZj?#BHpq1)zR3Sv4DAgURPz`-`6|S{Oe#XaK1xR$ z1+1|*v3GiY5idMFQzcG7b|sXzj#jnN?ZXZ8n1J^g#};#qP&dr0v#<})6OTF>Q9D9b zapY%S9FCl3K~I$xz>B5Q5+``im5o{T0uA1YRvRq#UTzXz4M0aRl{tp29RMk1mX7!x z4^rBvwoTzR_QY*iecG+@;6m?Tg1B`qN^EANK<`cDi2M(W*}^&J?d9a0Ma_Du*k?ri zJG3w|WSLDPr`%st#`4WO(4GDz3yMBmA_xty=#D0xt^t~V@mkD$J{jAW1}C)`mKR(wpbE%vvfec3+S?% z*1Erawfo+j)&cwE#7miWUhX%^tUQjb^-xHWmAR68V+(h#Ho1)Xi&wz z!|$!4V<9J}@h=K2vx6{*(c~sO+o&wLN3~rC0_h2~FRMt2K>&Fh;>~X7N~A8kT<6zm zzZ55>+%*<-203RQL8C ziYP3U>arAXx#MU0w3WaK?yMIC(JBR<%fELOL!B8h36f!J$?zsu$r8`16j{l1)@nh8 zfiB$(5Hvhc!c{rI_&j)5yU$;?17!sxB<25bXD)aeGkMJ7FVA_;G{f>h=5pt@B>qAf zu*lxyCO0@16tqaHGZSX1W{OV4G6ERUyMN@K_s`!CM6*dSN>6rP7}e^p6~Z`d;FrF2 zHI_otxhDuT&GEr4B6ONM?vF=Bq>?9sHn`p);y&;pmNWqXK3pOKkaC;Kr0#6lih+zJ zRocNlY3RZqFFc3dL*3dY7Eyl8ORs$!_=;LyETr^@s*2aZ*pHW#F&_)$cyhUHSP68s_WUBDfrt7PWH917_`LCcr z!~iez$|tBRC|}Su@`nN7@PlyBttl=ZBTKJuO4qu89>o@mQrC@Mf=V(75_?znE$DUh zLv>c!pwo!mChyS>QN|%9Ql-1sM*Cyny0)DGw)qp47x?Gx6aV_Ei`*eyIsJr2j#21D zWG)5>Yo=>MDj1Y?N|qi$cpy(|UZ)D->;pGk7683BvY)HW5)s(6M2s|DCAZ@vt+flc zU4#3w0Kd`pPq!9imuXScIWZ917r8bR3m1m+6w>!JY7nW?V0_SaOgKPuOxNLvJvuku zwuzG~;redZ;(<7{+&uQyMIa8Vs7IPg{Ad!%mgmBNL5jAR#@~{D8MKc{-ou9aiBiwN zM!8kAkodQUQ%(7b*DJ2Sj)rRO06XRNz^6VJU+YDB6*~~xzGnj%y56&(6~~mDU*N1r zBf*XD9^$?SYte@k&(pK!M<@9(>0atZ)kgWM z30fXA>jn%4Q%mEc(+vd)Zf@h#H=Sz&gMYF@%+7@93pb+O-~1R(8KCs$i>k@ zR@l`3^;iqepEL49fXun2s$N_YAtMFvxBG;R_k!L5m+Qa|S~szS8g_)364BxDMr}(F zjwH}#Sa4DNWZY_Lx-=8Y0j1EHpT|B*+)s!E_8YsI&LUB>wCuC;5PL9{yUw_*%%{f! 
zS$MbW4^a3A?UDMWnU%}A=xxR()aI<4$*CY@AK;nnVN^A4c~%Q8z<()HF|a|e;&@=% ztgce|HkSBu1sdxTFEwNcrP%sQ><0o__h*uZ5xmc-z`x7*7@R|@eb~_JT?)Y@2t+wY ziJB7+;$p{3uiNqSFV8G1E-?=z^WkJ~Rhj~+`|)iB3XrNoZ{EGQ8z;UV&BWW&{*2j( z&_p=O&1gnN12i8M4o(bNw2wHVB>p<&HS@YHi-?vOaibKoD;Ogc5DQmU0kak6vFt4o zOd3dTzvd==x28WRx02v?p6D0fRHsCI?O@$<|HAczQVZgmXSNB<_obY%AY>bu_$J?~ z-%BwYVE$dSGG@8|S#g)T_ftc^-Z9!~i#PE~(DOz(gd_d9RMvAHkPi08m$#YJWXqQ` z9vfa4chNA)EA83x{GeBAJ+D{g354X7XfM4hzza=Gh{0X(&nc0Bq`0&j zCOIjuV!9e6(KuU`NtC9cnG#jPmu3SHH+T4@%CcZLGa6r-@2T`x{8W&0tCPo?fI*-f zGh|Y|A$x#ckXvBG{86&keO{~17D*ABk#0f@L5V{7NG%6efLsxR6jBd^@`&kdp9k!T zX*a?=%mzY*PGlS*mZZIEfbg?q`IC&vauM$_Z97CSW)5IsrT*lIF86v7HO^&<3r@3sLF0ZHd16r$P=2qjHxbOwUyB z!3%F1O4?a8ziPmU3+KQv82kc(ddLcL50;5AvfV|kiJ-}mq!5@=JMQD$3^aT?csv?A z%4&G|y#^)O0kR%SB;CZpOm1!dV2#HdHvcMrGtjH0^YHMet=y&L`Pnq{WsD z6&K^U5KQGaa#1uI@P%&s16`A#mja<%dzj=3OEYc+Xd(abb-(a}?2Miyu3;H@R7a z*mMI9vrH;RCN`Pe?^l-=QasaK|Baf9?c1q}G9mwFi&-BScOk67!4nttv#Am2`b#hV z-3_In%r?_Ou2k}nu%)a#x@-CfULk3fgj5^Re}y!qQZjS2J3B!XTN~?>`lxft-^3Fe zaxf!0=`~Oxk-LBN?}wgG3HP(w$NDzd87(M7>K!?o?Q`mW+aOLl=IA>}OykxPx%N=B zjIpp6cy@k5qaq6SK1S1DH)dxwU*4m=q^dd_T>44lWiEHnm?l#Nc#~xzcM)E$JNiEw z=lzmW%hDxUUu%yjhv=PJ=nelu+7*=Q2MO{$X9vRHC)jES4Aw4)Aq7VSv<99)B+jPJ zu#BQf?CSip@04+qJc5*8(0U{g4wcx;e}tg17Y=O+`1(_hY`y3|x`GRF(2j}D_O*UC zG)sM=YL$tMv%ZGUgJkOd7@6rpHHQ<13VKC|dHv05hFVOo`IvB95DD)NB00N@r7u-1 zYPNKxc*UBc5KRU!Hj%KK{0SOw;Gs3w-ZiBR+X8=iEk_3uwrjx8sMe=D;ZPs(Vz-zwS7Az` zD+elE!>x!{+$P9Wt3~_U4k7~f$IQMN#sP_2ACXjq{TAiP%_iPk|i9ibT1eZSn)Eet)x#Tp&$dAXXTtg}F4+xElyMpwe6ZL=q zIZ)`BrT~utrj;~U5?zljo9Pr%Va*r1Ld%@n%d-QqMLFD?JnPz_OeTHBg+KW}AXKmW zw=SMLOdY7vD}9zFXrf1L<a(iSp#lR~D0UXJa83vd3D?6h0j!`&hDWNXiI6S4fPrkL}P2Cb3NyFYfyFpC|U zE)2Go@@jZviEK@k$;o_t=Dm1+)$qx)xN67YXP3*{KL%|IUt(Xwo3xJdy5(1>rb2Xs zw2v11NWsHo9FLe81RPGL7IzJb7qcY0nA^*=*-?c&*~kapj+zxD}H3?IAA zbUNUe8>p#}QWIr%N5W~0XLvwzVAkv;W{CHyhDh4IFFTLRfuz?l{-A`k1NQ7q_)R#m zx=@13CTq}Fn$5WObFkM%{1cOxy9T3?0p`scf`@P4!>TcDCfq>AG=TfZ2}s2y#u{zF z`Ln`Q?6Az!oh)Z-bJDEEpuB(@z53IQV{pDo92{Vch zp2QmDi8I<>s#6CsdB3{`(LPdG{M)-d%ujzfWH&uEu|Gm#w|CvP9eXgThE}yPXe}v5 zpb9OwZM?)vg|&VQaIr7^`kib_2j{zU(QH_3s+%jmR{;Y5>^$!bm@g31f<^oZnNGMV zs?TyY=-AmyKe?Mt81T<7f->ZGJ4#dz=Yc61zyb`j)%iN~}Q95f7mL^NfPw zmbQ?3Mj4}%g&h>(xSj2Gzd?G?PZl~hZNvYuIfAYF?q(TL{MzI%3iXUck}z>=$l7o1RB5Hn+@oihl9CwxNlS8Uw`$7aC|e5COWRb}$j z*rk+>l#D$zM|1Zjr2H8)cG-+vC4`E^z)ePaFde}jZWHlRl-4$4Kb*(*PD`a zyw}@?Vz1VpVA8>~TsF&=B;8^AR=UZxQyp2dgSzfiX$5zB zO)Q=&@nf?u&(JR#cF6`D2P#h-b?C|r%1W;Q)ABVqc(_>dnTVh$82eZw$9g5Xu*@+p z@xvUXQTo7l|G%vXxtt1FHs|JphUmLjtcx;K7!qofMi-l`SxKN2e>u8E7ye?be&|)> zXLispA50efz!c^TG91#-&E$rOW%ZiZI2qBI%8hzD5x}M*9A_@hU=W>9{H@=`a$l8? 
zd`tkwGMS>~^kFi%0&r4TP)c`tMSZbr{&k!tW^s25U*hkWhj~c4n0!ft z;TKP(-hpYKncH$mFJM@yPX4{30@&jbXIjCCjS3=bLkttF0YMnOi9!&&7yW@ zoIM`@XLS5iL_6QS3@wOGmeLz(enk2=EXKpZ_}WSP{@C^^wc9#sQTlI>fA1(p;y}5P zLwkCwkP*y@w!JPQ$;r9RxH~guWX5L;FHUO02FyfhS7X01;OXM-ufK#$(xMnDwsvR3 z_XN1OK2LR9q1xIcQP5@%d0?Kr$Jv~sBfw)gLRZYEZF!GN1gkpGY!PW9O{x(94%$yV z@`+>JSi1e=taoG^bG!alSH$~jAtx5gehV}mMKaYW(aec>c7CP!H9NibHzdc58zx7qYuI z9L0P$zAjNV=iwe&p;8*m2#k5Ao{LoagA*rx1kQ3I^Hx&^3a2TdzZFlV^|non8d13l zT2b9Kwf}gEkfYrmkwNd3Q++kKzfg`|pvMlrGWpBPpHMV;$HT=~i_GsH$>QyL26suk zhO-QXzqP|9IYKjw#~pVU&tR0UL|~ze=emxQ!d-$#AGrHck7aMcJeb(7OkT^yXZW)7 zI1C#>0JnQ6-y>lsM;3Ph3-&;bA@f>}yKwX~%w$x%gHZ@bb2AMh z7xEvZaF|8N?+>!C-YEJEpH z^&FITOa@2#+XugK&Iqs1Vpy4kJePs@y?nghXYabnc-+}!e6Gvn7{S;=d76rg4i{>9 zs0zEe<;|`+vnG4eIT+5JrL~n4rxZs0&8yX~p|AUkrf~w5k~9$Nm+LTrK)P_j#l+$X z0#C(Ghy6XvpT2^NLOY@TK0{@XhTl6;@G2fR=^ATJXB*+;*BD+%8(UT1#h7wolZwtO z>gSS6#3}}!2dxOa9Ck>YNMh8NNxv{%E+FHjLqklIP88=lEwV)a>KjeF=9Nsdgg(rg z7&qj=%&`c?8}6m~mPp*wMb6Pw@1{Snn6fprdWrMAq#$8KLZlERDw|?>+W$$BqmK7- zb&2M-Ix)*Z{c4CALS>LVLTDSpuEIkP`vzK+Y^bKx?=txX3TwdPth89wSym54B{qehcy>`oXPW&9Jf1`ko13oet zjAb_$$kC(znop$)#G=JN4TUVkoBYHJ7PgGJd)Ok+D27=r}VLm5Os6C7KkAIAg2 z)Ph4fXyEV=&Zlu3U^(7N+cCcPf+T?2f1(Uz{5t2n59p`;7m13wE4X#;WwegClV0)+ zIW#B3;%BhSv52{SG|h7-{qD;h45RT9*ZU~BdHu7HXVD?c?CT6wstziGp;ri5c0>q$ zZeq(d8W5342^lh)Z-k4!%Vh2R`sV>jc7k~|@`@likUycCH*deb~>b0o#c%(M4nUX$0*u=X5Z`(RPg2`L*1+NjwYVHUH9OEH- zNT^}JSsZ}!n_ftd>Q)UC`EOLn`y!|vfhdNc9*l0oRPKos~G`r!$ z4we7+Op1Dgxey^thmL2X{^|jzvl3lnWcnt$Uj6dvEvf}+5fB9%PG#4_>@gr_lH|#u z*meMAG7T6UuiBKVbbZ*_dM`C|6RMSCvAi)&P+uu{n6|k!Wfn){1+RsCzO9gK?&ykN zN~oqV{MAA$SdqH{p#uxFGj(=x0vg%=C)pcY!E&$@GZFtM@bSR{R6QMl!~h9eTnWPs-X-NVMp{9S~Fk&_D0g_s&33=jc` z0>l8~011F3KnfrYkO9a6$O&KqFtN9>xBEWZ z+8O~&0YJe2R0aU_FtIVR1(*TM%q#)_bGQKk<^T(Tg{OlB&<~ z9S~sWYHJL1a<(+L1K0!Xzb`ld9E_ZRb~ZpWm;aTV{(DM*1JKFR-W1>naCEhI`Ii5? z<^*sCI0J1h|FgHD%JKi}Rym-Zxr+reF$*&bJ1pRTH5OtPR&G|-|5s)G zuZ{oLLd?R>!pcGXU;6f7Y4csv?=fLx#>A>1*qmEATtJxqFf-NnB`3l1f*|D3+X$0}DF`uw zbS85PZLOT}Kk)|z2BPJn6C*-^5^UvAf{?if(9ex44R0cjLFY5Q!%?sB@chBW;pyoN zBxZ#}PPpY4Us;;j>>rrgI8j6}(_Rxy{WOL916m9pTVta~P(u$AMvNAKoy|KDRM9KfCtV4M|l~9kv7^ z4RWF_1j)xm5YkoCI+!)UAU@9N#X%5KIN1CuoAutwbJ6;VA&emp`BUOm&>DmZOg8{A z4%2__6N$6Y-7KKDqPr}UXJzeu5k$!7a^G}RT=DAkRL1=9{8-HV%uvGI`~yo6!x>I8 zpsFSqW9MTY)B@q%nrVhO`N@$d1N=*EXSNo^ls+bqSaK(ioO#`!+jdLxCHLOW{S~R< zNto-S!t$feA%G0|*}!%Rf#7Q*^`}w-=nA%WKz0DJXu^$mesV!_6ItI5VEqj+7xHcZ z5eQ;r4izrQZ~ArN^sJcWuY%R+YmgDwQr1>*_H~&~GPe5XA5qiG6%cPd2%iBz2mebA z1)8a){>P`$)wB65y^#N^DdYk9uvn^^1#l}AsxY<1HxDslW_2tK?G zh=c+r$Y7_GxG?2FD+k9_HNoyzjN;IquYy7=;^qBUU44yEYc28Y7ZIWgTtehY-f9-t z`k$&G`tzK?B+@UePrIP6b+aEXU`!wZk0AM34cs4jVg4rv@fkPqFTTiIxo)j)AS$BP zlKE|+G}j;CTl0VBAfX+c9DaCtf6m`*L+2(E7#bU$-!ncu{R8{baFtG|U{6U+WQiuy72dk(PKYp zK)EO=Iq|C3dG!=OoOcE%G&wf&D%j($g{u=75Elo6`KwpA?@9O_>EF}I*DhTCa)Me? 
z3TDBlKk!Z?n|f?zX$Z{phoLbL9Giy=yJL{G%Q`SJ6=P@onb8IRIrcI}$M;>(k^FYY$>B`V*>w736sMQ$h~JE3y{_EP3bm=SLYQy$3 zEaqzC^JmmePS(Y@iM#XLMEgZN@w~Nsa~0ZE#*@Jd^3~A%w5Cw|MgKH2^FUq)Ueo^N?f*G2p@7(`F60WA{M7bDMA8_hfQV_g27eWv zceDBVd3W{Y@>KW~}8zPCH zg2tf8AV&3@SlyxDaZO-|D>aD-2bzJLpkJZ-lxYo2+3&CQC45Noxyfcw$yi*`s7TpQ zV$q&_%x%ot37uyaa&!9>#&S8tvusq=fV}#wc%Jui5ky@{EUnZm-{5K?HdR=`&_jGW-QJBMX)|&}7Lu_4%jM zFK-$y_N_SV9A3=!4NTl#FAJ+5Nq&Q`zBo;Dc%w+kUNydnB7T46#?ul^$uV6g3hJNk zsAA>!u~k(q-KXX&8B(t~I{#HZh@Vl1b&yvefP8vk_*-OC+Z@_GjR$A_abBxN9Biab z?<{EAm7k~QJMkbgRQy6AJ6|dEE{~FK@FYXAdxztB=8l`2`)#EYP+qMHvUV0aku#7u zBAp#%W+DNH`Bh5h$PSLXk0-5XFbyLs#s9n)hs>Dr)#CrG)^PcP>#E8Vdfx?J$|{`2 zY^QuV;lBODaHMpOz%3QkKW=}vTuSGH^s~vaYk256vQt8(nzfzEL#ElZj1Mw%O5Mb0)N8JX`PAdClOy#q#G0+3$mRm&dnKY z58bRDNtw^{g4xm_Gd2k4sQ0esU+y;d#<6RiMOHIrneQcIiOGYGpvLIwIey^mr{gBz zVfIQ#tU3neDs}-uEMogZy)vg&q4lSI5dClOufGBTPLtOmmrAZR)7LOQ%^1e%}v_9BS*`zjO-J+ z@z*26uY`2?@BE~PDu#Nt3ga!ex*vt5IspZANJ2EH(KA9L^sD74JxU~%XMqrRBzr@+ z^8JU69Y@{`?VvNn$51CrnA+z*>0guZQ4gx_VAyJEj{+cgWJ`SO|YH}4L#PmYpYIoI_d zB(GX2T49 z4xSj~aNPB)qeVJ6+Ox|ljfkPb`@A8qaW*q|Xhl%OGI@1tx4A_1h|Gy=}XWVIkQ;cr!J{SOx16LKIzDvg&LA)OoA1z z7LN|#piMk-f;it^uSOHoTFXQkeG>v3_PydyJj;_^2*|2Uc4em&b zQ7Xbq22%MN#eqn2vq2d)jQ1_k14Tubey%KRW^7}v8Ahhc>HuUDRU>03Y0ZQ1%) zp1{xPky+|(n~;9U>$5$oS2r6-bETzMYhNfI>>lM=`HMtK*a!Fd6ax>NP?yEC8*Xxl zEkyjXB)@(!L~HGHk?64I5u0|k9!OlSbmCI+!y}f<AI$5-4Lhq9(SdTG@?Y*SKY z;YZ-P7b%vgLBs%(T-;=I%{@s)yII0g3Vs%Qm~2NCp|lor71#N=Y56bswI~GP6_2ikl@j)2g(>yCBLs<>93KSr>bnd7H=S=NO&|s5wZK5cXK2;v`gtt`h{x zV65;N9ZuXPdWYj{dYDJ5B1GE^d(9FgDok7%#gO9N(=u7ry8zZ>lNaA_q$qTYy`Ah) z_M!H^LtCYk*#(!j0O@JYJ-w@~9CY*{mtchNo;LzKzGIh~kx#&}gkGL4=r?`|U%KEg zj=$G!K>P*u$sd{i8#Cn3YpwO3o4%gIx*_i9{%lIaeg5`keIu;9i+=6fKM0|o$AsG@ z8mEshe*Y46PIP}*8Ph=j-I34b+Zs3R+&FcB6HZo6h`!Tb4ZuN7<4U#xa{M7eK58Vu zl5&h2tzFD{D0-Ekjo!X&IvTIfhJvX%)R6D+&X_wDyZo)_L-7bEbPL4JFi?SS^He7EXTyln^bX)lK*6ai ztr4$a@CS!-+>68@@jqT}=lgT~`0J~ER~aT%LJpH zw6}w7a7rA#SMymHzQd1UQ=PCaYL>|vr(HUgF?JP6FxTk?)B9pi7mPs;_pc5TcHku& zMt2mQcl^#Lr1FdizTEKz9<+6dE;G34X>R`{#hyr6;ADX&hKRJHWi>{)4jaYTx<;u=!Mxp?;om>@ zND1hrd_IPMNM8+ss*h z4~J+PUn0dgTG--aq-kQlq)m@EC35`w!rpI#dJvRBDuf79ijt)&FXnmDjk(VqexFVzR05HJ z=(2^=t&4Z8a{7MB9vgO$UV&DxD{%|mf53zVz-EooA)h4%CoiT%pap#Z2 z7NX4eO10&4vWBOpgix`=jdMDaoIrh z1MsIv7!uX6REpR`i8IS6$t!8h7|FK@lxYYwph>>4NJRvbM7*!)RZjB)$r@tCPy8#c zf6I0x7xqRo%A=Zijx|!D+P{6}oViz!+Ay|0r&Chjj+-w>y~o>%7!XG6vftpz*@FKS zo4^Zuy@}ckvtetrDP+Sr*bkQ=1fC6Gn)vo+e<7DWh3_;7%hZBHO*DTd~4m_xFM z3u(FK1)Z_ElGm4>x}`rv^}V3b2P1MZT#suUZKMCQK3NtMVKXJjwfyt%+bd06Z(L^= zaK`@5?qgnQER6()?^R?Eg|3tV=kby_V@o^$^^1{QTps`x992C82Srz)>a1v)7H~4t zU$|eC5UY@N~(34zOf3x2m1k^ zY4+Qs-|`$y($*_2@57rO&;8vaRv0Y7`F$H#$pA*5vMPoL8Q8Qn!!f)U#QO>xa)@8# zjUJo^w!|RSgArA2U==}=V-TJl(yCS$p?K^KfJz_Jv$raZhL(C$4aLFf`w62m z6X%L%2xU$V8eJd9isIpvdNFFSV9}xalA7PJ>AyFA>-lqPqqgpGcjQU!QWzoc9ueOd zJ1*hlByE0qe)&GGhP#9ECUuU01d;mZc+f)D76QJy@+gXf28<^Wl~w&A%B$ififCk& zz(}A`7`JUrFk7rxNgh;f1xclZ*ip1Vv*YTCcp+49f%DWq9DI5TED02HqxW+A`IkW@!uVKGOU@Y88mu@by_?ZuAsUWN|TVopIxEEXiZ z7U1E?xC>Q$j>5RGouzhY>uD=r?g~*RdhcU5N8_YIJ}DrOll&q^)GQRakL9#ocH#k3 zo!@k8fNna)^C%82Kug?c zi(9Qk1mh}HZQ+zaGUWe!NoUwS)IruV)9%#T!AgfTpF-;Sg+6*F%PHLG&(4dR&8NWP zau~$wqC%1^2Q?WLSCkpSVb~s%pkHoyy!T@7W3r&0AR4&Z^As( zD4U8)@SI`4dSTKVQKwhq@@O*1*@?>4KwVyRooMrEQ@~?S|03uD&1~wJKKff@X1>7Z#C{8KC%ylwC;p=A!n#=U47i-M}v7TyO|+aHKsdH zynF_&;8zbJMI64xl!?50Hcpg|iMhbd9hWi;5j6?KK|{vC{Vg=~!Ee&R(WS7eL}Q(- zhx*5cP^I3;%qkY6YCk%t(d2(Q`9k>PE7;9Rb+vE%#zT*jc=rtP)GM&P)IYG3wrGO> zqr`V==!pW75gQVkx4;Tkc`C7@d1CObu%Q~M4Kg`P>#$eg%AMUMdYBeR*gBjdqBRRQ z^Wc?lCu0$Lb?)%~sf8HbUcSt~r$g7czSA9)O>w9`T4jd>18H;E4O7s4w7G)mn78k( 
zJ$8~|KOLFrUSrv$r%I51Z(ELv3*>lYh##5sk?;CYAeLb^apK z(YgdGe)7sZ_hp=frg?q5Lc`wr)M`)!*dQ++A;Xs2-#J5w?Sx#|Zo|Kh3E>FOe}j1U zg%Xa$7~E2FiLkC$M%N7s{rXg{Kr|(BZVZUPPurwhZ)c<)1}9+>H*=?Jas%QMh5Ol* zF!7Cg!emDMD`k~Z>#Y>g9l9$z9>Pxp%_8uK@rL8YWqr2(CoBlZAZ+j7DF~hzb zlFmOR24g$u;9`{gSxM57dH|FrRk{QtkuyzQVRT02tbWXh0K2_He;78R8`TJ$C%@770$N5O=5GA7jNfV?k4 z3?brB?a(gq$oI~b!xm?5Ct`%KOb_Q{BJk{!^bn${OT;V<*D*>UZsZ3cVQMB4*X>T$ zg$F{@$WTGIu$BL&9{dwh%!^SU;mku1o7lNVH#gLunC-Wb5iDk*C^CNH%$vp`m_mec zIU^bp^DaRqhZ0HYi5SsFMR?w*mf(jQ)oVQ`Bxi=(86)w=IMHLA_?i*d6$PFw!l*{ z{8r4{g*JqB7f5C6zhf^-$78puZP>@K2haw zikkg+A@>FlOJiL;n-MB{VYSi=8&_k~u{Ifd34kJEG({$Q&L^Gvxdg>W*S3Y<$Q%q> zq4;2Pcidd&ZC)iGl+YOBb}wP0(T=H=xZdLD;WtU9BXkeF{x=N-%iz(^7vrBh#}IXS zeDLA8u2>82pu9X-JjIx@N&>X)1%jyI#-oWYYg-2MB+~46I3;LIaLXz*43@E;wzq@| z<#4w_>l{yt@9X1Hfi*?<8&C^yC85=45Dd zK7N&N)f5D)hT&%7|i4NxP+EUDx^mrBfJGIOCxBx(Pb6d$GFH;b?q8O7u| zOs!|ssWgR`NbO_kncSx7E%)H`%o^LV5kq z>2KV0feRy8P}^P%Z)Mun^257m8v+vo9UT)X`%a)PkMV5V{4`;!#Qfn z{{vY-roSAGU+YH-UL|rE|Bd?qe_}vha6y99<1-5GN5@YLx=wGRk1nK$Yx{<;{XJ|a zU)NaNv?W%)}u<=mI z%DZ%|GV*4QFCsZhB7M-#pgpVef@UFiQYREJO$;_UEl1%D1Q{iU(L04dpF(6qprf(# z%v4U)z^Z0y$suchItI2RE5(d}yBTckelGDZvZpueS=`L^o8Ktn%H5OSe{2*HW=Bfn zsWkpA+pf=UHwbaCvCwsJ0-@bk*vb9tp;e{FU_*4>?E8%*_*XNkoBl-bOw{N-N#zeC zX&2v0kqAYd?W1m*Xn!YupYWZw*O(k7LM>j>iG+9(@fVnYjN5E;_B;`NWVDAopjVTO#e_6cK8TS7=1(-CNX3_PLeB z!N&Vli-|8+>oD9N)khoEiu1&Z&iVGgXneo-zd4te)%I6vjraI6mnd=0k+L?{raVKR z9CGG&sO-fbLuL_rOD~Nb527dzP-mhazRj@GGlNvJ8LEg0YG{`%ceu$WL%u66abZ+F zd}j1&^4%nl_f@Ws{E&!Hy!s+LYOX*JHf9OkE#{5MFu+d{ex6Dh#MD^It)_9Z95CM# zRshBDg*4<^_(X*af*8GTJIPO!X-DzPfZaWn17=weUY2S7$kEXB zyx*R2_QVmSg_*;Wf1RidXihOdIohAz#EB`sI*)PfiL7iaE?Su|E42Tf#kuGC7(n-E zTB-+L5~}f%I*<@qU$?$hI2EI=N*b=Y!uI8DCw(r1w^Xk-p%PkM3i{C8<|n^jJ)d}# zR!4zZO1qICc!U}8Rl$+@AR`FQfYgXKN@_Ol%j6MI?85I~Dpj(P&X~zQXzJVZX<(_r3hbYeNk3kAA{< z4BB4b>#KL8wi#!?SdRE&&6r(?#63bt6@ujvT*dj}dCAO?d<)1IE zNRo~B!P7T#k1^(UztUzzU1P2XHyih<9D#cWu75!rpR5txscc9cvkj|k89}!Hy$+qsz_ih+E6-B!0n>NAJ?h3cusm zI~k@Mib7hu&a>4)%^feick~2O9;Cc904iAVUSPNIdC;zXX@*)w#r&!CgStxR-EUgnJ81Rhrw~U<`JSlw{Y#he z&5L5y&|yvDZp_os8yk5(wH(N;%9M>q%wLnJbR|-9Mp?TI4APQ$z^jfB${Zkq`x7lb zmSp7@SwjqT_?WlrcbG7PXf)q|U&R+PH`BK6Xv|C)Dl!bNW097r!-#_(d(5B}$Mlda zsChCs<%7S_RJNW8&z&)9;A{XBr3}>TSP5<&vunA8@?JlK57DfBW~sIz^8m}7&d0+1 z=KSnpg4LarIPZjWaXLE=^F1x=jBohN_X=jrjS!I7m^ptVSVR->HmEc=Rz_x|D~*6P z0H3Kv1}LnC?cfK_RNkI=5tc||%VooLSq9YJMLVyp?-|1F&mwdzTeQN|9J4@Xw-KPe zgdI9GWInLoMkCT*!j{wpUk4FVpo^=?5anh37@y%Zxzn&uOz2Kd935MQsE~&r79 zv#Uwge~NQE0F#u+NX~D5g2a5e(T7I0Zuqgy;LwYi*P%ST83B3uYZBF*`y}m@+KWgo zaU0XS<2gR80LQr8(@)@5@QwE8VHIIIgTl99HjA5vb?gW{3EPIAIYMX??ql5~gMbZ8 z<4Xm%l{Z80w~==T5n5(RqkMbKL@wMs#aH(L3`h~LBuLWTen=#n z!CUla%#}(gDjhmlM*iD;-&9c!5JqXf&Yt zxUSgo4w_S0YGr~6;o%9)18so$xB!c+MAlRM%<+6YZY`LYV5i2?a|g$@f-z^s;zz5)|8L>f!hxXhxNEn-Z? 
zW}*Zu;l#WveDHW6jmc-r6j#(_~;-b8!|YryaQ7DR2D=-&j3kI6175UZW|C6s=2D{4Mu%ur*)-G zN~r&Bul4NCqXfiv8t*ZkH^hOfMUn|}K^cS%I0l@4#FfqK=Ct}&9V9mMvfyLFD0J&5 zEY?FN$hxDh*1#LLtMv9$>Jud(16#t8e4VFK9HEcPjCWmxqKK8XHfoYv$+#3V>Z$`Y zh;zy2d0_LUC64|Z9XBVbl3 z$%HLdW_2cd!wocJfA+l--KW15&!=@h*6T@v0>G@{aQJ2Sg^}^I8fRlRExEse6-pm) z=$RnwP25!~aD;4-81#SGUr}{y7$|Q;4U=J1R*~q1UrldHXu}T*(l%s_usUh`Fnixk z3{OUcxDCz`RF*ruSYu@X2zb=Vq)iR_;7h+Gg3d@YzPxReTI|9Z2|t=Alm4AkG42wp zw;;Z(2!mc$U65r9E?fgjkaL_KEYhJ>MIc_%BL8|Cym)D(Y$Glvu2gw8T^|A-VdOqJ zORDqc)$5B+-ng%xlFrS-P1Geh#e}N~z_;>ZSs$o6cM>R7a`M%(r>|dTiI8}lhR`t1 zeQMt#8PlCBuz6ze;HSu7ibNh0Qb|iZ8NFLFuxY+JLrpPXU{(O4O?ZU~pRZ{XL9m;K z*;s2M1{!L|Borr6)ZxLALKZ8L5x#MXhmMNfd}w`CC)cV!LfV~!e+h}G3$pp(NAuhC zMf)(>ZvSoA97~;1ZqAQd#C3evd+ZgFFlD)O;@IVDN0#|FvpgZ!3m@dBY!^=IpeaE# zVf_lEJ-#*76^)%&zm8&Pkss3@r?ok|@`bAbu(8DPs9f{C^FGNmYHPjXWi-`Xi|DH> z&>6hG+>0FKyRkS6UgvvT&dc-Q#uU*OZ*3^iZZ1fdGFo#WdU|BbJ z>xv!bmo?K61oz9DFk9C_IWjz+MhD7q9A*`QdRV|1f)DG(67%>2VG8k|WVy`0_ve)E zY<>PN|E0Mmx6WgSixL(+TayHJd-ORG-oY|PqLRR71C>(o|!-G*5^{- zz&06_mdF)_%-jA6EWLgoSg6uK6YlIp@7&SUJ$`}4_UvBbGib5A&hC<Twa8zi*?CAeOrRg1{H8Q#uw9ul*YxIRccyqPuaw9B4ST=@B3sw)HU~Fhd4c7NlMlv?0yG$|IFU zc2jdCt0*b(zsOsLFUt_3pjK-pwfa|k+Cn1Z-fi4$=6!cCkAnivV2-|_OqpqDS&i+| z7OKayDkdTAhecF5FzvHgYQIJouExeIvf>1bNEiA>#jYci6Cr0UJr`^hXV1@=>O~;c zo`3~ii*kqG$(=oZu~+tUzU*6#DNv6!w$c}DFP~MxPs-5~?~3h8q$j>0zofP% z6t5*VCTsHxSI1hOIr!;vBKTXB6WVt1ipqeiU?U;N{)$>IfbDa*-wIyeOpjSu5+YrE zG5F&Wnq0Du@N>SGNHq)iagoHWXiL6h-gD*sjf%4Se8h33&jN^-Od zk~pd{dI3JE8+<8>Lub)S*emg>|D+K&NV5tPz7MO!Y;YcgP2M?s!3zj!jxCQ7y1#z( zH3Xw8a5@n05@X{dX6Qaax-l3Kr}Nh62^EX@O?ggCm8lmpM)IlM`$4U!3(MY6@5DS` zQJqa-T>ap&jwoUMC<^m@%%cPeF2bZi&6gq}tt8j|cNzg63Yn|4!mbEW@20pXTB_I+ zroqlp--N6XIEOqyq%!^ox#lX)6cdNvj|(4M-Xg-g&1o`gX%?%B9?zQfa0it!NrQD8 zQ|p;4Xsn;=c#qd820v!Bjp0|7g{CJ-sZHocX?nA?eQ7sr7EtA&j!>CnJSDWZ$lpe4 ztpXG#7d~q0>SQ72Pkwl9Oltf^qdH&^!dhxBn&S1;b?A>g!XDH-;K*g-4e+3mF>R`| zivwCp6E!yc6hpu_94oB`@sq2cX<0$pOlG&-kC~1;4gGMXsrbAysok20_|?d$!pLtb z&CA==@hHg%O{13{$BF@)4JH)<%EZS-i?NLJ%K8SY*%6C31*>a@lu+&f!+7A)_iJFd zPB>d2kJ7+M29jRFib3ll>J{Gp$#1J#k;Ei=%;8*hjW-4wR1Do^d2eruYfB`T_&NSs zPV~Aw+M`)b@cr|Su-!-?gUhIL`4X>wD`aoVVawdQ(&)jl4VI>_89NE(CRAs51b%{j z6Sq&~mA>2XQN`$Kt$rx0ko*wL`T;Ub*AdUqSqwOY8q}oZT|1vZ`GU{aK2G1y6o^22!!YxX^=Ozw^KQvVt341Eh%kk*MAxr zdj9UO%iN>sW1s;s2PALzV%MP14i~u3)bVB7wkW0U9K`1cAM5q79zuki{w-Kl;TNxG zQz0L`vK?bVg$_Nk7)zL~UE1EQC%(gG>S$e}JgI)<;6L@E)aH;lq}JXU=(zuJwUoZMzg!3UyvxSwfekc@h3}q3_EB9q&)-Df z3=OmPkx0kl8D>oP7LMC%)fdNa^Zo;pcfpP-h+lQ|`bIF*?TxdQ(#VOTuf)+j{Ru^; z;@c-ZMdoZfridv7&kDJYwaGztf^hIdE5EDhL1$R!8nBADvt(gY6wg*{a&SevOt z?7&oU>E|eb{}o1_ps1Vpz!_XOHc5AQ1y4P8aj&S_vN)(?nZqM+`kV9dH7H_NvrW-?@jF`_Xnxz zMh_BykC2Cn#zKt7dg<2FeeS;58 zvmR+Waqiml>^AUv76s{ap;`594SM1ps}WE0S098yJ`ebKf&V~VfH@n-gJmK!g`@Zs zh*#}0ScR&a0{W#cBH;C3X{I{?{kGIm%HM%FLD zipFd(RZa3c93nx3Yrlqj)(@($YCmiP%p!!zPpu2(8cWLR`c#80a@et$Bw$j13^LFX zQPx8Ku!`&a{*xtm!+pqen$*Th+jpw?@V;}XL>P5p2Qw85Cqj&j@9k+9@u%p_6~(B4 z5znPlr8Vzmwbzo27o@SUAUdB;Q5bdl>IKMj7BaQp_9PQ@9cCK)>zsorpbTv3jeZ7P zAeSLr)=rX$lyh_Pr9{p9JDzb<=0jFUTv3dKhNrK*RnzBGv-g)QBSKE_B~_q0Gfl~2 zOs-{M%c@rJH(dwCYKn-jZ=&4#Prc!ZCmJ%0h*B$}j6nePiPmgCCQa$`Sh939BGQ_b zXYMSUkLcIXZ-+CAH4q^tNM--f7M|_JAWKN!}_Azj~kb$nr_`M6x(aH=w%uk+z zQ!}jUu%aKty2xg04SJ2SV@6^XAb!&Fd9oObFyEUpfyvC>N5lfmET%g-jOn$&bdRX4 z`fFP;f+t2gRed55mdZ<4n!iGWW76$9exy(A=(c;H z94$T*Zp>Lt2zzL^mXivowLb)hGT?Q(&zNrHc4 zddo*l8GU(BX*$=AF#YH?RALyetuRX4su_ZHBfnii6E&BrKDN=R3cNbL>^1mxOjaFn zfjkIN%#Oh_(M|Yd=h3V0mD7J&jFwhU#N;I}(E9Bvs(&!aK6=z~YxcFpe}*xYiLn~Va>b#6leRYF%X z7!_t+*(5Bj(x$Ot?B67+D|@Wk`13L&jbD9;N}ERN9hlmpJ*hp>^lDKK=@>t+$y7vw 
zOipeU7mMh;ca`+5e$_=o?W*>$cv&sd18F$5K9pVUPWjnGWSE|38fkiMV8J9AcHF_V z8AR-6N4O3M1@Ydd)VRs<7`P-sf`0a!v|sY2WsQiZU3h)8=;$9j#x#UB0NbJIo4l3i zF7|*m>t0}17sJSoldYTS#|_&^TJl+O2{{Pz^3T;zpGT_$~Qvd^q~7fs0y%_J4qHD|MCBZW)adjqjeigEJF&&GK& z6)T=Ok^YARIeze}X8okTHJ6wic@W~P4Q#bCap)g1>J|gR*4LvP?5%H6^j(pqJ+Ut& zu3+nh7fpZoa%iVASP4jD?(3TGB~>G;*t&|NMmjHkID4jRnl=+^{1Aw7l$lSEi(vkn zd~SS>PTJ6Jv+tMCB-^M3w$xDI!F+F`2bTOaeh3~0k+p-Auy(qkGx`&lx6K$;svOkB zoS>kERdsCiVIYYq+@2IcV!b2fN(97AnAPEW59MDe^je;s*?fOhI zI~xWrO~U*8w_%}+WrcZUaK&-w+_~DHeZ;Vrk(YQ$iPS<@{d75%F?x!}2lw%9Y*q!d z8IojXV#AfsxG+PtI^k=Qf}+1~;KG_pYw~(;T104H$Je@w5bGd1ba*z{ifE%Ol0n+D z8MNb*a#QVKC@92B`-u+$TT4d1);YvQc>XKYvscydXT49ZbssAg6bUL>gYP`t=TZV zYBXtw@Vz>{QrG3aEHAXgjp_&L#dFkIn71>VvE`A%VFy{o5oS+^E&jrWys-A}}c;*FHI)7>M@lAHu}RS6$b zGK#S=4r1L}k?vwNoR15+?{!*kII?eA)PwX1*rA3XK#pMA^b1KA|;?$Y-6B!QG(0vwYX$;QBr!}v84)=4~Lf=2FQgDVm;*V^qau6kjOLb<9b=0YZZ38Aoe zG?gM`Ber%F%wwI(tzz})x=Ym&?jY)AbgtXZ%$>?X17PSFIY-q(TK0kR zK-P&buP|z^gIHKA1R-$z0z^XC`bdh*)Vq_{CiIg1)&R#Lrw{DkZKyd!yk_=HIb0*E zy6Rq#_5CyzjVuW7%Cw(bY7YmTjGK2r;ugQAM$SKIb=06FjxfG{q$TYrXxMibl(We6 zCkrkPN9ud~;4W|}40om`QOv+N&~C2rd_N)pw7$UE0c zkS?MZpm@B%|5AN^z7uKbd^$G~3sWmJP>2F;@H<$_AewQo7$>GSXxkQ-vx3J;I7Kn( zM=`(qqM8qp7Z@(RggB2<|((A?ilM%84i^k^2U|j ztE^DG&MBR?WkINGka74!Hqx)AUq#PWbJ2?839f6)wr@KDwhYo4A$s4O4X?_tPO5ZJ zyQ+LN_%nOf>rS|%OGhl%s%DXeo#4-jiiJT82Ug8iQM#r`So0h)!P+4XXA^*9q)#RDHcL*-9uSz?xaK&AU;Zx-&6@ zKYm_fp}l*HMn&ub)Qf5uX5)iIQUsE*Y9T3$;Vx&koFtZr=7q*JEr7~S0Q}Zj`ilt1l=6-shSgjp(i12TC#gcQzI9vE?{gAl{mc-rqZ2p8yXtrw4oPwRIR;QPi z+7@G{yDW)E8jH|ae0xleB*KglZK-wglIKpZx)M>2dNP%1o-hdVt&yv=`_B_X1HZc5 z*1xEwlzT5*)ry-E+bT0SD(2xodiNidHVZ5s@O=;(yMK%1t+8nHnY&k{b1WJ zzLQYg_I8)LI87eeRD9>+xfAY?eY8JNO|0suiL{~J!!h9#;s=dMcn&k z&`IsmbBg@9AinmG$WnPH=q#CMG!QrxIsV^3rl5_Ke%bjMqBB1Vz=U^K?_#DfsZ@K~;ADopj<4dokoz9)yM-1Le<+7Bw*nhr z{g#kLXxRq#8dk6YbTLA%I`t{&6k72vmFy4N<}oZf;ftmIU+J($@S_plR&JcQ>yPY) z6+ZK7U#=Y#-4Q099%Y_}*LIO^7dH?>lBnpsG6mP1FMHjXp`O5e`K>f=`4bhpbkJGe zB=iERQe$BuX1vy=+W#yxe&aP?$m;w>V`Req`qZYnd~uoydBaL(j%MoXQKGpoa*YGUq;8znh&b z66{ezwjCy1fZ<{Cgwj+xUQbS{|HT~Sz1t_?(V@@x&Rgr7XuGGhzVpDl-m2P<3ARMm zOAaHmg!{3%y?wgDgvSwcAzbj*0zxs4%7A$V6mO8E8(;2-#$2*=gQE-j*^k|yHJi2y zSu^IoS>NIomZV{d$@E;NXjaE*Fi#V9AAE;nVmC^iPS+00Ra8y|$TgLBNqQ=Lmuu<= zDJQ6*(RX~6^G*gxZB>+slCd5=k~B&fmOiO9vsfF)P|Hy@dV|eYBvmHYnt#T;|3Ob) zBYGcHIJzRkOKj;q{^Pqq)~Jq>OD19x|YMaDk4Mq6=-_G+9jxp@v^Jm|HC1+ zfD#)7peSmKm*Lbcb}O2{2?l*!ke_Oaau<>5waE2_?#xl3Z0hDDIF!RVbfQMnmoI&Z zY3*$uN*GGJzfg($oR!kW_oC(={#KYeL!)YYq41IToXmJyJfh0sE6zdOq_$%4Vaozr5B@JYD7V64CFedByg-Cr2mGo-3`B z%Yp^?c!OOV;~^G-U!qzYMwH6ODirJkWxldCcdK3L7y7tbUKUNan|u$ad$fEyhOtYo zX6s#S^Ufz3Kzef;N}Il10H7VddMl;yMpex@EQUEQG=6{%` zwfNqtI{N>$aZl}a016g{W81c!9b?C~&BnHE+qP{qY3wv<*x2?C8_!(KnXB_B*0XND z*G+N)Oo42Qi9okY@6g&n9E650U=O}q2AV^NfhRe+JA{h7R84+IHyRW}FLs_>+-&I< z9HoK)xkW1W(6#;r9zJk5N(;NDi9*iU;CT|XDGzQOji!?Tw}UgS3InZ#Bx1#72`WMV z)P%@TE2Pl8I9omc?rIl^tSo`4Op>r~i7A04jQh&NnGwU+_Ie%YpB_vNu*FG%#s(F{ zzh!8@KflI$|Ji9$^* zFqt2lSMbcoN#b~P-))%6=dV82aWk6{{N8qxQaZq4>O-+DxzLa?S&csbecw2a zOB4Im?Yg>-kJ;E0)}za62fvQZtur55jwN#Vq|wq4$nV1-7y<>X6I!tO!)H82%vrnR zGc@J?lvVf&|F<>mX3+#76Mop%M8!IqqWG5^G5^ncw*<{|F3Q=EqobTbdnzj?J9ll! 
z3~=w+{ILwEfsay)fywMHk<3hC0y8`i-u6z4ONM#0w{o-=0Z@ z8}&z#e&+6il`+zM)nry5xDgC^Jywe+ZX znbo9R3~KX(+XJ@WdH#BOD>WPMqHI*s=WK^AE)il+I^p%9+8PUO5l3*-2_judLh)!6 z&1HPAt6(X5SGgIz5RRE4NkrbWhygD20X>4<7O3@sst6h>wX^J|^Xl>X_fA{p}%c)jDkNL2#_ z)xrwM9Fun_OYWjb6_)M@zK6IB_s#=C{g)9)z{psFHfhUQSWde$vfzm@7AP!1q;-ARZh}IKiR!c?QFyG%7>+@$Zt$ zASgLL0}bI8hm_%T2mPY8o%uX+Xze|3rQ>G?*ziniskK4%JW(H(wRtx*oJl<;C_*dL zqC5Idi#(A5;Ccgd-khg5A&l!0<&Y7stL}pWOT!*1@em^kKp7XU6^u79#={Rpf}|Af zfsVpjDIopZax2uzVl#2{0=Z3A1|{)5N(9p9XZY8jUZ%oR^D8t=K?%9YTYp}Y1S(rX zai3A6Ul|LH#Y|`%>psF@WBt%wG7D-@X;xbENlA62dM8!gJHkdWAJ;rbdU{sr2FUd# zDEH-d>c5Bxd^YRz+j8xO)vJvTad#K*ykqN2w-Vn$&&=h~WI|^}S0l=M+jZnxn(IZE zm5Zjds+M=z6nxgoU1%XS*XZ7hGms)qyIK|7dq+wX(wf!UqOa} z52+!!MP}$G=jMJt=Nans9bA0cOfl&%jvJd6Yk<41vA@rFHA2C04)0|h-(i2l;@$m zxGGzBUL-zr#>#(Y@QF32lc)O(;l|(+ph;x-9J##ka~z)!0?3nlkJ&-QKlYm)w>1TN zoKjAi%nq{)VoS-h$~vM65&kkL%7-fXhC>0SrIJDyO{9E*-P1geK*2}<$sZi7g*PKR zc5&fxi?tn2q?Og_f)`vO>sfdix}$^DRqaA>dZC)!RX#| zs8ceaJW0$3SD)+ecYfXYTk193OhU7wJf|=RGFZhFoGBJsjd#pXo9kk&E(O^KPAlP5 zC54^m-5nGH-&d~1xi^dc46ilz;Gfc>m03p`Cs=8598@P>(F?cng)$Yg*=tzqaug*m zOm@<+;3FZ2T*KT7AD8vpWOF{rc&QBxZEA@@5kwxAy&*WVc-FB2JWO$_W7S9ouDo^8 zLU2MAIl5(Fs3uzESsUury${)8jWn3TSVE;Jrm7s1LF>+M)pTK2pH_^A$(bNUDVD0J z*t7{C9(8E8#=n~J;2PW3Il)qEpBx#j(ndHjd9@Mth|Wz5WAk;&?LdiM13YU85}W6( z`_`ZblB)E+INhPR*!Ja6DgL_&nV_Y}NrFaoszR1@V9XLE0MrmG-v6)xeu`-vH0!?~8h_o!CVosR>#!AA zn24^#V|_Y?Z4+FY{(-vOHQzk`foxoxl_F#L@OPbW0_W<7Vcf?|DSb@GEx*#M4u`Z`2qdPv3$F|)&b!@FT+;!AcuEH zFdCGt>s+VF46Y*?bzeU*)_u*PVK#xf`MFgq?{i}!=d4J#$NkgXk z-ee(}6;qj`QFx8#SSP7@bM6}G$B+7I?nyQilf9?kO*N67_{;)aDI#;P3Sxv2nSkG9&K$Hs!LLvra^@wtf{8AK8 z>4hi3EtFdn-Iyz7XiE2XUbh{5(GMZDQ?5Nc-1Mv`M1cdSBoJQl%DqmhS1d?&B*ccHeh`)>B`)oDi98c3)Q0k?^_&g%JihvhN zw^G6*y^}se1sA)Y?H_Q4DEegU{3hQoLSRF<75jOD^d4xx1WPPdL?d&>YD1l93}Xm@ z0zh&eI8RRqpqDK23#WGL(~l4?KR#dlbLJDn4u;GoWvN{}(|ni`L${`+Xym+C5%@St zu6$cXeQw`ZjNCy&&|XM7epHW&^`0*Qf#*YgrOvUii8hke?d16H7jQfWF5f#<1na(; zO8lCzh=<;1Eb9&Lxz}NbABssaLOL@WvC8Qs&d7VxUF;R5{1p`=)ii)m8DzAarC7W0 z(gV{m;)zjUrV27s;X739SkGX;u5DpjLFK{=jB)P*v6g0Qc*U{HB-@9N(s@v#eh3#4 zl1RhVg5LWRUpn3K?+Yx3hs=K4RiG&d9NriX5){=enF>KYwi3K`Uf21jUZ+mB@$!z!#!8s-@xsPZ0&{+* z%#zKrw9U78!L&8Cq?a*KQ~252D}eIiZkGKA`5isAeLEBlmHn;46O}Id@D@a|9!8{o ze=hFRgji9qvewGj?4qPIB2e{q3e(&B z9`B5Gw@W&w)HRAd`4}29h1sx*D!j`Wxb`3v@K>>#9B=|KLot@=wBPn`(27qf4fK|xzE1BqQXF0xBc4B_%NvO`X|?p=M|jJSv=WrU zN+Ox&;{W5{Ifnc*RxU`*$Kb@8YN&voS+LW$H)3M%<4kW^tyul2m%uZ|`a9pA0MGG) zsN`49*F|0qF)zJieY}pX^c$&JNv&M472sWlFn4fY0H@$DTC!}OsNP!bXHHYW0o><* z%poN^voubAf^GEQ9Z#W-)2DpD-Zu)TCL@O7fBZBVfBwB5yO+flzi{NI#szy|oj_~s zzCsq+bq@X#rn09(Hp_%lYFElER1xVcK^CF`3~Re=0bTiO#;>h8G*~WOrE!KEB863; z!gACB%sY%A>rF!ves9~dz^p};;*k+Z9uu5u&XP~fxl6kU*E296mJ(g@S-~r=?%o0 z?p!oCPAmN;>y%u%+&+Qr9t_6g+92Nqf34 zMo&b=$B15Z$_Uu(KVN$jhP_%}rkf5xtOmDMfNdG`qkZ6`#kpDA*L79PZMJ{m?%^w5 z>2BFTDGkg)jN4dwC!Cu5E3R&}NE%<*gAX1%^itCOcz5kN9cVdYzEHiM_;(nbKXW$` z8C*5SF`@b1QXBPSMsP{Do-jLWhnbSLKm~m^=)J=Z_wR!p!xT{e#ZW z)TEbVB7uACpky4Eo2*v&gD_(#V}TYUt^WMk36}($#8NOy!sFEeY=-BMlqx`*+@Q^O zA7z%A4U6xxu1qV1G5Dq4gb9ypE3SmrjsN#JKeSU356|BL5SrIY9BEJ5H#Jv59{e$M z4UjS&H{yz}&4jH6iyhsXj-8(OPX(*kCpAkhxlktQ&`pIZqaZ94J*E9_63Q{RnH$0HvWQrusx^#@GGeJkzYT?OwlXpkdk@j=j;j8;qIX!}H3 zQ(wf46w|}hjVTEz-*YRl*n)c!52U{ZB0~yspjh&7p(<1Wh3x_)aS?5`6ETXTTNwPaq4%S`A!tFgyR&cxnvk_7mP<8j)sp5b-itV-flE zf=R6&i~XC@etnQq)pjUO^w8C@k@+W()$)^iuYD#Jc~5;h&biL@BlMgkznXQeWS z;1GX_U))E>K_u&=aqUV}&oP>&)Za>enz)$0vwxJ{x#&Zf%e?bk zc$W@bTr*4uZ@d6&H_5OB#f!f2J3#O;cS93&kK&>(_V0&GhS%j5m?$ebJS5LXFf}!3 z-0h-H*Kt(c+!b%6kT;0nF5pvU?isHLZN0(qwSx0QIb>$4&xT}6^eVZc3LX-;lf(;q zi$2K`e#%{Jaw+XGeUE0LtZ{*c@Kc1qq@^^Vxc`^luVpzW#FXzzC)myk+yyn3G{pL# 
z@9145@sd3W++S9I7INRjV8*n0I@EqL97q|N`O6hWQM*lAa6cQF0r;N%(tai9OF(`B zbbL@jZQoXpQ~H_>=B(3~IVN_4J`a>o;tQCVtCu}IvIYM|7AcsL%=@&=rl?f4R;C|) zo-raIbC~`p{@iI|IQH)chh`}}&1mUgwLqkG&$2M?=H8;20gVyIIWI-dy&xk2#*cr1 zrNj#DCq*CCUY;0$pF+0nR%)fu$`kDxZPK=G5}nY7qcLO7#cK2V%}juaFI$SKHy5a% z>nkXyn)Hncc+Mbd--azeIc+w#0$oI=Z@ML!@2{rZ74qv#=`g(dA;PcBgRKyQ1$141f)g##SA>)x(#PZgGJboN>^o8(3h=$7H8qJ%vB6j zX{Kkh+ZxSn`^V{@OpbX~c+&X8e7Ss(CSwN&=H(j3iVB54jKygOywj_Nz+7vY$$#lV zGQ8{SwSjKIA6#Z@vDZ^&eDi5|bnVf3)_1tPNx1L%(HZSJXtNn?V{ZtD{ZQZZmF)Nv z>}?+j-v7HmuN2ef8hvQ6f&O%56~j6%3&}?wu`yfD+_vip`h_yK9GwC5SAVKSJxFby za1ytc9(KKF+m33bk-kMa>Dy*@Xz1F@z(*Tzo?7BVN^3Y0lVoG4l1t{H*$hWob4l2J z13nF3VgS89R2m$()?jLL>yYuc%d;H4YwTnvct49L?!iY+LK$B}$eA%h%+!?=eUB;k zxEap9z7`n`q3%ziV__W7Vg1>*P(+5;DsL4dxQuiiX*%7&<$mE*v7MJ3-q^-z5S!rF zjbx;x(QBKx4KfkXKhhCv>2O~h2U$ZUf0bA5F-cl=v0=2ZsoQM~&GsVu5U5M-wvMV| zSCenNUV<_tTmMQO2qK2DsdB_Rj zp60_vah>|;Mp9dgrjf32SD(t4Y0m*-DyDg#8_Gxer&VS2SD(0PBjuoxvB^FE z?8FO1V%u72j=6IRKk+BWl?YxJG42?BPvQ(_;?si3R@Wq6P91~)M^p%&HCgU{Jxa`M z-36K^hwhx4YEuP8Flh|^3d;VXa5|;?mB{NrAlEU={w-BjT2(eVf>h2AJ55Xp;@8b5 z)!6Vk1WFu2b!lwc76w~8t=Y~b&azf{D`R`@XynTZ1qV6eX zk0cVo*5t%1Ig!+14__p3+F7;IF|Muu7-^%zG!sbiD*RJvM1g{qeBDy!(Dqth%Y7;N z>gq+~z`TtMQ@$=}Q(KxdUWs_L21k7!ZM!v)d3ydC+|a$h&wMXD%9gIl1vCsfQ`e$ADeid7(7C zl?P+GhtIPBp-!8k4LLwAXCD+TC&}X52>yp0j^Ob?K2F4z@1oqS8Mqqmnf9X@C}XJ` zB1%Pa9xBD!kF&fWatjEMb!x|Cl9Qw%e2gV;}t~@ z;$4on|I(Z*kSjYKyev*=H5bjwI_L*UIa&Bs(ZwheC@Ydu%}G^sw61Sm1p?<&DHw{Nu+5}R794SzOPrLS!@Af=))8a(3%l!y+dRW>bmky@U!WfcL z>rSI<(XsGzGGxu1(fJjq)NC%5aPIaf_M_JmhcWmIp-GLs_r`M&e#VB&{C*37qp6zp zFMoX3U-!aQs`%@?ivouZdPwI&S)^sJ60PQEB<<)3(ry3mBRS)~vq{_lJE(NMRpHyj ziR@1$OuBp0w$Sz1AlYRX45L=+&Xou;(hm2&wQWicHyi$ruCw_&A!$wXPD->Kk~r>Z zaq$(KtpL%PlRb9DeE?i>g)N2NQ7&*ajGnCsB>`(*L~6;xL52163tGgydHSo9${$pV zs*I9%QrQyydjh@=g5NH~3Q?(#j=WBMla_S{UN9U>;%|Ai%m%$O-EPhNB1disR7E8RE{UOl64vX;z8B)loM z7HN?;MkG0&szdN%v!KcDTFm*0=+O=PemO1ICuQ|geY79o$BT}9MG)BC_r*2R&4I9_ z3@x_q6kX5jzJCzuRpuU5SpRlnSV6_X_5z z4S4Gu6&zG(KqC{{i{p(eNW+N^i+e!KyxO?AN(P3|eQt{DF)s(knXqIyR|rS00W7KY z=Tgg#pbtkxre%Nf+`&*z)HIaO*qJWUtYW&(24D*hQa7kGlPCW00Co)Hbo*&Dg zi0Ost_qGN*TQh`Mwa;Ze+XSM~j6xwhpGw1v;J7~#g$fq*5x)G>R;}8;WRRJIfH$oV7^K3Q=kTRo&%3(L8b+d)hW$tKw{6?6R|AH4?-aKeriK$X~5v zolJrk(z2^e5!33!qvp9iD8&T>r^WXt{^$R89|&+I)P;64b&F zf#(V3OP86<_`>8HR`a?nu~R;JXS`gV-||8<82(gM;Ao6fx>iI`3}z0o&Xq!$S2eN+ zov_f%UqHkP6WRX{;)MObSQ!5+;zUFCU$5)`Bu<$A7sLq@(?8;b;~#Ot%*pb9h&cI2 zbo~DiC&K?1ZX)pyHj(~+ND~cu&3~N9{{&1-?OYuHJ5}J0?l;l6Gk^kz6{F6lf z-;T(Cup$4=5#jhR-|jyh5k?L+rvKDJ{y8E{42=K0mj8S4zd0hzjGV0hyN-yviHgov zChcwTK3quGe>}<@D7Ur)Ir;-TwC?l&ToK4yILM!I&K;k*o*lova>W)a-`=xKs~ap) z!NTfM0z)_^P^my19ZU_33=hD+P?ZE;$lz#nV5<2gSve8R{wWC=8Mpy~LerS$hnBF8 z2<#4^e}UR{Y5=56YwtSl5T~bkAW#4(^MU8zcP!u-o`EO;bY+)&Z~|xmER*{DnZ`#_ z29}3Uc3_?Bn_U4GyUW4@aC)+9XlrzSm-UVG-;^)pTPcVIR)?q2jt>m2U>O`3z{i=W z7=Ta&@$ST|09eNX^`V-X8kzw7X#g$-Z2UtrgDbHFq*Nt^HDf{_dg?Q?iipSG`%Dp) zmDl16!Na2{Dy0GZGoJt`VOgK~^DYI#Uf*di03$E&e)xTMfq#psCCa19uPCJJ8NNHf z0igSF&#w)?%WnLp#oqZa_w|Z;%CtCwu7527@>?BUT#ii)|K8v0H#<8yn1FCGJ@_p0nSN50p zV(0dQKd1-!;G?W`XYT+2o#1|NQzN7OD}}nQng+g}y}{KUlq;Phy$8HAv%RweAo{nl zr!U}C#Sb0=B)o%@`}d;Yevj_#*Wz)nC@OIGPG)a*Y;O37UwwLbU}oeKfBWmxCMy6( zr&?D>_oo&DL_=c>@K@&Uuh~pB?oT6SIK`xdRCxvScpvm!fFR)0Y%Fk5RoRu^g5PCC z_|zj%dj=-}4G#{$sat{+M|xDi_03+HtKge(Ui&k*d@X?)ed#Z)wKjNsSkT<>L4Ca| zL)+U>Pe&GUQfN?161IZaSKr{CEW~f4CXg=RG(bKwz`2F#$Wz&Yp69!f>AR6H`QY3b z;xR-$LxU6Gho&~b9z6sf9oembe^nFq{M=byte-dpjShg574w=~MV}pda9`yVhlW1@ zk6+dv`McekUxfchU5(UZul$T4oa-L|r+iSLB%<+WU8Ln-Ki5sar$pzbCS{feaHU^* zxnJh_;oZ&It3BagViD*!Gc8Ne_@UtJm+Zh94(yKnx8|jo+bHI%#HY@cbxf|HnI)eQv~G#s}_Czf(|# 
z2&*Z{sELJdyNxeY@&kP^rZ(4lka|aZAernO*=~gNy_WvT0l<3$5A;^f`A7N&fOHI8 zf!aGt0C9G8et*z$%!9c=M@OJ^k>B(mvF*E?e;`QD=kaeyJAe$-Z;<S)4xQYeQ>*d^_f-7<7nk3Ha|b}@8aQs5miksWq6NWmzujb zcXhu%ruTNR6hAEnzX`NdP=B8c^!>^r15&b(`v*ryzt4O*zJ6%euJ4%d>u_IdBY#Kz z;0OQ^&LLTZHU0o(JcMi(vxcR8N~6fB3Mk(*bqZIOXOp@O!6 znJS_M9G7^&ekljxNHtRmG2Z-P-&Cq3x0_K7c9Z!+{d(Y+2#-FsWU>S?qoe^n`}?N48RDt~V9-LXm4^F<{>+{HXI0z82BP zvI2Sg@#2q&Z?s6>D8p@~gAmI~1%0%>^}Qf~%&S}E>=fuIC+V!NuLJj^EiB(Nzg`m% z(iEt)>I~Z_s7iS8g$h#wB-dl;K-D#W{4CGHw))v&EE5QqFhd!0)eoOtwYhn-ANj2fJ#c%o;irn<>`Y=}bc z8Un|ye?ur;Pi$r$>aCH<>FfS}a7{qERAMXda(EJhF>l5r%_@)ExMhVzV6Ls}g-3`x z8=PiN5W+}Z!}kN6p%q;9ZJJX}<>ewumm@c@hC$+9!lXY_^FG4&Qt9Rx#+S634{bs? zqrRF5w|kRpBE?^rB`36?? zKZt)<^;J3hT=VCCg(I>)s}Q>}BhPY?DYH#m&*qs@K;Z@bDMqRr-pu|Fe<4yZ?;XL`WUEllx=ImC>`U;v0bZ9PD4C;bBF*=1+Fj2CGCL6aV`6Xq#c z0U`+(6Gxj{>qdGbjlk5bjH7a{`}(ef>Y+{mp`IDf6t|ZZWpW06-}#*yxD`AYLFIS( z=xMd%1Vsyp4`bvW(AT}yENX_P{U`q^tGmn}t(I-Jyi9a@ZL@j_Jb=G(1ruk&HMR5T zFR=Exns$BU_E5*SN|1?GX0^VX0cnMAP>!w_t>Z_McOs*27D0FKnoUnVJw)w+cW@<~v6e)fKGB~H*w^5k*}TWY^Of=Cdl1WXX z^8;FI0%eKb8d&Iuu{}QAu@DEe#~oHs(R>q?LE(dfne!&|4x1XZL$#>cKknI$_&7dn z+DRL8DMK9u1Hd!%S4CX&d05=h?C#65S5O2q z_$FB-e!&RADUzxQYSQ6D_M|5W*|xBrk5niEu)qY7$#}yv*B%h$hs&e*g*aywy7Hih zgRV2>yRxyF3rr1$tzDHxO2r%pVZGlMvxQ%Mr#Pvd)M1L5B5~CklZH2xaG=o3o}AP{ zDnlhned-2hfp+gK5Iss%f2a~KyO#_%ek~EVMTOn3b&xRg-#gMQThV`!ZgN8_=dHQb zG{iT4J)s}%JI?;38_ol2bwrum^!HF*+O~QV8`=i|bgL#uDQvYeTn%6f?}Z+Ajgp{3 zRBw(eJaDA4FruV!0EvjNaVn;fcR734nk)v-P3nZjKXRbp2s5_R4bD>qg48x%r}2?3 zX7q7HrCD|PkxJ$e7=?(;#8*Ymn1K4h9_)H`&RVsi54@_;uGS^?v2fb0Plki^bl?wr z!+l?s^i~%)*sL8U7QPlOGFQ#wGzKEU`8H1` z&fYvMTUD9A5u0C}A1V_MXy;mX3Wfxi1wlA0_eXyO2+>pXI{J$}l(%4%L4$U+(8Pks z3$!ei zkw50@9R|PN zpZrEY{6OdCifIlyG})G$xl_x@hK-np(es_i@ONrbUJtdl#zZOAB`uxHGy7m=FsFuP zBf*! zkT{JOrsrsbIp6alSZ#Wi-3LZ&K;d5RzQvmgQWC5ZDiEu}RhUCoM+Y6$`H1%vO_K=4 zj%?U+PQ5>QR60NCf1bdHovy*r;28{$EKMRNU5Af1U)q>wCBjDv;4?ZK^viAjogz%p zZ7qZLwRHhp|N7;xEg!hzpiAk~2Xq8F$`k||j)_zF8M}eik&>0PrN0hX}eSf0K__#_m%0!PMohDY87me4JKOfu7Xi z_4e|C^^(ckWGxrQo<0cUmN?X?sN2qIHg0s<2Qn{*Z+M-dz!QLcH>z6Q5?j8?BhwOY z<&aeD=yb{`Pv^muBa;$bY-~S_dzL_OtaPLAq5U4u4FjT6+|XDOJX{b(?QIVs`?YOr zl959&7ot87kDk#aTSG_sK$1wtI$nPR@vh-E{k(hpv#;%&;~!_USNaFGr>K~a$MjOGOqga(_@zalY0p~2-on(WRLIRxn%tiA z^=mE9PE|hF-EsNxes;4_HVszj4>Jjx)9~cSAfl2jN}6;$?L~-vj#LOx%qv`tK=zoM?NZz}bL@hB^Fb;f5`2LXV0y{s+Q-XrTz1(Q{ zt>M@k(rI35bdX8dm=4|nNwpQWLA?H@T_qzF5pz~v`1B5XcZhn%*~>^ky(z4*Ha;yM zEmb3Vn}GBX1^$=G!x>$HA@%CmHx_;}!(2YR&gk)P0#m~Y9Bzvi%~NPScB@2;bhE0^ zO}=WEkrlHX7pz`x=G@#Mwe#>{-BFd)kU!0@?5&ifm&;XwqPJyK!u`ehFpZ4~lOxaO ztJUqrt4Z+rF3oz9X6h2IRFpjd)N+%7wnGFAAwf1dm0C)i8e1)k4()&8`I)U95j*&M zU{%+{NyB=XZZLuGW;>dGC94MM*3pI$$jptBCu+$E`c)j`cpZMwilKQxXm0P?A93#2 zr54;CE3(W+&zA{8H&|1rFJfMDtWR!3<~|F z>652GJtk%#=gb)wAFfTD5&;5>u2^!`_E6MooD;@?fp74K zC;IuV(kCovDRRA*b@R$Ekh_K{sXoj<-HFz!-Q0tFZdFyt>cs}==BmLcqRmE~Y}i>Q z!T5Mr>CP@ksE+F=Bh*^yjI?JH#Ug*@65uTs!q%MrdR|&--Q??)unx;h5u&%INJPXH zGfc$UHzT$_vGT#5J;^}2r+nEK|AF!CWn^tUgsCI4vDg;P!S^69+>W}9O>owGhPL(Z zS9G+eLBYztWv#9?0v|HvxwK^gU-gmi$D{?N5PnWH6?wx(;F=tAi^qk_ZTcDu;?(j4 ziKd+8XvvE(u4mzP+%w?Br{HWXm2u%fyF)|aNC?S~HnsEy&>T#m1e-Z@AitN|dl>I^ zZ#^}5RtQxfEg(~Z4|^$dUA5N1VEIH~p8ay0W;eG?YF&^4Eo9H$7Y?_)@)Y$+Q`2e- zzHS-i%nLg}kyeKxW#lnA{p+7jIYrELSEBBi=K}=pIT1YiEKvkWx^Gb#ziOhy`@)IK*oU&G zY8IEN>b#Ouk|1F0ua)cu=c^yAW>0=@DXd$QIRwidtJBG+BwAou-+8|}IHclUN3xt| zav}@J3lzWJFT`_`h70UN`cT(TKtF*?OhMapuzQn2#jH5qeCKO$sb3Cr4dz+Of3w*W zU9H1#`VQbeDiFOe_zTeI@WR#dLe)|pw@K_Yy=Vp<(vG;y$VpT(-!C}!aE0o0BAu$1 zuJ9tZQDsjf`|Qbmljzt+=5c|MwiO3OM$BLh3`u@FLD5#r*mpTxkv^is^JSCo&pPG% zE_rv%MSlPa5Grv{-J{-9+>^+vk+YDnKO9b-=nyl&d1kniM$Bs@hO$8 
zRJ3s962VESMFny5I6n$U6UIU61kK@3ks>B02jr3|ZecLWP@m{DK0N-D%x6>u>*ny|X zTetP3I2it^FE#R&akj^%sfwU`-ON6WY{c0181|TOF!6jSz;(ppYR=1kj zH-eNf&UHH#F0AfjL2yRqT#S4!`1ZW$hdOYKeykOv)BC6nuOApFzr*j=vS1qwlQmBA z5b2xaKYrO7#R*PW#+LP^m;js6=*B@h59ZB5$iiZES*WmEGbzEy*nT0lXOz}GN90N? z$dLdx3|#p3lId{kMjl)m|Chuca7LIjD_$cBEA9HNAK`V@a03)b zbM)^c!}_lI-jaTt0wyQZhYFOgLl$+meEL$3=}}dL&$>ZezM#QUrK#^mf={##SsO>= z`W2(y!;kxZbUq116e2QgLGx}>D3MF`c&gTFK7bLD14)J1KvLBr49Lg%OkbgeN*pYU z94^C!2&8+{$({w-<$&7x33xpPH)UHyLk8Vqnc=3%VuZ|I;k!O`JR8;s$+6#$lHUfVF3?7}%GcRL#kD`X% z5x^Gzb1Op$iA5uS%|vZT+5Y?K<;B3?Ky_E98WyU9{<_K0Ivi#ob&!}f0Ub7v`u^T& zS2UZJp5x{uY_BBj4`@|eUMwhQ+CIcgim;dwllh<;b1Jt*>daQqY&{kYdCTH35Ff!k zy#_58)BszsIkc7B!YhNDBse865enka3c;0wx zyi5b*22~*F>e)BVdm=udZ-xlfxr0sf^T1vZa?{ESIpkJ6^Lqtno->G)*MSn~4c3mvl4&>A9M}XTdzQ?qu64IZixQzYuLld8O%MpVe4sMv{tj zy1?FUucU{uW&93tG?}$-x@b~=ll%OF$Q)$tZ{goPx$TT-dkrmoL@IHR1@oxRp#q){ zDtKe+;62eql9R9OZ>?kbcVWk>=T57_GM2@kKhRBA6vFLE^153o-ETZ69SaKgGGo%V zhW^M?q|x$kF@nqGTqP;EYPodwpdG8xNU1u7&%@GrA1GXFLi2*-H)g_6{ zJW6%8E?B*qiuHT#rI@)iLANCafXV72pddoDBr$u_Okf5|qAKJMm-2q6a=sEhnQ}Od z*N(@qJ>Spknmaj1xwYA;TS9(Qg5`z4IVI8VJ4AP-X`YPw%D(bqC)7pYX3Kvs9YqOV zCXI?>h#_gvimj^X{EoG*!sridyi+;(5o^L63|%5~3BnDyd)55hfIRVl8Im+5#!dtJ zCBVf$uLFB`BA@m20-QkK<~1d6@Ctx`Hd6o_KGTct#qUuGU7e-CxJ?jXu(RKrV0r@k zB)9?yXjdj+A=jg=J~V&E3#;XOV2Ml_^3dVXhiKLOKt*p+5DIz(!7w%@) zn4s;J?0=i0fnkF6W{5MK)t|Xo$9(6V>{PGNh&P(aNjv}{h;*?Q0BEMk<&Yh>i#Ka# zo-ocH-lZen9KZ2un_$&))5+%<=YIo}`2im{lNZBLDZ*O9rk)=4_ksA3qFQNEN#{Ym zvQJ7Cy$L3~uQM3ci6X|EOaBnOD`YpDJV?34m4r>4bt2b;bPq%Zt43Q@q-aDeTUq{W zZ2hF#KQ95LBd!<<(d7miDXuc0Q41zGJMNtPEL?zTN~y_iC5Bu1-F*5QKC;o-E}Tbf)J3yh1DYNn3D5> zj_`?JcEBo^x6a}1%8IkZXZsY0FFv5_agOY)>LIX-Gt|HE&jnbXLHaJPWy&-;r2vZ+ zF1nRZI+B5_?N=wlv=Q?ZY|faAcczwOYL9WRSH;?s6Se!UQsD7e2s)+iV(?F3^D413 zGSQh$cTiFHtsawC*20H#8%K!2);q|w1+iFFB`zYB=NKW4Sh|WLr<9jrkcN?pOYA^h z!$p5)!Y2>=kP8Y0Z^(8~3$La0HEQVSZnux0d7xEBR#qUC#WC*)7QG7#Mtayr5H$zF zmG0P<4SmAl1vCcWu7iYm!cMD8U^A4P6s3dAw3P;Go?r7Z2~@TG7q)DCr$@I6$OcyD z#37Vp0V`l_(eYV$RpE+f1_K zd^?Q4m)Zs+#%-R_6h`Xhn)IbMxUS(M zPHZK?#7byrPn(g{G2M?cS61rN-d<2?Bz625a*28S3Zt}q2*IYRvW9{7F&iCnn#BR+ z_#U0g*w9V6*cb1;4#nj-Dc5H`9u?v2C@5iK(}*+CFqb`9)fqA@kzpHV!LnJ^0I|5D zju+9FV9vIu^QAXae?r{UB}?e?7v#+u6@v_gq=v>GoQ=O6Vsbolav&EKf)Q9rJIai! znEiTeAmQ~lab-$~OPSv97R!ke`a~L$7Sm79Cfvx964-R>-#-hS@YYxl%=%Y=8;z+O zYq`Z{G7{t0Zc0+p4CQszi-r>9Cj_JE?Q${Ew)A=NO=+Y~sIDp(@a1|X*x8F8IaUn= z9`Ut0YRUCFz0W~LV#c!XX)uh1-i4o8bSg!vc_LHpCetH!nPX;h{DdFhkc4meZMEEx z^B93h1N7|bI2|u`BG8Iu7K~Zu_=nleYz%|;6~U;^FhG@@K9=-|$_dv(;gP9m;-@Lg z&__8IwQF_JY>~z$y=H6pv>wSLMWFlWeuq33{wx451ta7tPNv*LdgHZ(v7Yu~DLrpf z$H!C7m;fY~xtI-vB5>{Hkd5ai_d>>74;AMrn77w4i#d{yUTg&>*Cs~p@Y@`rh#AY? 
zbG5~IF^&tltM&UNt;Uld%j%|cgCr&iw(P2{2bZ|-BLf~aGV;2}q`L~npcgIUt*BsK z{!}WF*sbA*eRU4FTg%xt#q504RIS#|WF7X4%t<|R;o_naI?=doG`Lg_Wb#}Da^y@I z)lfbni(w5BJ7x4CZ4DxnB1ag23XIyjZQg1+;6jU_h$2R-CZLN}{i{IK5L_!2aeb9P^NkwP}Nb=XZ9~ zrTx0SF~cqekFy(^fldmEKWd%tLGc_m=%u&b0FKzI3RSGE8|{}3#@F}bUsnzQuORk0Xx7VAtH2EgciWY)qaZzNZ5Q&?6WAksul!>&XD$JBAFk z566Bs8Y8aJWN#EZ*4j94)IS1+pFj>Ob?Yj-IQtv7Ko%vLfe#nsc!cqkM2?!Yuci5I}q?CsX&wycga0@1sbN%q41 zv1nczk5oj8>Klyq$lK0(;*Z?i$RzqBO4L3GqqDFR2_C8WUCi&ayh3)>dSMiodOa`o zZq1dhRh4T%mcb@0Z!nr-tZq^IWc-_(q-$4zyx1;!$P zac9}5L(l+T%4Uh;vPoScq^ZP4v}REBLqNZo_vBU9*?bE(u_!nx;!nlb-=n(Q+z9d& zy6mZlIPYexe#3VLIXm`k+=c~elWaCmE#9VPjuSzT23!)k%E_SEy8e;H-Cm9`{Q-Da z<~AHuDLC`w5uJ?q7EO?8Lb5r=y?yfMC#_1Jt(W^WtBGQCiKJ0jKn;Y=Kh1e>Fj;GX zxoY;%vMzMl)x_oD%|Nv$=h|uBxnqNu`13q$5$@rplC)S#1xE^(a&NM#F1=fPD$INr zS?CtGc5!y~qcP}ti#BfCb+R~E9!_6m>^r9M(X%oKW7=^YX4ISMFc}H!QEScnD-EP0 zFotPnX(iHTe)lfxz=^X&J7;i81SV+q7(;|EvRX6$f+=8Q_&fmTpA6ppwcD#3c{V|@B$Xwci)I%4&FL2 z?MtQ|c;ELD?ZHW=`V9k)RxiTNZ{T6QeIe>pBLqqL*HzQ4mruy;1Qf$C@y=gVMCX*W zUt94VrSyT z#ARbFICe6(zP!kn!hwn+xL9M-{(nz}lM+LXkqk2g+bbR!s$G@k*q@o@*gjkfX^#w@m6C|w>vdP;C$QMn z9IFUcR`0X>tc4XX%E#NU&pwYv#K=F{hZf17rRZQLLh8eQId}$Z9m8X4fG{;L%6i51 zJ}|pR)oMXydOEYHI^KN8sYH||sND+&wx~RwGhyp<+rbh(tN*AE_U<7HNrCfCiiPsf z_+={!?y@8TY$Tfx1|=vK9a3-KXq$d^8R(%K7 z7dq_o6w;THm-)*QLB+CON0A-p3({@@2XWLV7<98B+g~_jQpD=d#(iwStMDi4qvRR* z4)a~TBEQ(6<^$AaVe)~{rfa6UxU&*^Qqd-zWGgs;ABBiTZd9vFa8F?hyXkK8NTIBP zerBrhnbrxxs=p}EL}^gT_NVw~#6|A1)oA+^c64+*=;yUPxMt5;AJ7|cYguWW%5XYt z{R_+y6RfQ@@e&Bsu?x+OSlCiFgZ#7xyMAFdtmAe|YflGs^U!Nin7Yu_wDC=ymV(cv zx)7~rez#b_HK>UXrVe`6>?^l@2h`mV8EcP}U5)okrgR;Kp(;6YsmxJB z5KCGfRZ2{l;{b<5OO=#BU{PU}_n6ZgJ{|n+ICsw)_h!i*+v@R?1as9MnVSf7{gBdN z8zw`b9~nBmhm8(lrPQkMjpCN=W#ChSY@DBp4ED}8#4>O5IOb)adVeV6ncYRwBaO664`DV?NuGL-KW zA8`&v#trt-4X91m{gz?L+^X9N)R5dXV!GX^W{PPG%vTT8?!0XwpIj>gXW@ZG>+AhW zY`#B1{%Jnr^h2B<)4L(l74}S1Gt~Wjm!I7fqi6aUTi3k&+NgZ*VKuu!rF+siMW({m zI+8Tat$D7~M9?ys^=u#k?}*T~LN7rmdGDZ-Iu3R>AgUE3l4)XB$9$?YvP_E_xmzPr zwRn@;EAzYr$E~!6PV;;CvUVId+ELr4yB%VZQY1?*nLQg^%rrkVl^p!v!5oc1SC1tz zkP~WmB>d9kL(%+!NY+hYQLi=)oW@suL^3i#P5l9<$`O%tD^bJn!k^AomB6&Hmxnl{ zL1+cH9;*L~(YOW_j~dHMo=$pOCs$%K!lz=2r$YkP3$Y#(L-jeeSFmrAN%8GiQz;S( z7xRw$eK?Y626NM9J64eu|bG#(;LL$fVc{7%byeu@FrsRkn`Q zzlg|L0MSML8l{y#1v-j>4ww99$Wjl-Fca2QN^IwZHXGP-Qxet0=JAhmP`IyE-Mi~r z@?&@^|M5SXQT)-(u2!$vURx{If-W%=z7$5sB!T?SCvIT~j=YesBv<_QtPsZO{kwF? 
zK9DKadr(vz4JX6-HLyY9fWj^6!!=j|>OK=Yvp#Afrs4UJWffZ`NNt!o8-N7rUea4= z?G%Rp)6r4fp@4tBza0$!l-%6dg5RW=mOp@src;FBld`n#fF{~$Fwl^ZQ z1}&NPq(YS@ocCGn0y||Lt5Gs(`}UnX07US6?J5HVdv_8XnWHjgPjCM)PWI_mQale6 zf=+_?sKLG6de9@%IxRxf{kU>3v6(2{kB0L~T6Svqp`wkrZmR-)bKE>)MIJbA z#51I3npKm{ne3d-CL!3%B6%{e;1Y9yq7O?gBwB8P#21VqRZ7JgW6(SqBmbW9CZG}=vfUJ1^5zN+G8%!rUm?u-R9csKe%u4X1eJw`%RtB^UCL`x@3{fhTfW3wsW|MDg*8lf% zQVex3(NwI};Fjl!0@#cuayva>35jh5s)Wu67EUgB8c=7!m#(LlKxah9V=RB!cY+fm zc;q^v57kOWNVnhl7N+2)2|~#gTwbbW1E>!}73LW_7s=WxX9c{#MLPXQP1)CGjsXYD zZNy>kv8e5kvF#6t(FP)r)g@T3lJ(5duf_ito=t&|rL_1Se%EUmbqCnrk#{mKSy` zxUm12u#)!BEYLE5d$!YhJ#v!QD^gD6q(fR%=<<#9ScrRP3y;qEk}nb80+E=rtNKWp zES4Evn|WK;(gU}IE#0&>>dhJ*DIg!rADK0?#fTMIHDY2wQTSdBWpAddxAw2Q1KjA< zJl`R~x!QbAidu02-57I#d>Wd{jI3wHKCkA^jPNw&>*tSS1*V!zzdY0}U=QvQTu}VA z_DA#tIg2VS(>GowJhONFK%iR>Lw(;p58oL|*4_@3n?ws2Un{YJbrnuqj-ER!?~9XJ z%}rGI)3&A4O^B)@sA2^ZCOf1$0AjJ&j5`2BR^w7ih-)|pHRXX!92YxD zSK(je3m3B7ew4i4D^R=3cPK<(@i8zxl=OT<@JCS(KO9b8-JCLjiAk$R4W_&d+skaz zKfUNSoJ>H{C&I+0`GP=JjM^xk>1_1DxH0!jyu+{!IN52qDq!oiT&m#-zi{!dGcDa{ zHb#zvr~u5(k_NsFm8lbw$7U%oY?-AlsNQrysy zAu1QBm)EHzo)d$Ffzyu*-s4e-EezsM5tDV_hn^Gd|0+#dqFs2NbsTrkQRlr;)_t^6 z^va12C$ICzGlLJTU|NSVjpwBgZxoy@2sKxK17%( zsi(kxZFwBEX7gvF)+u=P-{!gCU zxXQ^o>~VQBO*jagEMRrNeGskja!pL-P13P#!yMHdX;R9!E%G2dHb+GGo`_1|U@=KD z>}!R3$rj9J7-N?Mu4P0Np*nFU7WwIQwAc*)k)(01otJNf>X}^){z}^E5K?r>AI7Z% zZ*#kfQ{ngPIIYs;TS2&E7jtsc+=5ARXrg4Qit+7eEqJ3|enOqXWg(}(>W=cwk$N%Z z7XmYLhV?HlS^3NkTw;q{*wD}WjN7QZ3kGz(_c2^}DA#;Nzs zL%iqO^!P;ID+4@sIi_0=y)ECjvuQj#Z^JSp4QRZl`D-2Co|S6f0fW2|Ld^y~2%7iI zxHHRlgP~OOVZHx=%wf;&Jv)e|&8rEPPR;jfAnfoebNiR2Wio31nm&0LaAmi%Wy=9X zs~SEUst+cktfTZ2B0=RuJ20VC>|-ZH##_r+P3A94!pdb7BIICtRYjx=0HJnq>@wt_ z(FHs|;w0i+(GeYalM?o5+tfne-7N>+xLa$znefkO4FWadtCcLKmu5xgG!3GOjrl#s z?BX=O)N>D30=}^3lbZAOa~tjpHxFKCV?W3QVKstYscH!Zcr%@wE9iRGUsR4HB!SP- z@V}|GAOlZHf&ztERDh(nI&ACN0hpum&P0Xn?(NEeqW#2!#LjvYJ(<-Irhi!iKdsr8i$j6cZ>R^Nik)(OCOn+I!#;MTza`Z= zBR&F7ZaSl;rqu>P!d%YbD-Y%TlT@%X7AGf4xTPWXU?Sgpi^#I@^;oE1*hu|Byc(tT zz4V_U8wBhAKs+A3OKN3HRaR*yCAry!U2i15;{4qXE8l>=AA*Q}vk?4N&S{gu5*B__ zQpqR!;hJU-Ucfdu{wTPk2ks*j9aErxc$CX4^HVh>W(@mu=Rf zxp;YILRN?+LmaxxI--x9E9y_~qcXzfvxTcej3 zh;f|BG!hS5LCNCj6Dnmg9KG9q{(FnsV!mdF?R_kwJNVU1aha|s7ooBhIno~lim8k3 z>sK8pVX&#_)7DPfB-#i)4ph`I70E<}T=kD~8uZnnQmTgbV+Lb4nRN?M;u-pIDinVh zwb!8a;kfaegBlgQzgAcP4KRQ@sDBtlDqdqdM?EEk z763Tub%qD(1kUf3g{xufp73!6`7Ti1rgbvZ`?1!S$len*VcBNPDH9asqDTJ9GB0$|r+VwL|3@fQmH$0xcaMxqc}$EnNNKSA8P%UJ3ipa%8P zv;g4j_-ezN&?h`(h<&9b_XrS;(9^cXc$xlu2)=UblPirJ3oZRbCKB#n!)~?7cZGI= zGZQQu;VA9kTKo=$Pwolf!IBx#O4#yNou^I|tU(9uKrS&T7=_^iR0fkg1aS%zOQy#0 z<>^on2JYU>pyhK#es+y68p^C@7BYZUW-qBH%Dq$1VLjq@1okj==LWU@FEY- zBvoGJ`3yRTGr~JfO=x{MX1kJm%#s-`A%al!1Gi1?fv8{^qS9hIyzXXhnY)SG7GTUj zV_P77F>{?fT(-_;OwJ8Hd&vX@qmI{!cbKpnMeSlE3D(@3DXfe6Omi6}(_DY41M$#K z#__+a=z3K->&6-MhKhG!jJwU>-XH5$zxO{oAEaNR+{|tLIBdr3G3cnqNJ~jCOxO>O z-eQ&0TbBUY(K@{>lDMr&fmygMzlR|%4g~QsRBFuxOJC! 
zSL4m;s%FIExS-!93lud1ss3{o&T_(&%AQcxLEJ zcd3D6%}Q|!5Q}t=VyGg@ZZZ9q2WQ-oR!wRu?d9Mb08_Z2!ga8KF$s`oIN0Ipc7|J$ zoUV-G=#E#c5gwqiHE=iJ&T%r$xZ8@8=?Y3T2b#$IYXa*=QR){>krZ(PquNSdZ-~*| zBK8Q?Q8|oXCOxN4qMJ=DGEV$32Q77Lj_cMnSHc%|BDi_o%L`>`zO2=Z6IXf#XLVVd zrg;9g`8;7enVi_W@I?Vd1)xT=Rke1*9U&PgEmQ)_o-yog1rS_{<*l9Flmmpaks+gy zZYa;D9=z~u`7;h0NVbUJnOdDH=V01r!iFpiLO0J6%?e#Puo|9STRB(RAka803*zaH zje541mkK(3`0S+IOO-CG@}5N69gN)-6c-+Ony(JVFpEF$<^W3qj9h)TTI!Ld=;rrP zkAZfefvxIIbTCt$g{MNSbAeAdg91Uo_4DOWaz#Ct9h!dvvgpy?Wc+LASiiJOSS4iP zR<9Vu_^+8RRl!hsFqLYV?C>l(s4##k&H?U`SSqjQtP||56QrIcyO926_J1&Tk1?JF z@1DThwr$(CZQHhOThn$=ds@@BZQHhu-_AdGZ*n);WcSs{b1JD+>P;o*IraHwQ|ij5 zXzl}5xJ|j|E=n_w%!QPJwcz>*z9lcpyGVA_3^NMz1h8x)HNhEsses4+99TP_?}oD3 zX=n8Ma>yw9p}?+CbasuX1J?y;&x_=#X}w~tke$FWkY>uE0gQKIO08Ze1sg8ocK4iS z)871efFQupcO*}nuH}F)53H}?&kcC+dv849z|J7qLiQQAYw$IjUy1wB+ zOc5=xahiX(JBnAf-WWTQahB0+JBQ#b*Ph}c{<-2p@mRTTx0g~1YFVfsDToM$}Mn7Nj>I0baSzNEm|*cdu|vBTgl!-_IvrzcdQcXWb7bU2;?ez7PKY{Fm%7f4+(@(vZI?AGICRCs+;4^p?j6e_&m+v#B5#~e zgRYxd*_fs)YeW3!y76*JHBu+bc4304{<^w>9Ly3)C=5l=hjTcVQ5beVwegcq?#Pqz z{uk@(vKf5ERB1kG#6nX4v97=z4NY%Z*o@kFsd-2U%qI3MT*yo9@dfb#gwkB<@!5O{ zvdnO4l$cL?GZwbCVHi9~_h&7C(vW0tj+yx8p^O#L2S&12RF}e`Vad;KNDx3pn8iNK zkMZaR!LNu+WvB?!R%2A-Lt4n+Ssy~JnAv$)QUstclW-c+fm?R9RxehqW=9U*M#=cB zDewMqQRL|1?iP4p7Gf@Fb;;_B8$D?myeJSH3r3bFhY0m!%v67y9@~u5SAGp>{nQN2j%v8 zlj=C+1oV?0KE4dA$DhUiOiYH+M+Pw3{366Wy@z;U%GEoK2p$f&@l0$ri*RT5BH8Lk z0uRA~P54-PBW8!hq_OuLt^JtbkDSzi>9KCxO!MbLF^}+Ro<}MOvzjn4yMp6Umec)g zDvLNk`Mc?5n1QP;8;z-h=mKNMlHc&bF8V-_sJlT%m{s)O*tGt6*jq8XGb2=l&0aW6MxPU_qp zEH*&l8wzQ0N}vVqpBYoXeeb+`g?bk$I%zFRZldpvZ8JV4;uTG7Q04m?H$N=Q1T3K${se4+3d>V!t*YS9!KyefWM7}vOT$8b7|0m1VY*w%z>+i z(U8@@hwqm)e}0*}Qrpv`V&WxKru~--A|Yy3iSIt(tsnO+LYDRTSW?IX0s5rz{xX#V zy(_Xq64a3ul_ztHNQ;3gM@ItUL(<8AV9z#>J-F`#lNnqEj za8qF==TAO)3ybt||G->X{yCW}>LX1YL2=PRqjf8*#Ka}c4oYMm6%jVjq$FGahck_l z@G2>^`9CYcOdU68F%+lSCP^r4H8wyG$_hZNE ztjGDEeJD6Y=SNWF2x_QjgF!r3b4Q1M$Z(KbI^$euB2bZrf>NwqGqo=&eRK#4^rQL{QeL z@{$o#*JE$=_vO$Qc}KUpXe|K>vG}CE2#?RUipe_ZB@+F%H&;5eI>|nCBz7!m*1Ty3 z{zuB*c?mfGRB_M9xd6Uy@e*$bmcn~~@2Xx@#WTKi)v^s~%~`v*d`AIm7_Zi>khXva z*}C+M>yn8fXI%D%^|A*Cce8}j^pri^7rK^mnt<4u!Juezc!&7}8)i+RbbSUDlT?Xe z#v8AKEgXnH{c9Yv$#Ln03g^Z%0hP{VjJj%MrOn91Uxk_h-LZw`mS_)m zxAa50=MUw&BkxdBI@IIbCjW8@1W9h@0tUI-rL|2%S=~cRrnekXS~M7MPCF~MP~z%j zDC?;d$;Y|LE9R@Tes_HP}rM&S!c4QR_ zZ}9iku@WCsEgZ1?2noZHm~JSjc}PS!v$2dT7()Z1G~TRo?}bc(@#jl{S!$pJ^gOP9 z_%9amR6426>3#5g5mf&LYMq1&7D{s6VsIW=zomr>82bqS-vd3t=55I0UK zPiY4?PdTCF8!UX3g>A1LLZ_dB$MYV7MwaU*<^`GZ1KTY1?QB(^uv8g!V*6g`J?SfduFq;d zkGIsLKw9u2YKMQ~LoUzSv)v>J;z!U&{&Bt|++4kBi#i?8b`}_mEgk@{g(zcsdd*KO z1wvMPUeW-R%0p^z`XlzEFs|V-?_H#5o3l;|Ynqa1g**1UP3au5kl8v`BZ;llybD|D z7@dq6xBw7R)ODBzT9-7S$&^XTaZq*{Em?nwY!Qyea7^DqY{s6CJYrbxlfM<@>N+S? 
+  •
+
+    Henrik John. Author.
+
   •
@@ -149,7 +153,7 @@

    Authors

diff --git a/docs/reference/fitDeepNNTorch.html b/docs/reference/fitDeepNNTorch.html
index 1ac8298..fa8c2e3 100644
--- a/docs/reference/fitDeepNNTorch.html
+++ b/docs/reference/fitDeepNNTorch.html
@@ -48,7 +48,7 @@