diff --git a/R/sits_classify.R b/R/sits_classify.R index 725aa949d..cbf177821 100644 --- a/R/sits_classify.R +++ b/R/sits_classify.R @@ -39,6 +39,7 @@ #' @param multicores Number of cores to be used for classification #' (integer, min = 1, max = 2048). #' @param gpu_memory Memory available in GPU in GB (default = 4) +#' @param batch_size Batch size for GPU classification. #' @param n_sam_pol Number of time series per segment to be classified #' (integer, min = 10, max = 50). #' @param output_dir Valid directory for output file. @@ -79,7 +80,11 @@ #' Use an sf object or a shapefile to define it. #' #' When using a GPU for deep learning, \code{gpu_memory} indicates the -#' memory of available in the graphics card. +#' memory available in the graphics card. The parameter \code{batch_size} +#' defines the size of the matrix (measured in number of rows) +#' which is sent to the GPU for classification. Users can test +#' different sizes to best fit their GPU architecture. +#' #' It is not possible to have an exact idea of the size of Deep Learning #' models in GPU memory, as the complexity of the model and factors #' such as CUDA Context increase the size of the model in memory. 
diff --git a/R/sits_lighttae.R b/R/sits_lighttae.R index 1760db0ad..faa3938d2 100644 --- a/R/sits_lighttae.R +++ b/R/sits_lighttae.R @@ -310,14 +310,14 @@ sits_lighttae <- function(samples = NULL, values <- array( data = as.matrix(values), dim = c(n_samples, n_times, n_bands) ) - # Get batch size - batch_size <- sits_env[["batch_size"]] # if CUDA is available and gpu memory is defined, transform values # to torch dataloader - if (.torch_has_cuda() && .has(gpu_memory)) { + if (.torch_has_cuda()) { + # Get batch size + batch_size <- sits_env[["batch_size"]] # transfor the input array to a dataset values <- .as_dataset(values) - # To the data set to a torcj transform in a dataloader to use the batch size + # Transform the dataset into a torch dataloader to use the batch size values <- torch::dataloader(values, batch_size = batch_size) # Do GPU classification with dataloader values <- .try( diff --git a/R/sits_mlp.R b/R/sits_mlp.R index 945643a43..50df44452 100644 --- a/R/sits_mlp.R +++ b/R/sits_mlp.R @@ -250,13 +250,11 @@ sits_mlp <- function(samples = NULL, values <- .pred_normalize(pred = values, stats = ml_stats) # Transform input into matrix values <- as.matrix(values) - # Get batch size - batch_size <- sits_env[["batch_size"]] # if CUDA is available and gpu memory is defined, transform values # to torch dataloader - if (.torch_has_cuda() && .has(gpu_memory)) { - # set the batch size according to the GPU memory - b_size <- 2^gpu_memory + if (.torch_has_cuda()) { + # Get batch size + batch_size <- sits_env[["batch_size"]] # transfor the input array to a dataset values <- .as_dataset(values) # To the data set to a torcj transform in a dataloader to use the batch size diff --git a/R/sits_tae.R b/R/sits_tae.R index f68286a5b..829f05383 100644 --- a/R/sits_tae.R +++ b/R/sits_tae.R @@ -277,12 +277,12 @@ sits_tae <- function(samples = NULL, values <- array( data = as.matrix(values), dim = c(n_samples, n_times, n_bands) ) - # Get batch size - batch_size <- 
sits_env[["batch_size"]] # if CUDA is available and gpu memory is defined, transform values # to torch dataloader - if (.torch_has_cuda() && .has(gpu_memory)) { - # transfor the input array to a dataset + if (.torch_has_cuda()) { + # Get batch size + batch_size <- sits_env[["batch_size"]] + # transform the input array to a dataset values <- .as_dataset(values) # To the data set to a torcj transform in a dataloader to use the batch size values <- torch::dataloader(values, batch_size = batch_size) diff --git a/R/sits_tempcnn.R b/R/sits_tempcnn.R index cc5b03dd3..4ba2c3751 100644 --- a/R/sits_tempcnn.R +++ b/R/sits_tempcnn.R @@ -323,12 +323,12 @@ sits_tempcnn <- function(samples = NULL, values <- array( data = as.matrix(values), dim = c(n_samples, n_times, n_bands) ) - # Get batch size - batch_size <- sits_env[["batch_size"]] # if CUDA is available and gpu memory is defined, transform values # to torch dataloader if (.torch_has_cuda()) { - # transfor the input array to a dataset + # Get batch size + batch_size <- sits_env[["batch_size"]] + # transform the input array to a dataset values <- .as_dataset(values) # To the data set to a torch transform in a dataloader to use the batch size values <- torch::dataloader(values, batch_size = batch_size)