Skip to content

Commit

Permalink
adjustments in CUDA processing for DL models
Browse files Browse the repository at this point in the history
  • Loading branch information
gilbertocamara committed Jan 30, 2025
1 parent 8e127ad commit a7d847f
Show file tree
Hide file tree
Showing 5 changed files with 20 additions and 17 deletions.
7 changes: 6 additions & 1 deletion R/sits_classify.R
Original file line number Diff line number Diff line change
Expand Up @@ -39,6 +39,7 @@
#' @param multicores Number of cores to be used for classification
#' (integer, min = 1, max = 2048).
#' @param gpu_memory Memory available in GPU in GB (default = 4)
#' @param batch_size Batch size for GPU classification.
#' @param n_sam_pol Number of time series per segment to be classified
#' (integer, min = 10, max = 50).
#' @param output_dir Valid directory for output file.
Expand Down Expand Up @@ -79,7 +80,11 @@
#' Use an sf object or a shapefile to define it.
#'
#' When using a GPU for deep learning, \code{gpu_memory} indicates the
#' memory of available in the graphics card.
#' memory available in the graphics card. The parameter \code{batch_size}
#' defines the size of the matrix (measured in number of rows)
#' which is sent to the GPU for classification. Users can test
#' different sizes to best fit their GPU architecture.
#'
#' It is not possible to have an exact idea of the size of Deep Learning
#' models in GPU memory, as the complexity of the model and factors
#' such as CUDA Context increase the size of the model in memory.
Expand Down
8 changes: 4 additions & 4 deletions R/sits_lighttae.R
Original file line number Diff line number Diff line change
Expand Up @@ -310,14 +310,14 @@ sits_lighttae <- function(samples = NULL,
values <- array(
data = as.matrix(values), dim = c(n_samples, n_times, n_bands)
)
# Get batch size
batch_size <- sits_env[["batch_size"]]
# if CUDA is available and gpu memory is defined, transform values
# to torch dataloader
if (.torch_has_cuda() && .has(gpu_memory)) {
if (.torch_has_cuda()) {
# Get batch size
batch_size <- sits_env[["batch_size"]]
    # transform the input array to a dataset
values <- .as_dataset(values)
# To the data set to a torcj transform in a dataloader to use the batch size
    # Transform the dataset into a torch dataloader to use the batch size
values <- torch::dataloader(values, batch_size = batch_size)
# Do GPU classification with dataloader
values <- .try(
Expand Down
8 changes: 3 additions & 5 deletions R/sits_mlp.R
Original file line number Diff line number Diff line change
Expand Up @@ -250,13 +250,11 @@ sits_mlp <- function(samples = NULL,
values <- .pred_normalize(pred = values, stats = ml_stats)
# Transform input into matrix
values <- as.matrix(values)
# Get batch size
batch_size <- sits_env[["batch_size"]]
# if CUDA is available and gpu memory is defined, transform values
# to torch dataloader
if (.torch_has_cuda() && .has(gpu_memory)) {
# set the batch size according to the GPU memory
b_size <- 2^gpu_memory
if (.torch_has_cuda()) {
# Get batch size
batch_size <- sits_env[["batch_size"]]
    # transform the input array to a dataset
values <- .as_dataset(values)
    # Transform the dataset into a torch dataloader to use the batch size
Expand Down
8 changes: 4 additions & 4 deletions R/sits_tae.R
Original file line number Diff line number Diff line change
Expand Up @@ -277,12 +277,12 @@ sits_tae <- function(samples = NULL,
values <- array(
data = as.matrix(values), dim = c(n_samples, n_times, n_bands)
)
# Get batch size
batch_size <- sits_env[["batch_size"]]
# if CUDA is available and gpu memory is defined, transform values
# to torch dataloader
if (.torch_has_cuda() && .has(gpu_memory)) {
# transfor the input array to a dataset
if (.torch_has_cuda()) {
# Get batch size
batch_size <- sits_env[["batch_size"]]
# transform the input array to a dataset
values <- .as_dataset(values)
    # Transform the dataset into a torch dataloader to use the batch size
values <- torch::dataloader(values, batch_size = batch_size)
Expand Down
6 changes: 3 additions & 3 deletions R/sits_tempcnn.R
Original file line number Diff line number Diff line change
Expand Up @@ -323,12 +323,12 @@ sits_tempcnn <- function(samples = NULL,
values <- array(
data = as.matrix(values), dim = c(n_samples, n_times, n_bands)
)
# Get batch size
batch_size <- sits_env[["batch_size"]]
# if CUDA is available and gpu memory is defined, transform values
# to torch dataloader
if (.torch_has_cuda()) {
# transfor the input array to a dataset
# Get batch size
batch_size <- sits_env[["batch_size"]]
# transform the input array to a dataset
values <- .as_dataset(values)
    # Transform the dataset into a torch dataloader to use the batch size
values <- torch::dataloader(values, batch_size = batch_size)
Expand Down

0 comments on commit a7d847f

Please sign in to comment.