|
9 | 9 | #' hyperparameters for deep learning models. |
10 | 10 | #' |
11 | 11 | #' @note |
12 | | -#' Machine learning models use stochastic gradient descent (SGD) techniques to |
13 | | -#' find optimal solutions. To perform SGD, models use optimization |
14 | | -#' algorithms which have hyperparameters that have to be adjusted |
15 | | -#' to achieve best performance for each application. |
16 | | -# |
| 12 | +#' |
| 13 | +#' Machine learning algorithms have hyperparameters that control |
| 14 | +#' their behaviour. This function allows users to test
| 15 | +#' different combinations of hyperparameters for a given sample set, |
| 16 | +#' thus selecting the set of values that best fits the training data.
| 17 | +#' The \code{sits_tuning} function can be used with both traditional |
| 18 | +#' machine learning methods (e.g., random forests) as well as
| 19 | +#' deep learning ones. |
| 20 | +#' |
17 | 21 | #' Instead of performing an exhaustive test of all parameter combinations, |
18 | 22 | #' \code{sits_tuning} selects them randomly. |
19 | 23 | #' Validation is done using an independent set |
|
22 | 26 | #' parameter should be passed by calling |
23 | 27 | #' \code{\link[sits]{sits_tuning_hparams}}. |
24 | 28 | #' |
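As a sketch of how such a search space can be declared, assuming the choice() sampler used in the examples below (the mtry values are illustrative, not from the source):

    # each trial draws one combination at random from this space,
    # rather than enumerating all of them
    params <- sits_tuning_hparams(
        num_trees = choice(100, 200, 300),
        mtry = choice(2, 4, 6)
    )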
| 29 | +#' Deep learning models use stochastic gradient descent (SGD) techniques to |
| 30 | +#' find optimal solutions. To perform SGD, models use optimization |
| 31 | +#' algorithms which have hyperparameters that have to be adjusted |
| 32 | +#' to achieve best performance for each application. |
| 33 | +#' |
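For the deep learning case, a sketch of putting the optimizer and its learning rate under tuning, assuming the loguniform() sampler described in the sits_tuning_hparams documentation (the range is illustrative):

    # sample the learning rate on a log scale across trials
    params <- sits_tuning_hparams(
        optimizer = torch::optim_adamw,
        opt_hparams = list(
            lr = loguniform(10^-2, 10^-4)
        )
    )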
25 | 34 | #' When using a GPU for deep learning, \code{gpu_memory} indicates the |
26 | 35 | #' memory of the graphics card which is available for processing. |
27 | 36 | #' The parameter \code{batch_size} defines the size of the matrix |
|
69 | 78 | #' |
70 | 79 | #' @examples |
71 | 80 | #' if (sits_run_examples()) { |
72 | | -#' # find best learning rate parameters for TempCNN |
| 81 | +#' # find best learning rate for TempCNN |
73 | 82 | #' tuned <- sits_tuning( |
74 | 83 | #' samples_modis_ndvi, |
75 | 84 | #' ml_method = sits_tempcnn(), |
|
89 | 98 | #' accuracy <- tuned$accuracy[[1]] |
90 | 99 | #' kappa <- tuned$kappa[[1]] |
91 | 100 | #' best_lr <- tuned$opt_hparams[[1]]$lr |
| 101 | +#'
| 102 | +#' # find best number of trees for random forest
| 103 | +#' rf_tuned <- sits_tuning( |
| 104 | +#' samples_modis_ndvi, |
| 105 | +#' ml_method = sits_rfor(), |
| 106 | +#' params = sits_tuning_hparams( |
| 107 | +#' num_trees = choice(100, 200, 300) |
| 108 | +#' ), |
| 109 | +#' trials = 10, |
| 110 | +#' multicores = 2, |
| 111 | +#' progress = FALSE |
| 112 | +#' ) |
| 113 | +#' # obtain accuracy, kappa and best number of trees
| 114 | +#' rf_accuracy <- rf_tuned$accuracy[[1]] |
| 115 | +#' rf_kappa <- rf_tuned$kappa[[1]] |
| 116 | +#' rf_best_num_trees <- rf_tuned$opt_hparams[[1]]$num_trees
92 | 117 | #' } |
93 | 118 | #' |
94 | 119 | #' @export |
@@ -130,7 +155,11 @@ sits_tuning <- function(samples, |
130 | 155 | .check_that(!"samples" %in% names(params), |
131 | 156 | msg = .conf("messages", "sits_tuning_samples") |
132 | 157 | ) |
| 158 | + # get the parameters with defaults |
133 | 159 | params_default <- formals(ml_function) |
| 160 | + # remove dots from parameters |
| 161 | + params_default <- params_default[names(params_default) != "..."] |
| 162 | + # check parameters |
134 | 163 | .check_chr_within( |
135 | 164 | x = names(params), |
136 | 165 | within = names(params_default) |
|
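For reference, a standalone sketch of what the default-parameter handling above does, using a hypothetical constructor signature:

    # formals() returns the formal arguments of a function, including "...",
    # which carries no default and must be dropped before the name check
    ml_function <- function(samples = NULL, num_trees = 100, mtry = NULL, ...) NULL
    params_default <- formals(ml_function)
    params_default <- params_default[names(params_default) != "..."]
    names(params_default)
    #> [1] "samples"   "num_trees" "mtry"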