"""
    MiniBatch(b::Int)

Mini-batch k-means algorithm implementation after Sculley (2010). `b` is the
size of the batch sampled from `X` at each iteration.

```julia
X = rand(30, 100_000) # 100_000 random points in 30 dimensions

kmeans(MiniBatch(100), X, 3) # 3 clusters, MiniBatch algorithm with 100 batch samples at each iteration
```
"""
struct MiniBatch <: AbstractKMeansAlg
    b::Int # batch size
end

MiniBatch() = MiniBatch(100)
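
# A typical call path (sketch; within this package, the public `kmeans` entry
# point builds the workspace via `create_containers` below and then dispatches
# to `kmeans!`):
#
#   X = rand(30, 10_000)
#   res = kmeans(MiniBatch(256), X, 5; max_iters = 200)
#   res.converged, res.totalcost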

function kmeans!(alg::MiniBatch, containers, X, k,
                 weights = nothing, metric = Euclidean(); n_threads = Threads.nthreads(),
                 k_init = "k-means++", init = nothing, max_iters = 300,
                 tol = eltype(X)(1e-6), max_no_improvement = 10, verbose = false, rng = Random.GLOBAL_RNG)

    # Retrieve initialized artifacts from the container
    centroids = containers.centroids_new
    batch_rand_idx = containers.batch_rand_idx
    labels = containers.labels

    # Get the type and dimensions of the design matrix, X - (Step 1)
    T = eltype(X)
    nrow, ncol = size(X)

    # Initialize cluster centers - (Step 2) in paper
    centroids .= isnothing(init) ? smart_init(X, k, n_threads, weights, rng, init = k_init).centroids : deepcopy(init)

    # Initialize the counter of points assigned to each cluster - (Step 3) in paper
    N = zeros(T, k)
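    # N accumulates over the whole run and is never reset, so the per-centre
    # learning rate 1 / N[label] used below decays as a centre receives samples.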

    # Initialize various artifacts
    converged = false
    niters = 1
    counter = 0
    J_previous = zero(T)
    J = zero(T)
    totalcost = zero(T)

    # Main loop: batch-update the centroids until convergence
    while niters <= max_iters # Step 4 in paper

        # Pick b examples at random from X (Step 5 in paper)
        isnothing(weights) ? rand!(rng, batch_rand_idx, 1:ncol) : wsample!(rng, 1:ncol, weights, batch_rand_idx)
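        # (`rand!` draws indices uniformly with replacement; `wsample!`, from
        # StatsBase, draws them with probability proportional to `weights`.)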

        # Label the batch samples with their nearest centers (Steps 6 & 7)
        @inbounds for i in batch_rand_idx
            min_dist = distance(metric, X, centroids, i, 1)
            label = 1

            for j in 2:size(centroids, 2)
                dist = distance(metric, X, centroids, i, j)
                label = dist < min_dist ? j : label
                min_dist = dist < min_dist ? dist : min_dist
            end

            labels[i] = label

            ##### Batch gradient step #####
            # Iterate over the batch examples (each column) using the cached
            # center/label for each example, label = labels[i] (Steps 9 & 10)

            # Update the per-center counts (Step 11)
            N[label] += isnothing(weights) ? 1 : weights[i]

            # Get the per-center learning rate (Step 12)
            lr = 1 / N[label]

            # Take the gradient step (Step 13) # TODO: Replace with faster loop?
            @views centroids[:, label] .= (1 - lr) .* centroids[:, label] .+ (lr .* X[:, i])
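            # With unit weights this keeps centroids[:, label] equal to the
            # running mean of the samples assigned to that center so far, since
            # the convex combination uses lr = 1 / N[label].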
        end

        # Reassign all labels based on the centres updated from the latest batch
        labels .= reassign_labels(X, metric, labels, centroids)

        # Calculate the cost on the whole dataset after reassignment
        @parallelize 1 ncol sum_of_squares(containers, X, labels, centroids, weights, metric)
        J = sum(containers.sum_of_squares)

        # Show progress
        if verbose
            println("Iteration $niters: Jclust = $J")
        end

        # Check the early-stopping convergence criterion
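        # (The cost must fail to change by more than `tol * J` for
        # `max_no_improvement` consecutive iterations before convergence is
        # declared; a single quiet iteration is not enough, since the
        # stochastic batch updates make J noisy.)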
        if (niters > 1) && (abs(J - J_previous) < (tol * J))
            counter += 1

            # Declare convergence if the max_no_improvement criterion is met
            if counter >= max_no_improvement
                converged = true
                # Compute the label assignment for the complete dataset
                labels .= reassign_labels(X, metric, labels, centroids)

                # Compute totalcost for the complete dataset
                @parallelize 1 ncol sum_of_squares(containers, X, labels, centroids, weights, metric)
                totalcost = sum(containers.sum_of_squares)

                # Print the convergence message
                if verbose
                    println("Successfully terminated with convergence.")
                end

                break
            end
        else
            counter = 0
        end

        # Warn the user if the model does not converge within max_iters
        if (niters >= max_iters) && (!converged)

            if verbose
                println("Clustering model failed to converge. Labelling data with latest centroids.")
            end

            labels .= reassign_labels(X, metric, labels, centroids)

            # Compute totalcost for the unconverged model
            @parallelize 1 ncol sum_of_squares(containers, X, labels, centroids, weights, metric)
            totalcost = sum(containers.sum_of_squares)

            break
        end

        J_previous = J
        niters += 1
    end

    # Push learned artifacts to KmeansResult
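    # (Per-point costs, cluster counts and cluster weights are not tracked by
    # this algorithm, so empty placeholders fill those fields.)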
    return KmeansResult(centroids, labels, T[], Int[], T[], totalcost, niters, converged)
end

"""
    reassign_labels(DMatrix, metric, labels, centres)

Internal function that relabels each column of `DMatrix` with its nearest
centre in `centres` under `metric`, writing into `labels` in place.
"""
function reassign_labels(DMatrix, metric, labels, centres)
    @inbounds for i in axes(DMatrix, 2)
        min_dist = distance(metric, DMatrix, centres, i, 1)
        label = 1

        for j in 2:size(centres, 2)
            dist = distance(metric, DMatrix, centres, i, j)
            label = dist < min_dist ? j : label
            min_dist = dist < min_dist ? dist : min_dist
        end

        labels[i] = label
    end
    return labels
end
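
# Example (illustrative): with two points that coincide with the two centres,
# each point is assigned to its own centre:
#
#   X = [0.0 10.0; 0.0 10.0]
#   reassign_labels(X, Euclidean(), Vector{Int}(undef, 2), X) # -> [1, 2]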

"""
    create_containers(alg::MiniBatch, X, k, nrow, ncol, n_threads)

Internal function for the creation of all necessary intermediate structures.

- `centroids_new` - container which holds the new positions of centroids
- `labels` - vector which holds the labels of the corresponding points
- `sum_of_squares` - vector which holds the sum-of-squares value for each thread
- `batch_rand_idx` - vector which holds the selected batch indices
"""
function create_containers(alg::MiniBatch, X, k, nrow, ncol, n_threads)
    # Preallocate placeholders to avoid allocations in the main loop
    T = eltype(X)
    labels = Vector{Int}(undef, ncol)          # labels vector
    sum_of_squares = Vector{T}(undef, 1)       # total sum-of-squares calculation
    batch_rand_idx = Vector{Int}(undef, alg.b) # selected batch indices
    centroids_new = Matrix{T}(undef, nrow, k)  # centroids

    return (batch_rand_idx = batch_rand_idx, centroids_new = centroids_new,
            labels = labels, sum_of_squares = sum_of_squares)
end
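
# Sketch of how `kmeans!` receives its workspace (normally handled by the
# public `kmeans` entry point rather than called directly):
#
#   containers = create_containers(MiniBatch(100), X, 3, size(X, 1), size(X, 2), 1)
#   kmeans!(MiniBatch(100), containers, X, 3)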