Commit 21850fe

Adding logistic regression & optimizing the gradient descent algorithm
1 parent 274ca13 commit 21850fe

File tree

* DIRECTORY.md
* src/machine_learning/logistic_regression.rs
* src/machine_learning/mod.rs
* src/machine_learning/optimization/gradient_descent.rs

4 files changed: +66 −1 lines changed

DIRECTORY.md (+1)

@@ -156,6 +156,7 @@
 * [Cholesky](https://github.com/TheAlgorithms/Rust/blob/master/src/machine_learning/cholesky.rs)
 * [K Means](https://github.com/TheAlgorithms/Rust/blob/master/src/machine_learning/k_means.rs)
 * [Linear Regression](https://github.com/TheAlgorithms/Rust/blob/master/src/machine_learning/linear_regression.rs)
+* [Logistic Regression](https://github.com/TheAlgorithms/Rust/blob/master/src/machine_learning/logistic_regression.rs)
 * Loss Function
   * [Average Margin Ranking Loss](https://github.com/TheAlgorithms/Rust/blob/master/src/machine_learning/loss_function/average_margin_ranking_loss.rs)
   * [Hinge Loss](https://github.com/TheAlgorithms/Rust/blob/master/src/machine_learning/loss_function/hinge_loss.rs)
src/machine_learning/logistic_regression.rs (+62)

@@ -0,0 +1,62 @@
use super::optimization::gradient_descent;
use std::f64::consts::E;

/// Returns the weights after performing logistic regression on the input data points.
pub fn logistic_regression(
    data_points: Vec<(Vec<f64>, f64)>,
    iterations: usize,
    learning_rate: f64,
) -> Option<Vec<f64>> {
    if data_points.is_empty() {
        return None;
    }

    let num_features = data_points[0].0.len();
    let mut params = vec![0.0; num_features];

    let derivative_fn = |params: &[f64]| derivative(params, &data_points);

    gradient_descent(derivative_fn, &mut params, learning_rate, iterations as i32);

    Some(params)
}

fn derivative(params: &[f64], data_points: &[(Vec<f64>, f64)]) -> Vec<f64> {
    let num_features = params.len();
    let mut gradients = vec![0.0; num_features];

    for (features, y_i) in data_points {
        let z = params.iter().zip(features).map(|(p, x)| p * x).sum::<f64>();
        let prediction = 1.0 / (1.0 + E.powf(-z));

        for (i, x_i) in features.iter().enumerate() {
            gradients[i] += (prediction - y_i) * x_i;
        }
    }

    gradients
}

#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn test_logistic_regression() {
        let data = vec![
            (vec![0.0, 0.0], 0.0),
            (vec![1.0, 1.0], 1.0),
            (vec![2.0, 2.0], 1.0),
        ];
        let result = logistic_regression(data, 10000, 0.1);
        assert!(result.is_some());
        let params = result.unwrap();
        assert!((params[0] - 6.902976808251308).abs() < 1e-6);
        assert!((params[1] - 6.902976808251308).abs() < 1e-6);
    }

    #[test]
    fn test_empty_list_logistic_regression() {
        assert_eq!(logistic_regression(vec![], 10000, 0.1), None);
    }
}
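
For reference, `derivative` computes the batch gradient of the logistic log-loss: for each weight w_i it accumulates (sigmoid(w·x) − y) · x_i over all data points, where sigmoid(z) = 1 / (1 + e^(−z)) is the `prediction` term above. Because the three training points are linearly separable, gradient descent keeps pushing the weights toward a hard 0/1 split, which is why the expected weights in the test are fairly large. Below is a hypothetical usage sketch, not part of the commit; it assumes `logistic_regression` is in scope (e.g. via the re-export added in mod.rs below) and scores a new point with the learned weights.

// Hypothetical usage sketch (not part of the commit). Assumes the
// `logistic_regression` function above is in scope.
fn sigmoid(z: f64) -> f64 {
    1.0 / (1.0 + (-z).exp())
}

fn main() {
    let data = vec![
        (vec![0.0, 0.0], 0.0),
        (vec![1.0, 1.0], 1.0),
        (vec![2.0, 2.0], 1.0),
    ];
    let weights = logistic_regression(data, 10000, 0.1).expect("data is non-empty");

    // Score an unseen feature vector: dot product with the weights, then sigmoid.
    let x = [1.5, 1.5];
    let z: f64 = weights.iter().zip(x.iter()).map(|(w, xi)| w * xi).sum();
    println!("P(y = 1 | x) = {:.4}", sigmoid(z)); // close to 1.0 for this point
}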

src/machine_learning/mod.rs (+2)

@@ -1,12 +1,14 @@
 mod cholesky;
 mod k_means;
 mod linear_regression;
+mod logistic_regression;
 mod loss_function;
 mod optimization;
 
 pub use self::cholesky::cholesky;
 pub use self::k_means::k_means;
 pub use self::linear_regression::linear_regression;
+pub use self::logistic_regression::logistic_regression;
 pub use self::loss_function::average_margin_ranking_loss;
 pub use self::loss_function::hng_loss;
 pub use self::loss_function::huber_loss;
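
The `pub use` line is what keeps call sites short: the module itself stays private (`mod`, not `pub mod`), and the function is re-exported one level up. A minimal sketch of a hypothetical crate-internal caller:

// Hypothetical caller elsewhere in the crate: the re-export means the
// function is imported directly, without naming the logistic_regression file.
use crate::machine_learning::logistic_regression;

fn fit_toy_model() -> Option<Vec<f64>> {
    logistic_regression(vec![(vec![1.0], 1.0)], 100, 0.1)
}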

src/machine_learning/optimization/gradient_descent.rs (+1 −1)

@@ -23,7 +23,7 @@
 /// A reference to the optimized parameter vector `x`.
 
 pub fn gradient_descent(
-    derivative_fn: fn(&[f64]) -> Vec<f64>,
+    derivative_fn: impl Fn(&[f64]) -> Vec<f64>,
     x: &mut Vec<f64>,
     learning_rate: f64,
     num_iterations: i32,
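
This one-line change is the "optimizing the gradient descent algorithm" half of the commit: the old signature took a bare function pointer (`fn`), which cannot capture environment, so the `derivative_fn` closure in logistic_regression (which borrows `data_points`) would not have compiled against it. `impl Fn` accepts both plain functions and capturing closures, and still uses static dispatch, so there is no runtime cost. A minimal sketch of the distinction, using hypothetical helper names:

// Hypothetical helpers illustrating fn pointers vs. the Fn trait.
fn takes_fn_pointer(_f: fn(&[f64]) -> Vec<f64>) {}
fn takes_impl_fn(_f: impl Fn(&[f64]) -> Vec<f64>) {}

fn negate(params: &[f64]) -> Vec<f64> {
    params.iter().map(|p| -p).collect()
}

fn main() {
    let data_points = vec![(vec![1.0, 2.0], 1.0)];

    // A closure that borrows `data_points`, like `derivative_fn` above.
    let capturing = |params: &[f64]| {
        params
            .iter()
            .map(|p| p * data_points.len() as f64)
            .collect::<Vec<f64>>()
    };

    takes_fn_pointer(negate); // a plain function coerces to a fn pointer
    takes_impl_fn(negate); // and also implements Fn
    takes_impl_fn(capturing); // capturing closures implement Fn...
    // takes_fn_pointer(capturing); // ...but do not coerce to fn pointers
}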
