
Commit cb1b83d

Add mse_loss (#582)
1 parent db4b25c commit cb1b83d

File tree

3 files changed: +40 -0 lines changed

src/machine_learning/loss_function/mod.rs
src/machine_learning/loss_function/mse_loss.rs
src/machine_learning/mod.rs

src/machine_learning/loss_function/mod.rs

Lines changed: 3 additions & 0 deletions
@@ -0,0 +1,3 @@
+mod mse_loss;
+
+pub use self::mse_loss::mse_loss;

src/machine_learning/loss_function/mse_loss.rs

Lines changed: 35 additions & 0 deletions
@@ -0,0 +1,35 @@
+//! # Mean Squared Error Loss Function
+//!
+//! The `mse_loss` function calculates the Mean Squared Error (MSE) loss, a
+//! widely used loss function in machine learning regression tasks.
+//!
+//! ## Formula
+//!
+//! For a pair of actual and predicted value vectors `actual` and `predicted`,
+//! each of length `n`, the Mean Squared Error loss is calculated as:
+//!
+//! - loss = `sum((actual[i] - predicted[i])^2) / n`
+//!
+//! It returns the average loss by dividing the accumulated `total_loss` by the
+//! total number of elements.
+//!
+pub fn mse_loss(predicted: &[f64], actual: &[f64]) -> f64 {
+    let mut total_loss: f64 = 0.0;
+    for (p, a) in predicted.iter().zip(actual.iter()) {
+        let diff: f64 = p - a;
+        total_loss += diff * diff;
+    }
+    total_loss / (predicted.len() as f64)
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_mse_loss() {
+        let predicted_values: Vec<f64> = vec![1.0, 2.0, 3.0, 4.0];
+        let actual_values: Vec<f64> = vec![1.0, 3.0, 3.5, 4.5];
+        assert_eq!(mse_loss(&predicted_values, &actual_values), 0.375);
+    }
+}
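
For reference, the expected value in the test follows directly from the formula: the element-wise differences are 0.0, -1.0, -0.5 and -0.5, their squares sum to 1.5, and averaging over the 4 elements gives 0.375. A minimal standalone sketch of a call, assuming `mse_loss` has been brought into scope (for example via the re-export added in src/machine_learning/mod.rs below):

fn main() {
    // Differences (predicted - actual): 0.0, -1.0, -0.5, -0.5
    // Squares: 0.0 + 1.0 + 0.25 + 0.25 = 1.5; averaged over 4 elements = 0.375
    let predicted = vec![1.0, 2.0, 3.0, 4.0];
    let actual = vec![1.0, 3.0, 3.5, 4.5];
    let loss = mse_loss(&predicted, &actual);
    println!("MSE loss: {}", loss); // prints 0.375
}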

src/machine_learning/mod.rs

Lines changed: 2 additions & 0 deletions
@@ -1,5 +1,7 @@
 mod linear_regression;
+mod loss_function;
 mod optimization;
 
 pub use self::linear_regression::linear_regression;
+pub use self::loss_function::mse_loss;
 pub use self::optimization::gradient_descent;
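
With this wiring, `mse_loss` is re-exported at the `machine_learning` module level. A hedged sketch of how another module in the same crate might call it through that path (the helper function below is hypothetical, not part of this commit, and assumes `machine_learning` is a public module at the crate root, as the existing re-exports suggest):

use crate::machine_learning::mse_loss;

// Hypothetical helper: report how well predictions match the actual values.
fn report_fit(predicted: &[f64], actual: &[f64]) {
    let loss = mse_loss(predicted, actual);
    println!("mean squared error: {}", loss);
}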
