
Commit

Remove type alias
sbaldu committed Dec 5, 2024
1 parent 3f43747 commit 99a888c
Showing 6 changed files with 75 additions and 78 deletions.
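The commit drops the project-local alias `template <typename T> using shared = std::shared_ptr<T>;` and spells out std::shared_ptr at every point of use. A minimal sketch of the pattern applied across the files below (illustrative only, not copied verbatim from the repository; Layer comes from the project's Layer.h):

    // Before: signatures go through the local alias
    template <typename T>
    using shared = std::shared_ptr<T>;

    std::vector<double> grad(shared<Layer<T>> layer);

    // After: the alias is gone and the standard type is written out
    std::vector<double> grad(std::shared_ptr<Layer<T>> layer);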
18 changes: 9 additions & 9 deletions src/cuda/include/Activators.h
@@ -9,7 +9,7 @@
#include "Layer.h"

template <typename T>
using shared = std::shared_ptr<T>;

/*
template <typename T>
@@ -45,7 +45,7 @@ struct Step {
// Derivative of the activation function
double grad(double activated_value) { return 0; }
std::vector<double> grad(shared<Layer<T>> layer) {
std::vector<double> grad(std::shared_ptr<Layer<T>> layer) {
return std::vector<double>(layer->size(), 0);
}
@@ -81,7 +81,7 @@ struct Linear {
// Derivative of the activation function
double grad(double activated_value) { return 1; }
std::vector<double> grad(shared<Layer<T>> layer) {
std::vector<double> grad(std::shared_ptr<Layer<T>> layer) {
return std::vector<double>(layer->size(), 1);
}
std::vector<double> grad(std::vector<T> node_values) {
@@ -146,7 +146,7 @@ struct Sigmoid {
return activated_value * (1 - activated_value);
}

__host__ std::vector<double> grad(shared<Layer<T>> layer) {
__host__ std::vector<double> grad(std::shared_ptr<Layer<T>> layer) {
int N{layer->size()};
std::vector<double> gradient_values(N);

@@ -227,7 +227,7 @@ struct Tanh {
double grad(double activated_value) {
return 1 + pow(activated_value, 2);
}
std::vector<double> grad(shared<Layer<T>> layer) {
std::vector<double> grad(std::shared_ptr<Layer<T>> layer) {
int N{layer->size()};
std::vector<double> gradient_values(N);
for (int i{}; i < N; ++i) {
@@ -288,7 +288,7 @@ struct Elu {
return activated_value + A;
}
}
std::vector<double> grad(shared<Layer<T>> layer) {
std::vector<double> grad(std::shared_ptr<Layer<T>> layer) {
int N{layer->size()};
std::vector<double> gradient_values(N);
for (int i{}; i < N; ++i) {
@@ -341,7 +341,7 @@ struct Leaky_ReLU {
return 0.1;
}
}
std::vector<double> grad(shared<Layer<T>> layer) {
std::vector<double> grad(std::shared_ptr<Layer<T>> layer) {
int N{layer->size()};
std::vector<double> gradient_values(N);
for (int i{}; i < N; ++i) {
@@ -396,7 +396,7 @@ struct Parametric_ReLU {
return A;
}
}
std::vector<double> grad(shared<Layer<T>> layer) {
std::vector<double> grad(std::shared_ptr<Layer<T>> layer) {
int N{layer->size()};
std::vector<double> gradient_values(N);
for (int i{}; i < N; ++i) {
@@ -447,7 +447,7 @@ struct Swish {
double grad(double x) {
return Sigmoid<T>()(x) * (1 + x) * (1 - Sigmoid<T>()(x));
}
std::vector<double> grad(shared<Layer<T>> layer) {
std::vector<double> grad(std::shared_ptr<Layer<T>> layer) {
int N{layer->size()};
std::vector<double> gradient_values(N);
for (int i{}; i < N; ++i) {
6 changes: 3 additions & 3 deletions src/cuda/include/ErrorFunction.h
@@ -9,7 +9,7 @@
#include "Layer.h"

template <typename T>
using shared = std::shared_ptr<T>;

template <typename T, typename W, template <typename E> typename Activator>
struct MeanSquaredError {
@@ -29,8 +29,8 @@ struct MeanSquaredError {
template <typename U>
__host__ std::vector<double> grad(const std::vector<U>& expected_values,
int layer_id,
const std::vector<shared<Layer<T>>>& layers,
const std::vector<shared<Matrix<W>>>& weights) {
const std::vector<std::shared_ptr<Layer<T>>>& layers,
const std::vector<std::shared_ptr<Matrix<W>>>& weights) {
if (layers[layer_id + 1] == nullptr) {
int N{layers[layer_id]->size()};
std::vector<double> delta(N);
53 changes: 25 additions & 28 deletions src/cuda/include/Network.h
@@ -13,11 +13,8 @@
#include "DataFormats/Matrix.h"
#include "DataFormats/VectorOperations.h"

template <typename T>
using shared = std::shared_ptr<T>;

template <typename W>
void random_matrix(shared<Matrix<W>> matrix);
void random_matrix(std::shared_ptr<Matrix<W>> matrix);

template <typename T,
typename W,
@@ -28,9 +25,9 @@ template <typename T,
class Network {
private:
int n_layers;
std::vector<shared<Layer<T>>> m_layers;
std::vector<shared<Matrix<W>>> m_weights;
std::vector<shared<std::vector<W>>> m_bias;
std::vector<std::shared_ptr<Layer<T>>> m_layers;
std::vector<std::shared_ptr<Matrix<W>>> m_weights;
std::vector<std::shared_ptr<std::vector<W>>> m_bias;

public:
Network() = delete;
@@ -42,20 +39,20 @@ class Network {
void load_from_file(const std::string& path_to_file);

// Getters
const shared<Matrix<W>> weight_matrix(int layer_id) const;
const std::shared_ptr<Matrix<W>> weight_matrix(int layer_id) const;
const std::vector<T>& output_layer() const;

// Setters for the weight matrices
void set_matrix_data(int layer_id, Matrix<W> weight_matrix);
void set_matrix_data(int layer_id, shared<Matrix<W>> weight_matrix_ptr);
void set_matrix_data(int layer_id, std::shared_ptr<Matrix<W>> weight_matrix_ptr);

// Setters for the bias vectors
void set_bias_data(int layer_id, std::vector<W> bias_vector);
void set_bias_data(int layer_id, shared<std::vector<W>> bias_vector_ptr);
void set_bias_data(int layer_id, std::shared_ptr<std::vector<W>> bias_vector_ptr);

std::vector<T> forward_propatation(shared<Layer<T>>,
shared<Matrix<W>>,
shared<std::vector<W>>);
std::vector<T> forward_propatation(std::shared_ptr<Layer<T>>,
std::shared_ptr<Matrix<W>>,
std::shared_ptr<std::vector<W>>);
void forward_propatation();

template <typename U>
@@ -94,15 +91,15 @@ Network<T, W, Activator, Loss>::Network(const std::vector<int>& nodes_per_layer)
m_weights(n_layers + 1),
m_bias(n_layers - 1) {
for (int i{}; i < n_layers - 1; ++i) {
m_layers[i] = std::make_shared<Layer<T>>(nodes_per_layer[i]);
m_weights[i] =
std::make_shared<Matrix<W>>(nodes_per_layer[i + 1], nodes_per_layer[i]);
m_bias[i] = std::make_shared<std::vector<W>>(nodes_per_layer[i + 1]);

// Generate random weight matrices
random_matrix(m_weights[i]);
}
m_layers[n_layers - 1] = std::make_shared<Layer<T>>(nodes_per_layer.back());
m_layers[n_layers] = nullptr;
m_weights[n_layers - 1] = nullptr;
m_weights[n_layers] = nullptr;
@@ -142,7 +139,7 @@ template <typename T,
typename Activator,
template <typename E, typename LW, template <typename K> typename Act>
typename Loss>
const shared<Matrix<W>> Network<T, W, Activator, Loss>::weight_matrix(int layer_id) const {
const std::shared_ptr<Matrix<W>> Network<T, W, Activator, Loss>::weight_matrix(int layer_id) const {
return m_weights[layer_id];
}

@@ -164,7 +161,7 @@ template <typename T,
typename Loss>
void Network<T, W, Activator, Loss>::set_matrix_data(int layer_id,
Matrix<W> weight_matrix) {
m_weights[layer_id] = std::make_shared<Matrix<W>>(weight_matrix);
}

template <typename T,
@@ -174,7 +171,7 @@ template <typename T,
template <typename E, typename LW, template <typename K> typename Act>
typename Loss>
void Network<T, W, Activator, Loss>::set_matrix_data(
int layer_id, shared<Matrix<W>> weight_matrix_ptr) {
int layer_id, std::shared_ptr<Matrix<W>> weight_matrix_ptr) {
m_weights[layer_id] = weight_matrix_ptr;
}

@@ -186,7 +183,7 @@ template <typename T,
typename Loss>
void Network<T, W, Activator, Loss>::set_bias_data(int layer_id,
std::vector<W> bias_vector) {
m_bias[layer_id] = std::make_shared<std::vector<W>>(bias_vector);
}

template <typename T,
@@ -196,7 +193,7 @@ template <typename T,
template <typename E, typename LW, template <typename K> typename Act>
typename Loss>
void Network<T, W, Activator, Loss>::set_bias_data(
int layer_id, shared<std::vector<W>> bias_vector_ptr) {
int layer_id, std::shared_ptr<std::vector<W>> bias_vector_ptr) {
m_bias[layer_id] = bias_vector_ptr;
}

@@ -207,9 +204,9 @@ template <typename T,
template <typename E, typename LW, template <typename K> typename Act>
typename Loss>
std::vector<T> Network<T, W, Activator, Loss>::forward_propatation(
shared<Layer<T>> layer,
shared<Matrix<W>> weight_matrix,
shared<std::vector<W>> bias_vector) {
std::shared_ptr<Layer<T>> layer,
std::shared_ptr<Matrix<W>> weight_matrix,
std::shared_ptr<std::vector<W>> bias_vector) {
std::vector<W> next_layer_nodes{*weight_matrix * layer->nodes() + *bias_vector};

return Activator<T>()(next_layer_nodes);
@@ -313,9 +310,9 @@ void Network<T, W, Activator, Loss>::import_network(const std::string& filepath)
bias.push_back(std::stod(value));
}

m_weights[i] = std::make_shared<Matrix<W>>(
m_weights[i]->nrows(), m_weights[i]->ncols(), weights);
m_bias[i] = std::make_shared<std::vector<W>>(bias);
}
}

@@ -358,7 +355,7 @@ std::ostream& operator<<(std::ostream& out, const Network<T, W, Activator, Loss>
}

template <typename W>
inline void random_matrix(shared<Matrix<W>> matrix) {
inline void random_matrix(std::shared_ptr<Matrix<W>> matrix) {
std::mt19937 gen;
std::uniform_real_distribution<W> dis(-0.5, 0.5);

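With the alias gone, call sites spell the pointer type out as well. A hypothetical usage sketch of the setters declared above (the function name, layer index, and layer sizes are made up for illustration; Network, Matrix, Sigmoid, and MeanSquaredError are assumed to come from the project's headers, and the Matrix constructor arguments follow the rows-by-columns pattern used in the Network constructor):

    #include <memory>
    #include <vector>

    // Configure the first weight matrix and bias vector of a 3 -> 4 layer pair.
    void configure(Network<double, double, Sigmoid, MeanSquaredError>& net) {
      // std::shared_ptr overload of set_matrix_data
      net.set_matrix_data(0, std::make_shared<Matrix<double>>(4, 3));
      // std::shared_ptr overload of set_bias_data, bias initialised to zero
      net.set_bias_data(0, std::make_shared<std::vector<double>>(4, 0.0));
    }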
18 changes: 9 additions & 9 deletions src/nnhep/headers/Activators.hpp
@@ -11,7 +11,7 @@
namespace nnhep {

template <typename T>
using shared = std::shared_ptr<T>;

template <typename T>
struct Step {
@@ -43,7 +43,7 @@ namespace nnhep {
}

double grad(double activated_value) { return 0.; }
std::vector<double> grad(shared<Layer<T>> layer) {
std::vector<double> grad(std::shared_ptr<Layer<T>> layer) {
return std::vector<double>(layer->size(), 0.);
}
std::vector<double> grad(std::vector<T> node_values) {
@@ -75,7 +75,7 @@ namespace nnhep {
}

double grad(double activated_value) { return 1; }
std::vector<double> grad(shared<Layer<T>> layer) {
std::vector<double> grad(std::shared_ptr<Layer<T>> layer) {
return std::vector<double>(layer->size(), 1);
}
std::vector<double> grad(std::vector<T> node_values) {
@@ -109,7 +109,7 @@ namespace nnhep {
double grad(double activated_value) {
return activated_value * (1 - activated_value);
}
std::vector<double> grad(shared<Layer<T>> layer) {
std::vector<double> grad(std::shared_ptr<Layer<T>> layer) {
int N{layer->size()};
std::vector<double> gradient_values(N);
for (int i{}; i < N; ++i) {
@@ -153,7 +153,7 @@ namespace nnhep {
}

double grad(double activated_value) { return 1 + pow(activated_value, 2); }
std::vector<double> grad(shared<Layer<T>> layer) {
std::vector<double> grad(std::shared_ptr<Layer<T>> layer) {
int N{layer->size()};
std::vector<double> gradient_values(N);
for (int i{}; i < N; ++i) {
@@ -211,7 +211,7 @@ namespace nnhep {
return activated_value + A;
}
}
std::vector<double> grad(shared<Layer<T>> layer) {
std::vector<double> grad(std::shared_ptr<Layer<T>> layer) {
int N{layer->size()};
std::vector<double> gradient_values(N);
for (int i{}; i < N; ++i) {
@@ -261,7 +261,7 @@ namespace nnhep {
return 0.1;
}
}
std::vector<double> grad(shared<Layer<T>> layer) {
std::vector<double> grad(std::shared_ptr<Layer<T>> layer) {
int N{layer->size()};
std::vector<double> gradient_values(N);
for (int i{}; i < N; ++i) {
@@ -313,7 +313,7 @@ namespace nnhep {
return A;
}
}
std::vector<double> grad(shared<Layer<T>> layer) {
std::vector<double> grad(std::shared_ptr<Layer<T>> layer) {
int N{layer->size()};
std::vector<double> gradient_values(N);
for (int i{}; i < N; ++i) {
@@ -357,7 +357,7 @@ namespace nnhep {
}

double grad(double x) { return Sigmoid<T>()(x) * (1 + x) * (1 - Sigmoid<T>()(x)); }
std::vector<double> grad(shared<Layer<T>> layer) {
std::vector<double> grad(std::shared_ptr<Layer<T>> layer) {
int N{layer->size()};
std::vector<double> gradient_values(N);
for (int i{}; i < N; ++i) {
6 changes: 3 additions & 3 deletions src/nnhep/headers/ErrorFunction.hpp
@@ -11,7 +11,7 @@
namespace nnhep {

template <typename T>
using shared = std::shared_ptr<T>;

template <typename T, typename W, template <typename E> typename Activator>
struct MeanSquaredError {
@@ -31,8 +31,8 @@ namespace nnhep {
template <typename U>
std::vector<double> grad(const std::vector<U>& expected_values,
int layer_id,
const std::vector<shared<Layer<T>>>& layers,
const std::vector<shared<Matrix<W>>>& weights) {
const std::vector<std::shared_ptr<Layer<T>>>& layers,
const std::vector<std::shared_ptr<Matrix<W>>>& weights) {
if (layers[layer_id + 1] == nullptr) {
int N{layers[layer_id]->size()};
std::vector<double> delta(N);

