mirror of
https://github.com/davidalbertonogueira/MLP.git
synced 2025-12-16 20:07:07 +03:00
Finished correcting "int" to "size_t" to avoid nasty errors and implemented a test for the SetWeights() function
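The int-to-size_t change matters because the container sizes and node counts indexed in the classes below are unsigned: comparing them against a signed int draws -Wsign-compare warnings, and a negative index silently converts to a huge unsigned value. A minimal stand-alone sketch of the mismatch (illustration only, not code from this repository):

    #include <cstddef>
    #include <iostream>
    #include <vector>

    int main() {
      std::vector<double> weights{0.1, 0.2, 0.3};

      // Warned pattern: signed 'i' compared against the unsigned result of
      // weights.size(); compiles, but emits -Wsign-compare.
      for (int i = 0; i < weights.size(); ++i)
        std::cout << weights[i] << '\n';

      // Pattern adopted by this commit: size_t index, unsigned comparison.
      for (size_t i = 0; i < weights.size(); ++i)
        std::cout << weights[i] << '\n';
      return 0;
    }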
BIN  data/iris.mlp (Binary file not shown)
 13  src/Layer.h
@@ -5,9 +5,6 @@
 #ifndef LAYER_H
 #define LAYER_H
 
-#include "Utils.h"
-#include "Node.h"
-
 #include <stdio.h>
 #include <stdlib.h>
 #include <iostream>
@@ -16,6 +13,8 @@
 #include <vector>
 #include <algorithm>
 #include <cassert> // for assert()
+#include "Node.h"
+#include "Utils.h"
 
 class Layer {
 public:
@@ -82,7 +81,7 @@ public:
 
     output->resize(m_num_nodes);
 
-    for (int i = 0; i < m_num_nodes; ++i) {
+    for (size_t i = 0; i < m_num_nodes; ++i) {
       m_nodes[i].GetOutputAfterActivationFunction(input,
                                                   m_activation_function,
                                                   &((*output)[i]));
@@ -111,7 +110,7 @@ public:
       dE_doj = deriv_error[i];
      doj_dnetj = m_deriv_activation_function(net_sum);
 
-      for (int j = 0; j < m_num_inputs_per_node; j++) {
+      for (size_t j = 0; j < m_num_inputs_per_node; j++) {
        (*deltas)[j] += dE_doj * doj_dnetj * m_nodes[i].GetWeights()[j];
 
        dnetj_dwij = input_layer_activation[j];
@@ -180,8 +179,8 @@ public:
  };
 
 protected:
-  int m_num_inputs_per_node{ 0 };
-  int m_num_nodes{ 0 };
+  size_t m_num_inputs_per_node{ 0 };
+  size_t m_num_nodes{ 0 };
  std::vector<Node> m_nodes;
 
  std::string m_activation_function_str;
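In the @@ -111 hunk only the index type changes; the gradient bookkeeping is untouched. The variable names encode the chain rule dE/dw_ij = (dE/do_j) * (do_j/dnet_j) * (dnet_j/dw_ij). A stand-alone numeric sketch of those factors, assuming a sigmoid activation (illustration only, not repository code):

    #include <cmath>
    #include <iostream>

    int main() {
      const double dE_doj = 0.25;    // derivative of the error w.r.t. the node output
      const double net_sum = 0.4;    // the node's weighted input sum
      const double oj = 1.0 / (1.0 + std::exp(-net_sum));   // sigmoid activation
      const double doj_dnetj = oj * (1.0 - oj);              // sigmoid derivative
      const double dnetj_dwij = -2.5;                        // input feeding weight w_ij
      const double dE_dwij = dE_doj * doj_dnetj * dnetj_dwij;
      std::cout << "dE/dw_ij = " << dE_dwij << '\n';
      return 0;
    }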
@@ -3,6 +3,7 @@
 // Author : David Nogueira
 //============================================================================
 #include "MLP.h"
+
 #include <stdio.h>
 #include <stdlib.h>
 #include <iostream>
@@ -10,6 +11,7 @@
 #include <fstream>
 #include <vector>
 #include <algorithm>
+
 #include "easylogging++.h"
 
 
@@ -5,10 +5,6 @@
 #ifndef MLP_H
 #define MLP_H
 
-#include "Layer.h"
-#include "Sample.h"
-#include "Utils.h"
-
 #include <stdio.h>
 #include <stdlib.h>
 #include <iostream>
@@ -17,6 +13,9 @@
 #include <vector>
 #include <algorithm>
 #include <exception>
+#include "Layer.h"
+#include "Sample.h"
+#include "Utils.h"
 
 class MLP {
 public:
@@ -54,7 +53,7 @@ private:
               const std::vector<std::string> & layers_activfuncs,
               bool use_constant_weight_init,
               double constant_weight_init = 0.5);
-  int m_num_inputs{ 0 };
+  size_t m_num_inputs{ 0 };
  int m_num_outputs{ 0 };
  int m_num_hidden_layers{ 0 };
  std::vector<uint64_t> m_layers_nodes;
@@ -347,6 +347,17 @@ UNIT(GetWeightsSetWeights) {
   // get layer weights
   std::vector<std::vector<double>> weights = my_mlp.GetLayerWeights( 1 );
 
+  for (const auto & training_sample : training_sample_set_with_bias) {
+    std::vector<double> output;
+    my_mlp.GetOutput(training_sample.input_vector(), &output);
+    for (size_t i = 0; i < num_outputs; i++) {
+      bool predicted_output = output[i] > 0.5 ? true : false;
+      std::cout << "PREDICTED OUTPUT IS NOW: " << output[i] << std::endl;
+      bool correct_output = training_sample.output_vector()[i] > 0.5 ? true : false;
+      ASSERT_TRUE(predicted_output == correct_output);
+    }
+  }
+
   // the expected value of the internal weights
   // after training are 1.65693 -0.538749
   ASSERT_TRUE( 1.6 <= weights[0][0] && weights[0][0] <= 1.7 );
@@ -358,24 +369,14 @@ UNIT(GetWeightsSetWeights) {
 
   my_mlp.SetLayerWeights( 1, zeroWeights );
 
-  /*
-   *
-   * PREDICTED OUTPUT IS NOW: 0.335394
-     PREDICTED OUTPUT IS NOW: 1.13887
-     PREDICTED OUTPUT IS NOW: 0.180468
-     PREDICTED OUTPUT IS NOW: 1.00535
-   *
-   */
   for (const auto & training_sample : training_sample_set_with_bias) {
     std::vector<double> output;
     my_mlp.GetOutput(training_sample.input_vector(), &output);
     for (size_t i = 0; i < num_outputs; i++) {
-      bool predicted_output = output[i] > 0.5 ? true : false;
-      std::cout << "PREDICTED OUTPUT IS NOW: " << output[i] << std::endl;
-      bool correct_output = training_sample.output_vector()[i] > 0.5 ? true : false;
-      ASSERT_TRUE(predicted_output == correct_output);
+      ASSERT_TRUE( -0.0001L <= output[i] && output[i] <= 0.0001L );
     }
   }
 
   LOG(INFO) << "Trained with success." << std::endl;
 }
 
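The second half of the hunk drops the class-label comparison in favor of a direct range assertion: after my_mlp.SetLayerWeights( 1, zeroWeights ), the output layer's weighted sums are zero for every input, so the test expects every output to land in [-0.0001, 0.0001]. A stand-alone sketch of that property (illustration only, not repository code):

    #include <cassert>
    #include <numeric>
    #include <vector>

    int main() {
      const std::vector<double> input{1.0, -2.5, 3.0};                 // arbitrary input
      const std::vector<double> zero_weights(input.size(), 0.0);       // all-zero layer weights

      // The weighted sum against all-zero weights is exactly 0.0, whatever the input.
      const double net = std::inner_product(input.begin(), input.end(),
                                            zero_weights.begin(), 0.0);
      assert(-0.0001 <= net && net <= 0.0001);
      return 0;
    }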
@@ -5,8 +5,6 @@
 #ifndef NODE_H
 #define NODE_H
 
-#include "Utils.h"
-
 #include <stdio.h>
 #include <stdlib.h>
 #include <iostream>
@@ -16,6 +14,7 @@
 #include <algorithm>
 #include <cassert> // for assert()
 #include <exception>
+#include "Utils.h"
 
 #define CONSTANT_WEIGHT_INITIALIZATION 0
 
@@ -150,7 +149,7 @@ public:
  };
 
 protected:
-  int m_num_inputs{ 0 };
+  size_t m_num_inputs{ 0 };
  double m_bias{ 0.0 };
  std::vector<double> m_weights;
 };
@@ -5,7 +5,6 @@
 #ifndef UTILS_H
 #define UTILS_H
 
-#include "Chrono.h"
 #include <stdlib.h>
 #include <math.h>
 #include <numeric>
@@ -22,6 +21,8 @@
 #include <typeinfo>
 #include <typeindex>
 #include <cassert>
+
+#include "Chrono.h"
 #ifdef _WIN32
 #include <time.h>
 #else