Small changes in test file. Add more tests.

This commit is contained in:
davidjacnogueira
2016-11-03 21:57:35 +00:00
parent be4d839c98
commit c7b67885a4
2 changed files with 88 additions and 7 deletions

View File

@@ -3,4 +3,4 @@
Simple multilayer perceptron c++ implementation.
David Nogueira, 2016.03.26
David Nogueira, 2016.11.01

View File

@@ -35,7 +35,8 @@ UNIT(LearnAND) {
size_t num_examples = training_sample_set_with_bias.size();
size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
MLP my_mlp(num_features, 1, 1, 2, false);
size_t num_outputs = training_sample_set_with_bias[0].GetOutputVectorSize();
MLP my_mlp(num_features, num_outputs, 1, 2, false);
//Train MLP
my_mlp.UpdateMiniBatch(training_sample_set_with_bias, 0.5, 500, 0.25);
@@ -73,7 +74,8 @@ UNIT(LearnNAND) {
size_t num_examples = training_sample_set_with_bias.size();
size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
MLP my_mlp(num_features, 1, 1, 2, false);
size_t num_outputs = training_sample_set_with_bias[0].GetOutputVectorSize();
MLP my_mlp(num_features, num_outputs, 1, 2, false);
//Train MLP
my_mlp.UpdateMiniBatch(training_sample_set_with_bias, 0.5, 500, 0.25);
@@ -111,7 +113,8 @@ UNIT(LearnOR) {
size_t num_examples = training_sample_set_with_bias.size();
size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
MLP my_mlp(num_features, 1, 1, 2, false);
size_t num_outputs = training_sample_set_with_bias[0].GetOutputVectorSize();
MLP my_mlp(num_features, num_outputs, 1, 2, false);
//Train MLP
my_mlp.UpdateMiniBatch(training_sample_set_with_bias, 0.5, 500, 0.25);
@@ -149,7 +152,8 @@ UNIT(LearnNOR) {
size_t num_examples = training_sample_set_with_bias.size();
size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
MLP my_mlp(num_features, 1, 1, 2, false);
size_t num_outputs = training_sample_set_with_bias[0].GetOutputVectorSize();
MLP my_mlp(num_features, num_outputs, 1, 2, false);
//Train MLP
my_mlp.UpdateMiniBatch(training_sample_set_with_bias, 0.5, 500, 0.25);
@@ -185,7 +189,8 @@ UNIT(LearnXOR) {
size_t num_examples = training_sample_set_with_bias.size();
size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
MLP my_mlp(num_features, 1, 1, 2, false);
size_t num_outputs = training_sample_set_with_bias[0].GetOutputVectorSize();
MLP my_mlp(num_features, num_outputs, 1, 2, false);
//Train MLP
my_mlp.UpdateMiniBatch(training_sample_set_with_bias, 0.5, 50'000, 0.25);
@@ -219,7 +224,83 @@ UNIT(LearnNOT) {
size_t num_examples = training_sample_set_with_bias.size();
size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
MLP my_mlp(num_features, 1, 1, 2, false);
size_t num_outputs = training_sample_set_with_bias[0].GetOutputVectorSize();
MLP my_mlp(num_features, num_outputs, 1, 2, false);
//Train MLP
my_mlp.UpdateMiniBatch(training_sample_set_with_bias, 0.5, 500, 0.25);
for (const auto & training_sample : training_sample_set_with_bias) {
std::vector<double> output;
my_mlp.GetOutput(training_sample.input_vector(), &output);
bool predicted_output = output[0] > 0.5 ? true : false;
bool correct_output = training_sample.output_vector()[0] > 0.5 ? true : false;
ASSERT_TRUE(predicted_output == correct_output);
}
std::cout << "Trained with success." << std::endl;
std::cout << std::endl;
}
UNIT(LearnX1) {
  std::cout << "Train X1 function with mlp." << std::endl;
  // Truth table for f(x1, x2) = x1: the second input is irrelevant.
  std::vector<TrainingSample> samples = {
    { { 0, 0 },{ 0.0 } },
    { { 0, 1 },{ 0.0 } },
    { { 1, 0 },{ 1.0 } },
    { { 1, 1 },{ 1.0 } }
  };
  // Inputs do not yet carry a bias term; append one to every sample.
  const bool bias_already_in = false;
  if (!bias_already_in) {
    for (auto & sample : samples)
      sample.AddBiasValue(1);
  }
  size_t num_examples = samples.size();  // kept for parity with the sibling tests
  size_t num_features = samples[0].GetInputVectorSize();
  size_t num_outputs = samples[0].GetOutputVectorSize();
  // Network shape and hyperparameters mirror the other logic-gate tests
  // (1 hidden layer, 2 nodes; lr 0.5, 500 iterations, 0.25 — NOTE(review):
  // confirm the exact parameter meanings against the MLP declarations).
  MLP my_mlp(num_features, num_outputs, 1, 2, false);
  my_mlp.UpdateMiniBatch(samples, 0.5, 500, 0.25);
  // The trained net must reproduce the truth table (outputs thresholded at 0.5).
  for (const auto & sample : samples) {
    std::vector<double> prediction;
    my_mlp.GetOutput(sample.input_vector(), &prediction);
    const bool predicted = prediction[0] > 0.5;
    const bool expected = sample.output_vector()[0] > 0.5;
    ASSERT_TRUE(predicted == expected);
  }
  std::cout << "Trained with success." << std::endl;
  std::cout << std::endl;
}
UNIT(LearnX2) {
std::cout << "Train X2 function with mlp." << std::endl;
std::vector<TrainingSample> training_set =
{
{ { 0, 0 },{ 0.0 } },
{ { 0, 1 },{ 1.0 } },
{ { 1, 0 },{ 0.0 } },
{ { 1, 1 },{ 1.0 } }
};
bool bias_already_in = false;
std::vector<TrainingSample> training_sample_set_with_bias(training_set);
//set up bias
if (!bias_already_in) {
for (auto & training_sample_with_bias : training_sample_set_with_bias) {
training_sample_with_bias.AddBiasValue(1);
}
}
size_t num_examples = training_sample_set_with_bias.size();
size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
size_t num_outputs = training_sample_set_with_bias[0].GetOutputVectorSize();
MLP my_mlp(num_features, num_outputs, 1, 2, false);
//Train MLP
my_mlp.UpdateMiniBatch(training_sample_set_with_bias, 0.5, 500, 0.25);