From c7b67885a47054d09d83afe2bbb70a317f4086ca Mon Sep 17 00:00:00 2001 From: davidjacnogueira Date: Thu, 3 Nov 2016 21:57:35 +0000 Subject: [PATCH] Small changes in test file. Add more tests. --- README.md | 2 +- src/MLPTest.cpp | 93 +++++++++++++++++++++++++++++++++++++++++++++---- 2 files changed, 88 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index feaedab..fc2852e 100644 --- a/README.md +++ b/README.md @@ -3,4 +3,4 @@ Simple multilayer perceptron c++ implementation. -David Nogueira, 2016.03.26 +David Nogueira, 2016.11.01 diff --git a/src/MLPTest.cpp b/src/MLPTest.cpp index 332b83c..1c3af38 100644 --- a/src/MLPTest.cpp +++ b/src/MLPTest.cpp @@ -35,7 +35,8 @@ UNIT(LearnAND) { size_t num_examples = training_sample_set_with_bias.size(); size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize(); - MLP my_mlp(num_features, 1, 1, 2, false); + size_t num_outputs = training_sample_set_with_bias[0].GetOutputVectorSize(); + MLP my_mlp(num_features, num_outputs, 1, 2, false); //Train MLP my_mlp.UpdateMiniBatch(training_sample_set_with_bias, 0.5, 500, 0.25); @@ -73,7 +74,8 @@ UNIT(LearnNAND) { size_t num_examples = training_sample_set_with_bias.size(); size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize(); - MLP my_mlp(num_features, 1, 1, 2, false); + size_t num_outputs = training_sample_set_with_bias[0].GetOutputVectorSize(); + MLP my_mlp(num_features, num_outputs, 1, 2, false); //Train MLP my_mlp.UpdateMiniBatch(training_sample_set_with_bias, 0.5, 500, 0.25); @@ -111,7 +113,8 @@ UNIT(LearnOR) { size_t num_examples = training_sample_set_with_bias.size(); size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize(); - MLP my_mlp(num_features, 1, 1, 2, false); + size_t num_outputs = training_sample_set_with_bias[0].GetOutputVectorSize(); + MLP my_mlp(num_features, num_outputs, 1, 2, false); //Train MLP my_mlp.UpdateMiniBatch(training_sample_set_with_bias, 0.5, 500, 0.25); @@ -149,7 +152,8 @@ 
UNIT(LearnNOR) { size_t num_examples = training_sample_set_with_bias.size(); size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize(); - MLP my_mlp(num_features, 1, 1, 2, false); + size_t num_outputs = training_sample_set_with_bias[0].GetOutputVectorSize(); + MLP my_mlp(num_features, num_outputs, 1, 2, false); //Train MLP my_mlp.UpdateMiniBatch(training_sample_set_with_bias, 0.5, 500, 0.25); @@ -185,7 +189,8 @@ UNIT(LearnXOR) { size_t num_examples = training_sample_set_with_bias.size(); size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize(); - MLP my_mlp(num_features, 1, 1, 2, false); + size_t num_outputs = training_sample_set_with_bias[0].GetOutputVectorSize(); + MLP my_mlp(num_features, num_outputs, 1, 2, false); //Train MLP my_mlp.UpdateMiniBatch(training_sample_set_with_bias, 0.5, 50'000, 0.25); @@ -219,7 +224,83 @@ UNIT(LearnNOT) { size_t num_examples = training_sample_set_with_bias.size(); size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize(); - MLP my_mlp(num_features, 1, 1, 2, false); + size_t num_outputs = training_sample_set_with_bias[0].GetOutputVectorSize(); + MLP my_mlp(num_features, num_outputs, 1, 2, false); + //Train MLP + my_mlp.UpdateMiniBatch(training_sample_set_with_bias, 0.5, 500, 0.25); + + for (const auto & training_sample : training_sample_set_with_bias) { + std::vector<double> output; + my_mlp.GetOutput(training_sample.input_vector(), &output); + bool predicted_output = output[0] > 0.5 ? true : false; + bool correct_output = training_sample.output_vector()[0] > 0.5 ? true : false; + ASSERT_TRUE(predicted_output == correct_output); + } + std::cout << "Trained with success." << std::endl; + std::cout << std::endl; +} + +UNIT(LearnX1) { + std::cout << "Train X1 function with mlp." 
<< std::endl; + + std::vector<TrainingSample> training_set = + { + { { 0, 0 },{ 0.0 } }, + { { 0, 1 },{ 0.0 } }, + { { 1, 0 },{ 1.0 } }, + { { 1, 1 },{ 1.0 } } + }; + bool bias_already_in = false; + std::vector<TrainingSample> training_sample_set_with_bias(training_set); + //set up bias + if (!bias_already_in) { + for (auto & training_sample_with_bias : training_sample_set_with_bias) { + training_sample_with_bias.AddBiasValue(1); + } + } + + size_t num_examples = training_sample_set_with_bias.size(); + size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize(); + size_t num_outputs = training_sample_set_with_bias[0].GetOutputVectorSize(); + MLP my_mlp(num_features, num_outputs, 1, 2, false); + //Train MLP + my_mlp.UpdateMiniBatch(training_sample_set_with_bias, 0.5, 500, 0.25); + + for (const auto & training_sample : training_sample_set_with_bias) { + std::vector<double> output; + my_mlp.GetOutput(training_sample.input_vector(), &output); + bool predicted_output = output[0] > 0.5 ? true : false; + bool correct_output = training_sample.output_vector()[0] > 0.5 ? true : false; + ASSERT_TRUE(predicted_output == correct_output); + } + std::cout << "Trained with success." << std::endl; + std::cout << std::endl; +} + + +UNIT(LearnX2) { + std::cout << "Train X2 function with mlp." 
<< std::endl; + + std::vector<TrainingSample> training_set = + { + { { 0, 0 },{ 0.0 } }, + { { 0, 1 },{ 1.0 } }, + { { 1, 0 },{ 0.0 } }, + { { 1, 1 },{ 1.0 } } + }; + bool bias_already_in = false; + std::vector<TrainingSample> training_sample_set_with_bias(training_set); + //set up bias + if (!bias_already_in) { + for (auto & training_sample_with_bias : training_sample_set_with_bias) { + training_sample_with_bias.AddBiasValue(1); + } + } + + size_t num_examples = training_sample_set_with_bias.size(); + size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize(); + size_t num_outputs = training_sample_set_with_bias[0].GetOutputVectorSize(); + MLP my_mlp(num_features, num_outputs, 1, 2, false); //Train MLP my_mlp.UpdateMiniBatch(training_sample_set_with_bias, 0.5, 500, 0.25);