Slow learning time solved by using an equal number of positive and negative training samples.

davidjacnogueira
2016-11-03 02:59:32 +00:00
parent ff7bfe1fa2
commit 9ff33f7b65
6 changed files with 124 additions and 119 deletions
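
The change below simply over-samples the minority class of each truth table until positives and negatives are equally represented (for AND, the single { 1, 1 } -> 1.0 row is repeated three times; for OR and NOR, the lone { 0, 0 } row is tripled). A minimal sketch of that balancing step is shown here; the Sample struct and BalanceByOversampling helper are illustrative stand-ins, not part of the project's TrainingSample API:

#include <algorithm>
#include <iostream>
#include <vector>

// Simplified stand-in for the project's TrainingSample: one input row and a
// single binary target in [0, 1].
struct Sample {
  std::vector<double> input;
  double target;
};

// Duplicate samples of the under-represented class until the set contains an
// equal number of positive (target > 0.5) and negative examples.
std::vector<Sample> BalanceByOversampling(std::vector<Sample> set) {
  std::vector<Sample> pos, neg;
  for (const auto &s : set)
    (s.target > 0.5 ? pos : neg).push_back(s);
  std::vector<Sample> &minority = pos.size() < neg.size() ? pos : neg;
  const size_t deficit = std::max(pos.size(), neg.size()) - minority.size();
  // Cycle through the minority samples until both classes have the same count.
  for (size_t i = 0; i < deficit && !minority.empty(); ++i)
    set.push_back(minority[i % minority.size()]);
  return set;
}

int main() {
  // AND truth table: one positive row, three negative rows.
  std::vector<Sample> and_set = {
      {{0, 0}, 0.0}, {{0, 1}, 0.0}, {{1, 0}, 0.0}, {{1, 1}, 1.0}};
  std::vector<Sample> balanced = BalanceByOversampling(and_set);
  std::cout << "balanced size: " << balanced.size() << std::endl;  // prints 6
  return 0;
}

Oversampling keeps every original row, so the network still sees each input pattern while the weight updates are no longer dominated by a single class.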


@@ -17,10 +17,12 @@ UNIT(LearnAND) {
   std::vector<TrainingSample> training_set =
   {
-    {{ 0, 0 },{0.0}},
-    {{ 0, 1 },{0.0}},
-    {{ 1, 0 },{0.0}},
-    {{ 1, 1 },{1.0}}
+    { { 0, 0 },{ 0.0 } },
+    { { 0, 1 },{ 0.0 } },
+    { { 1, 0 },{ 0.0 } },
+    { { 1, 1 },{ 1.0 } },
+    { { 1, 1 },{ 1.0 } },
+    { { 1, 1 },{ 1.0 } }
   };
   bool bias_already_in = false;
   std::vector<TrainingSample> training_sample_set_with_bias(training_set);
@@ -33,14 +35,14 @@ UNIT(LearnAND) {
   size_t num_examples = training_sample_set_with_bias.size();
   size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
-  MLP my_mlp(num_features, 1, 0, 5, true, 0.5);
+  MLP my_mlp(num_features, 1, 1, 2, false);
   //Train MLP
-  my_mlp.UpdateMiniBatch(training_sample_set_with_bias, 2, 1000, 0.245);
-  for (const auto & training_sample : training_sample_set_with_bias) {
+  my_mlp.UpdateMiniBatch(training_sample_set_with_bias, 0.5, 500, 0.25);
+  for (const auto & training_sample : training_sample_set_with_bias) {
     std::vector<double> output;
     my_mlp.GetOutput(training_sample.input_vector(), &output);
-    bool predicted_output = output[0]> 0.5 ? true : false;
+    bool predicted_output = output[0] > 0.5 ? true : false;
     bool correct_output = training_sample.output_vector()[0] > 0.5 ? true : false;
     ASSERT_TRUE(predicted_output == correct_output);
   }
@@ -53,10 +55,12 @@ UNIT(LearnNAND) {
   std::vector<TrainingSample> training_set =
   {
-    {{ 0, 0 },{1.0}},
-    {{ 0, 1 },{1.0}},
-    {{ 1, 0 },{1.0}},
-    {{ 1, 1 },{0.0}}
+    { { 0, 0 },{ 1.0 } },
+    { { 0, 1 },{ 1.0 } },
+    { { 1, 0 },{ 1.0 } },
+    { { 1, 1 },{ 0.0 } },
+    { { 1, 1 },{ 0.0 } },
+    { { 1, 1 },{ 0.0 } }
   };
   bool bias_already_in = false;
   std::vector<TrainingSample> training_sample_set_with_bias(training_set);
@@ -69,14 +73,14 @@ UNIT(LearnNAND) {
   size_t num_examples = training_sample_set_with_bias.size();
   size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
-  MLP my_mlp(num_features, 1, 0, 5, true, 0.5);
+  MLP my_mlp(num_features, 1, 1, 2, false);
   //Train MLP
-  my_mlp.UpdateMiniBatch(training_sample_set_with_bias, 2, 1000, 0.245);
+  my_mlp.UpdateMiniBatch(training_sample_set_with_bias, 0.5, 500, 0.25);
   for (const auto & training_sample : training_sample_set_with_bias) {
     std::vector<double> output;
     my_mlp.GetOutput(training_sample.input_vector(), &output);
-    bool predicted_output = output[0]> 0.5 ? true : false;
+    bool predicted_output = output[0] > 0.5 ? true : false;
     bool correct_output = training_sample.output_vector()[0] > 0.5 ? true : false;
     ASSERT_TRUE(predicted_output == correct_output);
   }
@@ -89,10 +93,12 @@ UNIT(LearnOR) {
   std::vector<TrainingSample> training_set =
   {
-    {{ 0, 0 },{0.0}},
-    {{ 0, 1 },{1.0}},
-    {{ 1, 0 },{1.0}},
-    {{ 1, 1 },{1.0}}
+    { { 0, 0 },{ 0.0 } },
+    { { 0, 0 },{ 0.0 } },
+    { { 0, 0 },{ 0.0 } },
+    { { 0, 1 },{ 1.0 } },
+    { { 1, 0 },{ 1.0 } },
+    { { 1, 1 },{ 1.0 } }
   };
   bool bias_already_in = false;
   std::vector<TrainingSample> training_sample_set_with_bias(training_set);
@@ -105,14 +111,14 @@ UNIT(LearnOR) {
   size_t num_examples = training_sample_set_with_bias.size();
   size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
-  MLP my_mlp(num_features, 1, 0, 5, true, 0.5);
+  MLP my_mlp(num_features, 1, 1, 2, false);
   //Train MLP
-  my_mlp.UpdateMiniBatch(training_sample_set_with_bias, 2, 1000, 0.245);
+  my_mlp.UpdateMiniBatch(training_sample_set_with_bias, 0.5, 500, 0.25);
   for (const auto & training_sample : training_sample_set_with_bias) {
     std::vector<double> output;
     my_mlp.GetOutput(training_sample.input_vector(), &output);
-    bool predicted_output = output[0]> 0.5 ? true : false;
+    bool predicted_output = output[0] > 0.5 ? true : false;
     bool correct_output = training_sample.output_vector()[0] > 0.5 ? true : false;
     ASSERT_TRUE(predicted_output == correct_output);
   }
@@ -125,10 +131,12 @@ UNIT(LearnNOR) {
   std::vector<TrainingSample> training_set =
   {
-    {{ 0, 0 },{1.0}},
-    {{ 0, 1 },{0.0}},
-    {{ 1, 0 },{0.0}},
-    {{ 1, 1 },{0.0}}
+    { { 0, 0 },{ 1.0 } },
+    { { 0, 0 },{ 1.0 } },
+    { { 0, 0 },{ 1.0 } },
+    { { 0, 1 },{ 0.0 } },
+    { { 1, 0 },{ 0.0 } },
+    { { 1, 1 },{ 0.0 } }
   };
   bool bias_already_in = false;
   std::vector<TrainingSample> training_sample_set_with_bias(training_set);
@@ -141,14 +149,14 @@ UNIT(LearnNOR) {
   size_t num_examples = training_sample_set_with_bias.size();
   size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
-  MLP my_mlp(num_features, 1, 0, 5, true, 0.5);
+  MLP my_mlp(num_features, 1, 1, 2, false);
   //Train MLP
-  my_mlp.UpdateMiniBatch(training_sample_set_with_bias, 2, 1000, 0.245);
+  my_mlp.UpdateMiniBatch(training_sample_set_with_bias, 0.5, 500, 0.25);
   for (const auto & training_sample : training_sample_set_with_bias) {
     std::vector<double> output;
     my_mlp.GetOutput(training_sample.input_vector(), &output);
-    bool predicted_output = output[0]> 0.5 ? true : false;
+    bool predicted_output = output[0] > 0.5 ? true : false;
     bool correct_output = training_sample.output_vector()[0] > 0.5 ? true : false;
     ASSERT_TRUE(predicted_output == correct_output);
   }
@@ -156,49 +164,15 @@ UNIT(LearnNOR) {
   std::cout << std::endl;
 }
-//UNIT(LearnXOR) {
-//  std::cout << "Train XOR function with mlp." << std::endl;
-//
-//  std::vector<TrainingSample> training_set =
-//  {
-//    { { 0, 0 },{ 0.0 } },
-//    { { 0, 1 },{ 1.0 } },
-//    { { 1, 0 },{ 1.0 } },
-//    { { 1, 1 },{ 0.0 } }
-//  };
-//  bool bias_already_in = false;
-//  std::vector<TrainingSample> training_sample_set_with_bias(training_set);
-//  //set up bias
-//  if (!bias_already_in) {
-//    for (auto & training_sample_with_bias : training_sample_set_with_bias) {
-//      training_sample_with_bias.AddBiasValue(1);
-//    }
-//  }
-//
-//  size_t num_examples = training_sample_set_with_bias.size();
-//  size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
-//  MLP my_mlp(num_features, 1, 0, 5, true, 0.5);
-//  //Train MLP
-//  my_mlp.UpdateMiniBatch(training_sample_set_with_bias, 2, 1000, 0.245);
-//
-//  for (const auto & training_sample : training_sample_set_with_bias) {
-//    std::vector<double> output;
-//    my_mlp.GetOutput(training_sample.input_vector(), &output);
-//    bool predicted_output = output[0]> 0.5 ? true : false;
-//    bool correct_output = training_sample.output_vector()[0] > 0.5 ? true : false;
-//    ASSERT_TRUE(predicted_output == correct_output);
-//  }
-//  std::cout << "Trained with success." << std::endl;
-//  std::cout << std::endl;
-//}
-UNIT(LearnNOT) {
-  std::cout << "Train NOT function with mlp." << std::endl;
+UNIT(LearnXOR) {
+  std::cout << "Train XOR function with mlp." << std::endl;
   std::vector<TrainingSample> training_set =
   {
-    {{ 0},{1.0 }},
-    {{ 1},{0.0 }}
+    { { 0, 0 },{ 0.0 } },
+    { { 0, 1 },{ 1.0 } },
+    { { 1, 0 },{ 1.0 } },
+    { { 1, 1 },{ 0.0 } }
   };
   bool bias_already_in = false;
   std::vector<TrainingSample> training_sample_set_with_bias(training_set);
@@ -211,14 +185,48 @@ UNIT(LearnNOT) {
   size_t num_examples = training_sample_set_with_bias.size();
   size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
-  MLP my_mlp(num_features, 1, 0, 5, true, 0.5);
+  MLP my_mlp(num_features, 1, 1, 2, false);
   //Train MLP
-  my_mlp.UpdateMiniBatch(training_sample_set_with_bias, 2, 1000, 0.245);
+  my_mlp.UpdateMiniBatch(training_sample_set_with_bias, 0.5, 50'000, 0.25);
   for (const auto & training_sample : training_sample_set_with_bias) {
     std::vector<double> output;
     my_mlp.GetOutput(training_sample.input_vector(), &output);
-    bool predicted_output = output[0]> 0.5 ? true : false;
+    bool predicted_output = output[0] > 0.5 ? true : false;
     bool correct_output = training_sample.output_vector()[0] > 0.5 ? true : false;
     ASSERT_TRUE(predicted_output == correct_output);
   }
   std::cout << "Trained with success." << std::endl;
   std::cout << std::endl;
 }
+UNIT(LearnNOT) {
+  std::cout << "Train NOT function with mlp." << std::endl;
+  std::vector<TrainingSample> training_set =
+  {
+    { { 0 },{ 1.0 } },
+    { { 1 },{ 0.0 } }
+  };
+  bool bias_already_in = false;
+  std::vector<TrainingSample> training_sample_set_with_bias(training_set);
+  //set up bias
+  if (!bias_already_in) {
+    for (auto & training_sample_with_bias : training_sample_set_with_bias) {
+      training_sample_with_bias.AddBiasValue(1);
+    }
+  }
+  size_t num_examples = training_sample_set_with_bias.size();
+  size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
+  MLP my_mlp(num_features, 1, 1, 2, false);
+  //Train MLP
+  my_mlp.UpdateMiniBatch(training_sample_set_with_bias, 0.5, 500, 0.25);
+  for (const auto & training_sample : training_sample_set_with_bias) {
+    std::vector<double> output;
+    my_mlp.GetOutput(training_sample.input_vector(), &output);
+    bool predicted_output = output[0] > 0.5 ? true : false;
+    bool correct_output = training_sample.output_vector()[0] > 0.5 ? true : false;
+    ASSERT_TRUE(predicted_output == correct_output);
+  }