Slow learning time solved by using an equal number of positive and negative training samples.

davidjacnogueira
2016-11-03 02:59:32 +00:00
parent ff7bfe1fa2
commit 9ff33f7b65
6 changed files with 124 additions and 119 deletions
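Context for the change: in an AND-like truth table only one of the four rows is positive, so gradient updates are dominated by the negative samples and the lone positive case learns slowly. The test diffs below duplicate the minority-class rows so both classes contribute equally per pass. A minimal sketch of that balancing step, assuming the repo's TrainingSample type with a single output label in {0.0, 1.0} (the Balance helper itself is hypothetical, not part of this commit):

#include <vector>

// Duplicate minority-class samples until positives and negatives match.
std::vector<TrainingSample> Balance(const std::vector<TrainingSample> &set) {
  std::vector<TrainingSample> pos, neg;
  for (const auto &s : set)
    (s.output_vector()[0] > 0.5 ? pos : neg).push_back(s);
  std::vector<TrainingSample> balanced(set);
  const auto &minority = pos.size() < neg.size() ? pos : neg;
  const size_t deficit = pos.size() < neg.size() ? neg.size() - pos.size()
                                                 : pos.size() - neg.size();
  for (size_t k = 0; k < deficit && !minority.empty(); k++)
    balanced.push_back(minority[k % minority.size()]);  // cyclic re-append
  return balanced;
}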

View File

@@ -35,22 +35,18 @@ public:
                            constant_weight_init)));
   };
   ~Layer() {
     m_num_nodes = 0;
     m_num_inputs_per_node = 0;
     m_nodes.clear();
   };
-  //std::vector<Node> & GetNodes() {
-  //  return m_nodes;
-  //}
   const std::vector<Node> & GetNodes() const {
     return m_nodes;
   }
-  void GetOutputAfterSigmoid(const std::vector<double> &input, std::vector<double> * output) const {
+  void GetOutputAfterSigmoid(const std::vector<double> &input,
+                             std::vector<double> * output) const {
     assert(input.size() == m_num_inputs_per_node);
     output->resize(m_num_nodes);
@@ -71,7 +67,8 @@ public:
     for (size_t i = 0; i < m_nodes.size(); i++) {
       double net_sum;
-      m_nodes[i].GetInputInnerProdWithWeights(input_layer_activation, &net_sum);
+      m_nodes[i].GetInputInnerProdWithWeights(input_layer_activation,
+                                              &net_sum);
       //dE/dwij = dE/doj . doj/dnetj . dnetj/dwij
       double dE_doj = 0.0;
@@ -81,7 +78,6 @@ public:
       dE_doj = deriv_error[i];
       doj_dnetj = utils::deriv_sigmoid(net_sum);
       for (int j = 0; j < m_num_inputs_per_node; j++) {
        (*deltas)[j] += dE_doj * doj_dnetj * m_nodes[i].GetWeights()[j];
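For reference, the chain-rule comment carried through the hunk above expands to the standard backpropagation factors for a sigmoid node, writing sigma for the sigmoid, o_j = sigma(net_j), and x_i for the i-th input:

  dE/dw_ij = (dE/do_j) . (do_j/dnet_j) . (dnet_j/dw_ij)
  do_j/dnet_j = sigma(net_j) . (1 - sigma(net_j))
  dnet_j/dw_ij = x_i

utils::deriv_sigmoid(net_sum) supplies the middle factor, and the loop accumulates dE_doj * doj_dnetj weighted by each node's weights to propagate the error one layer back.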

View File

@@ -20,8 +20,7 @@ bool MLP::ImportNNWeights(const std::vector<double> & weights) {
 void MLP::GetOutput(const std::vector<double> &input,
                     std::vector<double> * output,
-                    std::vector<std::vector<double>> * all_layers_activations,
-                    bool apply_softmax) const {
+                    std::vector<std::vector<double>> * all_layers_activations) const {
   assert(input.size() == m_num_inputs);
   int temp_size;
   if (m_num_hidden_layers == 0)
@@ -50,7 +49,7 @@ void MLP::GetOutput(const std::vector<double> &input,
       m_layers[i].GetOutputAfterSigmoid(temp_in, &temp_out);
     }
-  if (apply_softmax && temp_out.size() > 1)
+  if (temp_out.size() > 1)
     utils::Softmax(&temp_out);
   *output = temp_out;
@@ -105,11 +104,11 @@ void MLP::UpdateMiniBatch(const std::vector<TrainingSample> &training_sample_set
       }
     }
   }
-  for (int i = 0; i < max_iterations; i++) {
-    std::cout << "******************************" << std::endl;
-    std::cout << "******** ITER " << i << std::endl;
-    std::cout << "******************************" << std::endl;
+  size_t i = 0;
+  for (i = 0; i < max_iterations; i++) {
+    //std::cout << "******************************" << std::endl;
+    //std::cout << "******** ITER " << i << std::endl;
+    //std::cout << "******************************" << std::endl;
     double current_iteration_cost_function = 0.0;
     for (auto & training_sample_with_bias : training_sample_set_with_bias) {
       std::vector<double> predicted_output;
@@ -123,16 +122,16 @@ void MLP::UpdateMiniBatch(const std::vector<TrainingSample> &training_sample_set
       assert(correct_output.size() == predicted_output.size());
       std::vector<double> deriv_error_output(predicted_output.size());
-      std::cout << training_sample_with_bias << "\t\t";
-      {
-        std::cout << "Predicted output: [";
-        for (int i = 0; i < predicted_output.size(); i++) {
-          if (i != 0)
-            std::cout << ", ";
-          std::cout << predicted_output[i];
-        }
-        std::cout << "]" << std::endl;
-      }
+      //std::cout << training_sample_with_bias << "\t\t";
+      //{
+      //  std::cout << "Predicted output: [";
+      //  for (int i = 0; i < predicted_output.size(); i++) {
+      //    if (i != 0)
+      //      std::cout << ", ";
+      //    std::cout << predicted_output[i];
+      //  }
+      //  std::cout << "]" << std::endl;
+      //}
       for (int j = 0; j < predicted_output.size(); j++) {
         current_iteration_cost_function +=
@@ -146,7 +145,8 @@ void MLP::UpdateMiniBatch(const std::vector<TrainingSample> &training_sample_set
                       learning_rate);
     }
-    std::cout << "Iteration cost function f(error): "
+    if ((i % (max_iterations / 100)) == 0)
+      std::cout << "Iteration " << i << " cost function f(error): "
         << current_iteration_cost_function << std::endl;
     if (current_iteration_cost_function < min_error_cost)
       break;
@@ -173,6 +173,7 @@ void MLP::UpdateMiniBatch(const std::vector<TrainingSample> &training_sample_set
   std::cout << "******************************" << std::endl;
   std::cout << "******* TRAINING ENDED *******" << std::endl;
+  std::cout << "******* " << i << " iters *******" << std::endl;
   std::cout << "******************************" << std::endl;
   {
     int layer_i = -1;

View File

@@ -48,8 +48,7 @@ public:
   void GetOutput(const std::vector<double> &input,
                  std::vector<double> * output,
-                 std::vector<std::vector<double>> * all_layers_activations = nullptr,
-                 bool apply_softmax = false) const;
+                 std::vector<std::vector<double>> * all_layers_activations = nullptr) const;
   void GetOutputClass(const std::vector<double> &output, size_t * class_id) const;
   void UpdateMiniBatch(const std::vector<TrainingSample> &training_sample_set_with_bias,

View File

@@ -17,10 +17,12 @@ UNIT(LearnAND) {
   std::vector<TrainingSample> training_set =
   {
-    {{ 0, 0 },{0.0}},
-    {{ 0, 1 },{0.0}},
-    {{ 1, 0 },{0.0}},
-    {{ 1, 1 },{1.0}}
+    { { 0, 0 },{ 0.0 } },
+    { { 0, 1 },{ 0.0 } },
+    { { 1, 0 },{ 0.0 } },
+    { { 1, 1 },{ 1.0 } },
+    { { 1, 1 },{ 1.0 } },
+    { { 1, 1 },{ 1.0 } }
   };
   bool bias_already_in = false;
   std::vector<TrainingSample> training_sample_set_with_bias(training_set);
@@ -33,14 +35,14 @@ UNIT(LearnAND) {
   size_t num_examples = training_sample_set_with_bias.size();
   size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
-  MLP my_mlp(num_features, 1, 0, 5, true, 0.5);
+  MLP my_mlp(num_features, 1, 1, 2, false);
   //Train MLP
-  my_mlp.UpdateMiniBatch(training_sample_set_with_bias, 2, 1000, 0.245);
+  my_mlp.UpdateMiniBatch(training_sample_set_with_bias, 0.5, 500, 0.25);
   for (const auto & training_sample : training_sample_set_with_bias) {
     std::vector<double> output;
     my_mlp.GetOutput(training_sample.input_vector(), &output);
-    bool predicted_output = output[0]> 0.5 ? true : false;
+    bool predicted_output = output[0] > 0.5 ? true : false;
     bool correct_output = training_sample.output_vector()[0] > 0.5 ? true : false;
     ASSERT_TRUE(predicted_output == correct_output);
   }
@@ -53,10 +55,12 @@ UNIT(LearnNAND) {
   std::vector<TrainingSample> training_set =
   {
-    {{ 0, 0 },{1.0}},
-    {{ 0, 1 },{1.0}},
-    {{ 1, 0 },{1.0}},
-    {{ 1, 1 },{0.0}}
+    { { 0, 0 },{ 1.0 } },
+    { { 0, 1 },{ 1.0 } },
+    { { 1, 0 },{ 1.0 } },
+    { { 1, 1 },{ 0.0 } },
+    { { 1, 1 },{ 0.0 } },
+    { { 1, 1 },{ 0.0 } }
   };
   bool bias_already_in = false;
   std::vector<TrainingSample> training_sample_set_with_bias(training_set);
@@ -69,14 +73,14 @@ UNIT(LearnNAND) {
   size_t num_examples = training_sample_set_with_bias.size();
   size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
-  MLP my_mlp(num_features, 1, 0, 5, true, 0.5);
+  MLP my_mlp(num_features, 1, 1, 2, false);
   //Train MLP
-  my_mlp.UpdateMiniBatch(training_sample_set_with_bias, 2, 1000, 0.245);
+  my_mlp.UpdateMiniBatch(training_sample_set_with_bias, 0.5, 500, 0.25);
   for (const auto & training_sample : training_sample_set_with_bias) {
     std::vector<double> output;
     my_mlp.GetOutput(training_sample.input_vector(), &output);
-    bool predicted_output = output[0]> 0.5 ? true : false;
+    bool predicted_output = output[0] > 0.5 ? true : false;
     bool correct_output = training_sample.output_vector()[0] > 0.5 ? true : false;
     ASSERT_TRUE(predicted_output == correct_output);
   }
@@ -89,10 +93,12 @@ UNIT(LearnOR) {
   std::vector<TrainingSample> training_set =
   {
-    {{ 0, 0 },{0.0}},
-    {{ 0, 1 },{1.0}},
-    {{ 1, 0 },{1.0}},
-    {{ 1, 1 },{1.0}}
+    { { 0, 0 },{ 0.0 } },
+    { { 0, 0 },{ 0.0 } },
+    { { 0, 0 },{ 0.0 } },
+    { { 0, 1 },{ 1.0 } },
+    { { 1, 0 },{ 1.0 } },
+    { { 1, 1 },{ 1.0 } }
   };
   bool bias_already_in = false;
   std::vector<TrainingSample> training_sample_set_with_bias(training_set);
@@ -105,14 +111,14 @@ UNIT(LearnOR) {
   size_t num_examples = training_sample_set_with_bias.size();
   size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
-  MLP my_mlp(num_features, 1, 0, 5, true, 0.5);
+  MLP my_mlp(num_features, 1, 1, 2, false);
   //Train MLP
-  my_mlp.UpdateMiniBatch(training_sample_set_with_bias, 2, 1000, 0.245);
+  my_mlp.UpdateMiniBatch(training_sample_set_with_bias, 0.5, 500, 0.25);
   for (const auto & training_sample : training_sample_set_with_bias) {
     std::vector<double> output;
     my_mlp.GetOutput(training_sample.input_vector(), &output);
-    bool predicted_output = output[0]> 0.5 ? true : false;
+    bool predicted_output = output[0] > 0.5 ? true : false;
     bool correct_output = training_sample.output_vector()[0] > 0.5 ? true : false;
     ASSERT_TRUE(predicted_output == correct_output);
   }
@@ -125,10 +131,12 @@ UNIT(LearnNOR) {
   std::vector<TrainingSample> training_set =
   {
-    {{ 0, 0 },{1.0}},
-    {{ 0, 1 },{0.0}},
-    {{ 1, 0 },{0.0}},
-    {{ 1, 1 },{0.0}}
+    { { 0, 0 },{ 1.0 } },
+    { { 0, 0 },{ 1.0 } },
+    { { 0, 0 },{ 1.0 } },
+    { { 0, 1 },{ 0.0 } },
+    { { 1, 0 },{ 0.0 } },
+    { { 1, 1 },{ 0.0 } }
   };
   bool bias_already_in = false;
   std::vector<TrainingSample> training_sample_set_with_bias(training_set);
@@ -141,14 +149,14 @@ UNIT(LearnNOR) {
   size_t num_examples = training_sample_set_with_bias.size();
   size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
-  MLP my_mlp(num_features, 1, 0, 5, true, 0.5);
+  MLP my_mlp(num_features, 1, 1, 2, false);
   //Train MLP
-  my_mlp.UpdateMiniBatch(training_sample_set_with_bias, 2, 1000, 0.245);
+  my_mlp.UpdateMiniBatch(training_sample_set_with_bias, 0.5, 500, 0.25);
   for (const auto & training_sample : training_sample_set_with_bias) {
     std::vector<double> output;
     my_mlp.GetOutput(training_sample.input_vector(), &output);
-    bool predicted_output = output[0]> 0.5 ? true : false;
+    bool predicted_output = output[0] > 0.5 ? true : false;
     bool correct_output = training_sample.output_vector()[0] > 0.5 ? true : false;
     ASSERT_TRUE(predicted_output == correct_output);
   }
@@ -156,49 +164,15 @@ UNIT(LearnNOR) {
   std::cout << std::endl;
 }
-//UNIT(LearnXOR) {
-//  std::cout << "Train XOR function with mlp." << std::endl;
-//
-//  std::vector<TrainingSample> training_set =
-//  {
-//    { { 0, 0 },{ 0.0 } },
-//    { { 0, 1 },{ 1.0 } },
-//    { { 1, 0 },{ 1.0 } },
-//    { { 1, 1 },{ 0.0 } }
-//  };
-//  bool bias_already_in = false;
-//  std::vector<TrainingSample> training_sample_set_with_bias(training_set);
-//  //set up bias
-//  if (!bias_already_in) {
-//    for (auto & training_sample_with_bias : training_sample_set_with_bias) {
-//      training_sample_with_bias.AddBiasValue(1);
-//    }
-//  }
-//
-//  size_t num_examples = training_sample_set_with_bias.size();
-//  size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
-//  MLP my_mlp(num_features, 1, 0, 5, true, 0.5);
-//  //Train MLP
-//  my_mlp.UpdateMiniBatch(training_sample_set_with_bias, 2, 1000, 0.245);
-//
-//  for (const auto & training_sample : training_sample_set_with_bias) {
-//    std::vector<double> output;
-//    my_mlp.GetOutput(training_sample.input_vector(), &output);
-//    bool predicted_output = output[0]> 0.5 ? true : false;
-//    bool correct_output = training_sample.output_vector()[0] > 0.5 ? true : false;
-//    ASSERT_TRUE(predicted_output == correct_output);
-//  }
-//  std::cout << "Trained with success." << std::endl;
-//  std::cout << std::endl;
-//}
-UNIT(LearnNOT) {
-  std::cout << "Train NOT function with mlp." << std::endl;
+UNIT(LearnXOR) {
+  std::cout << "Train XOR function with mlp." << std::endl;
   std::vector<TrainingSample> training_set =
   {
-    {{ 0},{1.0 }},
-    {{ 1},{0.0 }}
+    { { 0, 0 },{ 0.0 } },
+    { { 0, 1 },{ 1.0 } },
+    { { 1, 0 },{ 1.0 } },
+    { { 1, 1 },{ 0.0 } }
   };
   bool bias_already_in = false;
   std::vector<TrainingSample> training_sample_set_with_bias(training_set);
@@ -211,14 +185,48 @@ UNIT(LearnNOT) {
   size_t num_examples = training_sample_set_with_bias.size();
   size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
-  MLP my_mlp(num_features, 1, 0, 5, true, 0.5);
+  MLP my_mlp(num_features, 1, 1, 2, false);
   //Train MLP
-  my_mlp.UpdateMiniBatch(training_sample_set_with_bias, 2, 1000, 0.245);
+  my_mlp.UpdateMiniBatch(training_sample_set_with_bias, 0.5, 50'000, 0.25);
   for (const auto & training_sample : training_sample_set_with_bias) {
     std::vector<double> output;
     my_mlp.GetOutput(training_sample.input_vector(), &output);
-    bool predicted_output = output[0]> 0.5 ? true : false;
+    bool predicted_output = output[0] > 0.5 ? true : false;
+    bool correct_output = training_sample.output_vector()[0] > 0.5 ? true : false;
+    ASSERT_TRUE(predicted_output == correct_output);
+  }
+  std::cout << "Trained with success." << std::endl;
+  std::cout << std::endl;
+}
+UNIT(LearnNOT) {
+  std::cout << "Train NOT function with mlp." << std::endl;
+  std::vector<TrainingSample> training_set =
+  {
+    { { 0 },{ 1.0 } },
+    { { 1 },{ 0.0 } }
+  };
+  bool bias_already_in = false;
+  std::vector<TrainingSample> training_sample_set_with_bias(training_set);
+  //set up bias
+  if (!bias_already_in) {
+    for (auto & training_sample_with_bias : training_sample_set_with_bias) {
+      training_sample_with_bias.AddBiasValue(1);
+    }
+  }
+  size_t num_examples = training_sample_set_with_bias.size();
+  size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
+  MLP my_mlp(num_features, 1, 1, 2, false);
+  //Train MLP
+  my_mlp.UpdateMiniBatch(training_sample_set_with_bias, 0.5, 500, 0.25);
+  for (const auto & training_sample : training_sample_set_with_bias) {
+    std::vector<double> output;
+    my_mlp.GetOutput(training_sample.input_vector(), &output);
+    bool predicted_output = output[0] > 0.5 ? true : false;
     bool correct_output = training_sample.output_vector()[0] > 0.5 ? true : false;
     ASSERT_TRUE(predicted_output == correct_output);
   }

View File

@@ -52,7 +52,7 @@ public:
     } else {
       m_weights.resize(m_num_inputs);
       std::generate_n(m_weights.begin(),
                       m_num_inputs,
                       utils::gen_rand());
     }
   }
@@ -102,10 +102,11 @@ public:
   }
   void GetBooleanOutput(const std::vector<double> &input,
-                        bool * bool_output) const {
+                        bool * bool_output,
+                        double threshold = 0.5) const {
     double value;
     GetOutputAfterSigmoid(input, &value);
-    *bool_output = (value > 0.5) ? true : false;
+    *bool_output = (value > threshold) ? true : false;
   };
   void UpdateWeights(const std::vector<double> &x,
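The added threshold parameter defaults to 0.5, so existing callers compile unchanged; the test diff below passes it explicitly. A brief usage sketch, assuming a trained Node named my_node and a TrainingSample named sample as in those tests:

bool is_positive;
// Default decision boundary (0.5), same behavior as before this commit.
my_node.GetBooleanOutput(sample.input_vector(), &is_positive);
// Stricter boundary: only classify as positive when the sigmoid output exceeds 0.7.
my_node.GetBooleanOutput(sample.input_vector(), &is_positive, 0.7);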

View File

@@ -36,7 +36,7 @@ void Train(Node & node,
   int error_count = 0;
   for (auto & training_sample_with_bias : training_sample_set_with_bias) {
     bool prediction;
-    node.GetBooleanOutput(training_sample_with_bias.input_vector(), &prediction);
+    node.GetBooleanOutput(training_sample_with_bias.input_vector(), &prediction, 0.5);
     bool correct_output = training_sample_with_bias.output_vector()[0] > 0.5 ? true : false;
     if (prediction != correct_output) {
       error_count++;
@@ -82,8 +82,8 @@ UNIT(LearnAND) {
   for (const auto & training_sample : training_sample_set_with_bias) {
     bool class_id;
-    my_node.GetBooleanOutput(training_sample.input_vector(), &class_id);
-    bool correct_output = training_sample.output_vector()[0] > 0 ? true : false;
+    my_node.GetBooleanOutput(training_sample.input_vector(), &class_id, 0.5);
+    bool correct_output = training_sample.output_vector()[0] > 0.5 ? true : false;
     ASSERT_TRUE(class_id == correct_output);
   }
   std::cout << "Trained with success." << std::endl;
@@ -115,8 +115,8 @@ UNIT(LearnNAND) {
   for (const auto & training_sample : training_sample_set_with_bias) {
     bool class_id;
-    my_node.GetBooleanOutput(training_sample.input_vector(), &class_id);
-    bool correct_output = training_sample.output_vector()[0] > 0 ? true : false;
+    my_node.GetBooleanOutput(training_sample.input_vector(), &class_id, 0.5);
+    bool correct_output = training_sample.output_vector()[0] > 0.5 ? true : false;
     ASSERT_TRUE(class_id == correct_output);
   }
   std::cout << "Trained with success." << std::endl;
@@ -148,8 +148,8 @@ UNIT(LearnOR) {
   for (const auto & training_sample : training_sample_set_with_bias) {
     bool class_id;
-    my_node.GetBooleanOutput(training_sample.input_vector(), &class_id);
-    bool correct_output = training_sample.output_vector()[0] > 0 ? true : false;
+    my_node.GetBooleanOutput(training_sample.input_vector(), &class_id, 0.5);
+    bool correct_output = training_sample.output_vector()[0] > 0.5 ? true : false;
     ASSERT_TRUE(class_id == correct_output);
   }
   std::cout << "Trained with success." << std::endl;
@@ -180,8 +180,8 @@ UNIT(LearnNOR) {
   for (const auto & training_sample : training_sample_set_with_bias) {
     bool class_id;
-    my_node.GetBooleanOutput(training_sample.input_vector(), &class_id);
-    bool correct_output = training_sample.output_vector()[0] > 0 ? true : false;
+    my_node.GetBooleanOutput(training_sample.input_vector(), &class_id, 0.5);
+    bool correct_output = training_sample.output_vector()[0] > 0.5 ? true : false;
     ASSERT_TRUE(class_id == correct_output);
   }
   std::cout << "Trained with success." << std::endl;
@@ -211,8 +211,8 @@ UNIT(LearnNOT) {
   for (const auto & training_sample : training_sample_set_with_bias) {
     bool class_id;
-    my_node.GetBooleanOutput(training_sample.input_vector(), &class_id);
-    bool correct_output = training_sample.output_vector()[0] > 0 ? true : false;
+    my_node.GetBooleanOutput(training_sample.input_vector(), &class_id, 0.5);
+    bool correct_output = training_sample.output_vector()[0] > 0.5 ? true : false;
     ASSERT_TRUE(class_id == correct_output);
   }
   std::cout << "Trained with success." << std::endl;
@@ -244,8 +244,8 @@ UNIT(LearnXOR) {
   for (const auto & training_sample : training_sample_set_with_bias) {
     bool class_id;
-    my_node.GetBooleanOutput(training_sample.input_vector(), &class_id);
-    bool correct_output = training_sample.output_vector()[0] > 0 ? true : false;
+    my_node.GetBooleanOutput(training_sample.input_vector(), &class_id, 0.5);
+    bool correct_output = training_sample.output_vector()[0] > 0.5 ? true : false;
     if (class_id != correct_output) {
       std::cout << "Failed to train. " <<
         " A simple perceptron cannot learn the XOR function." << std::endl;