Mirror of https://github.com/davidalbertonogueira/MLP.git (synced 2025-12-17 04:14:41 +03:00)
Slow learning fixed by using an equal number of positive and negative training samples.
src/Layer.h (12 changes)

@@ -35,22 +35,18 @@ public:
                                constant_weight_init)));
   };
 
 
   ~Layer() {
     m_num_nodes = 0;
     m_num_inputs_per_node = 0;
     m_nodes.clear();
   };
 
-  //std::vector<Node> & GetNodes() {
-  //  return m_nodes;
-  //}
-
   const std::vector<Node> & GetNodes() const {
     return m_nodes;
   }
 
-  void GetOutputAfterSigmoid(const std::vector<double> &input, std::vector<double> * output) const {
+  void GetOutputAfterSigmoid(const std::vector<double> &input,
+                             std::vector<double> * output) const {
     assert(input.size() == m_num_inputs_per_node);
 
     output->resize(m_num_nodes);
@@ -71,7 +67,8 @@ public:
 
     for (size_t i = 0; i < m_nodes.size(); i++) {
       double net_sum;
-      m_nodes[i].GetInputInnerProdWithWeights(input_layer_activation, &net_sum);
+      m_nodes[i].GetInputInnerProdWithWeights(input_layer_activation,
+                                              &net_sum);
 
       //dE/dwij = dE/doj . doj/dnetj . dnetj/dwij
       double dE_doj = 0.0;
@@ -81,7 +78,6 @@ public:
         dE_doj = deriv_error[i];
       doj_dnetj = utils::deriv_sigmoid(net_sum);
 
-
       for (int j = 0; j < m_num_inputs_per_node; j++) {
         (*deltas)[j] += dE_doj * doj_dnetj * m_nodes[i].GetWeights()[j];
 
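Aside on the comment in this hunk: //dE/dwij = dE/doj . doj/dnetj . dnetj/dwij is the backpropagation chain rule in shorthand. Written out, and assuming utils::deriv_sigmoid(x) computes the usual sigmoid derivative:

\[
\frac{\partial E}{\partial w_{ij}}
  = \frac{\partial E}{\partial o_j}
    \cdot \frac{\partial o_j}{\partial net_j}
    \cdot \frac{\partial net_j}{\partial w_{ij}},
\qquad
o_j = \sigma(net_j),
\qquad
\frac{\partial o_j}{\partial net_j} = \sigma(net_j)\bigl(1 - \sigma(net_j)\bigr).
\]

The accumulation (*deltas)[j] += dE_doj * doj_dnetj * m_nodes[i].GetWeights()[j] is the same rule aimed at the previous layer: since net_i is a weighted sum of its inputs, the factor dnet_i/do_j equals w_ij, so deltas[j] collects the error signal that input j receives from every node i of this layer.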
src/MLP.cpp (39 changes)

@@ -20,8 +20,7 @@ bool MLP::ImportNNWeights(const std::vector<double> & weights) {
 
 void MLP::GetOutput(const std::vector<double> &input,
                     std::vector<double> * output,
-                    std::vector<std::vector<double>> * all_layers_activations,
-                    bool apply_softmax) const {
+                    std::vector<std::vector<double>> * all_layers_activations) const {
   assert(input.size() == m_num_inputs);
   int temp_size;
   if (m_num_hidden_layers == 0)
@@ -50,7 +49,7 @@ void MLP::GetOutput(const std::vector<double> &input,
     m_layers[i].GetOutputAfterSigmoid(temp_in, &temp_out);
   }
 
-  if (apply_softmax && temp_out.size() > 1)
+  if (temp_out.size() > 1)
     utils::Softmax(&temp_out);
   *output = temp_out;
 
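With the apply_softmax flag gone, utils::Softmax now runs whenever the output layer has more than one node, and single-output networks keep their raw sigmoid value. For reference, a minimal numerically stable softmax is sketched below; this is an assumption about what a function like utils::Softmax does, not a copy of the repo's implementation.

#include <algorithm>
#include <cmath>
#include <vector>

// Sketch: in-place softmax over a non-empty vector. Subtracting the max
// before exponentiating avoids overflow without changing the result.
void Softmax(std::vector<double>* v) {
  const double max_elem = *std::max_element(v->begin(), v->end());
  double sum = 0.0;
  for (double& x : *v) {
    x = std::exp(x - max_elem);
    sum += x;
  }
  for (double& x : *v) {
    x /= sum;
  }
}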
@@ -105,11 +104,11 @@ void MLP::UpdateMiniBatch(const std::vector<TrainingSample> &training_sample_set
       }
     }
   }
-
-  for (int i = 0; i < max_iterations; i++) {
-    std::cout << "******************************" << std::endl;
-    std::cout << "******** ITER " << i << std::endl;
-    std::cout << "******************************" << std::endl;
+  size_t i = 0;
+  for (i = 0; i < max_iterations; i++) {
+    //std::cout << "******************************" << std::endl;
+    //std::cout << "******** ITER " << i << std::endl;
+    //std::cout << "******************************" << std::endl;
     double current_iteration_cost_function = 0.0;
     for (auto & training_sample_with_bias : training_sample_set_with_bias) {
       std::vector<double> predicted_output;
@@ -123,16 +122,16 @@ void MLP::UpdateMiniBatch(const std::vector<TrainingSample> &training_sample_set
       assert(correct_output.size() == predicted_output.size());
       std::vector<double> deriv_error_output(predicted_output.size());
 
-      std::cout << training_sample_with_bias << "\t\t";
-      {
-        std::cout << "Predicted output: [";
-        for (int i = 0; i < predicted_output.size(); i++) {
-          if (i != 0)
-            std::cout << ", ";
-          std::cout << predicted_output[i];
-        }
-        std::cout << "]" << std::endl;
-      }
+      //std::cout << training_sample_with_bias << "\t\t";
+      //{
+      //  std::cout << "Predicted output: [";
+      //  for (int i = 0; i < predicted_output.size(); i++) {
+      //    if (i != 0)
+      //      std::cout << ", ";
+      //    std::cout << predicted_output[i];
+      //  }
+      //  std::cout << "]" << std::endl;
+      //}
 
       for (int j = 0; j < predicted_output.size(); j++) {
         current_iteration_cost_function +=
@@ -146,7 +145,8 @@ void MLP::UpdateMiniBatch(const std::vector<TrainingSample> &training_sample_set
                           learning_rate);
       }
 
-    std::cout << "Iteration cost function f(error): "
+    if ((i % (max_iterations / 100)) == 0)
+      std::cout << "Iteration " << i << " cost function f(error): "
               << current_iteration_cost_function << std::endl;
     if (current_iteration_cost_function < min_error_cost)
       break;
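The replacement prints the cost once per 1% of the run instead of every iteration, and the counter i (hoisted out of the loop as a size_t in the earlier hunk) survives to be printed in the "TRAINING ENDED" banner below. One caveat, stated as an observation rather than a fix in this commit: max_iterations / 100 is integer division, so it is 0 whenever max_iterations < 100, and i % 0 is undefined behavior in C++. The updated tests pass 500 and 50'000 iterations, so the gate is safe there; a guarded variant (hypothetical, not in the commit) would clamp the stride:

  // Keep the reporting stride >= 1 so the modulo never divides by zero.
  const size_t stride = (max_iterations >= 100) ? max_iterations / 100 : 1;
  if (i % stride == 0)
    std::cout << "Iteration " << i << " cost function f(error): "
              << current_iteration_cost_function << std::endl;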
@@ -173,6 +173,7 @@ void MLP::UpdateMiniBatch(const std::vector<TrainingSample> &training_sample_set
 
   std::cout << "******************************" << std::endl;
   std::cout << "******* TRAINING ENDED *******" << std::endl;
+  std::cout << "******* " << i << " iters *******" << std::endl;
   std::cout << "******************************" << std::endl;
   {
     int layer_i = -1;
src/MLP.h

@@ -48,8 +48,7 @@ public:
 
   void GetOutput(const std::vector<double> &input,
                  std::vector<double> * output,
-                 std::vector<std::vector<double>> * all_layers_activations = nullptr,
-                 bool apply_softmax = false) const;
+                 std::vector<std::vector<double>> * all_layers_activations = nullptr) const;
   void GetOutputClass(const std::vector<double> &output, size_t * class_id) const;
 
   void UpdateMiniBatch(const std::vector<TrainingSample> &training_sample_set_with_bias,
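Since callers can no longer opt out of softmax, a call site needs only the output vector. The lines below are lifted from the updated unit tests later in this commit; with a single output node, temp_out.size() > 1 is false, softmax is skipped, and the raw sigmoid value is returned, which is why the tests threshold it at 0.5:

  MLP my_mlp(num_features, 1, 1, 2, false);
  my_mlp.UpdateMiniBatch(training_sample_set_with_bias, 0.5, 500, 0.25);
  std::vector<double> output;
  my_mlp.GetOutput(training_sample.input_vector(), &output);
  bool predicted_output = output[0] > 0.5 ? true : false;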
(MLP unit tests)

@@ -20,6 +20,8 @@ UNIT(LearnAND) {
     { { 0, 0 },{ 0.0 } },
     { { 0, 1 },{ 0.0 } },
     { { 1, 0 },{ 0.0 } },
+    { { 1, 1 },{ 1.0 } },
+    { { 1, 1 },{ 1.0 } },
     { { 1, 1 },{ 1.0 } }
   };
   bool bias_already_in = false;

@@ -33,9 +35,9 @@ UNIT(LearnAND) {
 
   size_t num_examples = training_sample_set_with_bias.size();
   size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
-  MLP my_mlp(num_features, 1, 0, 5, true, 0.5);
+  MLP my_mlp(num_features, 1, 1, 2, false);
   //Train MLP
-  my_mlp.UpdateMiniBatch(training_sample_set_with_bias, 2, 1000, 0.245);
+  my_mlp.UpdateMiniBatch(training_sample_set_with_bias, 0.5, 500, 0.25);
 
   for (const auto & training_sample : training_sample_set_with_bias) {
     std::vector<double> output;
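This hunk is the commit message in action: the AND truth table has three 0-labeled rows and one 1-labeled row, so the positive sample is duplicated until both classes appear three times. The NAND, OR, and NOR tests below receive the same treatment for their respective minority rows. A generic sketch of the idea, with Sample as a stand-in for the repo's TrainingSample (this helper is illustrative and not part of the commit):

  #include <vector>

  struct Sample { std::vector<double> input; double label; };

  // Balance a binary training set by duplicating minority-class samples
  // until both labels appear equally often.
  void BalanceByDuplication(std::vector<Sample>* set) {
    std::vector<Sample> pos, neg;
    for (const auto& s : *set) (s.label > 0.5 ? pos : neg).push_back(s);
    if (pos.empty() || neg.empty()) return;  // nothing to balance against
    std::vector<Sample>& minority = (pos.size() < neg.size()) ? pos : neg;
    const std::vector<Sample>& majority = (pos.size() < neg.size()) ? neg : pos;
    const size_t original = minority.size();
    for (size_t k = 0; minority.size() < majority.size(); ++k)
      minority.push_back(minority[k % original]);  // cycle the original rows
    set->clear();
    set->insert(set->end(), neg.begin(), neg.end());
    set->insert(set->end(), pos.begin(), pos.end());
  }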
@@ -56,6 +58,8 @@ UNIT(LearnNAND) {
     { { 0, 0 },{ 1.0 } },
     { { 0, 1 },{ 1.0 } },
     { { 1, 0 },{ 1.0 } },
+    { { 1, 1 },{ 0.0 } },
+    { { 1, 1 },{ 0.0 } },
     { { 1, 1 },{ 0.0 } }
   };
   bool bias_already_in = false;

@@ -69,9 +73,9 @@ UNIT(LearnNAND) {
 
   size_t num_examples = training_sample_set_with_bias.size();
   size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
-  MLP my_mlp(num_features, 1, 0, 5, true, 0.5);
+  MLP my_mlp(num_features, 1, 1, 2, false);
   //Train MLP
-  my_mlp.UpdateMiniBatch(training_sample_set_with_bias, 2, 1000, 0.245);
+  my_mlp.UpdateMiniBatch(training_sample_set_with_bias, 0.5, 500, 0.25);
 
   for (const auto & training_sample : training_sample_set_with_bias) {
     std::vector<double> output;
@@ -89,6 +93,8 @@ UNIT(LearnOR) {
 
   std::vector<TrainingSample> training_set =
   {
+    { { 0, 0 },{ 0.0 } },
+    { { 0, 0 },{ 0.0 } },
     { { 0, 0 },{ 0.0 } },
     { { 0, 1 },{ 1.0 } },
     { { 1, 0 },{ 1.0 } },

@@ -105,9 +111,9 @@ UNIT(LearnOR) {
 
   size_t num_examples = training_sample_set_with_bias.size();
   size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
-  MLP my_mlp(num_features, 1, 0, 5, true, 0.5);
+  MLP my_mlp(num_features, 1, 1, 2, false);
   //Train MLP
-  my_mlp.UpdateMiniBatch(training_sample_set_with_bias, 2, 1000, 0.245);
+  my_mlp.UpdateMiniBatch(training_sample_set_with_bias, 0.5, 500, 0.25);
 
   for (const auto & training_sample : training_sample_set_with_bias) {
     std::vector<double> output;
@@ -125,6 +131,8 @@ UNIT(LearnNOR) {
 
   std::vector<TrainingSample> training_set =
   {
+    { { 0, 0 },{ 1.0 } },
+    { { 0, 0 },{ 1.0 } },
     { { 0, 0 },{ 1.0 } },
     { { 0, 1 },{ 0.0 } },
     { { 1, 0 },{ 0.0 } },

@@ -141,9 +149,9 @@ UNIT(LearnNOR) {
 
   size_t num_examples = training_sample_set_with_bias.size();
   size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
-  MLP my_mlp(num_features, 1, 0, 5, true, 0.5);
+  MLP my_mlp(num_features, 1, 1, 2, false);
   //Train MLP
-  my_mlp.UpdateMiniBatch(training_sample_set_with_bias, 2, 1000, 0.245);
+  my_mlp.UpdateMiniBatch(training_sample_set_with_bias, 0.5, 500, 0.25);
 
   for (const auto & training_sample : training_sample_set_with_bias) {
     std::vector<double> output;
@@ -156,41 +164,41 @@ UNIT(LearnNOR) {
   std::cout << std::endl;
 }
 
-//UNIT(LearnXOR) {
-//  std::cout << "Train XOR function with mlp." << std::endl;
-//
-//  std::vector<TrainingSample> training_set =
-//  {
-//    { { 0, 0 },{ 0.0 } },
-//    { { 0, 1 },{ 1.0 } },
-//    { { 1, 0 },{ 1.0 } },
-//    { { 1, 1 },{ 0.0 } }
-//  };
-//  bool bias_already_in = false;
-//  std::vector<TrainingSample> training_sample_set_with_bias(training_set);
-//  //set up bias
-//  if (!bias_already_in) {
-//    for (auto & training_sample_with_bias : training_sample_set_with_bias) {
-//      training_sample_with_bias.AddBiasValue(1);
-//    }
-//  }
-//
-//  size_t num_examples = training_sample_set_with_bias.size();
-//  size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
-//  MLP my_mlp(num_features, 1, 0, 5, true, 0.5);
-//  //Train MLP
-//  my_mlp.UpdateMiniBatch(training_sample_set_with_bias, 2, 1000, 0.245);
-//
-//  for (const auto & training_sample : training_sample_set_with_bias) {
-//    std::vector<double> output;
-//    my_mlp.GetOutput(training_sample.input_vector(), &output);
-//    bool predicted_output = output[0] > 0.5 ? true : false;
-//    bool correct_output = training_sample.output_vector()[0] > 0.5 ? true : false;
-//    ASSERT_TRUE(predicted_output == correct_output);
-//  }
-//  std::cout << "Trained with success." << std::endl;
-//  std::cout << std::endl;
-//}
+UNIT(LearnXOR) {
+  std::cout << "Train XOR function with mlp." << std::endl;
+
+  std::vector<TrainingSample> training_set =
+  {
+    { { 0, 0 },{ 0.0 } },
+    { { 0, 1 },{ 1.0 } },
+    { { 1, 0 },{ 1.0 } },
+    { { 1, 1 },{ 0.0 } }
+  };
+  bool bias_already_in = false;
+  std::vector<TrainingSample> training_sample_set_with_bias(training_set);
+  //set up bias
+  if (!bias_already_in) {
+    for (auto & training_sample_with_bias : training_sample_set_with_bias) {
+      training_sample_with_bias.AddBiasValue(1);
+    }
+  }
+
+  size_t num_examples = training_sample_set_with_bias.size();
+  size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
+  MLP my_mlp(num_features, 1, 1, 2, false);
+  //Train MLP
+  my_mlp.UpdateMiniBatch(training_sample_set_with_bias, 0.5, 50'000, 0.25);
+
+  for (const auto & training_sample : training_sample_set_with_bias) {
+    std::vector<double> output;
+    my_mlp.GetOutput(training_sample.input_vector(), &output);
+    bool predicted_output = output[0] > 0.5 ? true : false;
+    bool correct_output = training_sample.output_vector()[0] > 0.5 ? true : false;
+    ASSERT_TRUE(predicted_output == correct_output);
+  }
+  std::cout << "Trained with success." << std::endl;
+  std::cout << std::endl;
+}
 
 UNIT(LearnNOT) {
   std::cout << "Train NOT function with mlp." << std::endl;
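The LearnXOR unit is re-enabled because the network finally has the capacity for it: MLP(num_features, 1, 1, 2, false) builds one hidden layer of two nodes, and XOR is the only gate in this suite that is not linearly separable. The classic decomposition shows why two hidden units suffice:

\[
\mathrm{XOR}(x_1, x_2) = \mathrm{AND}\bigl(\mathrm{OR}(x_1, x_2),\ \mathrm{NAND}(x_1, x_2)\bigr),
\]

where OR and NAND are each linearly separable, so one sigmoid unit can realize each, and the output node combines them. The much larger budget of 50'000 iterations (versus 500 for the separable gates) presumably reflects the slower convergence of fitting the hidden layer.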
@@ -211,9 +219,9 @@ UNIT(LearnNOT) {
 
   size_t num_examples = training_sample_set_with_bias.size();
   size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
-  MLP my_mlp(num_features, 1, 0, 5, true, 0.5);
+  MLP my_mlp(num_features, 1, 1, 2, false);
   //Train MLP
-  my_mlp.UpdateMiniBatch(training_sample_set_with_bias, 2, 1000, 0.245);
+  my_mlp.UpdateMiniBatch(training_sample_set_with_bias, 0.5, 500, 0.25);
 
   for (const auto & training_sample : training_sample_set_with_bias) {
     std::vector<double> output;
src/Node.h

@@ -102,10 +102,11 @@ public:
   }
 
   void GetBooleanOutput(const std::vector<double> &input,
-                        bool * bool_output) const {
+                        bool * bool_output,
+                        double threshold = 0.5) const {
     double value;
     GetOutputAfterSigmoid(input, &value);
-    *bool_output = (value > 0.5) ? true : false;
+    *bool_output = (value > threshold) ? true : false;
   };
 
   void UpdateWeights(const std::vector<double> &x,
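The decision threshold becomes a parameter defaulting to the old hard-coded 0.5, so existing callers compile unchanged. A hypothetical call site (node and inputs are stand-ins):

  bool fires;
  node.GetBooleanOutput(inputs, &fires);        // default cut: value > 0.5
  node.GetBooleanOutput(inputs, &fires, 0.8);   // stricter, caller-chosen cut

The test updates below pass 0.5 explicitly; that is redundant given the default, but it makes the cut visible at each call site.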
(Node unit tests)

@@ -36,7 +36,7 @@ void Train(Node & node,
   int error_count = 0;
   for (auto & training_sample_with_bias : training_sample_set_with_bias) {
     bool prediction;
-    node.GetBooleanOutput(training_sample_with_bias.input_vector(), &prediction);
+    node.GetBooleanOutput(training_sample_with_bias.input_vector(), &prediction, 0.5);
     bool correct_output = training_sample_with_bias.output_vector()[0] > 0.5 ? true : false;
     if (prediction != correct_output) {
       error_count++;
@@ -82,8 +82,8 @@ UNIT(LearnAND) {
 
   for (const auto & training_sample : training_sample_set_with_bias) {
     bool class_id;
-    my_node.GetBooleanOutput(training_sample.input_vector(), &class_id);
-    bool correct_output = training_sample.output_vector()[0] > 0 ? true : false;
+    my_node.GetBooleanOutput(training_sample.input_vector(), &class_id, 0.5);
+    bool correct_output = training_sample.output_vector()[0] > 0.5 ? true : false;
     ASSERT_TRUE(class_id == correct_output);
   }
   std::cout << "Trained with success." << std::endl;

@@ -115,8 +115,8 @@ UNIT(LearnNAND) {
 
   for (const auto & training_sample : training_sample_set_with_bias) {
     bool class_id;
-    my_node.GetBooleanOutput(training_sample.input_vector(), &class_id);
-    bool correct_output = training_sample.output_vector()[0] > 0 ? true : false;
+    my_node.GetBooleanOutput(training_sample.input_vector(), &class_id, 0.5);
+    bool correct_output = training_sample.output_vector()[0] > 0.5 ? true : false;
     ASSERT_TRUE(class_id == correct_output);
   }
   std::cout << "Trained with success." << std::endl;

@@ -148,8 +148,8 @@ UNIT(LearnOR) {
 
   for (const auto & training_sample : training_sample_set_with_bias) {
     bool class_id;
-    my_node.GetBooleanOutput(training_sample.input_vector(), &class_id);
-    bool correct_output = training_sample.output_vector()[0] > 0 ? true : false;
+    my_node.GetBooleanOutput(training_sample.input_vector(), &class_id, 0.5);
+    bool correct_output = training_sample.output_vector()[0] > 0.5 ? true : false;
     ASSERT_TRUE(class_id == correct_output);
   }
   std::cout << "Trained with success." << std::endl;

@@ -180,8 +180,8 @@ UNIT(LearnNOR) {
 
   for (const auto & training_sample : training_sample_set_with_bias) {
     bool class_id;
-    my_node.GetBooleanOutput(training_sample.input_vector(), &class_id);
-    bool correct_output = training_sample.output_vector()[0] > 0 ? true : false;
+    my_node.GetBooleanOutput(training_sample.input_vector(), &class_id, 0.5);
+    bool correct_output = training_sample.output_vector()[0] > 0.5 ? true : false;
     ASSERT_TRUE(class_id == correct_output);
   }
   std::cout << "Trained with success." << std::endl;

@@ -211,8 +211,8 @@ UNIT(LearnNOT) {
 
   for (const auto & training_sample : training_sample_set_with_bias) {
     bool class_id;
-    my_node.GetBooleanOutput(training_sample.input_vector(), &class_id);
-    bool correct_output = training_sample.output_vector()[0] > 0 ? true : false;
+    my_node.GetBooleanOutput(training_sample.input_vector(), &class_id, 0.5);
+    bool correct_output = training_sample.output_vector()[0] > 0.5 ? true : false;
     ASSERT_TRUE(class_id == correct_output);
   }
   std::cout << "Trained with success." << std::endl;
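Beyond the explicit 0.5 threshold argument, these hunks tighten the label conversion from output_vector()[0] > 0 to > 0.5, so the expected class is derived with the same cut the node uses for its prediction. Under the old > 0 test, any nonzero target (say 0.1) would have counted as positive even though the node itself would classify it as negative.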
@@ -244,8 +244,8 @@ UNIT(LearnXOR) {
 
   for (const auto & training_sample : training_sample_set_with_bias) {
     bool class_id;
-    my_node.GetBooleanOutput(training_sample.input_vector(), &class_id);
-    bool correct_output = training_sample.output_vector()[0] > 0 ? true : false;
+    my_node.GetBooleanOutput(training_sample.input_vector(), &class_id, 0.5);
+    bool correct_output = training_sample.output_vector()[0] > 0.5 ? true : false;
     if (class_id != correct_output) {
       std::cout << "Failed to train. " <<
         " A simple perceptron cannot learn the XOR function." << std::endl;
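The failure this test tolerates has a short proof. A single node with bias fires exactly when w_1 x_1 + w_2 x_2 + b > 0, and XOR would require all four of:

\[
b \le 0, \qquad w_2 + b > 0, \qquad w_1 + b > 0, \qquad w_1 + w_2 + b \le 0.
\]

Adding the two middle inequalities gives w_1 + w_2 + 2b > 0, so w_1 + w_2 + b > -b \ge 0, contradicting the last constraint. No linear unit can represent XOR, which is exactly why the hidden-layer MLP version of this test succeeds where the single node cannot.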