Mirror of https://github.com/davidalbertonogueira/MLP.git (synced 2025-12-17 04:14:41 +03:00)

Commit: Add logging header file.
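The commit vendors the single-header easylogging++ library under deps/ and moves the trainer and its tests from std::cout onto its LOG macros. For orientation, here is a minimal sketch of the initialization pattern the diff installs; the macro names are taken from the diff itself, while the surrounding program is hypothetical:

    #include "easylogging++.h"

    INITIALIZE_EASYLOGGINGPP  // allocates the logger's storage; exactly one translation unit may do this

    int main(int argc, char* argv[]) {
      START_EASYLOGGINGPP(argc, argv);  // lets easylogging++ consume its own command-line flags
      LOG(INFO) << "logger ready";      // streams like std::cout, with level and timestamp handling
      return 0;
    }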
@@ -151,8 +151,9 @@
     </Link>
   </ItemDefinitionGroup>
   <ItemGroup>
+    <ClInclude Include="..\deps\Chrono.h" />
+    <ClInclude Include="..\deps\easylogging++.h" />
     <ClInclude Include="..\deps\microunit.h" />
-    <ClInclude Include="..\src\Chrono.h" />
     <ClInclude Include="..\src\Layer.h">
       <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">false</ExcludedFromBuild>
       <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">false</ExcludedFromBuild>
@@ -33,7 +33,10 @@
     <ClInclude Include="..\deps\microunit.h">
      <Filter>Header Files</Filter>
    </ClInclude>
-    <ClInclude Include="..\src\Chrono.h">
+    <ClInclude Include="..\deps\Chrono.h">
+      <Filter>Header Files</Filter>
+    </ClInclude>
+    <ClInclude Include="..\deps\easylogging++.h">
       <Filter>Header Files</Filter>
     </ClInclude>
   </ItemGroup>
src/Chrono.h → deps/Chrono.h (vendored, 0 changed lines)

deps/easylogging++.h (vendored, new file, 6695 lines)
File diff suppressed because it is too large.
@@ -15,8 +15,12 @@
 #include <vector>
 #include <algorithm>
 #include "microunit.h"
+#include "easylogging++.h"
 
-int main() {
+INITIALIZE_EASYLOGGINGPP
+
+int main(int argc, char* argv[]) {
+  START_EASYLOGGINGPP(argc, argv);
   microunit::UnitTester::Run();
   return 0;
 }
src/MLP.cpp (152 changed lines)

@@ -10,6 +10,7 @@
 #include <fstream>
 #include <vector>
 #include <algorithm>
+#include "easylogging++.h"
 
 bool MLP::ExportNNWeights(std::vector<double> *weights) const {
   return true;
@@ -86,75 +87,10 @@ void MLP::UpdateMiniBatch(const std::vector<TrainingSample> &training_sample_set
   int num_examples = training_sample_set_with_bias.size();
   int num_features = training_sample_set_with_bias[0].GetInputVectorSize();
 
-  {
-    int layer_i = -1;
-    int node_i = -1;
-    std::cout << "Starting weights:" << std::endl;
-    for (const auto & layer : m_layers) {
-      layer_i++;
-      node_i = -1;
-      std::cout << "Layer " << layer_i << " :" << std::endl;
-      for (const auto & node : layer.GetNodes()) {
-        node_i++;
-        std::cout << "\tNode " << node_i << " :\t";
-        for (auto m_weightselement : node.GetWeights()) {
-          std::cout << m_weightselement << "\t";
-        }
-        std::cout << std::endl;
-      }
-    }
-  }
-  size_t i = 0;
-  for ( i = 0; i < max_iterations; i++) {
-    //std::cout << "******************************" << std::endl;
-    //std::cout << "******** ITER " << i << std::endl;
-    //std::cout << "******************************" << std::endl;
-    double current_iteration_cost_function = 0.0;
-    for (auto & training_sample_with_bias : training_sample_set_with_bias) {
-      std::vector<double> predicted_output;
-      std::vector< std::vector<double> > all_layers_activations;
-      GetOutput(training_sample_with_bias.input_vector(),
-                &predicted_output,
-                &all_layers_activations);
-      const std::vector<double> & correct_output =
-        training_sample_with_bias.output_vector();
-
-      assert(correct_output.size() == predicted_output.size());
-      std::vector<double> deriv_error_output(predicted_output.size());
-
-      //std::cout << training_sample_with_bias << "\t\t";
-      //{
-      //  std::cout << "Predicted output: [";
-      //  for (int i = 0; i < predicted_output.size(); i++) {
-      //    if (i != 0)
-      //      std::cout << ", ";
-      //    std::cout << predicted_output[i];
-      //  }
-      //  std::cout << "]" << std::endl;
-      //}
-
-      for (int j = 0; j < predicted_output.size(); j++) {
-        current_iteration_cost_function +=
-          (std::pow)((correct_output[j] - predicted_output[j]), 2);
-        deriv_error_output[j] =
-          -2 * (correct_output[j] - predicted_output[j]);
-      }
-
-      UpdateWeights(all_layers_activations,
-                    deriv_error_output,
-                    learning_rate);
-    }
-
-    if((i% (max_iterations/100))==0)
-      std::cout << "Iteration "<< i << " cost function f(error): "
-        << current_iteration_cost_function << std::endl;
-    if (current_iteration_cost_function < min_error_cost)
-      break;
-
   //{
   //  int layer_i = -1;
   //  int node_i = -1;
-  //  std::cout << "Current weights:" << std::endl;
+  //  std::cout << "Starting weights:" << std::endl;
   //  for (const auto & layer : m_layers) {
   //    layer_i++;
   //    node_i = -1;
@@ -169,30 +105,76 @@ void MLP::UpdateMiniBatch(const std::vector<TrainingSample> &training_sample_set
   //    }
   //  }
   //}
+  size_t i = 0;
+  for (i = 0; i < max_iterations; i++) {
+    double current_iteration_cost_function = 0.0;
+    for (auto & training_sample_with_bias : training_sample_set_with_bias) {
+      std::vector<double> predicted_output;
+      std::vector< std::vector<double> > all_layers_activations;
+      GetOutput(training_sample_with_bias.input_vector(),
+                &predicted_output,
+                &all_layers_activations);
+      const std::vector<double> & correct_output =
+        training_sample_with_bias.output_vector();
+
+      assert(correct_output.size() == predicted_output.size());
+      std::vector<double> deriv_error_output(predicted_output.size());
+
+      if ((i % (max_iterations / 100)) == 0) {
+        std::stringstream temp_training;
+        temp_training << training_sample_with_bias << "\t\t";
+
+        temp_training << "Predicted output: [";
+        for (int i = 0; i < predicted_output.size(); i++) {
+          if (i != 0)
+            temp_training << ", ";
+          temp_training << predicted_output[i];
+        }
+        temp_training << "]";
+        LOG(INFO) << temp_training.str();
       }
 
-  std::cout << "******************************" << std::endl;
-  std::cout << "******* TRAINING ENDED *******" << std::endl;
-  std::cout << "******* " << i << " iters *******" << std::endl;
-  std::cout << "******************************" << std::endl;
-  {
-    int layer_i = -1;
-    int node_i = -1;
-    std::cout << "Final weights:" << std::endl;
-    for (const auto & layer : m_layers) {
-      layer_i++;
-      node_i = -1;
-      std::cout << "Layer " << layer_i << " :" << std::endl;
-      for (const auto & node : layer.GetNodes()) {
-        node_i++;
-        std::cout << "\tNode " << node_i << " :\t";
-        for (auto m_weightselement : node.GetWeights()) {
-          std::cout << m_weightselement << "\t";
-        }
-        std::cout << std::endl;
-      }
+      for (int j = 0; j < predicted_output.size(); j++) {
+        current_iteration_cost_function +=
+          (std::pow)((correct_output[j] - predicted_output[j]), 2);
+        deriv_error_output[j] =
+          -2 * (correct_output[j] - predicted_output[j]);
+      }
+
+      UpdateWeights(all_layers_activations,
+                    deriv_error_output,
+                    learning_rate);
     }
+
+    if ((i % (max_iterations / 100)) == 0)
+      LOG(INFO) << "Iteration " << i << " cost function f(error): "
+        << current_iteration_cost_function;
+    if (current_iteration_cost_function < min_error_cost)
+      break;
   }
+
+  LOG(INFO) << "******************************" ;
+  LOG(INFO) << "******* TRAINING ENDED *******";
+  LOG(INFO) << "******* " << i << " iters *******";
+  LOG(INFO) << "******************************";
+  //{
+  //  int layer_i = -1;
+  //  int node_i = -1;
+  //  std::cout << "Final weights:" << std::endl;
+  //  for (const auto & layer : m_layers) {
+  //    layer_i++;
+  //    node_i = -1;
+  //    std::cout << "Layer " << layer_i << " :" << std::endl;
+  //    for (const auto & node : layer.GetNodes()) {
+  //      node_i++;
+  //      std::cout << "\tNode " << node_i << " :\t";
+  //      for (auto m_weightselement : node.GetWeights()) {
+  //        std::cout << m_weightselement << "\t";
+  //      }
+  //      std::cout << std::endl;
+  //    }
+  //  }
+  //}
 };
 
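One caveat in the rewritten loop is worth flagging: the guard (i % (max_iterations / 100)) == 0 divides by zero whenever max_iterations < 100. A clamped stride avoids the edge case; this is a hedged sketch, assuming max_iterations is a size_t as the loop variable suggests, with log_stride a name introduced here rather than in the commit:

    #include <algorithm>  // (std::max), parenthesized as the repo does with (std::pow)

    const size_t log_stride = (std::max)(static_cast<size_t>(1), max_iterations / 100);
    if (i % log_stride == 0) {
      LOG(INFO) << "Iteration " << i << " cost function f(error): "
                << current_iteration_cost_function;
    }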
@@ -11,9 +11,12 @@
 #include <vector>
 #include <algorithm>
 #include "microunit.h"
+#include "easylogging++.h"
+
+INITIALIZE_EASYLOGGINGPP
 
 UNIT(LearnAND) {
-  std::cout << "Train AND function with mlp." << std::endl;
+  LOG(INFO) << "Train AND function with mlp." << std::endl;
 
   std::vector<TrainingSample> training_set =
   {
@@ -49,12 +52,11 @@ UNIT(LearnAND) {
       ASSERT_TRUE(predicted_output == correct_output);
     }
   }
-  std::cout << "Trained with success." << std::endl;
-  std::cout << std::endl;
+  LOG(INFO) << "Trained with success." << std::endl;
 }
 
 UNIT(LearnNAND) {
-  std::cout << "Train NAND function with mlp." << std::endl;
+  LOG(INFO) << "Train NAND function with mlp." << std::endl;
 
   std::vector<TrainingSample> training_set =
   {
@@ -90,12 +92,11 @@ UNIT(LearnNAND) {
       ASSERT_TRUE(predicted_output == correct_output);
     }
   }
-  std::cout << "Trained with success." << std::endl;
-  std::cout << std::endl;
+  LOG(INFO) << "Trained with success." << std::endl;
 }
 
 UNIT(LearnOR) {
-  std::cout << "Train OR function with mlp." << std::endl;
+  LOG(INFO) << "Train OR function with mlp." << std::endl;
 
   std::vector<TrainingSample> training_set =
   {
@@ -131,12 +132,11 @@ UNIT(LearnOR) {
       ASSERT_TRUE(predicted_output == correct_output);
     }
   }
-  std::cout << "Trained with success." << std::endl;
-  std::cout << std::endl;
+  LOG(INFO) << "Trained with success." << std::endl;
 }
 
 UNIT(LearnNOR) {
-  std::cout << "Train NOR function with mlp." << std::endl;
+  LOG(INFO) << "Train NOR function with mlp." << std::endl;
 
   std::vector<TrainingSample> training_set =
   {
@@ -172,12 +172,11 @@ UNIT(LearnNOR) {
       ASSERT_TRUE(predicted_output == correct_output);
     }
   }
-  std::cout << "Trained with success." << std::endl;
-  std::cout << std::endl;
+  LOG(INFO) << "Trained with success." << std::endl;
 }
 
 UNIT(LearnXOR) {
-  std::cout << "Train XOR function with mlp." << std::endl;
+  LOG(INFO) << "Train XOR function with mlp." << std::endl;
 
   std::vector<TrainingSample> training_set =
   {
@@ -211,12 +210,11 @@ UNIT(LearnXOR) {
       ASSERT_TRUE(predicted_output == correct_output);
     }
   }
-  std::cout << "Trained with success." << std::endl;
-  std::cout << std::endl;
+  LOG(INFO) << "Trained with success." << std::endl;
 }
 
 UNIT(LearnNOT) {
-  std::cout << "Train NOT function with mlp." << std::endl;
+  LOG(INFO) << "Train NOT function with mlp." << std::endl;
 
   std::vector<TrainingSample> training_set =
   {
@@ -248,12 +246,11 @@ UNIT(LearnNOT) {
       ASSERT_TRUE(predicted_output == correct_output);
     }
   }
-  std::cout << "Trained with success." << std::endl;
-  std::cout << std::endl;
+  LOG(INFO) << "Trained with success." << std::endl;
 }
 
 UNIT(LearnX1) {
-  std::cout << "Train X1 function with mlp." << std::endl;
+  LOG(INFO) << "Train X1 function with mlp." << std::endl;
 
   std::vector<TrainingSample> training_set =
   {
@@ -287,12 +284,11 @@ UNIT(LearnX1) {
       ASSERT_TRUE(predicted_output == correct_output);
     }
   }
-  std::cout << "Trained with success." << std::endl;
-  std::cout << std::endl;
+  LOG(INFO) << "Trained with success." << std::endl;
 }
 
 UNIT(LearnX2) {
-  std::cout << "Train X2 function with mlp." << std::endl;
+  LOG(INFO) << "Train X2 function with mlp." << std::endl;
 
   std::vector<TrainingSample> training_set =
   {
@@ -326,11 +322,11 @@ UNIT(LearnX2) {
       ASSERT_TRUE(predicted_output == correct_output);
     }
   }
-  std::cout << "Trained with success." << std::endl;
-  std::cout << std::endl;
+  LOG(INFO) << "Trained with success." << std::endl;
 }
 
-int main() {
+int main(int argc, char* argv[]) {
+  START_EASYLOGGINGPP(argc, argv);
   microunit::UnitTester::Run();
   return 0;
 }
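A small follow-up the diff leaves behind: easylogging++ terminates each LOG record with its own newline, so the << std::endl retained on these lines adds an extra blank line per message. Assuming the stock easylogging++ line format, the idiomatic call is simply:

    LOG(INFO) << "Trained with success.";  // easylogging++ appends the newline itself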
@@ -13,6 +13,9 @@
 #include <vector>
 #include <algorithm>
 #include "microunit.h"
+#include "easylogging++.h"
+
+INITIALIZE_EASYLOGGINGPP
 
 namespace {
 void Train(Node & node,
@@ -27,10 +30,10 @@ void Train(Node & node,
              use_constant_weight_init,
              constant_weight_init);
 
-  std::cout << "Starting weights:\t";
-  for (auto m_weightselement : node.GetWeights())
-    std::cout << m_weightselement << "\t";
-  std::cout << std::endl;
+  //std::cout << "Starting weights:\t";
+  //for (auto m_weightselement : node.GetWeights())
+  //  std::cout << m_weightselement << "\t";
+  //std::cout << std::endl;
 
   for (int i = 0; i < max_iterations; i++) {
     int error_count = 0;
@@ -49,15 +52,15 @@ void Train(Node & node,
     if (error_count == 0) break;
   }
 
-  std::cout << "Final weights:\t\t";
-  for (auto m_weightselement : node.GetWeights())
-    std::cout << m_weightselement << "\t";
-  std::cout << std::endl;
+  //std::cout << "Final weights:\t\t";
+  //for (auto m_weightselement : node.GetWeights())
+  //  std::cout << m_weightselement << "\t";
+  //std::cout << std::endl;
 };
 }
 
 UNIT(LearnAND) {
-  std::cout << "Train AND function with Node." << std::endl;
+  LOG(INFO) << "Train AND function with Node." << std::endl;
 
   std::vector<TrainingSample> training_set =
   {
@@ -86,12 +89,11 @@ UNIT(LearnAND) {
     bool correct_output = training_sample.output_vector()[0] > 0.5 ? true : false;
     ASSERT_TRUE(class_id == correct_output);
   }
-  std::cout << "Trained with success." << std::endl;
-  std::cout << std::endl;
+  LOG(INFO) << "Trained with success." << std::endl;
 }
 
 UNIT(LearnNAND) {
-  std::cout << "Train NAND function with Node." << std::endl;
+  LOG(INFO) << "Train NAND function with Node." << std::endl;
 
   std::vector<TrainingSample> training_set =
   {
@@ -119,12 +121,11 @@ UNIT(LearnNAND) {
     bool correct_output = training_sample.output_vector()[0] > 0.5 ? true : false;
     ASSERT_TRUE(class_id == correct_output);
   }
-  std::cout << "Trained with success." << std::endl;
-  std::cout << std::endl;
+  LOG(INFO) << "Trained with success." << std::endl;
 }
 
 UNIT(LearnOR) {
-  std::cout << "Train OR function with Node." << std::endl;
+  LOG(INFO) << "Train OR function with Node." << std::endl;
 
   std::vector<TrainingSample> training_set =
   {
@@ -152,11 +153,10 @@ UNIT(LearnOR) {
     bool correct_output = training_sample.output_vector()[0] > 0.5 ? true : false;
     ASSERT_TRUE(class_id == correct_output);
   }
-  std::cout << "Trained with success." << std::endl;
-  std::cout << std::endl;
+  LOG(INFO) << "Trained with success." << std::endl;
 }
 UNIT(LearnNOR) {
-  std::cout << "Train NOR function with Node." << std::endl;
+  LOG(INFO) << "Train NOR function with Node." << std::endl;
 
   std::vector<TrainingSample> training_set =
   {
@@ -184,12 +184,11 @@ UNIT(LearnNOR) {
     bool correct_output = training_sample.output_vector()[0] > 0.5 ? true : false;
     ASSERT_TRUE(class_id == correct_output);
   }
-  std::cout << "Trained with success." << std::endl;
-  std::cout << std::endl;
+  LOG(INFO) << "Trained with success." << std::endl;
 }
 
 UNIT(LearnNOT) {
-  std::cout << "Train NOT function with Node." << std::endl;
+  LOG(INFO) << "Train NOT function with Node." << std::endl;
 
   std::vector<TrainingSample> training_set =
   {
@@ -215,12 +214,11 @@ UNIT(LearnNOT) {
     bool correct_output = training_sample.output_vector()[0] > 0.5 ? true : false;
     ASSERT_TRUE(class_id == correct_output);
   }
-  std::cout << "Trained with success." << std::endl;
-  std::cout << std::endl;
+  LOG(INFO) << "Trained with success." << std::endl;
 }
 
 UNIT(LearnXOR) {
-  std::cout << "Train XOR function with Node." << std::endl;
+  LOG(INFO) << "Train XOR function with Node." << std::endl;
 
   std::vector<TrainingSample> training_set =
   {
@@ -247,16 +245,16 @@ UNIT(LearnXOR) {
     my_node.GetBooleanOutput(training_sample.input_vector(), &class_id, 0.5);
     bool correct_output = training_sample.output_vector()[0] > 0.5 ? true : false;
     if (class_id != correct_output) {
-      std::cout << "Failed to train. " <<
+      LOG(WARNING) << "Failed to train. " <<
         " A simple perceptron cannot learn the XOR function." << std::endl;
       FAIL();
     }
   }
-  std::cout << "Trained with success." << std::endl;
-  std::cout << std::endl;
+  LOG(INFO) << "Trained with success." << std::endl;
 }
 
-int main() {
+int main(int argc, char* argv[]) {
+  START_EASYLOGGINGPP(argc, argv);
   microunit::UnitTester::Run();
   return 0;
 }
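The XOR case is the one test expected to fail by design: a single Node computes a linear threshold of its inputs, and XOR's true points (0,1) and (1,0) cannot be separated from its false points (0,0) and (1,1) by any one hyperplane, so the LOG(WARNING) plus FAIL() branch is the asserted outcome.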
@@ -18,12 +18,11 @@
 
 #include <algorithm>
 #include <iostream>
+#include <sstream>
 #include <iterator>
 #include <vector>
 #include <cmath>
 
-
-
 namespace utils {
 
 struct gen_rand {
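The new <sstream> include presumably backs the compose-then-log pattern introduced in MLP.cpp: the message is built up in a std::stringstream and emitted as a single LOG(INFO) record, so one statement yields one log line. A self-contained sketch of that pattern; FormatOutputs is illustrative and not part of the commit:

    #include <sstream>
    #include <string>
    #include <vector>

    // Mirrors how UpdateMiniBatch now assembles "Predicted output: [...]"
    // before handing it to easylogging++ as one record.
    std::string FormatOutputs(const std::vector<double>& predicted) {
      std::stringstream msg;
      msg << "Predicted output: [";
      for (size_t k = 0; k < predicted.size(); ++k) {
        if (k != 0) msg << ", ";
        msg << predicted[k];
      }
      msg << "]";
      return msg.str();  // usage: LOG(INFO) << FormatOutputs(predicted);
    }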