Add logging header file.

davidjacnogueira
2016-11-03 23:05:54 +00:00
parent 7965d7b748
commit f647b05f70
9 changed files with 6812 additions and 134 deletions

View File

@@ -151,8 +151,9 @@
     </Link>
   </ItemDefinitionGroup>
   <ItemGroup>
+    <ClInclude Include="..\deps\Chrono.h" />
+    <ClInclude Include="..\deps\easylogging++.h" />
     <ClInclude Include="..\deps\microunit.h" />
-    <ClInclude Include="..\src\Chrono.h" />
     <ClInclude Include="..\src\Layer.h">
       <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">false</ExcludedFromBuild>
       <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">false</ExcludedFromBuild>

View File

@@ -33,7 +33,10 @@
     <ClInclude Include="..\deps\microunit.h">
       <Filter>Header Files</Filter>
     </ClInclude>
-    <ClInclude Include="..\src\Chrono.h">
+    <ClInclude Include="..\deps\Chrono.h">
+      <Filter>Header Files</Filter>
+    </ClInclude>
+    <ClInclude Include="..\deps\easylogging++.h">
       <Filter>Header Files</Filter>
     </ClInclude>
   </ItemGroup>

View File

deps/easylogging++.h (vendored, new file, +6695 lines)

File diff suppressed because it is too large.
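
Since the vendored header's diff is suppressed, here is a minimal sketch of the easylogging++ usage pattern the remaining files in this commit adopt: include the single header, expand INITIALIZE_EASYLOGGINGPP exactly once per executable, call START_EASYLOGGINGPP(argc, argv) at the top of main, then log through the LOG(...) macros. The file name in the comment is illustrative, not part of the commit.

// minimal_logging_example.cpp -- illustrative sketch, not a file in this commit
#include "easylogging++.h"

INITIALIZE_EASYLOGGINGPP   // defines the library's storage; exactly once per binary

int main(int argc, char* argv[]) {
  START_EASYLOGGINGPP(argc, argv);   // lets easylogging++ read its command-line flags
  LOG(INFO) << "logger ready";       // informational message
  LOG(WARNING) << "warning-level message";
  return 0;
}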

View File

@@ -15,8 +15,12 @@
 #include <vector>
 #include <algorithm>
 #include "microunit.h"
+#include "easylogging++.h"
 
-int main() {
+INITIALIZE_EASYLOGGINGPP
+
+int main(int argc, char* argv[]) {
+  START_EASYLOGGINGPP(argc, argv);
   microunit::UnitTester::Run();
   return 0;
 }

View File

@@ -10,6 +10,7 @@
 #include <fstream>
 #include <vector>
 #include <algorithm>
+#include "easylogging++.h"
 
 bool MLP::ExportNNWeights(std::vector<double> *weights) const {
   return true;
@@ -86,29 +87,26 @@ void MLP::UpdateMiniBatch(const std::vector<TrainingSample> &training_sample_set
   int num_examples = training_sample_set_with_bias.size();
   int num_features = training_sample_set_with_bias[0].GetInputVectorSize();
 
-  {
-    int layer_i = -1;
-    int node_i = -1;
-    std::cout << "Starting weights:" << std::endl;
-    for (const auto & layer : m_layers) {
-      layer_i++;
-      node_i = -1;
-      std::cout << "Layer " << layer_i << " :" << std::endl;
-      for (const auto & node : layer.GetNodes()) {
-        node_i++;
-        std::cout << "\tNode " << node_i << " :\t";
-        for (auto m_weightselement : node.GetWeights()) {
-          std::cout << m_weightselement << "\t";
-        }
-        std::cout << std::endl;
-      }
-    }
-  }
+  //{
+  //  int layer_i = -1;
+  //  int node_i = -1;
+  //  std::cout << "Starting weights:" << std::endl;
+  //  for (const auto & layer : m_layers) {
+  //    layer_i++;
+  //    node_i = -1;
+  //    std::cout << "Layer " << layer_i << " :" << std::endl;
+  //    for (const auto & node : layer.GetNodes()) {
+  //      node_i++;
+  //      std::cout << "\tNode " << node_i << " :\t";
+  //      for (auto m_weightselement : node.GetWeights()) {
+  //        std::cout << m_weightselement << "\t";
+  //      }
+  //      std::cout << std::endl;
+  //    }
+  //  }
+  //}
 
   size_t i = 0;
-  for ( i = 0; i < max_iterations; i++) {
-    //std::cout << "******************************" << std::endl;
-    //std::cout << "******** ITER " << i << std::endl;
-    //std::cout << "******************************" << std::endl;
+  for (i = 0; i < max_iterations; i++) {
     double current_iteration_cost_function = 0.0;
     for (auto & training_sample_with_bias : training_sample_set_with_bias) {
       std::vector<double> predicted_output;
@@ -122,16 +120,19 @@ void MLP::UpdateMiniBatch(const std::vector<TrainingSample> &training_sample_set
       assert(correct_output.size() == predicted_output.size());
       std::vector<double> deriv_error_output(predicted_output.size());
 
-      //std::cout << training_sample_with_bias << "\t\t";
-      //{
-      //  std::cout << "Predicted output: [";
-      //  for (int i = 0; i < predicted_output.size(); i++) {
-      //    if (i != 0)
-      //      std::cout << ", ";
-      //    std::cout << predicted_output[i];
-      //  }
-      //  std::cout << "]" << std::endl;
-      //}
+      if ((i % (max_iterations / 100)) == 0) {
+        std::stringstream temp_training;
+        temp_training << training_sample_with_bias << "\t\t";
+
+        temp_training << "Predicted output: [";
+        for (int i = 0; i < predicted_output.size(); i++) {
+          if (i != 0)
+            temp_training << ", ";
+          temp_training << predicted_output[i];
+        }
+        temp_training << "]";
+        LOG(INFO) << temp_training.str();
+      }
 
       for (int j = 0; j < predicted_output.size(); j++) {
         current_iteration_cost_function +=
@@ -145,54 +146,35 @@ void MLP::UpdateMiniBatch(const std::vector<TrainingSample> &training_sample_set
                            learning_rate);
     }
 
-    if((i% (max_iterations/100))==0)
-      std::cout << "Iteration "<< i << " cost function f(error): "
-        << current_iteration_cost_function << std::endl;
+    if ((i % (max_iterations / 100)) == 0)
+      LOG(INFO) << "Iteration " << i << " cost function f(error): "
+        << current_iteration_cost_function;
 
     if (current_iteration_cost_function < min_error_cost)
       break;
-
-    //{
-    //  int layer_i = -1;
-    //  int node_i = -1;
-    //  std::cout << "Current weights:" << std::endl;
-    //  for (const auto & layer : m_layers) {
-    //    layer_i++;
-    //    node_i = -1;
-    //    std::cout << "Layer " << layer_i << " :" << std::endl;
-    //    for (const auto & node : layer.GetNodes()) {
-    //      node_i++;
-    //      std::cout << "\tNode " << node_i << " :\t";
-    //      for (auto m_weightselement : node.GetWeights()) {
-    //        std::cout << m_weightselement << "\t";
-    //      }
-    //      std::cout << std::endl;
-    //    }
-    //  }
-    //}
   }
 
-  std::cout << "******************************" << std::endl;
-  std::cout << "******* TRAINING ENDED *******" << std::endl;
-  std::cout << "******* " << i << " iters *******" << std::endl;
-  std::cout << "******************************" << std::endl;
+  LOG(INFO) << "******************************" ;
+  LOG(INFO) << "******* TRAINING ENDED *******";
+  LOG(INFO) << "******* " << i << " iters *******";
+  LOG(INFO) << "******************************";
 
-  {
-    int layer_i = -1;
-    int node_i = -1;
-    std::cout << "Final weights:" << std::endl;
-    for (const auto & layer : m_layers) {
-      layer_i++;
-      node_i = -1;
-      std::cout << "Layer " << layer_i << " :" << std::endl;
-      for (const auto & node : layer.GetNodes()) {
-        node_i++;
-        std::cout << "\tNode " << node_i << " :\t";
-        for (auto m_weightselement : node.GetWeights()) {
-          std::cout << m_weightselement << "\t";
-        }
-        std::cout << std::endl;
-      }
-    }
-  }
+  //{
+  //  int layer_i = -1;
+  //  int node_i = -1;
+  //  std::cout << "Final weights:" << std::endl;
+  //  for (const auto & layer : m_layers) {
+  //    layer_i++;
+  //    node_i = -1;
+  //    std::cout << "Layer " << layer_i << " :" << std::endl;
+  //    for (const auto & node : layer.GetNodes()) {
+  //      node_i++;
+  //      std::cout << "\tNode " << node_i << " :\t";
+  //      for (auto m_weightselement : node.GetWeights()) {
+  //        std::cout << m_weightselement << "\t";
+  //      }
+  //      std::cout << std::endl;
+  //    }
+  //  }
+  //}
 };
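
The UpdateMiniBatch hunks above throttle progress output to roughly one hundred log lines per run via (i % (max_iterations / 100)) == 0. Below is a standalone sketch of that idiom with hypothetical names; note it assumes max_iterations >= 100, since max_iterations / 100 is zero below that and the modulus would then be undefined.

// throttle_sketch.cpp -- hypothetical demo of the logging cadence used above
#include <cstddef>
#include <iostream>

int main() {
  const std::size_t max_iterations = 1000;  // assumed >= 100 (see note above)
  for (std::size_t i = 0; i < max_iterations; i++) {
    if ((i % (max_iterations / 100)) == 0)  // true ~100 times across the run
      std::cout << "iteration " << i << "\n";
  }
  return 0;
}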

View File

@@ -11,9 +11,12 @@
 #include <vector>
 #include <algorithm>
 #include "microunit.h"
+#include "easylogging++.h"
 
+INITIALIZE_EASYLOGGINGPP
+
 UNIT(LearnAND) {
-  std::cout << "Train AND function with mlp." << std::endl;
+  LOG(INFO) << "Train AND function with mlp." << std::endl;
   std::vector<TrainingSample> training_set =
   {
@@ -49,12 +52,11 @@ UNIT(LearnAND) {
       ASSERT_TRUE(predicted_output == correct_output);
     }
   }
-  std::cout << "Trained with success." << std::endl;
-  std::cout << std::endl;
+  LOG(INFO) << "Trained with success." << std::endl;
 }
 
 UNIT(LearnNAND) {
-  std::cout << "Train NAND function with mlp." << std::endl;
+  LOG(INFO) << "Train NAND function with mlp." << std::endl;
   std::vector<TrainingSample> training_set =
   {
@@ -90,12 +92,11 @@ UNIT(LearnNAND) {
       ASSERT_TRUE(predicted_output == correct_output);
     }
   }
-  std::cout << "Trained with success." << std::endl;
-  std::cout << std::endl;
+  LOG(INFO) << "Trained with success." << std::endl;
 }
 
 UNIT(LearnOR) {
-  std::cout << "Train OR function with mlp." << std::endl;
+  LOG(INFO) << "Train OR function with mlp." << std::endl;
   std::vector<TrainingSample> training_set =
   {
@@ -131,12 +132,11 @@ UNIT(LearnOR) {
       ASSERT_TRUE(predicted_output == correct_output);
     }
   }
-  std::cout << "Trained with success." << std::endl;
-  std::cout << std::endl;
+  LOG(INFO) << "Trained with success." << std::endl;
 }
 
 UNIT(LearnNOR) {
-  std::cout << "Train NOR function with mlp." << std::endl;
+  LOG(INFO) << "Train NOR function with mlp." << std::endl;
   std::vector<TrainingSample> training_set =
   {
@@ -172,12 +172,11 @@ UNIT(LearnNOR) {
      ASSERT_TRUE(predicted_output == correct_output);
     }
   }
-  std::cout << "Trained with success." << std::endl;
-  std::cout << std::endl;
+  LOG(INFO) << "Trained with success." << std::endl;
 }
 
 UNIT(LearnXOR) {
-  std::cout << "Train XOR function with mlp." << std::endl;
+  LOG(INFO) << "Train XOR function with mlp." << std::endl;
   std::vector<TrainingSample> training_set =
   {
@@ -211,12 +210,11 @@ UNIT(LearnXOR) {
       ASSERT_TRUE(predicted_output == correct_output);
     }
   }
-  std::cout << "Trained with success." << std::endl;
-  std::cout << std::endl;
+  LOG(INFO) << "Trained with success." << std::endl;
 }
 
 UNIT(LearnNOT) {
-  std::cout << "Train NOT function with mlp." << std::endl;
+  LOG(INFO) << "Train NOT function with mlp." << std::endl;
   std::vector<TrainingSample> training_set =
   {
@@ -248,12 +246,11 @@ UNIT(LearnNOT) {
       ASSERT_TRUE(predicted_output == correct_output);
     }
   }
-  std::cout << "Trained with success." << std::endl;
-  std::cout << std::endl;
+  LOG(INFO) << "Trained with success." << std::endl;
 }
 
 UNIT(LearnX1) {
-  std::cout << "Train X1 function with mlp." << std::endl;
+  LOG(INFO) << "Train X1 function with mlp." << std::endl;
   std::vector<TrainingSample> training_set =
   {
@@ -287,12 +284,11 @@ UNIT(LearnX1) {
       ASSERT_TRUE(predicted_output == correct_output);
     }
   }
-  std::cout << "Trained with success." << std::endl;
-  std::cout << std::endl;
+  LOG(INFO) << "Trained with success." << std::endl;
 }
 
 UNIT(LearnX2) {
-  std::cout << "Train X2 function with mlp." << std::endl;
+  LOG(INFO) << "Train X2 function with mlp." << std::endl;
   std::vector<TrainingSample> training_set =
   {
@@ -326,11 +322,11 @@ UNIT(LearnX2) {
       ASSERT_TRUE(predicted_output == correct_output);
     }
   }
-  std::cout << "Trained with success." << std::endl;
-  std::cout << std::endl;
+  LOG(INFO) << "Trained with success." << std::endl;
 }
 
-int main() {
+int main(int argc, char* argv[]) {
+  START_EASYLOGGINGPP(argc, argv);
   microunit::UnitTester::Run();
   return 0;
 }

View File

@@ -13,6 +13,9 @@
 #include <vector>
 #include <algorithm>
 #include "microunit.h"
+#include "easylogging++.h"
 
+INITIALIZE_EASYLOGGINGPP
+
 namespace {
 void Train(Node & node,
@@ -27,10 +30,10 @@ void Train(Node & node,
             use_constant_weight_init,
             constant_weight_init);
 
-  std::cout << "Starting weights:\t";
-  for (auto m_weightselement : node.GetWeights())
-    std::cout << m_weightselement << "\t";
-  std::cout << std::endl;
+  //std::cout << "Starting weights:\t";
+  //for (auto m_weightselement : node.GetWeights())
+  //  std::cout << m_weightselement << "\t";
+  //std::cout << std::endl;
 
   for (int i = 0; i < max_iterations; i++) {
     int error_count = 0;
@@ -49,15 +52,15 @@ void Train(Node & node,
     if (error_count == 0) break;
   }
 
-  std::cout << "Final weights:\t\t";
-  for (auto m_weightselement : node.GetWeights())
-    std::cout << m_weightselement << "\t";
-  std::cout << std::endl;
+  //std::cout << "Final weights:\t\t";
+  //for (auto m_weightselement : node.GetWeights())
+  //  std::cout << m_weightselement << "\t";
+  //std::cout << std::endl;
 };
 }
 
 UNIT(LearnAND) {
-  std::cout << "Train AND function with Node." << std::endl;
+  LOG(INFO) << "Train AND function with Node." << std::endl;
   std::vector<TrainingSample> training_set =
   {
@@ -86,12 +89,11 @@ UNIT(LearnAND) {
     bool correct_output = training_sample.output_vector()[0] > 0.5 ? true : false;
     ASSERT_TRUE(class_id == correct_output);
   }
-  std::cout << "Trained with success." << std::endl;
-  std::cout << std::endl;
+  LOG(INFO) << "Trained with success." << std::endl;
 }
 
 UNIT(LearnNAND) {
-  std::cout << "Train NAND function with Node." << std::endl;
+  LOG(INFO) << "Train NAND function with Node." << std::endl;
   std::vector<TrainingSample> training_set =
   {
@@ -119,12 +121,11 @@ UNIT(LearnNAND) {
     bool correct_output = training_sample.output_vector()[0] > 0.5 ? true : false;
     ASSERT_TRUE(class_id == correct_output);
   }
-  std::cout << "Trained with success." << std::endl;
-  std::cout << std::endl;
+  LOG(INFO) << "Trained with success." << std::endl;
 }
 
 UNIT(LearnOR) {
-  std::cout << "Train OR function with Node." << std::endl;
+  LOG(INFO) << "Train OR function with Node." << std::endl;
   std::vector<TrainingSample> training_set =
   {
@@ -152,11 +153,10 @@ UNIT(LearnOR) {
     bool correct_output = training_sample.output_vector()[0] > 0.5 ? true : false;
     ASSERT_TRUE(class_id == correct_output);
   }
-  std::cout << "Trained with success." << std::endl;
-  std::cout << std::endl;
+  LOG(INFO) << "Trained with success." << std::endl;
 }
 
 UNIT(LearnNOR) {
-  std::cout << "Train NOR function with Node." << std::endl;
+  LOG(INFO) << "Train NOR function with Node." << std::endl;
   std::vector<TrainingSample> training_set =
   {
@@ -184,12 +184,11 @@ UNIT(LearnNOR) {
     bool correct_output = training_sample.output_vector()[0] > 0.5 ? true : false;
     ASSERT_TRUE(class_id == correct_output);
   }
-  std::cout << "Trained with success." << std::endl;
-  std::cout << std::endl;
+  LOG(INFO) << "Trained with success." << std::endl;
 }
 
 UNIT(LearnNOT) {
-  std::cout << "Train NOT function with Node." << std::endl;
+  LOG(INFO) << "Train NOT function with Node." << std::endl;
   std::vector<TrainingSample> training_set =
   {
@@ -215,12 +214,11 @@ UNIT(LearnNOT) {
     bool correct_output = training_sample.output_vector()[0] > 0.5 ? true : false;
     ASSERT_TRUE(class_id == correct_output);
   }
-  std::cout << "Trained with success." << std::endl;
-  std::cout << std::endl;
+  LOG(INFO) << "Trained with success." << std::endl;
 }
 
 UNIT(LearnXOR) {
-  std::cout << "Train XOR function with Node." << std::endl;
+  LOG(INFO) << "Train XOR function with Node." << std::endl;
   std::vector<TrainingSample> training_set =
   {
@@ -247,16 +245,16 @@ UNIT(LearnXOR) {
     my_node.GetBooleanOutput(training_sample.input_vector(), &class_id, 0.5);
     bool correct_output = training_sample.output_vector()[0] > 0.5 ? true : false;
     if (class_id != correct_output) {
-      std::cout << "Failed to train. " <<
+      LOG(WARNING) << "Failed to train. " <<
         " A simple perceptron cannot learn the XOR function." << std::endl;
       FAIL();
     }
   }
-  std::cout << "Trained with success." << std::endl;
-  std::cout << std::endl;
+  LOG(INFO) << "Trained with success." << std::endl;
 }
 
-int main() {
+int main(int argc, char* argv[]) {
+  START_EASYLOGGINGPP(argc, argv);
   microunit::UnitTester::Run();
   return 0;
 }

View File

@@ -18,12 +18,11 @@
 #include <algorithm>
 #include <iostream>
-#include <sstream>
 #include <iterator>
 #include <vector>
 #include <cmath>
 
 namespace utils {
 struct gen_rand {