From e5eb1bbf9df7ba95fe819c5a612f5aa33fe6bbf7 Mon Sep 17 00:00:00 2001 From: rluna Date: Thu, 3 Jan 2019 16:37:50 +0100 Subject: [PATCH 1/4] fix of some compiler warnings: comparison between unsigned int and int --- src/Layer.h | 6 +++--- src/MLP.cpp | 11 ++++++----- src/MLP.h | 3 ++- src/MLPTest.cpp | 26 +++++++++----------------- src/NodeTest.cpp | 8 +------- src/Sample.h | 8 ++++---- src/Utils.h | 6 +++--- 7 files changed, 28 insertions(+), 40 deletions(-) diff --git a/src/Layer.h b/src/Layer.h index f83e369..462a18c 100644 --- a/src/Layer.h +++ b/src/Layer.h @@ -124,7 +124,7 @@ public: fwrite(&str_size, sizeof(size_t), 1, file); fwrite(m_activation_function_str.c_str(), sizeof(char), str_size, file); - for (int i = 0; i < m_nodes.size(); i++) { + for (size_t i = 0; i < m_nodes.size(); i++) { m_nodes[i].SaveNode(file); } }; @@ -149,7 +149,7 @@ public: m_deriv_activation_function = (*pair).second; m_nodes.resize(m_num_nodes); - for (int i = 0; i < m_nodes.size(); i++) { + for (size_t i = 0; i < m_nodes.size(); i++) { m_nodes[i].LoadNode(file); } @@ -165,4 +165,4 @@ protected: std::function m_deriv_activation_function; }; -#endif //LAYER_H \ No newline at end of file +#endif //LAYER_H diff --git a/src/MLP.cpp b/src/MLP.cpp index 2de9853..dfdd56b 100644 --- a/src/MLP.cpp +++ b/src/MLP.cpp @@ -152,8 +152,9 @@ void MLP::Train(const std::vector &training_sample_set_with_bias int max_iterations, double min_error_cost, bool output_log) { - int num_examples = training_sample_set_with_bias.size(); - int num_features = training_sample_set_with_bias[0].GetInputVectorSize(); + //rlunaro.03/01/2019. the compiler says that these variables are unused + //int num_examples = training_sample_set_with_bias.size(); + //int num_features = training_sample_set_with_bias[0].GetInputVectorSize(); //{ // int layer_i = -1; @@ -174,7 +175,7 @@ void MLP::Train(const std::vector &training_sample_set_with_bias // } //} - size_t i = 0; + int i = 0; double current_iteration_cost_function = 0.0; for (i = 0; i < max_iterations; i++) { @@ -199,7 +200,7 @@ void MLP::Train(const std::vector &training_sample_set_with_bias temp_training << training_sample_with_bias << "\t\t"; temp_training << "Predicted output: ["; - for (int i = 0; i < predicted_output.size(); i++) { + for (size_t i = 0; i < predicted_output.size(); i++) { if (i != 0) temp_training << ", "; temp_training << predicted_output[i]; @@ -210,7 +211,7 @@ void MLP::Train(const std::vector &training_sample_set_with_bias } - for (int j = 0; j < predicted_output.size(); j++) { + for (size_t j = 0; j < predicted_output.size(); j++) { current_iteration_cost_function += (std::pow)((correct_output[j] - predicted_output[j]), 2); deriv_error_output[j] = diff --git a/src/MLP.h b/src/MLP.h index d4ecbea..638721b 100644 --- a/src/MLP.h +++ b/src/MLP.h @@ -40,6 +40,7 @@ public: int max_iterations = 5000, double min_error_cost = 0.001, bool output_log = true); + protected: void UpdateWeights(const std::vector> & all_layers_activations, const std::vector &error, @@ -56,4 +57,4 @@ private: std::vector m_layers; }; -#endif //MLP_H \ No newline at end of file +#endif //MLP_H diff --git a/src/MLPTest.cpp b/src/MLPTest.cpp index bec06d2..aaaf2f9 100644 --- a/src/MLPTest.cpp +++ b/src/MLPTest.cpp @@ -36,7 +36,6 @@ UNIT(LearnAND) { } } - size_t num_examples = training_sample_set_with_bias.size(); size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize(); size_t num_outputs = training_sample_set_with_bias[0].GetOutputVectorSize(); MLP my_mlp({ num_features, 2 ,num_outputs 
}, { "sigmoid", "linear" }); @@ -46,7 +45,7 @@ UNIT(LearnAND) { for (const auto & training_sample : training_sample_set_with_bias) { std::vector output; my_mlp.GetOutput(training_sample.input_vector(), &output); - for (int i = 0; i < num_outputs; i++) { + for (size_t i = 0; i < num_outputs; i++) { bool predicted_output = output[i] > 0.5 ? true : false; bool correct_output = training_sample.output_vector()[i] > 0.5 ? true : false; ASSERT_TRUE(predicted_output == correct_output); @@ -76,7 +75,6 @@ UNIT(LearnNAND) { } } - size_t num_examples = training_sample_set_with_bias.size(); size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize(); size_t num_outputs = training_sample_set_with_bias[0].GetOutputVectorSize(); MLP my_mlp({ num_features, 2 ,num_outputs }, { "sigmoid", "linear" }); @@ -86,7 +84,7 @@ UNIT(LearnNAND) { for (const auto & training_sample : training_sample_set_with_bias) { std::vector output; my_mlp.GetOutput(training_sample.input_vector(), &output); - for (int i = 0; i < num_outputs; i++) { + for (size_t i = 0; i < num_outputs; i++) { bool predicted_output = output[i] > 0.5 ? true : false; bool correct_output = training_sample.output_vector()[i] > 0.5 ? true : false; ASSERT_TRUE(predicted_output == correct_output); @@ -116,7 +114,6 @@ UNIT(LearnOR) { } } - size_t num_examples = training_sample_set_with_bias.size(); size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize(); size_t num_outputs = training_sample_set_with_bias[0].GetOutputVectorSize(); MLP my_mlp({ num_features, 2 ,num_outputs }, { "sigmoid", "linear" }); @@ -126,7 +123,7 @@ UNIT(LearnOR) { for (const auto & training_sample : training_sample_set_with_bias) { std::vector output; my_mlp.GetOutput(training_sample.input_vector(), &output); - for (int i = 0; i < num_outputs; i++) { + for (size_t i = 0; i < num_outputs; i++) { bool predicted_output = output[i] > 0.5 ? true : false; bool correct_output = training_sample.output_vector()[i] > 0.5 ? true : false; ASSERT_TRUE(predicted_output == correct_output); @@ -156,7 +153,6 @@ UNIT(LearnNOR) { } } - size_t num_examples = training_sample_set_with_bias.size(); size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize(); size_t num_outputs = training_sample_set_with_bias[0].GetOutputVectorSize(); MLP my_mlp({ num_features, 2 ,num_outputs }, { "sigmoid", "linear" }); @@ -166,7 +162,7 @@ UNIT(LearnNOR) { for (const auto & training_sample : training_sample_set_with_bias) { std::vector output; my_mlp.GetOutput(training_sample.input_vector(), &output); - for (int i = 0; i < num_outputs; i++) { + for (size_t i = 0; i < num_outputs; i++) { bool predicted_output = output[i] > 0.5 ? true : false; bool correct_output = training_sample.output_vector()[i] > 0.5 ? true : false; ASSERT_TRUE(predicted_output == correct_output); @@ -194,7 +190,6 @@ UNIT(LearnXOR) { } } - size_t num_examples = training_sample_set_with_bias.size(); size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize(); size_t num_outputs = training_sample_set_with_bias[0].GetOutputVectorSize(); MLP my_mlp({ num_features, 2 ,num_outputs }, { "sigmoid", "linear" }); @@ -204,7 +199,7 @@ UNIT(LearnXOR) { for (const auto & training_sample : training_sample_set_with_bias) { std::vector output; my_mlp.GetOutput(training_sample.input_vector(), &output); - for (int i = 0; i < num_outputs; i++) { + for (size_t i = 0; i < num_outputs; i++) { bool predicted_output = output[i] > 0.5 ? true : false; bool correct_output = training_sample.output_vector()[i] > 0.5 ? 
true : false; ASSERT_TRUE(predicted_output == correct_output); @@ -230,7 +225,6 @@ UNIT(LearnNOT) { } } - size_t num_examples = training_sample_set_with_bias.size(); size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize(); size_t num_outputs = training_sample_set_with_bias[0].GetOutputVectorSize(); MLP my_mlp({ num_features, 2 ,num_outputs }, { "sigmoid", "linear" }); @@ -240,7 +234,7 @@ UNIT(LearnNOT) { for (const auto & training_sample : training_sample_set_with_bias) { std::vector output; my_mlp.GetOutput(training_sample.input_vector(), &output); - for (int i = 0; i < num_outputs; i++) { + for (size_t i = 0; i < num_outputs; i++) { bool predicted_output = output[i] > 0.5 ? true : false; bool correct_output = training_sample.output_vector()[i] > 0.5 ? true : false; ASSERT_TRUE(predicted_output == correct_output); @@ -268,7 +262,6 @@ UNIT(LearnX1) { } } - size_t num_examples = training_sample_set_with_bias.size(); size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize(); size_t num_outputs = training_sample_set_with_bias[0].GetOutputVectorSize(); MLP my_mlp({ num_features, 2 ,num_outputs }, { "sigmoid", "linear" }); @@ -278,7 +271,7 @@ UNIT(LearnX1) { for (const auto & training_sample : training_sample_set_with_bias) { std::vector output; my_mlp.GetOutput(training_sample.input_vector(), &output); - for (int i = 0; i < num_outputs; i++) { + for (size_t i = 0; i < num_outputs; i++) { bool predicted_output = output[i] > 0.5 ? true : false; bool correct_output = training_sample.output_vector()[i] > 0.5 ? true : false; ASSERT_TRUE(predicted_output == correct_output); @@ -306,7 +299,6 @@ UNIT(LearnX2) { } } - size_t num_examples = training_sample_set_with_bias.size(); size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize(); size_t num_outputs = training_sample_set_with_bias[0].GetOutputVectorSize(); MLP my_mlp({ num_features, 2 ,num_outputs }, { "sigmoid", "linear" }); @@ -316,7 +308,7 @@ UNIT(LearnX2) { for (const auto & training_sample : training_sample_set_with_bias) { std::vector output; my_mlp.GetOutput(training_sample.input_vector(), &output); - for (int i = 0; i < num_outputs; i++) { + for (size_t i = 0; i < num_outputs; i++) { bool predicted_output = output[i] > 0.5 ? true : false; bool correct_output = training_sample.output_vector()[i] > 0.5 ? 
true : false; ASSERT_TRUE(predicted_output == correct_output); @@ -329,4 +321,4 @@ int main(int argc, char* argv[]) { START_EASYLOGGINGPP(argc, argv); microunit::UnitTester::Run(); return 0; -} \ No newline at end of file +} diff --git a/src/NodeTest.cpp b/src/NodeTest.cpp index 70e379b..a3e55e4 100644 --- a/src/NodeTest.cpp +++ b/src/NodeTest.cpp @@ -81,7 +81,6 @@ UNIT(LearnAND) { } } - size_t num_examples = training_sample_set_with_bias.size(); size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize(); Node my_node(num_features); Train(my_node, training_sample_set_with_bias, 0.1, 100); @@ -116,7 +115,6 @@ UNIT(LearnNAND) { training_sample_with_bias.AddBiasValue(1); } } - size_t num_examples = training_sample_set_with_bias.size(); size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize(); Node my_node(num_features); Train(my_node, training_sample_set_with_bias, 0.1, 100); @@ -151,7 +149,6 @@ UNIT(LearnOR) { training_sample_with_bias.AddBiasValue(1); } } - size_t num_examples = training_sample_set_with_bias.size(); size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize(); Node my_node(num_features); Train(my_node, training_sample_set_with_bias, 0.1, 100); @@ -185,7 +182,6 @@ UNIT(LearnNOR) { training_sample_with_bias.AddBiasValue(1); } } - size_t num_examples = training_sample_set_with_bias.size(); size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize(); Node my_node(num_features); Train(my_node, training_sample_set_with_bias, 0.1, 100); @@ -218,7 +214,6 @@ UNIT(LearnNOT) { training_sample_with_bias.AddBiasValue(1); } } - size_t num_examples = training_sample_set_with_bias.size(); size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize(); Node my_node(num_features); Train(my_node, training_sample_set_with_bias, 0.1, 100); @@ -253,7 +248,6 @@ UNIT(LearnXOR) { training_sample_with_bias.AddBiasValue(1); } } - size_t num_examples = training_sample_set_with_bias.size(); size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize(); Node my_node(num_features); Train(my_node, training_sample_set_with_bias, 0.1, 100); @@ -278,4 +272,4 @@ int main(int argc, char* argv[]) { START_EASYLOGGINGPP(argc, argv); microunit::UnitTester::Run(); return 0; -} \ No newline at end of file +} diff --git a/src/Sample.h b/src/Sample.h index 8b4522a..449f068 100644 --- a/src/Sample.h +++ b/src/Sample.h @@ -30,7 +30,7 @@ public: protected: virtual void PrintMyself(std::ostream& stream) const { stream << "Input vector: ["; - for (int i = 0; i < m_input_vector.size(); i++) { + for (size_t i = 0; i < m_input_vector.size(); i++) { if (i != 0) stream << ", "; stream << m_input_vector[i]; @@ -59,7 +59,7 @@ public: protected: virtual void PrintMyself(std::ostream& stream) const { stream << "Input vector: ["; - for (int i = 0; i < m_input_vector.size(); i++) { + for (size_t i = 0; i < m_input_vector.size(); i++) { if (i != 0) stream << ", "; stream << m_input_vector[i]; @@ -69,7 +69,7 @@ protected: stream << "; "; stream << "Output vector: ["; - for (int i = 0; i < m_output_vector.size(); i++) { + for (size_t i = 0; i < m_output_vector.size(); i++) { if (i != 0) stream << ", "; stream << m_output_vector[i]; @@ -81,4 +81,4 @@ protected: }; -#endif // TRAININGSAMPLE_H \ No newline at end of file +#endif // TRAININGSAMPLE_H diff --git a/src/Utils.h b/src/Utils.h index 1edeb38..9a8eb30 100644 --- a/src/Utils.h +++ b/src/Utils.h @@ -110,11 +110,11 @@ inline void Softmax(std::vector *output) { size_t num_elements = 
output->size();
   std::vector<double> exp_output(num_elements);
   double exp_total = 0.0;
-  for (int i = 0; i < num_elements; i++) {
+  for (size_t i = 0; i < num_elements; i++) {
     exp_output[i] = exp((*output)[i]);
     exp_total += exp_output[i];
   }
-  for (int i = 0; i < num_elements; i++) {
+  for (size_t i = 0; i < num_elements; i++) {
     (*output)[i] = exp_output[i] / exp_total;
   }
 }
@@ -125,4 +125,4 @@ inline void GetIdMaxElement(const std::vector &output, size_t * class_i
                                          output.end()));
   }
 }
-#endif // UTILS_H
\ No newline at end of file
+#endif // UTILS_H

From aa3453baa84ea0bf72da6293203632f8437d9db0 Mon Sep 17 00:00:00 2001
From: rluna
Date: Fri, 4 Jan 2019 07:37:54 +0100
Subject: [PATCH 2/4] fix the clean command in the Makefile

---
 .gitignore | 8 ++++++++
 Makefile   | 6 +++---
 2 files changed, 11 insertions(+), 3 deletions(-)

diff --git a/.gitignore b/.gitignore
index 6442841..1b86f5d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -214,3 +214,11 @@ _Pvt_Extensions/
 ModelManifest.xml
 
 /build
+/.cproject
+/.project
+/IrisDatasetTest
+/LayerTest
+/MLPTest
+/NodeTest
+/mlp.a
+/mlp.so
diff --git a/Makefile b/Makefile
index caf8dae..9434e80 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 #!/bin/bash
 # Makefile for MLP
 CC = g++
-DEBUG = -g
+DEBUG = -g3
 
 PROJNAME = mlp
 HEADERPATH = ./src
@@ -14,7 +14,7 @@ AUXLIBS =
 INCLUDES = -I$(LOCALDEPSINCLUDES) -I$(AUXINCLUDES)
 LIBS = -L$(AUXLIBS)
 #LIBS += -L/usr/local/lib/
-CFLAGS = -std=gnu++11 -std=c++11 -O3 -Wall -fmessage-length=0 -fPIC $(INCLUDES)
+CFLAGS = -std=gnu++11 -std=c++11 -Wall -O3 -fmessage-length=0 -fPIC $(INCLUDES)
 CFLAGS += $(DEBUG)
 LFLAGS = $(LIBS)
 #For verbosity
@@ -59,7 +59,7 @@ NodeTest: $(SOURCEPATH)/NodeTest.o $(SOURCEPATH)/MLP.o
 	$(CC) $^ $(CFLAGS) $(LFLAGS) -o $@
 clean:
 	@echo Clean
-	rm -f *~ *.o *~
+	rm -f *~ $(SOURCEPATH)/*.o *~
 	@echo Success
 
 cleanall:

From f152b3030b332a00e537d8858df5347c6579fd4d Mon Sep 17 00:00:00 2001
From: rluna
Date: Fri, 4 Jan 2019 17:24:37 +0100
Subject: [PATCH 3/4] added possibility to change the internal weights of the
 network by directly assigning the values

---
 .settings/.gitignore |  1 +
 Makefile             |  3 ++-
 src/.gitignore       |  5 ++++
 src/Layer.h          | 24 +++++++++++++++++
 src/MLP.cpp          | 43 ++++++++++++++++++++++++++---
 src/MLP.h            |  4 +++
 src/MLPTest.cpp      | 64 ++++++++++++++++++++++++++++++++++++++++++++
 src/Node.h           | 11 +++++++-
 8 files changed, 149 insertions(+), 6 deletions(-)
 create mode 100644 .settings/.gitignore
 create mode 100644 src/.gitignore

diff --git a/.settings/.gitignore b/.settings/.gitignore
new file mode 100644
index 0000000..d81d4c4
--- /dev/null
+++ b/.settings/.gitignore
@@ -0,0 +1 @@
+/language.settings.xml
diff --git a/Makefile b/Makefile
index 9434e80..1131947 100644
--- a/Makefile
+++ b/Makefile
@@ -14,7 +14,8 @@ AUXLIBS =
 INCLUDES = -I$(LOCALDEPSINCLUDES) -I$(AUXINCLUDES)
 LIBS = -L$(AUXLIBS)
 #LIBS += -L/usr/local/lib/
-CFLAGS = -std=gnu++11 -std=c++11 -Wall -O3 -fmessage-length=0 -fPIC $(INCLUDES)
+#rlunaro: removed optimization for tests: -O3
+CFLAGS = -std=gnu++11 -std=c++11 -Wall -fmessage-length=0 -fPIC $(INCLUDES)
 CFLAGS += $(DEBUG)
 LFLAGS = $(LIBS)
 #For verbosity
diff --git a/src/.gitignore b/src/.gitignore
new file mode 100644
index 0000000..f010bd6
--- /dev/null
+++ b/src/.gitignore
@@ -0,0 +1,5 @@
+/IrisDatasetTest.o
+/LayerTest.o
+/MLP.o
+/MLPTest.o
+/NodeTest.o
diff --git a/src/Layer.h b/src/Layer.h
index 462a18c..a6efa01 100644
--- a/src/Layer.h
+++ b/src/Layer.h
@@ -68,6 +68,14 @@ public:
     return m_nodes;
   }
 
+  /**
+   * Return the internal list of nodes, but modifiable.
+   */
+  std::vector<Node> & GetNodesChangeable() {
+    return m_nodes;
+  }
+
+
   void GetOutputAfterActivationFunction(const std::vector<double> &input,
                                         std::vector<double> * output) const {
     assert(input.size() == m_num_inputs_per_node);
@@ -116,6 +124,22 @@ public:
 
   };
 
+  void SetWeights( std::vector<std::vector<double>> & weights )
+  {
+    if( 0 <= weights.size() && weights.size() <= m_num_nodes )
+    {
+      // traverse the list of nodes
+      size_t node_i = 0;
+      for( Node & node : m_nodes )
+      {
+        node.SetWeights( weights[node_i] );
+        node_i++;
+      }
+    }
+    else
+      throw new std::logic_error("Incorrect layer number in SetWeights call");
+  };
+
   void SaveLayer(FILE * file) const {
     fwrite(&m_num_nodes, sizeof(m_num_nodes), 1, file);
     fwrite(&m_num_inputs_per_node, sizeof(m_num_inputs_per_node), 1, file);
diff --git a/src/MLP.cpp b/src/MLP.cpp
index dfdd56b..ca54320 100644
--- a/src/MLP.cpp
+++ b/src/MLP.cpp
@@ -48,7 +48,7 @@ void MLP::CreateMLP(const std::vector & layers_nodes,
   m_num_outputs = m_layers_nodes[m_layers_nodes.size() - 1];
   m_num_hidden_layers = m_layers_nodes.size() - 2;
 
-  for (int i = 0; i < m_layers_nodes.size() - 1; i++) {
+  for (size_t i = 0; i < m_layers_nodes.size() - 1; i++) {
     m_layers.emplace_back(Layer(m_layers_nodes[i],
                                 m_layers_nodes[i + 1],
                                 layers_activfuncs[i],
@@ -65,7 +65,7 @@ void MLP::SaveMLPNetwork(const std::string & filename)const {
   fwrite(&m_num_hidden_layers, sizeof(m_num_hidden_layers), 1, file);
   if (!m_layers_nodes.empty())
     fwrite(&m_layers_nodes[0], sizeof(m_layers_nodes[0]), m_layers_nodes.size(), file);
-  for (int i = 0; i < m_layers.size(); i++) {
+  for (size_t i = 0; i < m_layers.size(); i++) {
     m_layers[i].SaveLayer(file);
   }
   fclose(file);
@@ -83,7 +83,7 @@ void MLP::LoadMLPNetwork(const std::string & filename) {
   if (!m_layers_nodes.empty())
     fread(&m_layers_nodes[0], sizeof(m_layers_nodes[0]), m_layers_nodes.size(), file);
   m_layers.resize(m_layers_nodes.size() - 1);
-  for (int i = 0; i < m_layers.size(); i++) {
+  for (size_t i = 0; i < m_layers.size(); i++) {
     m_layers[i].LoadLayer(file);
   }
   fclose(file);
@@ -103,7 +103,7 @@ void MLP::GetOutput(const std::vector &input,
   std::vector<double> temp_out(temp_size, 0.0);
   temp_in = input;
 
-  for (int i = 0; i < m_layers.size(); ++i) {
+  for (size_t i = 0; i < m_layers.size(); ++i) {
     if (i > 0) {
       //Store this layer activation
       if (all_layers_activations != nullptr)
@@ -260,3 +260,38 @@ void MLP::Train(const std::vector &training_sample_set_with_bias
 
 };
 
+size_t MLP::GetNumLayers()
+{
+  return m_layers.size();
+}
+
+std::vector<std::vector<double>> MLP::GetLayerWeights( size_t layer_i )
+{
+  std::vector<std::vector<double>> ret_val;
+  // check parameters
+  if( 0 <= layer_i && layer_i < m_layers.size() )
+  {
+    Layer current_layer = m_layers[layer_i];
+    for( Node & node : current_layer.GetNodesChangeable() )
+    {
+      ret_val.push_back( node.GetWeights() );
+    }
+    return ret_val;
+  }
+  else
+    throw new std::logic_error("Incorrect layer number in GetLayerWeights call");
+
+}
+
+void MLP::SetLayerWeights( size_t layer_i, std::vector<std::vector<double>> & weights )
+{
+  // check parameters
+  if( 0 <= layer_i && layer_i < m_layers.size() )
+  {
+    m_layers[layer_i].SetWeights( weights );
+  }
+  else
+    throw new std::logic_error("Incorrect layer number in SetLayerWeights call");
+}
+
+
diff --git a/src/MLP.h b/src/MLP.h
index 638721b..2a1cc5e 100644
--- a/src/MLP.h
+++ b/src/MLP.h
@@ -16,6 +16,7 @@
 #include
 #include
 #include
+#include
 
 class MLP {
 public:
@@ -40,6 +41,9 @@ public:
            int max_iterations = 5000,
            double min_error_cost = 0.001,
            bool output_log = true);
+  size_t GetNumLayers();
+  std::vector<std::vector<double>> GetLayerWeights( size_t layer_i );
+  void
SetLayerWeights( size_t layer_i, std::vector> & weights ); protected: void UpdateWeights(const std::vector> & all_layers_activations, diff --git a/src/MLPTest.cpp b/src/MLPTest.cpp index aaaf2f9..ad237f7 100644 --- a/src/MLPTest.cpp +++ b/src/MLPTest.cpp @@ -317,6 +317,70 @@ UNIT(LearnX2) { LOG(INFO) << "Trained with success." << std::endl; } + + +UNIT(GetWeightsSetWeights) { + LOG(INFO) << "Train X2 function, read internal weights" << std::endl; + + std::vector training_set = + { + { { 0, 0 },{ 0.0 } }, + { { 0, 1 },{ 1.0 } }, + { { 1, 0 },{ 0.0 } }, + { { 1, 1 },{ 1.0 } } + }; + bool bias_already_in = false; + std::vector training_sample_set_with_bias(training_set); + //set up bias + if (!bias_already_in) { + for (auto & training_sample_with_bias : training_sample_set_with_bias) { + training_sample_with_bias.AddBiasValue(1); + } + } + + size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize(); + size_t num_outputs = training_sample_set_with_bias[0].GetOutputVectorSize(); + MLP my_mlp({ num_features, 2, num_outputs }, { "sigmoid", "linear" }); + //Train MLP + my_mlp.Train(training_sample_set_with_bias, 0.5, 500, 0.25); + + // get layer weights + std::vector> weights = my_mlp.GetLayerWeights( 1 ); + + // the expected value of the internal weights + // after training are 1.65693 -0.538749 + ASSERT_TRUE( 1.6 <= weights[0][0] && weights[0][0] <= 1.7 ); + ASSERT_TRUE( -0.6 <= weights[0][1] && weights[0][1] <= -0.5 ); + + // now, we are going to inject a weight value of 0.0 + // and check that the new output value is nonsense + std::vector> zeroWeights = { { 0.0, 0.0 } }; + + my_mlp.SetLayerWeights( 1, zeroWeights ); + + /* + * + * PREDICTED OUTPUT IS NOW: 0.335394 +PREDICTED OUTPUT IS NOW: 1.13887 +PREDICTED OUTPUT IS NOW: 0.180468 +PREDICTED OUTPUT IS NOW: 1.00535 + * + */ + for (const auto & training_sample : training_sample_set_with_bias) { + std::vector output; + my_mlp.GetOutput(training_sample.input_vector(), &output); + for (size_t i = 0; i < num_outputs; i++) { + bool predicted_output = output[i] > 0.5 ? true : false; + std::cout << "PREDICTED OUTPUT IS NOW: " << output[i] << std::endl; + bool correct_output = training_sample.output_vector()[i] > 0.5 ? true : false; + ASSERT_TRUE(predicted_output == correct_output); + } + } + LOG(INFO) << "Trained with success." 
<< std::endl; +} + + + int main(int argc, char* argv[]) { START_EASYLOGGINGPP(argc, argv); microunit::UnitTester::Run(); diff --git a/src/Node.h b/src/Node.h index 3e724f4..d145d51 100644 --- a/src/Node.h +++ b/src/Node.h @@ -15,6 +15,7 @@ #include #include #include // for assert() +#include #define CONSTANT_WEIGHT_INITIALIZATION 0 @@ -81,6 +82,14 @@ public: return m_weights; } + void SetWeights( std::vector & weights ){ + // check size of the weights vector + if( weights.size() == m_num_inputs ) + m_weights = weights; + else + throw new std::logic_error("Incorrect weight size in SetWeights call"); + } + size_t GetWeightsVectorSize() const { return m_weights.size(); } @@ -146,4 +155,4 @@ protected: std::vector m_weights; }; -#endif //NODE_H \ No newline at end of file +#endif //NODE_H From 72f2688c7d7241c63c7b83664601da62e8b6c44f Mon Sep 17 00:00:00 2001 From: rluna Date: Fri, 4 Jan 2019 17:47:13 +0100 Subject: [PATCH 4/4] finished of correcting "int" to "size_t" to avoid nasty errors and implement a test for SetWeights() function --- data/iris.mlp | Bin 421 -> 469 bytes src/Layer.h | 13 ++++++------- src/MLP.cpp | 2 ++ src/MLP.h | 9 ++++----- src/MLPTest.cpp | 25 +++++++++++++------------ src/Node.h | 5 ++--- src/Utils.h | 3 ++- 7 files changed, 29 insertions(+), 28 deletions(-) diff --git a/data/iris.mlp b/data/iris.mlp index 77badc2da12721a0e5660b9a588e5d83d871a918..dce3f72e12d1303cfd068e5e64babd6bd7990f89 100644 GIT binary patch literal 469 zcmZQ&fB4eQ<*9=LDIGxx5P^a0JQpKsm_|GfX6 zdE@#eTT~ph`Msrg+bLt%_w(q3vM0`;?1Pict-n5fY+p5{EMG>0$>9U1#$KBzd}wc>VC){tpx74xp>w{MqAY@C;#x dcd^W^Z&nH)@T-i@vuKiVIPW`!wc^~L{QxCriB13j literal 421 zcmZQ&U|?VdVn!fl1!53jfly#^7#}3Z4izuXOwY~FOo1r}iSF9@?LyGy=ldTthtJkL z_tm~O_W94*dFSoF=$t!RAN9`u(~RJGsasFmBkP%MnW@&oWqIJNx=EyX{k*>*rJ(3~Gi2VRCH z%2!Ff-;b>Jw`e$TiyF&;l*4sw=Y+TqI3= #include #include @@ -16,6 +13,8 @@ #include #include #include // for assert() +#include "Node.h" +#include "Utils.h" class Layer { public: @@ -82,7 +81,7 @@ public: output->resize(m_num_nodes); - for (int i = 0; i < m_num_nodes; ++i) { + for (size_t i = 0; i < m_num_nodes; ++i) { m_nodes[i].GetOutputAfterActivationFunction(input, m_activation_function, &((*output)[i])); @@ -111,7 +110,7 @@ public: dE_doj = deriv_error[i]; doj_dnetj = m_deriv_activation_function(net_sum); - for (int j = 0; j < m_num_inputs_per_node; j++) { + for (size_t j = 0; j < m_num_inputs_per_node; j++) { (*deltas)[j] += dE_doj * doj_dnetj * m_nodes[i].GetWeights()[j]; dnetj_dwij = input_layer_activation[j]; @@ -180,8 +179,8 @@ public: }; protected: - int m_num_inputs_per_node{ 0 }; - int m_num_nodes{ 0 }; + size_t m_num_inputs_per_node{ 0 }; + size_t m_num_nodes{ 0 }; std::vector m_nodes; std::string m_activation_function_str; diff --git a/src/MLP.cpp b/src/MLP.cpp index ca54320..4770edc 100644 --- a/src/MLP.cpp +++ b/src/MLP.cpp @@ -3,6 +3,7 @@ // Author : David Nogueira //============================================================================ #include "MLP.h" + #include #include #include @@ -10,6 +11,7 @@ #include #include #include + #include "easylogging++.h" diff --git a/src/MLP.h b/src/MLP.h index 2a1cc5e..551f0fe 100644 --- a/src/MLP.h +++ b/src/MLP.h @@ -5,10 +5,6 @@ #ifndef MLP_H #define MLP_H -#include "Layer.h" -#include "Sample.h" -#include "Utils.h" - #include #include #include @@ -17,6 +13,9 @@ #include #include #include +#include "Layer.h" +#include "Sample.h" +#include "Utils.h" class MLP { public: @@ -54,7 +53,7 @@ private: const std::vector & layers_activfuncs, bool use_constant_weight_init, double 
constant_weight_init = 0.5); - int m_num_inputs{ 0 }; + size_t m_num_inputs{ 0 }; int m_num_outputs{ 0 }; int m_num_hidden_layers{ 0 }; std::vector m_layers_nodes; diff --git a/src/MLPTest.cpp b/src/MLPTest.cpp index ad237f7..3fb0c07 100644 --- a/src/MLPTest.cpp +++ b/src/MLPTest.cpp @@ -347,6 +347,17 @@ UNIT(GetWeightsSetWeights) { // get layer weights std::vector> weights = my_mlp.GetLayerWeights( 1 ); + for (const auto & training_sample : training_sample_set_with_bias) { + std::vector output; + my_mlp.GetOutput(training_sample.input_vector(), &output); + for (size_t i = 0; i < num_outputs; i++) { + bool predicted_output = output[i] > 0.5 ? true : false; + std::cout << "PREDICTED OUTPUT IS NOW: " << output[i] << std::endl; + bool correct_output = training_sample.output_vector()[i] > 0.5 ? true : false; + ASSERT_TRUE(predicted_output == correct_output); + } + } + // the expected value of the internal weights // after training are 1.65693 -0.538749 ASSERT_TRUE( 1.6 <= weights[0][0] && weights[0][0] <= 1.7 ); @@ -358,24 +369,14 @@ UNIT(GetWeightsSetWeights) { my_mlp.SetLayerWeights( 1, zeroWeights ); - /* - * - * PREDICTED OUTPUT IS NOW: 0.335394 -PREDICTED OUTPUT IS NOW: 1.13887 -PREDICTED OUTPUT IS NOW: 0.180468 -PREDICTED OUTPUT IS NOW: 1.00535 - * - */ for (const auto & training_sample : training_sample_set_with_bias) { std::vector output; my_mlp.GetOutput(training_sample.input_vector(), &output); for (size_t i = 0; i < num_outputs; i++) { - bool predicted_output = output[i] > 0.5 ? true : false; - std::cout << "PREDICTED OUTPUT IS NOW: " << output[i] << std::endl; - bool correct_output = training_sample.output_vector()[i] > 0.5 ? true : false; - ASSERT_TRUE(predicted_output == correct_output); + ASSERT_TRUE( -0.0001L <= output[i] && output[i] <= 0.0001L ); } } + LOG(INFO) << "Trained with success." << std::endl; } diff --git a/src/Node.h b/src/Node.h index d145d51..2fea944 100644 --- a/src/Node.h +++ b/src/Node.h @@ -5,8 +5,6 @@ #ifndef NODE_H #define NODE_H -#include "Utils.h" - #include #include #include @@ -16,6 +14,7 @@ #include #include // for assert() #include +#include "Utils.h" #define CONSTANT_WEIGHT_INITIALIZATION 0 @@ -150,7 +149,7 @@ public: }; protected: - int m_num_inputs{ 0 }; + size_t m_num_inputs{ 0 }; double m_bias{ 0.0 }; std::vector m_weights; }; diff --git a/src/Utils.h b/src/Utils.h index 9a8eb30..229d04b 100644 --- a/src/Utils.h +++ b/src/Utils.h @@ -5,7 +5,6 @@ #ifndef UTILS_H #define UTILS_H -#include "Chrono.h" #include #include #include @@ -22,6 +21,8 @@ #include #include #include + +#include "Chrono.h" #ifdef _WIN32 #include #else
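
For reference, the warning that patches 1 and 4 silence comes from indexing a container with a signed int while comparing it against the unsigned size_t returned by size(). A minimal standalone sketch (not part of the patch series, shown only to illustrate the diagnostic) built with g++ -Wall -std=c++11:

#include <cstdio>
#include <vector>

int main() {
  std::vector<double> v{1.0, 2.0, 3.0};

  // g++ -Wall warns here: "comparison between signed and unsigned integer expressions"
  for (int i = 0; i < v.size(); i++) {
    std::printf("%f\n", v[i]);
  }

  // Fixed as in the patches: the index type matches the type of v.size().
  for (size_t i = 0; i < v.size(); i++) {
    std::printf("%f\n", v[i]);
  }
  return 0;
}

The opposite substitution appears in MLP::Train, where the loop counter becomes int because it is compared against the signed max_iterations parameter; the common thread is matching the index type to the bound it is compared with.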
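
The GetWeightsSetWeights test added in patch 3 and reworked in patch 4 also doubles as usage documentation for the new accessors. Below is a rough sketch of the same flow outside the unit-test harness; it assumes, as the surrounding code suggests, that the template arguments stripped from this diff are std::vector<std::vector<double>> for a layer's weights and std::vector<double> for a single node's weights. Treat it as an illustration, not as part of the patches.

#include <vector>
#include "MLP.h"
#include "Sample.h"

int main() {
  // Same X2 training data as the GetWeightsSetWeights unit test.
  std::vector<TrainingSample> samples = {
    { { 0, 0 }, { 0.0 } },
    { { 0, 1 }, { 1.0 } },
    { { 1, 0 }, { 0.0 } },
    { { 1, 1 }, { 1.0 } }
  };
  for (auto & sample : samples)
    sample.AddBiasValue(1);

  size_t num_features = samples[0].GetInputVectorSize();
  size_t num_outputs = samples[0].GetOutputVectorSize();
  MLP my_mlp({ num_features, 2, num_outputs }, { "sigmoid", "linear" });
  my_mlp.Train(samples, 0.5, 500, 0.25);

  // Read the trained weights of layer 1 (the output layer)...
  std::vector<std::vector<double>> weights = my_mlp.GetLayerWeights(1);

  // ...zero them and write them back; with an all-zero linear output layer
  // every prediction collapses to ~0, which is what the reworked test asserts.
  for (auto & node_weights : weights)
    for (auto & w : node_weights)
      w = 0.0;
  my_mlp.SetLayerWeights(1, weights);

  std::vector<double> output;
  my_mlp.GetOutput(samples[0].input_vector(), &output);
  return 0;
}

Note that Layer::SetWeights indexes weights[node_i] for every node it owns, so the outer vector should provide one inner vector per node in the layer, each sized to that node's input count; reusing the vector returned by GetLayerWeights, as above, satisfies both checks.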