From e5eb1bbf9df7ba95fe819c5a612f5aa33fe6bbf7 Mon Sep 17 00:00:00 2001
From: rluna
Date: Thu, 3 Jan 2019 16:37:50 +0100
Subject: [PATCH] Fix compiler warnings: comparison between signed and
 unsigned integer expressions

---
 src/Layer.h      |  6 +++---
 src/MLP.cpp      | 11 ++++++-----
 src/MLP.h        |  3 ++-
 src/MLPTest.cpp  | 26 +++++++++-----------------
 src/NodeTest.cpp |  8 +-------
 src/Sample.h     |  8 ++++----
 src/Utils.h      |  6 +++---
 7 files changed, 28 insertions(+), 40 deletions(-)

diff --git a/src/Layer.h b/src/Layer.h
index f83e369..462a18c 100644
--- a/src/Layer.h
+++ b/src/Layer.h
@@ -124,7 +124,7 @@ public:
     fwrite(&str_size, sizeof(size_t), 1, file);
     fwrite(m_activation_function_str.c_str(), sizeof(char), str_size, file);
 
-    for (int i = 0; i < m_nodes.size(); i++) {
+    for (size_t i = 0; i < m_nodes.size(); i++) {
       m_nodes[i].SaveNode(file);
     }
   };
@@ -149,7 +149,7 @@ public:
     m_deriv_activation_function = (*pair).second;
 
     m_nodes.resize(m_num_nodes);
-    for (int i = 0; i < m_nodes.size(); i++) {
+    for (size_t i = 0; i < m_nodes.size(); i++) {
      m_nodes[i].LoadNode(file);
     }
 
@@ -165,4 +165,4 @@ protected:
   std::function<double(double)> m_deriv_activation_function;
 };
 
-#endif //LAYER_H
\ No newline at end of file
+#endif //LAYER_H
diff --git a/src/MLP.cpp b/src/MLP.cpp
index 2de9853..dfdd56b 100644
--- a/src/MLP.cpp
+++ b/src/MLP.cpp
@@ -152,8 +152,9 @@ void MLP::Train(const std::vector<TrainingSample> &training_sample_set_with_bias
                 int max_iterations,
                 double min_error_cost,
                 bool output_log) {
-  int num_examples = training_sample_set_with_bias.size();
-  int num_features = training_sample_set_with_bias[0].GetInputVectorSize();
+  // rlunaro 03/01/2019: the compiler reports these variables as unused
+  //int num_examples = training_sample_set_with_bias.size();
+  //int num_features = training_sample_set_with_bias[0].GetInputVectorSize();
 
   //{
   //  int layer_i = -1;
@@ -174,7 +175,7 @@ void MLP::Train(const std::vector<TrainingSample> &training_sample_set_with_bias
   //  }
   //}
 
-  size_t i = 0;
+  int i = 0;
   double current_iteration_cost_function = 0.0;
 
   for (i = 0; i < max_iterations; i++) {
@@ -199,7 +200,7 @@ void MLP::Train(const std::vector<TrainingSample> &training_sample_set_with_bias
         temp_training << training_sample_with_bias << "\t\t";
 
         temp_training << "Predicted output: [";
-        for (int i = 0; i < predicted_output.size(); i++) {
+        for (size_t i = 0; i < predicted_output.size(); i++) {
           if (i != 0)
             temp_training << ", ";
           temp_training << predicted_output[i];
@@ -210,7 +211,7 @@ void MLP::Train(const std::vector<TrainingSample> &training_sample_set_with_bias
 
       }
 
-      for (int j = 0; j < predicted_output.size(); j++) {
+      for (size_t j = 0; j < predicted_output.size(); j++) {
         current_iteration_cost_function +=
           (std::pow)((correct_output[j] - predicted_output[j]), 2);
         deriv_error_output[j] =
diff --git a/src/MLP.h b/src/MLP.h
index d4ecbea..638721b 100644
--- a/src/MLP.h
+++ b/src/MLP.h
@@ -40,6 +40,7 @@ public:
              int max_iterations = 5000,
              double min_error_cost = 0.001,
              bool output_log = true);
+
 protected:
   void UpdateWeights(const std::vector<std::vector<double>> & all_layers_activations,
                      const std::vector<double> &error,
@@ -56,4 +57,4 @@ private:
   std::vector<Layer> m_layers;
 };
 
-#endif //MLP_H
\ No newline at end of file
+#endif //MLP_H
diff --git a/src/MLPTest.cpp b/src/MLPTest.cpp
index bec06d2..aaaf2f9 100644
--- a/src/MLPTest.cpp
+++ b/src/MLPTest.cpp
@@ -36,7 +36,6 @@ UNIT(LearnAND) {
     }
   }
 
-  size_t num_examples = training_sample_set_with_bias.size();
   size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
   size_t num_outputs = training_sample_set_with_bias[0].GetOutputVectorSize();
   MLP my_mlp({ num_features, 2 ,num_outputs }, { "sigmoid", "linear" });
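The pattern behind most hunks in this patch: std::vector::size() returns size_t, an unsigned type, so comparing it against an int counter makes GCC and Clang emit -Wsign-compare ("comparison between signed and unsigned integer expressions"). A minimal sketch of the warning and of the fix applied throughout; the nodes vector and visit_all function are hypothetical, not from this repository:

    #include <cstddef>
    #include <vector>

    void visit_all(const std::vector<int> &nodes) {
      // Warns under -Wsign-compare: i would be converted to an unsigned type
      // for the comparison, so a negative i would compare as a huge value.
      // for (int i = 0; i < nodes.size(); i++) { ... }

      // The fix used throughout this patch: give the counter the type
      // that size() returns.
      for (size_t i = 0; i < nodes.size(); i++) {
        (void)nodes[i];  // stand-in for the real loop body
      }
    }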
{ "sigmoid", "linear" }); @@ -46,7 +45,7 @@ UNIT(LearnAND) { for (const auto & training_sample : training_sample_set_with_bias) { std::vector output; my_mlp.GetOutput(training_sample.input_vector(), &output); - for (int i = 0; i < num_outputs; i++) { + for (size_t i = 0; i < num_outputs; i++) { bool predicted_output = output[i] > 0.5 ? true : false; bool correct_output = training_sample.output_vector()[i] > 0.5 ? true : false; ASSERT_TRUE(predicted_output == correct_output); @@ -76,7 +75,6 @@ UNIT(LearnNAND) { } } - size_t num_examples = training_sample_set_with_bias.size(); size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize(); size_t num_outputs = training_sample_set_with_bias[0].GetOutputVectorSize(); MLP my_mlp({ num_features, 2 ,num_outputs }, { "sigmoid", "linear" }); @@ -86,7 +84,7 @@ UNIT(LearnNAND) { for (const auto & training_sample : training_sample_set_with_bias) { std::vector output; my_mlp.GetOutput(training_sample.input_vector(), &output); - for (int i = 0; i < num_outputs; i++) { + for (size_t i = 0; i < num_outputs; i++) { bool predicted_output = output[i] > 0.5 ? true : false; bool correct_output = training_sample.output_vector()[i] > 0.5 ? true : false; ASSERT_TRUE(predicted_output == correct_output); @@ -116,7 +114,6 @@ UNIT(LearnOR) { } } - size_t num_examples = training_sample_set_with_bias.size(); size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize(); size_t num_outputs = training_sample_set_with_bias[0].GetOutputVectorSize(); MLP my_mlp({ num_features, 2 ,num_outputs }, { "sigmoid", "linear" }); @@ -126,7 +123,7 @@ UNIT(LearnOR) { for (const auto & training_sample : training_sample_set_with_bias) { std::vector output; my_mlp.GetOutput(training_sample.input_vector(), &output); - for (int i = 0; i < num_outputs; i++) { + for (size_t i = 0; i < num_outputs; i++) { bool predicted_output = output[i] > 0.5 ? true : false; bool correct_output = training_sample.output_vector()[i] > 0.5 ? true : false; ASSERT_TRUE(predicted_output == correct_output); @@ -156,7 +153,6 @@ UNIT(LearnNOR) { } } - size_t num_examples = training_sample_set_with_bias.size(); size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize(); size_t num_outputs = training_sample_set_with_bias[0].GetOutputVectorSize(); MLP my_mlp({ num_features, 2 ,num_outputs }, { "sigmoid", "linear" }); @@ -166,7 +162,7 @@ UNIT(LearnNOR) { for (const auto & training_sample : training_sample_set_with_bias) { std::vector output; my_mlp.GetOutput(training_sample.input_vector(), &output); - for (int i = 0; i < num_outputs; i++) { + for (size_t i = 0; i < num_outputs; i++) { bool predicted_output = output[i] > 0.5 ? true : false; bool correct_output = training_sample.output_vector()[i] > 0.5 ? true : false; ASSERT_TRUE(predicted_output == correct_output); @@ -194,7 +190,6 @@ UNIT(LearnXOR) { } } - size_t num_examples = training_sample_set_with_bias.size(); size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize(); size_t num_outputs = training_sample_set_with_bias[0].GetOutputVectorSize(); MLP my_mlp({ num_features, 2 ,num_outputs }, { "sigmoid", "linear" }); @@ -204,7 +199,7 @@ UNIT(LearnXOR) { for (const auto & training_sample : training_sample_set_with_bias) { std::vector output; my_mlp.GetOutput(training_sample.input_vector(), &output); - for (int i = 0; i < num_outputs; i++) { + for (size_t i = 0; i < num_outputs; i++) { bool predicted_output = output[i] > 0.5 ? true : false; bool correct_output = training_sample.output_vector()[i] > 0.5 ? 
@@ -230,7 +225,6 @@ UNIT(LearnNOT) {
     }
   }
 
-  size_t num_examples = training_sample_set_with_bias.size();
   size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
   size_t num_outputs = training_sample_set_with_bias[0].GetOutputVectorSize();
   MLP my_mlp({ num_features, 2 ,num_outputs }, { "sigmoid", "linear" });
@@ -240,7 +234,7 @@ UNIT(LearnNOT) {
   for (const auto & training_sample : training_sample_set_with_bias) {
     std::vector<double> output;
     my_mlp.GetOutput(training_sample.input_vector(), &output);
-    for (int i = 0; i < num_outputs; i++) {
+    for (size_t i = 0; i < num_outputs; i++) {
       bool predicted_output = output[i] > 0.5 ? true : false;
       bool correct_output = training_sample.output_vector()[i] > 0.5 ? true : false;
       ASSERT_TRUE(predicted_output == correct_output);
@@ -268,7 +262,6 @@ UNIT(LearnX1) {
     }
   }
 
-  size_t num_examples = training_sample_set_with_bias.size();
   size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
   size_t num_outputs = training_sample_set_with_bias[0].GetOutputVectorSize();
   MLP my_mlp({ num_features, 2 ,num_outputs }, { "sigmoid", "linear" });
@@ -278,7 +271,7 @@ UNIT(LearnX1) {
   for (const auto & training_sample : training_sample_set_with_bias) {
     std::vector<double> output;
     my_mlp.GetOutput(training_sample.input_vector(), &output);
-    for (int i = 0; i < num_outputs; i++) {
+    for (size_t i = 0; i < num_outputs; i++) {
       bool predicted_output = output[i] > 0.5 ? true : false;
       bool correct_output = training_sample.output_vector()[i] > 0.5 ? true : false;
       ASSERT_TRUE(predicted_output == correct_output);
@@ -306,7 +299,6 @@ UNIT(LearnX2) {
     }
   }
 
-  size_t num_examples = training_sample_set_with_bias.size();
   size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
   size_t num_outputs = training_sample_set_with_bias[0].GetOutputVectorSize();
   MLP my_mlp({ num_features, 2 ,num_outputs }, { "sigmoid", "linear" });
@@ -316,7 +308,7 @@ UNIT(LearnX2) {
   for (const auto & training_sample : training_sample_set_with_bias) {
     std::vector<double> output;
     my_mlp.GetOutput(training_sample.input_vector(), &output);
-    for (int i = 0; i < num_outputs; i++) {
+    for (size_t i = 0; i < num_outputs; i++) {
       bool predicted_output = output[i] > 0.5 ? true : false;
       bool correct_output = training_sample.output_vector()[i] > 0.5 ? true : false;
       ASSERT_TRUE(predicted_output == correct_output);
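The deleted num_examples lines address a different diagnostic, the unused-variable warning, by removing the dead variable outright. When a value should stay visible for documentation or later use, C++17's [[maybe_unused]] attribute (or a void cast on older standards) silences the warning without deleting the line. A sketch with a hypothetical samples vector:

    #include <cstddef>
    #include <vector>

    void inspect(const std::vector<double> &samples) {
      [[maybe_unused]] size_t num_examples = samples.size();  // C++17
      size_t n = samples.size();
      (void)n;  // pre-C++17 idiom for the same effect
    }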
@@ -329,4 +321,4 @@ int main(int argc, char* argv[]) {
   START_EASYLOGGINGPP(argc, argv);
   microunit::UnitTester::Run();
   return 0;
-}
\ No newline at end of file
+}
diff --git a/src/NodeTest.cpp b/src/NodeTest.cpp
index 70e379b..a3e55e4 100644
--- a/src/NodeTest.cpp
+++ b/src/NodeTest.cpp
@@ -81,7 +81,6 @@ UNIT(LearnAND) {
     }
   }
 
-  size_t num_examples = training_sample_set_with_bias.size();
   size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
   Node my_node(num_features);
   Train(my_node, training_sample_set_with_bias, 0.1, 100);
@@ -116,7 +115,6 @@ UNIT(LearnNAND) {
       training_sample_with_bias.AddBiasValue(1);
     }
   }
-  size_t num_examples = training_sample_set_with_bias.size();
   size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
   Node my_node(num_features);
   Train(my_node, training_sample_set_with_bias, 0.1, 100);
@@ -151,7 +149,6 @@ UNIT(LearnOR) {
       training_sample_with_bias.AddBiasValue(1);
     }
   }
-  size_t num_examples = training_sample_set_with_bias.size();
   size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
   Node my_node(num_features);
   Train(my_node, training_sample_set_with_bias, 0.1, 100);
@@ -185,7 +182,6 @@ UNIT(LearnNOR) {
       training_sample_with_bias.AddBiasValue(1);
     }
   }
-  size_t num_examples = training_sample_set_with_bias.size();
   size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
   Node my_node(num_features);
   Train(my_node, training_sample_set_with_bias, 0.1, 100);
@@ -218,7 +214,6 @@ UNIT(LearnNOT) {
       training_sample_with_bias.AddBiasValue(1);
     }
   }
-  size_t num_examples = training_sample_set_with_bias.size();
   size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
   Node my_node(num_features);
   Train(my_node, training_sample_set_with_bias, 0.1, 100);
@@ -253,7 +248,6 @@ UNIT(LearnXOR) {
       training_sample_with_bias.AddBiasValue(1);
     }
   }
-  size_t num_examples = training_sample_set_with_bias.size();
   size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
   Node my_node(num_features);
   Train(my_node, training_sample_set_with_bias, 0.1, 100);
@@ -278,4 +272,4 @@ int main(int argc, char* argv[]) {
   START_EASYLOGGINGPP(argc, argv);
   microunit::UnitTester::Run();
   return 0;
-}
\ No newline at end of file
+}
diff --git a/src/Sample.h b/src/Sample.h
index 8b4522a..449f068 100644
--- a/src/Sample.h
+++ b/src/Sample.h
@@ -30,7 +30,7 @@ public:
 protected:
   virtual void PrintMyself(std::ostream& stream) const {
     stream << "Input vector: [";
-    for (int i = 0; i < m_input_vector.size(); i++) {
+    for (size_t i = 0; i < m_input_vector.size(); i++) {
       if (i != 0)
         stream << ", ";
       stream << m_input_vector[i];
@@ -59,7 +59,7 @@ public:
 protected:
   virtual void PrintMyself(std::ostream& stream) const {
     stream << "Input vector: [";
-    for (int i = 0; i < m_input_vector.size(); i++) {
+    for (size_t i = 0; i < m_input_vector.size(); i++) {
       if (i != 0)
         stream << ", ";
       stream << m_input_vector[i];
@@ -69,7 +69,7 @@ protected:
     stream << "; ";
 
     stream << "Output vector: [";
-    for (int i = 0; i < m_output_vector.size(); i++) {
+    for (size_t i = 0; i < m_output_vector.size(); i++) {
       if (i != 0)
         stream << ", ";
       stream << m_output_vector[i];
@@ -81,4 +81,4 @@ protected:
 };
 
 
-#endif // TRAININGSAMPLE_H
\ No newline at end of file
+#endif // TRAININGSAMPLE_H
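Sample.h keeps indexed loops because the index drives the separator logic (i != 0). Where the index has no other job, a range-based for sidesteps the signed/unsigned question entirely. A sketch of the same comma-separated printing without a counter; print_vector is a hypothetical free function, not part of Sample.h:

    #include <iostream>
    #include <vector>

    void print_vector(std::ostream &stream, const std::vector<double> &v) {
      stream << "[";
      const char *sep = "";  // empty before the first element
      for (double x : v) {   // no counter, so no int/size_t mismatch
        stream << sep << x;
        sep = ", ";
      }
      stream << "]";
    }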
diff --git a/src/Utils.h b/src/Utils.h
index 1edeb38..9a8eb30 100644
--- a/src/Utils.h
+++ b/src/Utils.h
@@ -110,11 +110,11 @@ inline void Softmax(std::vector<double> *output) {
   size_t num_elements = output->size();
   std::vector<double> exp_output(num_elements);
   double exp_total = 0.0;
-  for (int i = 0; i < num_elements; i++) {
+  for (size_t i = 0; i < num_elements; i++) {
     exp_output[i] = exp((*output)[i]);
     exp_total += exp_output[i];
   }
-  for (int i = 0; i < num_elements; i++) {
+  for (size_t i = 0; i < num_elements; i++) {
     (*output)[i] = exp_output[i] / exp_total;
   }
 }
@@ -125,4 +125,4 @@ inline void GetIdMaxElement(const std::vector<double> &output, size_t * class_id
                                      output.end()));
 }
 }
-#endif // UTILS_H
\ No newline at end of file
+#endif // UTILS_H
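Why this class of warning is worth fixing rather than suppressing: in a mixed comparison the signed operand is converted to the unsigned type, so a negative value compares as an enormous positive one. GCC and Clang report these sites via -Wsign-compare (enabled by -Wall/-Wextra); MSVC's closest equivalents are C4018 and C4389. A small self-contained demonstration, independent of this repository:

    #include <cstddef>
    #include <iostream>
    #include <vector>

    int main() {
      std::vector<int> v{1, 2, 3};
      int i = -1;
      // i is converted to the unsigned type of v.size() and becomes a huge
      // value, so this "obviously true" comparison prints false.
      std::cout << std::boolalpha << (i < v.size()) << '\n';
      // With matching types the comparison behaves as expected: prints true.
      std::cout << (std::size_t{0} < v.size()) << '\n';
      return 0;
    }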