fix of some compiler warnings: comparison between unsigned int and int

rluna
2019-01-03 16:37:50 +01:00
parent 75116e157e
commit e5eb1bbf9d
7 changed files with 28 additions and 40 deletions
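For context: with g++ -Wall, comparing an int loop index against std::vector::size() (a size_t) triggers -Wsign-compare, reported as a comparison between signed and unsigned integer expressions. A minimal sketch of the pattern this commit fixes throughout (the demo function below is illustrative, not from this repository):

#include <cstddef>
#include <vector>

void demo(const std::vector<double> &v) {
  // g++ -Wall warns here (-Wsign-compare): 'i' is signed, v.size() is size_t.
  for (int i = 0; i < v.size(); i++) {}
  // The fix applied across this commit: give the index the same type as size().
  for (size_t i = 0; i < v.size(); i++) {}
}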

View File

@@ -124,7 +124,7 @@ public:
fwrite(&str_size, sizeof(size_t), 1, file);
fwrite(m_activation_function_str.c_str(), sizeof(char), str_size, file);
-for (int i = 0; i < m_nodes.size(); i++) {
+for (size_t i = 0; i < m_nodes.size(); i++) {
m_nodes[i].SaveNode(file);
}
};
@@ -149,7 +149,7 @@ public:
m_deriv_activation_function = (*pair).second;
m_nodes.resize(m_num_nodes);
-for (int i = 0; i < m_nodes.size(); i++) {
+for (size_t i = 0; i < m_nodes.size(); i++) {
m_nodes[i].LoadNode(file);
}
@@ -165,4 +165,4 @@ protected:
std::function<double(double)> m_deriv_activation_function;
};
-#endif //LAYER_H
\ No newline at end of file
+#endif //LAYER_H
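Where the index is only used to visit each element, a range-based for sidesteps the int/size_t question entirely. A sketch with a stand-in Node type (the project's real Node class naturally has more to it):

#include <cstdio>
#include <vector>

struct Node {  // stand-in for the project's Node class
  void SaveNode(FILE *f) const { (void)f; /* fwrite this node's fields */ }
};

void SaveNodes(const std::vector<Node> &nodes, FILE *file) {
  for (const Node &node : nodes)  // no index, so no signedness mismatch
    node.SaveNode(file);
}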

View File

@@ -152,8 +152,9 @@ void MLP::Train(const std::vector<TrainingSample> &training_sample_set_with_bias
int max_iterations,
double min_error_cost,
bool output_log) {
-int num_examples = training_sample_set_with_bias.size();
-int num_features = training_sample_set_with_bias[0].GetInputVectorSize();
+//rlunaro.03/01/2019. the compiler says that these variables are unused
+//int num_examples = training_sample_set_with_bias.size();
+//int num_features = training_sample_set_with_bias[0].GetInputVectorSize();
//{
// int layer_i = -1;
@@ -174,7 +175,7 @@ void MLP::Train(const std::vector<TrainingSample> &training_sample_set_with_bias
// }
//}
-size_t i = 0;
+int i = 0;
double current_iteration_cost_function = 0.0;
for (i = 0; i < max_iterations; i++) {
@@ -199,7 +200,7 @@ void MLP::Train(const std::vector<TrainingSample> &training_sample_set_with_bias
temp_training << training_sample_with_bias << "\t\t";
temp_training << "Predicted output: [";
-for (int i = 0; i < predicted_output.size(); i++) {
+for (size_t i = 0; i < predicted_output.size(); i++) {
if (i != 0)
temp_training << ", ";
temp_training << predicted_output[i];
@@ -210,7 +211,7 @@ void MLP::Train(const std::vector<TrainingSample> &training_sample_set_with_bias
}
-for (int j = 0; j < predicted_output.size(); j++) {
+for (size_t j = 0; j < predicted_output.size(); j++) {
current_iteration_cost_function +=
(std::pow)((correct_output[j] - predicted_output[j]), 2);
deriv_error_output[j] =
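The size_t i = 0; → int i = 0; change runs in the opposite direction from the rest of the commit: here the loop bound, max_iterations, is a signed parameter, so the unsigned index was the mismatched side. (The first hunk in this file silences a different warning, -Wunused-variable, by commenting out the dead locals; deleting them, or marking them [[maybe_unused]] under C++17, would do equally well.) A sketch of the two consistent pairings, with illustrative names:

#include <cstddef>

void train(int max_iterations) {
  // Choice made in this commit: a signed index to match the signed bound.
  for (int i = 0; i < max_iterations; i++) { /* one training iteration */ }

  // Equally warning-free: convert the bound once (assumes it is non-negative).
  size_t iterations = static_cast<size_t>(max_iterations);
  for (size_t i = 0; i < iterations; i++) { /* one training iteration */ }
}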

View File

@@ -40,6 +40,7 @@ public:
int max_iterations = 5000,
double min_error_cost = 0.001,
bool output_log = true);
+protected:
void UpdateWeights(const std::vector<std::vector<double>> & all_layers_activations,
const std::vector<double> &error,
@@ -56,4 +57,4 @@ private:
std::vector<Layer> m_layers;
};
-#endif //MLP_H
\ No newline at end of file
+#endif //MLP_H

View File

@@ -36,7 +36,6 @@ UNIT(LearnAND) {
}
}
-size_t num_examples = training_sample_set_with_bias.size();
size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
size_t num_outputs = training_sample_set_with_bias[0].GetOutputVectorSize();
MLP my_mlp({ num_features, 2 ,num_outputs }, { "sigmoid", "linear" });
@@ -46,7 +45,7 @@ UNIT(LearnAND) {
for (const auto & training_sample : training_sample_set_with_bias) {
std::vector<double> output;
my_mlp.GetOutput(training_sample.input_vector(), &output);
-for (int i = 0; i < num_outputs; i++) {
+for (size_t i = 0; i < num_outputs; i++) {
bool predicted_output = output[i] > 0.5 ? true : false;
bool correct_output = training_sample.output_vector()[i] > 0.5 ? true : false;
ASSERT_TRUE(predicted_output == correct_output);
@@ -76,7 +75,6 @@ UNIT(LearnNAND) {
}
}
-size_t num_examples = training_sample_set_with_bias.size();
size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
size_t num_outputs = training_sample_set_with_bias[0].GetOutputVectorSize();
MLP my_mlp({ num_features, 2 ,num_outputs }, { "sigmoid", "linear" });
@@ -86,7 +84,7 @@ UNIT(LearnNAND) {
for (const auto & training_sample : training_sample_set_with_bias) {
std::vector<double> output;
my_mlp.GetOutput(training_sample.input_vector(), &output);
-for (int i = 0; i < num_outputs; i++) {
+for (size_t i = 0; i < num_outputs; i++) {
bool predicted_output = output[i] > 0.5 ? true : false;
bool correct_output = training_sample.output_vector()[i] > 0.5 ? true : false;
ASSERT_TRUE(predicted_output == correct_output);
@@ -116,7 +114,6 @@ UNIT(LearnOR) {
}
}
-size_t num_examples = training_sample_set_with_bias.size();
size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
size_t num_outputs = training_sample_set_with_bias[0].GetOutputVectorSize();
MLP my_mlp({ num_features, 2 ,num_outputs }, { "sigmoid", "linear" });
@@ -126,7 +123,7 @@ UNIT(LearnOR) {
for (const auto & training_sample : training_sample_set_with_bias) {
std::vector<double> output;
my_mlp.GetOutput(training_sample.input_vector(), &output);
-for (int i = 0; i < num_outputs; i++) {
+for (size_t i = 0; i < num_outputs; i++) {
bool predicted_output = output[i] > 0.5 ? true : false;
bool correct_output = training_sample.output_vector()[i] > 0.5 ? true : false;
ASSERT_TRUE(predicted_output == correct_output);
@@ -156,7 +153,6 @@ UNIT(LearnNOR) {
}
}
-size_t num_examples = training_sample_set_with_bias.size();
size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
size_t num_outputs = training_sample_set_with_bias[0].GetOutputVectorSize();
MLP my_mlp({ num_features, 2 ,num_outputs }, { "sigmoid", "linear" });
@@ -166,7 +162,7 @@ UNIT(LearnNOR) {
for (const auto & training_sample : training_sample_set_with_bias) {
std::vector<double> output;
my_mlp.GetOutput(training_sample.input_vector(), &output);
-for (int i = 0; i < num_outputs; i++) {
+for (size_t i = 0; i < num_outputs; i++) {
bool predicted_output = output[i] > 0.5 ? true : false;
bool correct_output = training_sample.output_vector()[i] > 0.5 ? true : false;
ASSERT_TRUE(predicted_output == correct_output);
@@ -194,7 +190,6 @@ UNIT(LearnXOR) {
}
}
-size_t num_examples = training_sample_set_with_bias.size();
size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
size_t num_outputs = training_sample_set_with_bias[0].GetOutputVectorSize();
MLP my_mlp({ num_features, 2 ,num_outputs }, { "sigmoid", "linear" });
@@ -204,7 +199,7 @@ UNIT(LearnXOR) {
for (const auto & training_sample : training_sample_set_with_bias) {
std::vector<double> output;
my_mlp.GetOutput(training_sample.input_vector(), &output);
-for (int i = 0; i < num_outputs; i++) {
+for (size_t i = 0; i < num_outputs; i++) {
bool predicted_output = output[i] > 0.5 ? true : false;
bool correct_output = training_sample.output_vector()[i] > 0.5 ? true : false;
ASSERT_TRUE(predicted_output == correct_output);
@@ -230,7 +225,6 @@ UNIT(LearnNOT) {
}
}
-size_t num_examples = training_sample_set_with_bias.size();
size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
size_t num_outputs = training_sample_set_with_bias[0].GetOutputVectorSize();
MLP my_mlp({ num_features, 2 ,num_outputs }, { "sigmoid", "linear" });
@@ -240,7 +234,7 @@ UNIT(LearnNOT) {
for (const auto & training_sample : training_sample_set_with_bias) {
std::vector<double> output;
my_mlp.GetOutput(training_sample.input_vector(), &output);
-for (int i = 0; i < num_outputs; i++) {
+for (size_t i = 0; i < num_outputs; i++) {
bool predicted_output = output[i] > 0.5 ? true : false;
bool correct_output = training_sample.output_vector()[i] > 0.5 ? true : false;
ASSERT_TRUE(predicted_output == correct_output);
@@ -268,7 +262,6 @@ UNIT(LearnX1) {
}
}
-size_t num_examples = training_sample_set_with_bias.size();
size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
size_t num_outputs = training_sample_set_with_bias[0].GetOutputVectorSize();
MLP my_mlp({ num_features, 2 ,num_outputs }, { "sigmoid", "linear" });
@@ -278,7 +271,7 @@ UNIT(LearnX1) {
for (const auto & training_sample : training_sample_set_with_bias) {
std::vector<double> output;
my_mlp.GetOutput(training_sample.input_vector(), &output);
-for (int i = 0; i < num_outputs; i++) {
+for (size_t i = 0; i < num_outputs; i++) {
bool predicted_output = output[i] > 0.5 ? true : false;
bool correct_output = training_sample.output_vector()[i] > 0.5 ? true : false;
ASSERT_TRUE(predicted_output == correct_output);
@@ -306,7 +299,6 @@ UNIT(LearnX2) {
}
}
-size_t num_examples = training_sample_set_with_bias.size();
size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
size_t num_outputs = training_sample_set_with_bias[0].GetOutputVectorSize();
MLP my_mlp({ num_features, 2 ,num_outputs }, { "sigmoid", "linear" });
@@ -316,7 +308,7 @@ UNIT(LearnX2) {
for (const auto & training_sample : training_sample_set_with_bias) {
std::vector<double> output;
my_mlp.GetOutput(training_sample.input_vector(), &output);
-for (int i = 0; i < num_outputs; i++) {
+for (size_t i = 0; i < num_outputs; i++) {
bool predicted_output = output[i] > 0.5 ? true : false;
bool correct_output = training_sample.output_vector()[i] > 0.5 ? true : false;
ASSERT_TRUE(predicted_output == correct_output);
@@ -329,4 +321,4 @@ int main(int argc, char* argv[]) {
START_EASYLOGGINGPP(argc, argv);
microunit::UnitTester::Run();
return 0;
-}
\ No newline at end of file
+}
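In the test files the unused num_examples locals are deleted outright rather than commented out. Incidentally, output[i] > 0.5 already yields a bool, so the ternaries in these loops are redundant; a self-contained sketch of the trimmed assertion loop, with plain assert standing in for the microunit ASSERT_TRUE macro:

#include <cassert>
#include <cstddef>
#include <vector>

// Compares thresholded network outputs against thresholded expected outputs.
void CheckOutputs(const std::vector<double> &output,
                  const std::vector<double> &expected) {
  for (size_t i = 0; i < output.size(); i++) {
    bool predicted_output = output[i] > 0.5;  // already a bool; no ternary needed
    bool correct_output = expected[i] > 0.5;
    assert(predicted_output == correct_output);
  }
}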

View File

@@ -81,7 +81,6 @@ UNIT(LearnAND) {
}
}
-size_t num_examples = training_sample_set_with_bias.size();
size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
Node my_node(num_features);
Train(my_node, training_sample_set_with_bias, 0.1, 100);
@@ -116,7 +115,6 @@ UNIT(LearnNAND) {
training_sample_with_bias.AddBiasValue(1);
}
}
-size_t num_examples = training_sample_set_with_bias.size();
size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
Node my_node(num_features);
Train(my_node, training_sample_set_with_bias, 0.1, 100);
@@ -151,7 +149,6 @@ UNIT(LearnOR) {
training_sample_with_bias.AddBiasValue(1);
}
}
-size_t num_examples = training_sample_set_with_bias.size();
size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
Node my_node(num_features);
Train(my_node, training_sample_set_with_bias, 0.1, 100);
@@ -185,7 +182,6 @@ UNIT(LearnNOR) {
training_sample_with_bias.AddBiasValue(1);
}
}
-size_t num_examples = training_sample_set_with_bias.size();
size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
Node my_node(num_features);
Train(my_node, training_sample_set_with_bias, 0.1, 100);
@@ -218,7 +214,6 @@ UNIT(LearnNOT) {
training_sample_with_bias.AddBiasValue(1);
}
}
-size_t num_examples = training_sample_set_with_bias.size();
size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
Node my_node(num_features);
Train(my_node, training_sample_set_with_bias, 0.1, 100);
@@ -253,7 +248,6 @@ UNIT(LearnXOR) {
training_sample_with_bias.AddBiasValue(1);
}
}
-size_t num_examples = training_sample_set_with_bias.size();
size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
Node my_node(num_features);
Train(my_node, training_sample_set_with_bias, 0.1, 100);
@@ -278,4 +272,4 @@ int main(int argc, char* argv[]) {
START_EASYLOGGINGPP(argc, argv);
microunit::UnitTester::Run();
return 0;
-}
\ No newline at end of file
+}

View File

@@ -30,7 +30,7 @@ public:
protected:
virtual void PrintMyself(std::ostream& stream) const {
stream << "Input vector: [";
-for (int i = 0; i < m_input_vector.size(); i++) {
+for (size_t i = 0; i < m_input_vector.size(); i++) {
if (i != 0)
stream << ", ";
stream << m_input_vector[i];
@@ -59,7 +59,7 @@ public:
protected:
virtual void PrintMyself(std::ostream& stream) const {
stream << "Input vector: [";
-for (int i = 0; i < m_input_vector.size(); i++) {
+for (size_t i = 0; i < m_input_vector.size(); i++) {
if (i != 0)
stream << ", ";
stream << m_input_vector[i];
@@ -69,7 +69,7 @@ protected:
stream << "; ";
stream << "Output vector: [";
-for (int i = 0; i < m_output_vector.size(); i++) {
+for (size_t i = 0; i < m_output_vector.size(); i++) {
if (i != 0)
stream << ", ";
stream << m_output_vector[i];
@@ -81,4 +81,4 @@ protected:
};
-#endif // TRAININGSAMPLE_H
\ No newline at end of file
+#endif // TRAININGSAMPLE_H

View File

@@ -110,11 +110,11 @@ inline void Softmax(std::vector<double> *output) {
size_t num_elements = output->size();
std::vector<double> exp_output(num_elements);
double exp_total = 0.0;
-for (int i = 0; i < num_elements; i++) {
+for (size_t i = 0; i < num_elements; i++) {
exp_output[i] = exp((*output)[i]);
exp_total += exp_output[i];
}
-for (int i = 0; i < num_elements; i++) {
+for (size_t i = 0; i < num_elements; i++) {
(*output)[i] = exp_output[i] / exp_total;
}
}
@@ -125,4 +125,4 @@ inline void GetIdMaxElement(const std::vector<double> &output, size_t * class_i
output.end()));
}
}
-#endif // UTILS_H
\ No newline at end of file
+#endif // UTILS_H
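The Softmax hunk is representative of the whole commit: every index compared against a size_t bound becomes size_t itself. For reference, a self-contained version of the fixed function (same logic as the diff) that should compile cleanly under g++ -Wall:

#include <cmath>
#include <cstddef>
#include <vector>

// Rescales the vector in place so its elements are positive and sum to 1.
inline void Softmax(std::vector<double> *output) {
  size_t num_elements = output->size();
  std::vector<double> exp_output(num_elements);
  double exp_total = 0.0;
  for (size_t i = 0; i < num_elements; i++) {  // size_t index matches size()
    exp_output[i] = std::exp((*output)[i]);
    exp_total += exp_output[i];
  }
  for (size_t i = 0; i < num_elements; i++) {
    (*output)[i] = exp_output[i] / exp_total;
  }
}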