mirror of https://github.com/davidalbertonogueira/MLP.git
synced 2025-12-17 04:14:41 +03:00

Merge pull request #10 from rlunaro/master

Add the possibility of manually changing the weights of internal layers
.gitignore (vendored) | 8
@@ -214,3 +214,11 @@ _Pvt_Extensions/
 ModelManifest.xml

 /build
+/.cproject
+/.project
+/IrisDatasetTest
+/LayerTest
+/MLPTest
+/NodeTest
+/mlp.a
+/mlp.so
.settings/.gitignore (vendored, new file) | 1
@@ -0,0 +1 @@
+/language.settings.xml
Makefile | 7
@@ -1,7 +1,7 @@
 #!/bin/bash
 # Makefile for MLP
 CC = g++
-DEBUG = -g
+DEBUG = -g3
 PROJNAME = mlp

 HEADERPATH = ./src
@@ -14,7 +14,8 @@ AUXLIBS =
 INCLUDES = -I$(LOCALDEPSINCLUDES) -I$(AUXINCLUDES)
 LIBS = -L$(AUXLIBS)
 #LIBS += -L/usr/local/lib/
-CFLAGS = -std=gnu++11 -std=c++11 -O3 -Wall -fmessage-length=0 -fPIC $(INCLUDES)
+#rlunaro: removed optimization for tests: -O3
+CFLAGS = -std=gnu++11 -std=c++11 -Wall -fmessage-length=0 -fPIC $(INCLUDES)
 CFLAGS += $(DEBUG)
 LFLAGS = $(LIBS)
 #For verbosity
@@ -59,7 +60,7 @@ NodeTest: $(SOURCEPATH)/NodeTest.o $(SOURCEPATH)/MLP.o
 	$(CC) $^ $(CFLAGS) $(LFLAGS) -o $@
 clean:
 	@echo Clean
-	rm -f *~ *.o *~
+	rm -f *~ $(SOURCEPATH)/*.o *~
 	@echo Success

 cleanall:
data/iris.mlp | Bin (binary file not shown)
src/.gitignore (vendored, new file) | 5
@@ -0,0 +1,5 @@
+/IrisDatasetTest.o
+/LayerTest.o
+/MLP.o
+/MLPTest.o
+/NodeTest.o
src/Layer.h | 43
@@ -5,9 +5,6 @@
 #ifndef LAYER_H
 #define LAYER_H

-#include "Utils.h"
-#include "Node.h"
-
 #include <stdio.h>
 #include <stdlib.h>
 #include <iostream>
@@ -16,6 +13,8 @@
 #include <vector>
 #include <algorithm>
 #include <cassert> // for assert()
+#include "Node.h"
+#include "Utils.h"

 class Layer {
 public:
@@ -68,13 +67,21 @@ public:
     return m_nodes;
   }

+  /**
+   * Return the internal list of nodes, but modifiable.
+   */
+  std::vector<Node> & GetNodesChangeable() {
+    return m_nodes;
+  }
+
+
   void GetOutputAfterActivationFunction(const std::vector<double> &input,
                                         std::vector<double> * output) const {
     assert(input.size() == m_num_inputs_per_node);

     output->resize(m_num_nodes);

-    for (int i = 0; i < m_num_nodes; ++i) {
+    for (size_t i = 0; i < m_num_nodes; ++i) {
       m_nodes[i].GetOutputAfterActivationFunction(input,
                                                   m_activation_function,
                                                   &((*output)[i]));
@@ -103,7 +110,7 @@ public:
       dE_doj = deriv_error[i];
       doj_dnetj = m_deriv_activation_function(net_sum);

-      for (int j = 0; j < m_num_inputs_per_node; j++) {
+      for (size_t j = 0; j < m_num_inputs_per_node; j++) {
         (*deltas)[j] += dE_doj * doj_dnetj * m_nodes[i].GetWeights()[j];

         dnetj_dwij = input_layer_activation[j];
@@ -116,6 +123,22 @@ public:
   };


+  void SetWeights( std::vector<std::vector<double>> & weights )
+  {
+    if( 0 <= weights.size() && weights.size() <= m_num_nodes )
+    {
+      // traverse the list of nodes
+      size_t node_i = 0;
+      for( Node & node : m_nodes )
+      {
+        node.SetWeights( weights[node_i] );
+        node_i++;
+      }
+    }
+    else
+      throw new std::logic_error("Incorrect layer number in SetWeights call");
+  };
+
   void SaveLayer(FILE * file) const {
     fwrite(&m_num_nodes, sizeof(m_num_nodes), 1, file);
     fwrite(&m_num_inputs_per_node, sizeof(m_num_inputs_per_node), 1, file);
@@ -124,7 +147,7 @@ public:
     fwrite(&str_size, sizeof(size_t), 1, file);
     fwrite(m_activation_function_str.c_str(), sizeof(char), str_size, file);

-    for (int i = 0; i < m_nodes.size(); i++) {
+    for (size_t i = 0; i < m_nodes.size(); i++) {
       m_nodes[i].SaveNode(file);
     }
   };
@@ -149,15 +172,15 @@ public:
     m_deriv_activation_function = (*pair).second;

     m_nodes.resize(m_num_nodes);
-    for (int i = 0; i < m_nodes.size(); i++) {
+    for (size_t i = 0; i < m_nodes.size(); i++) {
       m_nodes[i].LoadNode(file);
     }

   };

 protected:
-  int m_num_inputs_per_node{ 0 };
-  int m_num_nodes{ 0 };
+  size_t m_num_inputs_per_node{ 0 };
+  size_t m_num_nodes{ 0 };
   std::vector<Node> m_nodes;

   std::string m_activation_function_str;
@@ -165,4 +188,4 @@ protected:
   std::function<double(double)> m_deriv_activation_function;
 };

 #endif //LAYER_H
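Note on the new Layer::SetWeights above: the argument is one weight vector per node, in node order. A minimal sketch of a well-formed call follows (illustrative shapes, not taken from the patch). One caveat: the guard accepts fewer inner vectors than the layer has nodes, but the range loop still visits every node, so passing exactly one vector per node is the only safe shape.

// Sketch, not part of the commit: the shape Layer::SetWeights expects.
// Assumed layer: 3 nodes, 2 inputs per node, hence a 3x2 matrix.
#include <vector>

int main() {
  std::vector<std::vector<double>> weights = {
    {  0.10, -0.20 },  // node 0
    {  0.30,  0.40 },  // node 1
    { -0.50,  0.60 }   // node 2
  };
  // some_layer.SetWeights(weights);  // each inner vector must match the
  //                                  // node's input count, or Node::SetWeights
  //                                  // (src/Node.h below) throws
  return 0;
}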
src/MLP.cpp | 56
@@ -3,6 +3,7 @@
 // Author : David Nogueira
 //============================================================================
 #include "MLP.h"
+
 #include <stdio.h>
 #include <stdlib.h>
 #include <iostream>
@@ -10,6 +11,7 @@
 #include <fstream>
 #include <vector>
 #include <algorithm>
+
 #include "easylogging++.h"


@@ -48,7 +50,7 @@ void MLP::CreateMLP(const std::vector<uint64_t> & layers_nodes,
   m_num_outputs = m_layers_nodes[m_layers_nodes.size() - 1];
   m_num_hidden_layers = m_layers_nodes.size() - 2;

-  for (int i = 0; i < m_layers_nodes.size() - 1; i++) {
+  for (size_t i = 0; i < m_layers_nodes.size() - 1; i++) {
     m_layers.emplace_back(Layer(m_layers_nodes[i],
                                 m_layers_nodes[i + 1],
                                 layers_activfuncs[i],
@@ -65,7 +67,7 @@ void MLP::SaveMLPNetwork(const std::string & filename)const {
   fwrite(&m_num_hidden_layers, sizeof(m_num_hidden_layers), 1, file);
   if (!m_layers_nodes.empty())
     fwrite(&m_layers_nodes[0], sizeof(m_layers_nodes[0]), m_layers_nodes.size(), file);
-  for (int i = 0; i < m_layers.size(); i++) {
+  for (size_t i = 0; i < m_layers.size(); i++) {
     m_layers[i].SaveLayer(file);
   }
   fclose(file);
@@ -83,7 +85,7 @@ void MLP::LoadMLPNetwork(const std::string & filename) {
   if (!m_layers_nodes.empty())
     fread(&m_layers_nodes[0], sizeof(m_layers_nodes[0]), m_layers_nodes.size(), file);
   m_layers.resize(m_layers_nodes.size() - 1);
-  for (int i = 0; i < m_layers.size(); i++) {
+  for (size_t i = 0; i < m_layers.size(); i++) {
     m_layers[i].LoadLayer(file);
   }
   fclose(file);
@@ -103,7 +105,7 @@ void MLP::GetOutput(const std::vector<double> &input,
   std::vector<double> temp_out(temp_size, 0.0);
   temp_in = input;

-  for (int i = 0; i < m_layers.size(); ++i) {
+  for (size_t i = 0; i < m_layers.size(); ++i) {
     if (i > 0) {
       //Store this layer activation
       if (all_layers_activations != nullptr)
@@ -152,8 +154,9 @@ void MLP::Train(const std::vector<TrainingSample> &training_sample_set_with_bias
                 int max_iterations,
                 double min_error_cost,
                 bool output_log) {
-  int num_examples = training_sample_set_with_bias.size();
-  int num_features = training_sample_set_with_bias[0].GetInputVectorSize();
+  //rlunaro.03/01/2019. the compiler says that these variables are unused
+  //int num_examples = training_sample_set_with_bias.size();
+  //int num_features = training_sample_set_with_bias[0].GetInputVectorSize();

   //{
   //  int layer_i = -1;
@@ -174,7 +177,7 @@ void MLP::Train(const std::vector<TrainingSample> &training_sample_set_with_bias
   //  }
   //}

-  size_t i = 0;
+  int i = 0;
   double current_iteration_cost_function = 0.0;

   for (i = 0; i < max_iterations; i++) {
@@ -199,7 +202,7 @@ void MLP::Train(const std::vector<TrainingSample> &training_sample_set_with_bias
         temp_training << training_sample_with_bias << "\t\t";

         temp_training << "Predicted output: [";
-        for (int i = 0; i < predicted_output.size(); i++) {
+        for (size_t i = 0; i < predicted_output.size(); i++) {
           if (i != 0)
             temp_training << ", ";
           temp_training << predicted_output[i];
@@ -210,7 +213,7 @@ void MLP::Train(const std::vector<TrainingSample> &training_sample_set_with_bias

       }

-      for (int j = 0; j < predicted_output.size(); j++) {
+      for (size_t j = 0; j < predicted_output.size(); j++) {
         current_iteration_cost_function +=
           (std::pow)((correct_output[j] - predicted_output[j]), 2);
         deriv_error_output[j] =
@@ -259,3 +262,38 @@ void MLP::Train(const std::vector<TrainingSample> &training_sample_set_with_bias
 };


+size_t MLP::GetNumLayers()
+{
+  return m_layers.size();
+}
+
+std::vector<std::vector<double>> MLP::GetLayerWeights( size_t layer_i )
+{
+  std::vector<std::vector<double>> ret_val;
+  // check parameters
+  if( 0 <= layer_i && layer_i < m_layers.size() )
+  {
+    Layer current_layer = m_layers[layer_i];
+    for( Node & node : current_layer.GetNodesChangeable() )
+    {
+      ret_val.push_back( node.GetWeights() );
+    }
+    return ret_val;
+  }
+  else
+    throw new std::logic_error("Incorrect layer number in GetLayerWeights call");
+
+}
+
+void MLP::SetLayerWeights( size_t layer_i, std::vector<std::vector<double>> & weights )
+{
+  // check parameters
+  if( 0 <= layer_i && layer_i < m_layers.size() )
+  {
+    m_layers[layer_i].SetWeights( weights );
+  }
+  else
+    throw new std::logic_error("Incorrect layer number in SetLayerWeights call");
+}
+
+
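A minimal usage sketch for the three new MLP methods, assuming only what the diff above shows. Worth noting: the error paths use `throw new std::logic_error(...)`, so they throw a pointer; a handler must catch `std::logic_error *` and then owns the pointee.

// Sketch, not part of the commit: read every layer's weights and write them
// back unchanged. Assumes this repository's MLP.h is on the include path.
#include <iostream>
#include <stdexcept>
#include <vector>
#include "MLP.h"

void round_trip_weights(MLP & my_mlp) {
  try {
    for (size_t layer_i = 0; layer_i < my_mlp.GetNumLayers(); layer_i++) {
      std::vector<std::vector<double>> w = my_mlp.GetLayerWeights(layer_i);
      std::cout << "layer " << layer_i << ": " << w.size() << " nodes" << std::endl;
      my_mlp.SetLayerWeights(layer_i, w);  // write the same weights back
    }
  } catch (std::logic_error * e) {  // thrown with `throw new`, hence a pointer
    std::cerr << e->what() << std::endl;
    delete e;
  }
}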
src/MLP.h | 16
@@ -5,10 +5,6 @@
 #ifndef MLP_H
 #define MLP_H

-#include "Layer.h"
-#include "Sample.h"
-#include "Utils.h"
-
 #include <stdio.h>
 #include <stdlib.h>
 #include <iostream>
@@ -16,6 +12,10 @@
 #include <fstream>
 #include <vector>
 #include <algorithm>
+#include <exception>
+#include "Layer.h"
+#include "Sample.h"
+#include "Utils.h"

 class MLP {
 public:
@@ -40,6 +40,10 @@ public:
             int max_iterations = 5000,
             double min_error_cost = 0.001,
             bool output_log = true);
+  size_t GetNumLayers();
+  std::vector<std::vector<double>> GetLayerWeights( size_t layer_i );
+  void SetLayerWeights( size_t layer_i, std::vector<std::vector<double>> & weights );
+
 protected:
   void UpdateWeights(const std::vector<std::vector<double>> & all_layers_activations,
                      const std::vector<double> &error,
@@ -49,11 +53,11 @@ private:
                  const std::vector<std::string> & layers_activfuncs,
                  bool use_constant_weight_init,
                  double constant_weight_init = 0.5);
-  int m_num_inputs{ 0 };
+  size_t m_num_inputs{ 0 };
   int m_num_outputs{ 0 };
   int m_num_hidden_layers{ 0 };
   std::vector<uint64_t> m_layers_nodes;
   std::vector<Layer> m_layers;
 };

 #endif //MLP_H
src/MLPTest.cpp
@@ -36,7 +36,6 @@ UNIT(LearnAND) {
     }
   }

-  size_t num_examples = training_sample_set_with_bias.size();
   size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
   size_t num_outputs = training_sample_set_with_bias[0].GetOutputVectorSize();
   MLP my_mlp({ num_features, 2 ,num_outputs }, { "sigmoid", "linear" });
@@ -46,7 +45,7 @@ UNIT(LearnAND) {
   for (const auto & training_sample : training_sample_set_with_bias) {
     std::vector<double> output;
     my_mlp.GetOutput(training_sample.input_vector(), &output);
-    for (int i = 0; i < num_outputs; i++) {
+    for (size_t i = 0; i < num_outputs; i++) {
       bool predicted_output = output[i] > 0.5 ? true : false;
       bool correct_output = training_sample.output_vector()[i] > 0.5 ? true : false;
       ASSERT_TRUE(predicted_output == correct_output);
@@ -76,7 +75,6 @@ UNIT(LearnNAND) {
     }
   }

-  size_t num_examples = training_sample_set_with_bias.size();
   size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
   size_t num_outputs = training_sample_set_with_bias[0].GetOutputVectorSize();
   MLP my_mlp({ num_features, 2 ,num_outputs }, { "sigmoid", "linear" });
@@ -86,7 +84,7 @@ UNIT(LearnNAND) {
   for (const auto & training_sample : training_sample_set_with_bias) {
     std::vector<double> output;
     my_mlp.GetOutput(training_sample.input_vector(), &output);
-    for (int i = 0; i < num_outputs; i++) {
+    for (size_t i = 0; i < num_outputs; i++) {
       bool predicted_output = output[i] > 0.5 ? true : false;
       bool correct_output = training_sample.output_vector()[i] > 0.5 ? true : false;
       ASSERT_TRUE(predicted_output == correct_output);
@@ -116,7 +114,6 @@ UNIT(LearnOR) {
     }
   }

-  size_t num_examples = training_sample_set_with_bias.size();
   size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
   size_t num_outputs = training_sample_set_with_bias[0].GetOutputVectorSize();
   MLP my_mlp({ num_features, 2 ,num_outputs }, { "sigmoid", "linear" });
@@ -126,7 +123,7 @@ UNIT(LearnOR) {
   for (const auto & training_sample : training_sample_set_with_bias) {
     std::vector<double> output;
     my_mlp.GetOutput(training_sample.input_vector(), &output);
-    for (int i = 0; i < num_outputs; i++) {
+    for (size_t i = 0; i < num_outputs; i++) {
       bool predicted_output = output[i] > 0.5 ? true : false;
       bool correct_output = training_sample.output_vector()[i] > 0.5 ? true : false;
       ASSERT_TRUE(predicted_output == correct_output);
@@ -156,7 +153,6 @@ UNIT(LearnNOR) {
     }
   }

-  size_t num_examples = training_sample_set_with_bias.size();
   size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
   size_t num_outputs = training_sample_set_with_bias[0].GetOutputVectorSize();
   MLP my_mlp({ num_features, 2 ,num_outputs }, { "sigmoid", "linear" });
@@ -166,7 +162,7 @@ UNIT(LearnNOR) {
   for (const auto & training_sample : training_sample_set_with_bias) {
     std::vector<double> output;
     my_mlp.GetOutput(training_sample.input_vector(), &output);
-    for (int i = 0; i < num_outputs; i++) {
+    for (size_t i = 0; i < num_outputs; i++) {
       bool predicted_output = output[i] > 0.5 ? true : false;
       bool correct_output = training_sample.output_vector()[i] > 0.5 ? true : false;
       ASSERT_TRUE(predicted_output == correct_output);
@@ -194,7 +190,6 @@ UNIT(LearnXOR) {
     }
   }

-  size_t num_examples = training_sample_set_with_bias.size();
   size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
   size_t num_outputs = training_sample_set_with_bias[0].GetOutputVectorSize();
   MLP my_mlp({ num_features, 2 ,num_outputs }, { "sigmoid", "linear" });
@@ -204,7 +199,7 @@ UNIT(LearnXOR) {
   for (const auto & training_sample : training_sample_set_with_bias) {
     std::vector<double> output;
     my_mlp.GetOutput(training_sample.input_vector(), &output);
-    for (int i = 0; i < num_outputs; i++) {
+    for (size_t i = 0; i < num_outputs; i++) {
       bool predicted_output = output[i] > 0.5 ? true : false;
       bool correct_output = training_sample.output_vector()[i] > 0.5 ? true : false;
       ASSERT_TRUE(predicted_output == correct_output);
@@ -230,7 +225,6 @@ UNIT(LearnNOT) {
     }
   }

-  size_t num_examples = training_sample_set_with_bias.size();
   size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
   size_t num_outputs = training_sample_set_with_bias[0].GetOutputVectorSize();
   MLP my_mlp({ num_features, 2 ,num_outputs }, { "sigmoid", "linear" });
@@ -240,7 +234,7 @@ UNIT(LearnNOT) {
   for (const auto & training_sample : training_sample_set_with_bias) {
     std::vector<double> output;
     my_mlp.GetOutput(training_sample.input_vector(), &output);
-    for (int i = 0; i < num_outputs; i++) {
+    for (size_t i = 0; i < num_outputs; i++) {
       bool predicted_output = output[i] > 0.5 ? true : false;
       bool correct_output = training_sample.output_vector()[i] > 0.5 ? true : false;
       ASSERT_TRUE(predicted_output == correct_output);
@@ -268,7 +262,6 @@ UNIT(LearnX1) {
     }
   }

-  size_t num_examples = training_sample_set_with_bias.size();
   size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
   size_t num_outputs = training_sample_set_with_bias[0].GetOutputVectorSize();
   MLP my_mlp({ num_features, 2 ,num_outputs }, { "sigmoid", "linear" });
@@ -278,7 +271,7 @@ UNIT(LearnX1) {
   for (const auto & training_sample : training_sample_set_with_bias) {
     std::vector<double> output;
     my_mlp.GetOutput(training_sample.input_vector(), &output);
-    for (int i = 0; i < num_outputs; i++) {
+    for (size_t i = 0; i < num_outputs; i++) {
       bool predicted_output = output[i] > 0.5 ? true : false;
       bool correct_output = training_sample.output_vector()[i] > 0.5 ? true : false;
       ASSERT_TRUE(predicted_output == correct_output);
@@ -306,7 +299,6 @@ UNIT(LearnX2) {
     }
   }

-  size_t num_examples = training_sample_set_with_bias.size();
   size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
   size_t num_outputs = training_sample_set_with_bias[0].GetOutputVectorSize();
   MLP my_mlp({ num_features, 2 ,num_outputs }, { "sigmoid", "linear" });
@@ -316,7 +308,7 @@ UNIT(LearnX2) {
   for (const auto & training_sample : training_sample_set_with_bias) {
     std::vector<double> output;
     my_mlp.GetOutput(training_sample.input_vector(), &output);
-    for (int i = 0; i < num_outputs; i++) {
+    for (size_t i = 0; i < num_outputs; i++) {
       bool predicted_output = output[i] > 0.5 ? true : false;
       bool correct_output = training_sample.output_vector()[i] > 0.5 ? true : false;
       ASSERT_TRUE(predicted_output == correct_output);
@@ -325,8 +317,73 @@ UNIT(LearnX2) {
   LOG(INFO) << "Trained with success." << std::endl;
 }

+
+UNIT(GetWeightsSetWeights) {
+  LOG(INFO) << "Train X2 function, read internal weights" << std::endl;
+
+  std::vector<TrainingSample> training_set =
+  {
+    { { 0, 0 },{ 0.0 } },
+    { { 0, 1 },{ 1.0 } },
+    { { 1, 0 },{ 0.0 } },
+    { { 1, 1 },{ 1.0 } }
+  };
+  bool bias_already_in = false;
+  std::vector<TrainingSample> training_sample_set_with_bias(training_set);
+  //set up bias
+  if (!bias_already_in) {
+    for (auto & training_sample_with_bias : training_sample_set_with_bias) {
+      training_sample_with_bias.AddBiasValue(1);
+    }
+  }
+
+  size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
+  size_t num_outputs = training_sample_set_with_bias[0].GetOutputVectorSize();
+  MLP my_mlp({ num_features, 2, num_outputs }, { "sigmoid", "linear" });
+  //Train MLP
+  my_mlp.Train(training_sample_set_with_bias, 0.5, 500, 0.25);
+
+  // get layer weights
+  std::vector<std::vector<double>> weights = my_mlp.GetLayerWeights( 1 );
+
+  for (const auto & training_sample : training_sample_set_with_bias) {
+    std::vector<double> output;
+    my_mlp.GetOutput(training_sample.input_vector(), &output);
+    for (size_t i = 0; i < num_outputs; i++) {
+      bool predicted_output = output[i] > 0.5 ? true : false;
+      std::cout << "PREDICTED OUTPUT IS NOW: " << output[i] << std::endl;
+      bool correct_output = training_sample.output_vector()[i] > 0.5 ? true : false;
+      ASSERT_TRUE(predicted_output == correct_output);
+    }
+  }
+
+  // the expected value of the internal weights
+  // after training are 1.65693 -0.538749
+  ASSERT_TRUE( 1.6 <= weights[0][0] && weights[0][0] <= 1.7 );
+  ASSERT_TRUE( -0.6 <= weights[0][1] && weights[0][1] <= -0.5 );
+
+  // now, we are going to inject a weight value of 0.0
+  // and check that the new output value is nonsense
+  std::vector<std::vector<double>> zeroWeights = { { 0.0, 0.0 } };
+
+  my_mlp.SetLayerWeights( 1, zeroWeights );
+
+  for (const auto & training_sample : training_sample_set_with_bias) {
+    std::vector<double> output;
+    my_mlp.GetOutput(training_sample.input_vector(), &output);
+    for (size_t i = 0; i < num_outputs; i++) {
+      ASSERT_TRUE( -0.0001L <= output[i] && output[i] <= 0.0001L );
+    }
+  }
+
+  LOG(INFO) << "Trained with success." << std::endl;
+}
+
+
 int main(int argc, char* argv[]) {
   START_EASYLOGGINGPP(argc, argv);
   microunit::UnitTester::Run();
   return 0;
 }
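Why the closing assertions of GetWeightsSetWeights hold: layer 1 of this network is the output layer with a "linear" activation, so after SetLayerWeights(1, {{0.0, 0.0}}) the single output node computes net = 0.0 * a0 + 0.0 * a1 = 0 whatever the hidden activations are (assuming the node's internal bias stays at its 0.0 default, as here, where the bias is instead carried as an extra input column), and linear(0) = 0. That is why every prediction must land within 0.0001 of zero.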
src/Node.h | 16
@@ -5,8 +5,6 @@
 #ifndef NODE_H
 #define NODE_H

-#include "Utils.h"
-
 #include <stdio.h>
 #include <stdlib.h>
 #include <iostream>
@@ -15,6 +13,8 @@
 #include <vector>
 #include <algorithm>
 #include <cassert> // for assert()
+#include <exception>
+#include "Utils.h"

 #define CONSTANT_WEIGHT_INITIALIZATION 0

@@ -81,6 +81,14 @@ public:
     return m_weights;
   }

+  void SetWeights( std::vector<double> & weights ){
+    // check size of the weights vector
+    if( weights.size() == m_num_inputs )
+      m_weights = weights;
+    else
+      throw new std::logic_error("Incorrect weight size in SetWeights call");
+  }
+
   size_t GetWeightsVectorSize() const {
     return m_weights.size();
   }
@@ -141,9 +149,9 @@ public:
   };

 protected:
-  int m_num_inputs{ 0 };
+  size_t m_num_inputs{ 0 };
   double m_bias{ 0.0 };
   std::vector<double> m_weights;
 };

 #endif //NODE_H
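A small sketch of the size contract Node::SetWeights enforces, with illustrative counts rather than values from the patch. Recall that the test sets in this commit call AddBiasValue(1), so a 2-feature sample reaches a first-layer node as 3 inputs.

// Sketch, not part of the commit: Node::SetWeights accepts a vector only if
// its size equals the node's input count. Assumes this repository's Node.h.
#include <vector>
#include "Node.h"

int main() {
  Node my_node(3);  // 3 inputs: 2 features plus the bias column
  std::vector<double> ok_weights = { 0.5, -0.25, 0.0 };
  my_node.SetWeights(ok_weights);      // sizes match: accepted

  std::vector<double> bad_weights = { 0.5 };
  // my_node.SetWeights(bad_weights);  // size 1 != 3: throws (a pointer to)
  //                                   // std::logic_error
  return 0;
}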
src/NodeTest.cpp
@@ -81,7 +81,6 @@ UNIT(LearnAND) {
     }
   }

-  size_t num_examples = training_sample_set_with_bias.size();
   size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
   Node my_node(num_features);
   Train(my_node, training_sample_set_with_bias, 0.1, 100);
@@ -116,7 +115,6 @@ UNIT(LearnNAND) {
       training_sample_with_bias.AddBiasValue(1);
     }
   }
-  size_t num_examples = training_sample_set_with_bias.size();
   size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
   Node my_node(num_features);
   Train(my_node, training_sample_set_with_bias, 0.1, 100);
@@ -151,7 +149,6 @@ UNIT(LearnOR) {
       training_sample_with_bias.AddBiasValue(1);
     }
   }
-  size_t num_examples = training_sample_set_with_bias.size();
   size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
   Node my_node(num_features);
   Train(my_node, training_sample_set_with_bias, 0.1, 100);
@@ -185,7 +182,6 @@ UNIT(LearnNOR) {
       training_sample_with_bias.AddBiasValue(1);
     }
   }
-  size_t num_examples = training_sample_set_with_bias.size();
   size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
   Node my_node(num_features);
   Train(my_node, training_sample_set_with_bias, 0.1, 100);
@@ -218,7 +214,6 @@ UNIT(LearnNOT) {
       training_sample_with_bias.AddBiasValue(1);
     }
   }
-  size_t num_examples = training_sample_set_with_bias.size();
   size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
   Node my_node(num_features);
   Train(my_node, training_sample_set_with_bias, 0.1, 100);
@@ -253,7 +248,6 @@ UNIT(LearnXOR) {
       training_sample_with_bias.AddBiasValue(1);
     }
   }
-  size_t num_examples = training_sample_set_with_bias.size();
   size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
   Node my_node(num_features);
   Train(my_node, training_sample_set_with_bias, 0.1, 100);
@@ -278,4 +272,4 @@ int main(int argc, char* argv[]) {
   START_EASYLOGGINGPP(argc, argv);
   microunit::UnitTester::Run();
   return 0;
 }
src/Sample.h
@@ -30,7 +30,7 @@ public:
 protected:
   virtual void PrintMyself(std::ostream& stream) const {
     stream << "Input vector: [";
-    for (int i = 0; i < m_input_vector.size(); i++) {
+    for (size_t i = 0; i < m_input_vector.size(); i++) {
       if (i != 0)
         stream << ", ";
       stream << m_input_vector[i];
@@ -59,7 +59,7 @@ public:
 protected:
   virtual void PrintMyself(std::ostream& stream) const {
     stream << "Input vector: [";
-    for (int i = 0; i < m_input_vector.size(); i++) {
+    for (size_t i = 0; i < m_input_vector.size(); i++) {
       if (i != 0)
         stream << ", ";
       stream << m_input_vector[i];
@@ -69,7 +69,7 @@ protected:
     stream << "; ";

     stream << "Output vector: [";
-    for (int i = 0; i < m_output_vector.size(); i++) {
+    for (size_t i = 0; i < m_output_vector.size(); i++) {
       if (i != 0)
         stream << ", ";
       stream << m_output_vector[i];
@@ -81,4 +81,4 @@ protected:
 };


 #endif // TRAININGSAMPLE_H
|||||||
@@ -5,7 +5,6 @@
|
|||||||
#ifndef UTILS_H
|
#ifndef UTILS_H
|
||||||
#define UTILS_H
|
#define UTILS_H
|
||||||
|
|
||||||
#include "Chrono.h"
|
|
||||||
#include <stdlib.h>
|
#include <stdlib.h>
|
||||||
#include <math.h>
|
#include <math.h>
|
||||||
#include <numeric>
|
#include <numeric>
|
||||||
@@ -22,6 +21,8 @@
|
|||||||
#include <typeinfo>
|
#include <typeinfo>
|
||||||
#include <typeindex>
|
#include <typeindex>
|
||||||
#include <cassert>
|
#include <cassert>
|
||||||
|
|
||||||
|
#include "Chrono.h"
|
||||||
#ifdef _WIN32
|
#ifdef _WIN32
|
||||||
#include <time.h>
|
#include <time.h>
|
||||||
#else
|
#else
|
||||||
@@ -110,11 +111,11 @@ inline void Softmax(std::vector<double> *output) {
|
|||||||
size_t num_elements = output->size();
|
size_t num_elements = output->size();
|
||||||
std::vector<double> exp_output(num_elements);
|
std::vector<double> exp_output(num_elements);
|
||||||
double exp_total = 0.0;
|
double exp_total = 0.0;
|
||||||
for (int i = 0; i < num_elements; i++) {
|
for (size_t i = 0; i < num_elements; i++) {
|
||||||
exp_output[i] = exp((*output)[i]);
|
exp_output[i] = exp((*output)[i]);
|
||||||
exp_total += exp_output[i];
|
exp_total += exp_output[i];
|
||||||
}
|
}
|
||||||
for (int i = 0; i < num_elements; i++) {
|
for (size_t i = 0; i < num_elements; i++) {
|
||||||
(*output)[i] = exp_output[i] / exp_total;
|
(*output)[i] = exp_output[i] / exp_total;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -125,4 +126,4 @@ inline void GetIdMaxElement(const std::vector<double> &output, size_t * class_i
|
|||||||
output.end()));
|
output.end()));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
#endif // UTILS_H
|
#endif // UTILS_H
|
||||||
|
|||||||