Internal architecture changes (to allow different activation functions for each layer and to allow hidden layers to have different numbers of nodes); see the usage sketch below the commit metadata.

This commit is contained in:
davidjacnogueira
2016-11-09 02:31:53 +00:00
parent f647b05f70
commit 0a636416ed
7 changed files with 193 additions and 100 deletions
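For context, a minimal sketch of how the new constructor appears to be used in the updated tests below. This is an assumption-laden illustration, not the library's documented API: the first argument seems to list the node count of every layer (input, hidden..., output) and the second one activation name per non-input layer; the header name, the TrainingSample type, the hidden-layer widths (4 and 3), and the meaning of the trailing boolean are assumptions, while the UpdateMiniBatch parameters are copied verbatim from the tests.

#include <vector>
#include <string>
#include "MLP.h"              // header name assumed

void TrainExample(const std::vector<TrainingSample> &samples) {   // sample type assumed
  size_t num_features = samples[0].GetInputVectorSize();
  size_t num_outputs  = samples[0].GetOutputVectorSize();

  // Old interface: one node count (2) shared by every hidden layer.
  // MLP my_mlp(num_features, num_outputs, 1 /*hidden layers*/, 2 /*nodes each*/, false);

  // New interface: per-layer sizes and per-layer activation functions.
  MLP my_mlp({ num_features, 4, 3, num_outputs },      // two hidden layers of different widths (illustrative)
             { "sigmoid", "sigmoid", "linear" },       // one activation per non-input layer
             false);                                   // trailing flag copied from the tests

  // Training call with the same parameters the tests use (0.5, 500, 0.25).
  my_mlp.UpdateMiniBatch(samples, 0.5, 500, 0.25);
}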


@@ -39,7 +39,7 @@ UNIT(LearnAND) {
size_t num_examples = training_sample_set_with_bias.size();
size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
size_t num_outputs = training_sample_set_with_bias[0].GetOutputVectorSize();
-MLP my_mlp(num_features, num_outputs, 1, 2, false);
+MLP my_mlp({ num_features, 2, num_outputs }, { "sigmoid", "linear" }, false);
//Train MLP
my_mlp.UpdateMiniBatch(training_sample_set_with_bias, 0.5, 500, 0.25);
@@ -79,7 +79,7 @@ UNIT(LearnNAND) {
size_t num_examples = training_sample_set_with_bias.size();
size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
size_t num_outputs = training_sample_set_with_bias[0].GetOutputVectorSize();
-MLP my_mlp(num_features, num_outputs, 1, 2, false);
+MLP my_mlp({ num_features, 2, num_outputs }, { "sigmoid", "linear" }, false);
//Train MLP
my_mlp.UpdateMiniBatch(training_sample_set_with_bias, 0.5, 500, 0.25);
@@ -119,7 +119,7 @@ UNIT(LearnOR) {
size_t num_examples = training_sample_set_with_bias.size();
size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
size_t num_outputs = training_sample_set_with_bias[0].GetOutputVectorSize();
-MLP my_mlp(num_features, num_outputs, 1, 2, false);
+MLP my_mlp({ num_features, 2, num_outputs }, { "sigmoid", "linear" }, false);
//Train MLP
my_mlp.UpdateMiniBatch(training_sample_set_with_bias, 0.5, 500, 0.25);
@@ -159,7 +159,7 @@ UNIT(LearnNOR) {
size_t num_examples = training_sample_set_with_bias.size();
size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
size_t num_outputs = training_sample_set_with_bias[0].GetOutputVectorSize();
-MLP my_mlp(num_features, num_outputs, 1, 2, false);
+MLP my_mlp({ num_features, 2, num_outputs }, { "sigmoid", "linear" }, false);
//Train MLP
my_mlp.UpdateMiniBatch(training_sample_set_with_bias, 0.5, 500, 0.25);
@@ -197,9 +197,9 @@ UNIT(LearnXOR) {
size_t num_examples = training_sample_set_with_bias.size();
size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
size_t num_outputs = training_sample_set_with_bias[0].GetOutputVectorSize();
-MLP my_mlp(num_features, num_outputs, 1, 2, false);
+MLP my_mlp({ num_features, 2, num_outputs }, { "sigmoid", "linear" }, false);
//Train MLP
-my_mlp.UpdateMiniBatch(training_sample_set_with_bias, 0.5, 50'000, 0.25);
+my_mlp.UpdateMiniBatch(training_sample_set_with_bias, 0.5, 500, 0.25);
for (const auto & training_sample : training_sample_set_with_bias) {
std::vector<double> output;
@@ -233,7 +233,7 @@ UNIT(LearnNOT) {
size_t num_examples = training_sample_set_with_bias.size();
size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
size_t num_outputs = training_sample_set_with_bias[0].GetOutputVectorSize();
-MLP my_mlp(num_features, num_outputs, 1, 2, false);
+MLP my_mlp({ num_features, 2, num_outputs }, { "sigmoid", "linear" }, false);
//Train MLP
my_mlp.UpdateMiniBatch(training_sample_set_with_bias, 0.5, 500, 0.25);
@@ -271,7 +271,7 @@ UNIT(LearnX1) {
size_t num_examples = training_sample_set_with_bias.size();
size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
size_t num_outputs = training_sample_set_with_bias[0].GetOutputVectorSize();
-MLP my_mlp(num_features, num_outputs, 1, 2, false);
+MLP my_mlp({ num_features, 2, num_outputs }, { "sigmoid", "linear" }, false);
//Train MLP
my_mlp.UpdateMiniBatch(training_sample_set_with_bias, 0.5, 500, 0.25);
@@ -309,7 +309,7 @@ UNIT(LearnX2) {
size_t num_examples = training_sample_set_with_bias.size();
size_t num_features = training_sample_set_with_bias[0].GetInputVectorSize();
size_t num_outputs = training_sample_set_with_bias[0].GetOutputVectorSize();
-MLP my_mlp(num_features, num_outputs, 1, 2, false);
+MLP my_mlp({ num_features, 2, num_outputs }, { "sigmoid", "linear" }, false);
//Train MLP
my_mlp.UpdateMiniBatch(training_sample_set_with_bias, 0.5, 500, 0.25);