mirror of https://github.com/davidalbertonogueira/MLP.git (synced 2025-12-16 20:07:07 +03:00)
Edit README and Makefile.
@@ -13,6 +13,13 @@ Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "LayerTest", "LayerTest\Laye
 EndProject
 Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "IrisDatasetTest", "IrisDatasetTest\IrisDatasetTest.vcxproj", "{D58D3DD3-DF71-479D-A8EF-C52308C34C11}"
 EndProject
+Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution Items", "Solution Items", "{0D44BA4E-E6F3-41E0-811E-5D832620728E}"
+	ProjectSection(SolutionItems) = preProject
+		..\logo.png = ..\logo.png
+		..\Makefile = ..\Makefile
+		..\README.md = ..\README.md
+	EndProjectSection
+EndProject
 Global
 	GlobalSection(SolutionConfigurationPlatforms) = preSolution
 		Debug|x64 = Debug|x64
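
The new Solution Items folder only surfaces logo.png, the Makefile, and README.md in the Visual Studio solution explorer; SolutionItems entries are not compiled, so the build itself is unaffected.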

Makefile
@@ -29,7 +29,7 @@ OBJS = $(SRCS:.cpp=.o)
 TXTS = $(wildcard *.txt)
 SCRIPTS = $(wildcard *.sh)
 
-all : MLPTest LayerTest NodeTest $(PROJNAME).a $(PROJNAME).so
+all : IrisDatasetTest MLPTest LayerTest NodeTest $(PROJNAME).a $(PROJNAME).so
 
 $(PROJNAME).a : $(SOURCEPATH)/MLP.o
 	@echo Creating static lib $@
@@ -42,6 +42,10 @@ $(PROJNAME).so : $(SOURCEPATH)/MLP.o
 %.o: %.cpp $(HDRS)
 	$(CC) -c $(CFLAGS) $(LFLAGS) -o $@ $<
 
+IrisDatasetTest: $(SOURCEPATH)/IrisDatasetTest.o $(SOURCEPATH)/MLP.o
+	@echo Compiling program $@
+	$(CC) $^ $(CFLAGS) $(LFLAGS) -o $@
+
 MLPTest: $(SOURCEPATH)/MLPTest.o $(SOURCEPATH)/MLP.o
 	@echo Compiling program $@
 	$(CC) $^ $(CFLAGS) $(LFLAGS) -o $@
@@ -60,5 +64,5 @@ clean:
 
 cleanall:
 	@echo Clean All
-	rm -f *~ $(SOURCEPATH)/*.o *~ $(PROJNAME).a $(PROJNAME).so MLPTest LayerTest NodeTest
+	rm -f *~ $(SOURCEPATH)/*.o *~ $(PROJNAME).a $(PROJNAME).so IrisDatasetTest MLPTest LayerTest NodeTest
 	@echo Success
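
With these hunks, the default `make` invocation (the `all` target) also builds IrisDatasetTest, and `make cleanall` removes its binary along with the others.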

README.md
@@ -1,6 +1,78 @@

<img alt="MLP logo" src="logo.png" width="250" align="right" />

# MLP
## About

Simple multilayer perceptron C++ implementation.
MLP stands for [multilayer perceptron](https://en.wikipedia.org/wiki/Multilayer_perceptron).
This project is a simple & fast C++ implementation of an MLP, oriented towards hacking and rapid prototyping.
It is well tested and includes multiple tests for each component as well as use cases.

This project is maintained by [David Nogueira](http://web.tecnico.ulisboa.pt/david.jacome.nogueira/).

David Nogueira, 2016.11.01

## Featuring

- C++ implementation.
- Modular design, with classes built on top of each other: Node, Layer, and network classes.
- Easy to use and to hack.
- Simple, fast, and thread-safe.
- Tests for each component module as well as use-case tests.
- Supports saving & loading models.

## OS Support

MLP supports both Windows (MSVC) and Linux (g++/clang++).

## Tests/Example Code

Some example programs are included with the source code; they build their training data as `TrainingSample` objects (a sketch follows the list).

- [`IrisDatasetTest.cpp`](./src/IrisDatasetTest.cpp) - Trains an MLP on the [Iris data set](https://archive.ics.uci.edu/ml/datasets/Iris) using backpropagation and predicts the classes.
- [`MLPTest.cpp`](./src/MLPTest.cpp) - Includes tests that train an MLP for AND, NAND, NOR, OR, NOT, and XOR using backpropagation.
- [`Node.cpp`](./src/Node.cpp) - Includes tests that train a single node (i.e., a perceptron) for AND, NAND, NOR, OR, NOT, and XOR using backpropagation. (*A single perceptron cannot learn the XOR function.*)
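
The exact `TrainingSample` interface lives in the sources; as a minimal sketch, and assuming it can be list-initialized from an input vector and an expected-output vector (treat that constructor shape as an assumption, not documented API), an AND-gate training set could look like:

```cpp
#include "MLP.h"
#include <vector>

// Hypothetical AND-gate training set.
// Assumption: TrainingSample is constructible from
// ({inputs...}, {expected outputs...}); check the TrainingSample
// definition in the sources for the real interface.
std::vector<TrainingSample> training_sample_set_with_bias = {
  // { bias, x1, x2 }, { x1 AND x2 }
  { { 1.0, 0.0, 0.0 }, { 0.0 } },
  { { 1.0, 0.0, 1.0 }, { 0.0 } },
  { { 1.0, 1.0, 0.0 }, { 0.0 } },
  { { 1.0, 1.0, 1.0 }, { 1.0 } },
};
```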

## Example

Let us look at an example. After loading the data and creating the training/dev/test data structures, we will create an MLP with input size 5 (assuming 4 input data features + 1 bias), a hidden layer of 4 neurons, and an output layer with 3 outputs (3 possible predicted classes). The activation functions will be a sigmoid for the hidden layer and a linear one for the output layer.

```cpp
#include "MLP.h"
#include <vector>

// ...
std::vector<TrainingSample> training_set;
// ...

// Assuming 4 inputs + 1 bias.
// 1 hidden layer(s) of 4 neurons.
// Assuming 3 outputs.
MLP my_mlp({ 4 + 1, 4, 3 }, { "sigmoid", "linear" }, false);

int loops = 5000;
my_mlp.Train(training_set, .01, loops, 0.10, false);

int correct = 0;
for (size_t j = 0; j < training_set.size(); ++j) {
  std::vector<double> guess;
  my_mlp.GetOutput(training_set[j].input_vector(), &guess);
  size_t class_id;
  my_mlp.GetOutputClass(guess, &class_id);

  // Compare class_id with the gold class id for each instance,
  // incrementing 'correct' on a match.
}
```
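
Per the `Train` declaration in `MLP.h` (also touched by this commit), the positional arguments above are `learning_rate = .01`, `max_iterations = loops`, and `min_error_cost = 0.10`; the trailing `false` is a further defaulted flag whose name is not visible here.
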
Saving and loading models is also very intuitive:
```cpp
#include "MLP.h"
{
  // ...
  my_mlp.SaveMLPNetwork(std::string("../../data/iris.mlp")); // saving
}
{
  MLP my_mlp(std::string("../../data/iris.mlp")); // load a model in the constructor
  // ...
}
```
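
A quick way to validate the round trip is to compare predictions before and after reloading; a minimal sketch, reusing `my_mlp` and `training_set` from the example above:

```cpp
#include "MLP.h"
#include <vector>

// Sketch: save, reload, and compare one prediction.
my_mlp.SaveMLPNetwork(std::string("../../data/iris.mlp"));
MLP reloaded(std::string("../../data/iris.mlp"));

std::vector<double> before, after;
my_mlp.GetOutput(training_set[0].input_vector(), &before);
reloaded.GetOutput(training_set[0].input_vector(), &after);
// 'before' and 'after' should match if save/load preserves the weights.
```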
src/IrisDatasetTest.cpp

@@ -115,19 +115,16 @@ int main(int argc, char *argv[]) {
   }
 
   {
-    /* 4 inputs + 1 bias.
-     * 1 hidden layer(s) of 4 neurons.
-     * 3 outputs (1 per iris_class)
-     */
+    // 4 inputs + 1 bias.
+    // 1 hidden layer(s) of 4 neurons.
+    // 3 outputs (1 per iris_class)
     MLP my_mlp({ 4 + 1, 4 ,3 }, { "sigmoid", "linear" }, false);
 
-
     int loops = 5000;
 
-
     // Train the network with backpropagation.
     LOG(INFO) << "Training for " << loops << " loops over data.";
-    my_mlp.UpdateMiniBatch(training_sample_set_with_bias, .01, loops, 0.10, false);
+    my_mlp.Train(training_sample_set_with_bias, .01, loops, 0.10, false);
 
     my_mlp.SaveMLPNetwork(std::string("../../data/iris.mlp"));
   }
src/MLP.cpp

@@ -148,7 +148,7 @@ void MLP::UpdateWeights(const std::vector<std::vector<double>> & all_layers_acti
   }
 };
 
-void MLP::UpdateMiniBatch(const std::vector<TrainingSample> &training_sample_set_with_bias,
+void MLP::Train(const std::vector<TrainingSample> &training_sample_set_with_bias,
                 double learning_rate,
                 int max_iterations,
                 double min_error_cost,
src/MLP.h

@@ -35,7 +35,7 @@ public:
                  std::vector<std::vector<double>> * all_layers_activations = nullptr) const;
   void GetOutputClass(const std::vector<double> &output, size_t * class_id) const;
 
-  void UpdateMiniBatch(const std::vector<TrainingSample> &training_sample_set_with_bias,
+  void Train(const std::vector<TrainingSample> &training_sample_set_with_bias,
              double learning_rate,
              int max_iterations = 5000,
              double min_error_cost = 0.001,
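
The rename leaves the parameter list untouched, and since max_iterations and min_error_cost carry defaults (as must any parameters after them), a post-rename call can be as short as this sketch (an illustration against the declaration above, not code from the commit):

```cpp
// learning_rate given explicitly; max_iterations (5000) and
// min_error_cost (0.001) fall back to the defaults declared in MLP.h.
my_mlp.Train(training_sample_set_with_bias, 0.5);
```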
src/MLPTest.cpp

@@ -41,7 +41,7 @@ UNIT(LearnAND) {
   size_t num_outputs = training_sample_set_with_bias[0].GetOutputVectorSize();
   MLP my_mlp({ num_features, 2 ,num_outputs }, { "sigmoid", "linear" });
   //Train MLP
-  my_mlp.UpdateMiniBatch(training_sample_set_with_bias, 0.5, 500, 0.25);
+  my_mlp.Train(training_sample_set_with_bias, 0.5, 500, 0.25);
 
   for (const auto & training_sample : training_sample_set_with_bias) {
     std::vector<double> output;
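
In this and each of the hunks that follow, the positional arguments are learning_rate = 0.5, max_iterations = 500, and min_error_cost = 0.25, matching the Train declaration in MLP.h; only the function name changes.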
@@ -81,7 +81,7 @@ UNIT(LearnNAND) {
   size_t num_outputs = training_sample_set_with_bias[0].GetOutputVectorSize();
   MLP my_mlp({ num_features, 2 ,num_outputs }, { "sigmoid", "linear" });
   //Train MLP
-  my_mlp.UpdateMiniBatch(training_sample_set_with_bias, 0.5, 500, 0.25);
+  my_mlp.Train(training_sample_set_with_bias, 0.5, 500, 0.25);
 
   for (const auto & training_sample : training_sample_set_with_bias) {
     std::vector<double> output;
@@ -121,7 +121,7 @@ UNIT(LearnOR) {
   size_t num_outputs = training_sample_set_with_bias[0].GetOutputVectorSize();
   MLP my_mlp({ num_features, 2 ,num_outputs }, { "sigmoid", "linear" });
   //Train MLP
-  my_mlp.UpdateMiniBatch(training_sample_set_with_bias, 0.5, 500, 0.25);
+  my_mlp.Train(training_sample_set_with_bias, 0.5, 500, 0.25);
 
   for (const auto & training_sample : training_sample_set_with_bias) {
     std::vector<double> output;
@@ -161,7 +161,7 @@ UNIT(LearnNOR) {
   size_t num_outputs = training_sample_set_with_bias[0].GetOutputVectorSize();
   MLP my_mlp({ num_features, 2 ,num_outputs }, { "sigmoid", "linear" });
   //Train MLP
-  my_mlp.UpdateMiniBatch(training_sample_set_with_bias, 0.5, 500, 0.25);
+  my_mlp.Train(training_sample_set_with_bias, 0.5, 500, 0.25);
 
   for (const auto & training_sample : training_sample_set_with_bias) {
     std::vector<double> output;
@@ -199,7 +199,7 @@ UNIT(LearnXOR) {
   size_t num_outputs = training_sample_set_with_bias[0].GetOutputVectorSize();
   MLP my_mlp({ num_features, 2 ,num_outputs }, { "sigmoid", "linear" });
   //Train MLP
-  my_mlp.UpdateMiniBatch(training_sample_set_with_bias, 0.5, 500, 0.25);
+  my_mlp.Train(training_sample_set_with_bias, 0.5, 500, 0.25);
 
   for (const auto & training_sample : training_sample_set_with_bias) {
     std::vector<double> output;
@@ -235,7 +235,7 @@ UNIT(LearnNOT) {
   size_t num_outputs = training_sample_set_with_bias[0].GetOutputVectorSize();
   MLP my_mlp({ num_features, 2 ,num_outputs }, { "sigmoid", "linear" });
   //Train MLP
-  my_mlp.UpdateMiniBatch(training_sample_set_with_bias, 0.5, 500, 0.25);
+  my_mlp.Train(training_sample_set_with_bias, 0.5, 500, 0.25);
 
   for (const auto & training_sample : training_sample_set_with_bias) {
     std::vector<double> output;
@@ -273,7 +273,7 @@ UNIT(LearnX1) {
   size_t num_outputs = training_sample_set_with_bias[0].GetOutputVectorSize();
   MLP my_mlp({ num_features, 2 ,num_outputs }, { "sigmoid", "linear" });
   //Train MLP
-  my_mlp.UpdateMiniBatch(training_sample_set_with_bias, 0.5, 500, 0.25);
+  my_mlp.Train(training_sample_set_with_bias, 0.5, 500, 0.25);
 
   for (const auto & training_sample : training_sample_set_with_bias) {
     std::vector<double> output;
@@ -311,7 +311,7 @@ UNIT(LearnX2) {
   size_t num_outputs = training_sample_set_with_bias[0].GetOutputVectorSize();
   MLP my_mlp({ num_features, 2 ,num_outputs }, { "sigmoid", "linear" });
   //Train MLP
-  my_mlp.UpdateMiniBatch(training_sample_set_with_bias, 0.5, 500, 0.25);
+  my_mlp.Train(training_sample_set_with_bias, 0.5, 500, 0.25);
 
   for (const auto & training_sample : training_sample_set_with_bias) {
     std::vector<double> output;