Mirror of https://github.com/davidalbertonogueira/MLP.git, synced 2025-12-17 12:24:40 +03:00
Added the possibility to change the internal weights of the network directly by assigning the values.
src/MLP.cpp (43 changed lines)
@@ -48,7 +48,7 @@ void MLP::CreateMLP(const std::vector<uint64_t> & layers_nodes,
   m_num_outputs = m_layers_nodes[m_layers_nodes.size() - 1];
   m_num_hidden_layers = m_layers_nodes.size() - 2;

-  for (int i = 0; i < m_layers_nodes.size() - 1; i++) {
+  for (size_t i = 0; i < m_layers_nodes.size() - 1; i++) {
     m_layers.emplace_back(Layer(m_layers_nodes[i],
                                 m_layers_nodes[i + 1],
                                 layers_activfuncs[i],
@@ -65,7 +65,7 @@ void MLP::SaveMLPNetwork(const std::string & filename)const {
   fwrite(&m_num_hidden_layers, sizeof(m_num_hidden_layers), 1, file);
   if (!m_layers_nodes.empty())
     fwrite(&m_layers_nodes[0], sizeof(m_layers_nodes[0]), m_layers_nodes.size(), file);
-  for (int i = 0; i < m_layers.size(); i++) {
+  for (size_t i = 0; i < m_layers.size(); i++) {
     m_layers[i].SaveLayer(file);
   }
   fclose(file);
@@ -83,7 +83,7 @@ void MLP::LoadMLPNetwork(const std::string & filename) {
   if (!m_layers_nodes.empty())
     fread(&m_layers_nodes[0], sizeof(m_layers_nodes[0]), m_layers_nodes.size(), file);
   m_layers.resize(m_layers_nodes.size() - 1);
-  for (int i = 0; i < m_layers.size(); i++) {
+  for (size_t i = 0; i < m_layers.size(); i++) {
     m_layers[i].LoadLayer(file);
   }
   fclose(file);
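
For context, the Save/Load pair above persists the node-count vector by writing its contiguous buffer straight to disk. A minimal standalone sketch of that round trip (the file name is hypothetical and error checks are elided):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    int main() {
      std::vector<uint64_t> nodes = {2, 3, 1};

      // Write the element count, then the vector's contiguous buffer.
      FILE * out = fopen("mlp_demo.bin", "wb");  // hypothetical file name
      uint64_t count = nodes.size();
      fwrite(&count, sizeof(count), 1, out);
      fwrite(&nodes[0], sizeof(nodes[0]), nodes.size(), out);
      fclose(out);

      // Read it back: count first, then size the vector and fill its buffer.
      FILE * in = fopen("mlp_demo.bin", "rb");
      fread(&count, sizeof(count), 1, in);
      std::vector<uint64_t> loaded(count);
      fread(&loaded[0], sizeof(loaded[0]), loaded.size(), in);
      fclose(in);
      return loaded == nodes ? 0 : 1;
    }
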
@@ -103,7 +103,7 @@ void MLP::GetOutput(const std::vector<double> &input,
   std::vector<double> temp_out(temp_size, 0.0);
   temp_in = input;

-  for (int i = 0; i < m_layers.size(); ++i) {
+  for (size_t i = 0; i < m_layers.size(); ++i) {
     if (i > 0) {
       //Store this layer activation
       if (all_layers_activations != nullptr)
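
All four hunks above make the same fix: std::vector::size() returns size_t, so an int loop counter triggers a signed/unsigned comparison warning (-Wsign-compare on GCC/Clang) and, in principle, misbehaves once the container outgrows INT_MAX. A minimal illustration of the pattern:

    #include <vector>

    double sum(const std::vector<double> & v) {
      double total = 0.0;
      // Before: for (int i = 0; i < v.size(); i++)  -- signed/unsigned mismatch
      // After: the counter matches the type that v.size() actually returns.
      for (size_t i = 0; i < v.size(); i++) {
        total += v[i];
      }
      return total;
    }
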
@@ -260,3 +260,38 @@ void MLP::Train(const std::vector<TrainingSample> &training_sample_set_with_bias
 };
+
+
+size_t MLP::GetNumLayers()
+{
+  return m_layers.size();
+}
+
+std::vector<std::vector<double>> MLP::GetLayerWeights( size_t layer_i )
+{
+  std::vector<std::vector<double>> ret_val;
+  // check parameters (layer_i is unsigned, so only the upper bound can fail)
+  if( layer_i < m_layers.size() )
+  {
+    Layer & current_layer = m_layers[layer_i];
+    for( Node & node : current_layer.GetNodesChangeable() )
+    {
+      ret_val.push_back( node.GetWeights() );
+    }
+    return ret_val;
+  }
+  else
+    throw std::logic_error("Incorrect layer number in GetLayerWeights call");
+
+}
+
+void MLP::SetLayerWeights( size_t layer_i, std::vector<std::vector<double>> & weights )
+{
+  // check parameters (layer_i is unsigned, so only the upper bound can fail)
+  if( layer_i < m_layers.size() )
+  {
+    m_layers[layer_i].SetWeights( weights );
+  }
+  else
+    throw std::logic_error("Incorrect layer number in SetLayerWeights call");
+}


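To round out the commit message, here is a hedged sketch of how the new accessors might be called; the MLP constructor arguments (layer sizes plus per-layer activation names) are an assumption inferred from the CreateMLP parameters above and may differ in the actual header:

    #include <cstdio>
    #include <stdexcept>
    #include <vector>
    #include "MLP.h"

    int main() {
      // Assumed constructor, inferred from CreateMLP(layers_nodes,
      // layers_activfuncs, ...) above; check MLP.h for the real signature.
      MLP mlp({2, 3, 1}, {"sigmoid", "sigmoid"});

      printf("layers: %zu\n", mlp.GetNumLayers());

      // Read layer 0: one weight vector per node.
      std::vector<std::vector<double>> weights = mlp.GetLayerWeights(0);

      // Halve every weight, then assign the values back directly.
      for (std::vector<double> & node_weights : weights)
        for (double & w : node_weights)
          w *= 0.5;
      mlp.SetLayerWeights(0, weights);

      // Out-of-range layer indices throw std::logic_error (thrown by
      // value, so it can be caught by reference).
      try {
        mlp.GetLayerWeights(99);
      } catch (const std::logic_error & e) {
        printf("expected: %s\n", e.what());
      }
      return 0;
    }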