-rw-r--r--   .gitignore           1
-rw-r--r--   DualMLP.cpp         72
-rw-r--r--   DualMLP.hpp         23
-rw-r--r--   FullyRN.cpp         40
-rw-r--r--   FullyRN.hpp         17
-rw-r--r--   MemCell.cpp         83
-rw-r--r--   MemCell.hpp         26
-rw-r--r--   NNUtils.cpp         54
-rw-r--r--   NNUtils.hpp         28
-rw-r--r--   NeuralNet.hpp       26
-rw-r--r--   NeuralNetworks.hpp  11
-rw-r--r--   Neuron.cpp          57
-rw-r--r--   Neuron.hpp          22
-rw-r--r--   Node.hpp            21
-rw-r--r--   NodeLayer.cpp       75
-rw-r--r--   NodeLayer.hpp       30
-rw-r--r--   Population.cpp      95
-rw-r--r--   Population.hpp      30
-rw-r--r--   README.md           46
-rw-r--r--   SimpleRN.cpp        35
-rw-r--r--   SimpleRN.hpp        17
-rw-r--r--   SingleMLP.cpp       64
-rw-r--r--   SingleMLP.hpp       22
23 files changed, 895 insertions, 0 deletions
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..7bc7e11
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1 @@
+libNeuralNetworks.a
diff --git a/DualMLP.cpp b/DualMLP.cpp
new file mode 100644
index 0000000..3a854c0
--- /dev/null
+++ b/DualMLP.cpp
@@ -0,0 +1,72 @@
+#include "DualMLP.hpp"
+
+DualMLP::DualMLP(unsigned inputCount, unsigned hiddenNodeCount, unsigned outputCount, NodeClass nodeClass, bool zeroed)
+: m_firstHiddenLayer (inputCount, hiddenNodeCount, nodeClass, zeroed),
+  m_secondHiddenLayer (hiddenNodeCount, hiddenNodeCount, nodeClass, zeroed),
+  m_outputLayer (hiddenNodeCount, outputCount, nodeClass, zeroed)
+{
+    m_inputCount = inputCount;
+    m_hiddenNodeCount = hiddenNodeCount;
+    m_outputCount = outputCount;
+    m_nodeClass = nodeClass;
+}
+
+void DualMLP::setChromosome(const Chromosome &chromosome)
+{
+    if (chromosome.size() != getChromosomeSize())
+    {
+        return;
+    }
+
+    auto l1 = chromosome.begin() + m_firstHiddenLayer.getChromosomeSize();
+    auto l2 = l1 + m_secondHiddenLayer.getChromosomeSize();
+
+    Chromosome chr1(chromosome.begin(), l1);
+    Chromosome chr2(l1, l2);
+    Chromosome chr3(l2, chromosome.end());
+
+    m_firstHiddenLayer.setChromosome(chr1);
+    m_secondHiddenLayer.setChromosome(chr2);
+    m_outputLayer.setChromosome(chr3);
+}
+
+Chromosome DualMLP::getChromosome() const
+{
+    Chromosome chromosome;
+
+    Chromosome chr1 = m_firstHiddenLayer.getChromosome();
+    Chromosome chr2 = m_secondHiddenLayer.getChromosome();
+    Chromosome chr3 = m_outputLayer.getChromosome();
+
+    chromosome.insert(chromosome.end(), chr1.begin(), chr1.end());
+    chromosome.insert(chromosome.end(), chr2.begin(), chr2.end());
+    chromosome.insert(chromosome.end(), chr3.begin(), chr3.end());
+
+    return chromosome;
+}
+
+unsigned DualMLP::getChromosomeSize() const
+{
+    unsigned chrSize = 0;
+    chrSize += m_firstHiddenLayer.getChromosomeSize();
+    chrSize += m_secondHiddenLayer.getChromosomeSize();
+    chrSize += m_outputLayer.getChromosomeSize();
+
+    return chrSize;
+}
+
+std::vector<float> DualMLP::io(const std::vector<float> &inputs)
+{
+    std::vector<float> response;
+
+    if (inputs.size() != m_inputCount)
+    {
+        return response;
+    }
+
+    response = m_firstHiddenLayer.io(inputs);
+    response = m_secondHiddenLayer.io(response);
+    response = m_outputLayer.io(response);
+
+    return response;
+}
diff --git a/DualMLP.hpp b/DualMLP.hpp
new file mode 100644
index 0000000..2c8feca
--- /dev/null
+++ b/DualMLP.hpp
@@ -0,0 +1,23 @@
+#ifndef __DUALMLP_HPP__
+#define __DUALMLP_HPP__
+
+#include "NeuralNet.hpp"
+
+class DualMLP : public NeuralNet
+{
+public:
+    DualMLP(unsigned inputCount, unsigned hiddenNodeCount, unsigned outputCount, NodeClass nodeClass, bool zeroed = false);
+
+    void setChromosome(const Chromosome &chromosome);
+    Chromosome getChromosome() const;
+    unsigned getChromosomeSize() const;
+
+    virtual std::vector<float> io(const std::vector<float> &inputs);
+
+protected:
+    NodeLayer m_firstHiddenLayer;
+    NodeLayer m_secondHiddenLayer;
+    NodeLayer m_outputLayer;
+};
+
+#endif // __DUALMLP_HPP__
diff --git a/FullyRN.cpp b/FullyRN.cpp
new file mode 100644
index 0000000..ecc88d5
--- /dev/null
+++ b/FullyRN.cpp
@@ -0,0 +1,40 @@
+#include "FullyRN.hpp"
+
+FullyRN::FullyRN(unsigned inputCount, unsigned hiddenNodeCount, unsigned outputCount, NodeClass nodeClass, bool zeroed)
+: DualMLP(0, 0, 0, NEURON, true)
+{
+    unsigned contextSize = inputCount + hiddenNodeCount + outputCount;
+    m_context.resize(contextSize, 0.f);
+
+    m_firstHiddenLayer = NodeLayer(inputCount + contextSize, inputCount, nodeClass, zeroed);
+    m_secondHiddenLayer = NodeLayer(contextSize, hiddenNodeCount, nodeClass, zeroed);
+    m_outputLayer = NodeLayer(contextSize, outputCount, nodeClass, zeroed);
+
+    m_inputCount = inputCount;
+    m_hiddenNodeCount = hiddenNodeCount;
+    m_outputCount = outputCount;
+    m_nodeClass = nodeClass;
+}
+
+std::vector<float> FullyRN::io(const std::vector<float> &inputs)
+{
+    std::vector<float> response;
+
+    if (inputs.size() != m_inputCount)
+    {
+        return response;
+    }
+
+    auto stateInput = inputs;
+    stateInput.insert(stateInput.end(), m_context.begin(), m_context.end());
+    response = m_firstHiddenLayer.io(stateInput);
+
+    auto hiddenOutput = m_secondHiddenLayer.io(m_context);
+    response.insert(response.end(), hiddenOutput.begin(), hiddenOutput.end());
+
+    auto outputOutput = m_outputLayer.io(m_context);
+    response.insert(response.end(), outputOutput.begin(), outputOutput.end());
+
+    m_context = response;
+    return outputOutput;
+}
diff --git a/FullyRN.hpp b/FullyRN.hpp
new file mode 100644
index 0000000..a114140
--- /dev/null
+++ b/FullyRN.hpp
@@ -0,0 +1,17 @@
+#ifndef __FULLYRN_HPP__
+#define __FULLYRN_HPP__
+
+#include "DualMLP.hpp"
+
+class FullyRN : public DualMLP
+{
+public:
+    FullyRN(unsigned inputCount, unsigned hiddenNodeCount, unsigned outputCount, NodeClass nodeClass, bool zeroed = false);
+
+    std::vector<float> io(const std::vector<float> &inputs);
+
+private:
+    std::vector<float> m_context;
+};
+
+#endif // __FULLYRN_HPP__
diff --git a/MemCell.cpp b/MemCell.cpp
new file mode 100644
index 0000000..4e5b17b
--- /dev/null
+++ b/MemCell.cpp
@@ -0,0 +1,83 @@
+#include "MemCell.hpp"
+
+MemCell::MemCell(unsigned inputCount, bool zeroed)
+: m_input (inputCount, zeroed),
+  m_inputGate (inputCount + 1, zeroed),
+  m_memGate (inputCount + 1, zeroed),
+  m_outputGate(inputCount + 1, zeroed)
+{
+    m_inputCount = inputCount;
+}
+
+void MemCell::setChromosome(const Chromosome &chromosome)
+{
+    unsigned chrSize = getChromosomeSize();
+
+    if (chromosome.size() != chrSize)
+    {
+        return;
+    }
+
+    auto l1 = chromosome.begin() + m_input.getChromosomeSize();
+    auto l2 = l1 + m_inputGate.getChromosomeSize();
+    auto l3 = l2 + m_memGate.getChromosomeSize();
+
+    Chromosome chrInput(chromosome.begin(), l1);
+    Chromosome chrInputGate(l1, l2);
+    Chromosome chrMemGate(l2, l3);
+    Chromosome chrOutputGate(l3, chromosome.end());
+
+    m_input.setChromosome(chrInput);
+    m_inputGate.setChromosome(chrInputGate);
+    m_memGate.setChromosome(chrMemGate);
+    m_outputGate.setChromosome(chrOutputGate);
+}
+
+Chromosome MemCell::getChromosome() const
+{
+    Chromosome chromosome;
+
+    Chromosome chrInput = m_input.getChromosome();
+    Chromosome chrInputGate = m_inputGate.getChromosome();
+    Chromosome chrMemGate = m_memGate.getChromosome();
+    Chromosome chrOutputGate = m_outputGate.getChromosome();
+
+    chromosome.insert(chromosome.end(), chrInput.begin(), chrInput.end());
+    chromosome.insert(chromosome.end(), chrInputGate.begin(), chrInputGate.end());
+    chromosome.insert(chromosome.end(), chrMemGate.begin(), chrMemGate.end());
+    chromosome.insert(chromosome.end(), chrOutputGate.begin(), chrOutputGate.end());
+
+    return chromosome;
+}
+
+unsigned MemCell::getChromosomeSize() const
+{
+    unsigned chrSize = 0;
+
+    chrSize += m_input.getChromosomeSize();
+    chrSize += m_inputGate.getChromosomeSize();
+    chrSize += m_memGate.getChromosomeSize();
+    chrSize += m_outputGate.getChromosomeSize();
+
+    return chrSize;
+}
+
+float MemCell::io(const std::vector<float> &inputs)
+{
+    float response = 0.f;
+
+    if (inputs.size() != m_inputCount)
+    {
+        return response;
+    }
+
+    std::vector<float> gateInputs = inputs;
+    gateInputs.push_back(m_context);
+
+    response = m_input.io(inputs) * m_inputGate.io(gateInputs);
+    float memory = m_context * m_memGate.io(gateInputs);
+    m_context = memory + response;
+
+    response = sigmoid(m_context) * m_outputGate.io(gateInputs);
+    return response;
+}
diff --git a/MemCell.hpp b/MemCell.hpp
new file mode 100644
index 0000000..4b1a38f
--- /dev/null
+++ b/MemCell.hpp
@@ -0,0 +1,26 @@
+#ifndef __MEMCELL_HPP__
+#define __MEMCELL_HPP__
+
+#include "Neuron.hpp"
+
+class MemCell : public Node
+{
+public:
+    MemCell(unsigned inputCount, bool zeroed = false);
+
+    void setChromosome(const Chromosome &chromosome);
+    Chromosome getChromosome() const;
+    unsigned getChromosomeSize() const;
+
+    float io(const std::vector<float> &inputs);
+
+private:
+    Neuron m_input;
+    Neuron m_inputGate;
+    Neuron m_memGate;
+    Neuron m_outputGate;
+
+    float m_context = 0.f;
+};
+
+#endif // __MEMCELL_HPP__
diff --git a/NNUtils.cpp b/NNUtils.cpp
new file mode 100644
index 0000000..611768a
--- /dev/null
+++ b/NNUtils.cpp
@@ -0,0 +1,54 @@
+#include "NNUtils.hpp"
+
+std::default_random_engine rGenerator;
+
+void seedRand()
+{
+    rGenerator.seed(std::chrono::system_clock::now().time_since_epoch().count());
+}
+
+float realRand(float low, float hi)
+{
+    std::uniform_real_distribution<float> fDistributor(low, hi);
+    return fDistributor(rGenerator);
+}
+
+int intRand(int low, int hi)
+{
+    std::uniform_int_distribution<int> iDistributor(low, hi - 1);
+    return iDistributor(rGenerator);
+}
+
+float sigmoid(float input, float sensitivity)
+{
+    return 1.f / (1.f + expf(-input / sensitivity));
+}
+
+void crossover(const Chromosome &parent, const Chromosome &mother, Chromosome &child1, Chromosome &child2)
+{
+    if (parent.size() != mother.size())
+    {
+        return;
+    }
+
+    child1.clear();
+    child2.clear();
+
+    int cop = intRand(0, parent.size());
+
+    child1.insert(child1.end(), parent.begin(), parent.begin() + cop);
+    child1.insert(child1.end(), mother.begin() + cop, mother.end());
+    child2.insert(child2.end(), mother.begin(), mother.begin() + cop);
+    child2.insert(child2.end(), parent.begin() + cop, parent.end());
+}
+
+void mutate(Chromosome &chromosome, float mutationRate)
+{
+    for (auto &i : chromosome)
+    {
+        if (realRand(0.f, 1.f) < mutationRate)
+        {
+            i = realRand(-1.f, 1.f);
+        }
+    }
+}
diff --git a/NNUtils.hpp b/NNUtils.hpp
new file mode 100644
index 0000000..e2c12ba
--- /dev/null
+++ b/NNUtils.hpp
@@ -0,0 +1,28 @@
+#ifndef __NNUTILS_HPP__
+#define __NNUTILS_HPP__
+
+#include <cmath>
+#include <vector>
+#include <random>
+#include <chrono>
+#include <memory>
+
+enum NodeClass { NEURON, MEMORY_CELL };
+
+enum NeuralNetClass { SINGLE_MLP, DUAL_MLP, SIMPLE_RN, FULLY_RN };
+
+typedef std::vector<float> Chromosome;
+
+void seedRand();
+
+float realRand(float low, float hi);
+
+int intRand(int low, int hi);
+
+float sigmoid(float input, float sensitivity = 1.f);
+
+void crossover(const Chromosome &parent, const Chromosome &mother, Chromosome &child1, Chromosome &child2);
+
+void mutate(Chromosome &chromosome, float mutationRate = 0.001f);
+
+#endif // __NNUTILS_HPP__
diff --git a/NeuralNet.hpp b/NeuralNet.hpp
new file mode 100644
index 0000000..70d3e87
--- /dev/null
+++ b/NeuralNet.hpp
@@ -0,0 +1,26 @@
+#ifndef __NEURALNET_HPP__
+#define __NEURALNET_HPP__
+
+#include "NodeLayer.hpp"
+
+class NeuralNet
+{
+public:
+    virtual void setChromosome(const Chromosome &chromosome) = 0;
+    virtual Chromosome getChromosome() const = 0;
+    virtual unsigned getChromosomeSize() const = 0;
+
+    virtual std::vector<float> io(const std::vector<float> &inputs) = 0;
+
+    unsigned getInputCount() const { return m_inputCount; }
+    unsigned getHiddenNodeCount() const { return m_hiddenNodeCount; }
+    unsigned getOutputCount() const { return m_outputCount; }
+
+protected:
+    unsigned m_inputCount = 0;
+    unsigned m_hiddenNodeCount = 0;
+    unsigned m_outputCount = 0;
+    NodeClass m_nodeClass;
+};
+
+#endif // __NEURALNET_HPP__
diff --git a/NeuralNetworks.hpp b/NeuralNetworks.hpp
new file mode 100644
index 0000000..0edd503
--- /dev/null
+++ b/NeuralNetworks.hpp
@@ -0,0 +1,11 @@
+#ifndef __NEURALNETWORKS_HPP__
+#define __NEURALNETWORKS_HPP__
+
+#include "Population.hpp"
+
+#include "SingleMLP.hpp"
+#include "DualMLP.hpp"
+#include "SimpleRN.hpp"
+#include "FullyRN.hpp"
+
+#endif // __NEURALNETWORKS_HPP__
diff --git a/Neuron.cpp b/Neuron.cpp
new file mode 100644
index 0000000..647d525
--- /dev/null
+++ b/Neuron.cpp
@@ -0,0 +1,57 @@
+#include "Neuron.hpp"
+
+Neuron::Neuron(unsigned inputCount, bool zeroed)
+{
+    m_inputCount = inputCount;
+
+    while (inputCount--)
+    {
+        m_weights.push_back(zeroed ? 0.f : realRand(-1.f, 1.f));
+    }
+
+    m_bias = zeroed ? 0.f : realRand(-1.f, 1.f);
+}
+
+void Neuron::setChromosome(const Chromosome &chromosome)
+{
+    unsigned chrSize = getChromosomeSize();
+
+    if (chrSize != chromosome.size())
+    {
+        return;
+    }
+
+    m_weights = Chromosome(chromosome.begin(), chromosome.end() - 1);
+    m_bias = chromosome.back();
+}
+
+Chromosome Neuron::getChromosome() const
+{
+    Chromosome chromosome;
+
+    for (auto &i : m_weights)
+    {
+        chromosome.push_back(i);
+    }
+
+    chromosome.push_back(m_bias);
+
+    return chromosome;
+}
+
+float Neuron::io(const std::vector<float> &inputs)
+{
+    float response = 0.f;
+
+    if (inputs.size() != m_inputCount)
+    {
+        return response;
+    }
+
+    for (unsigned i = 0; i < m_inputCount; ++i)
+    {
+        response += inputs[i] * m_weights[i];
+    }
+
+    return sigmoid(response - m_bias);
+}
diff --git a/Neuron.hpp b/Neuron.hpp
new file mode 100644
index 0000000..f2f7269
--- /dev/null
+++ b/Neuron.hpp
@@ -0,0 +1,22 @@
+#ifndef __NEURON_HPP__
+#define __NEURON_HPP__
+
+#include "Node.hpp"
+
+class Neuron : public Node
+{
+public:
+    Neuron(unsigned inputCount, bool zeroed = false);
+
+    void setChromosome(const Chromosome &chromosome);
+    Chromosome getChromosome() const;
+    unsigned getChromosomeSize() const { return m_inputCount + 1; }
+
+    float io(const std::vector<float> &inputs);
+
+private:
+    std::vector<float> m_weights;
+    float m_bias = 0.f;
+};
+
+#endif // __NEURON_HPP__
diff --git a/Node.hpp b/Node.hpp
new file mode 100644
index 0000000..97d7f4c
--- /dev/null
+++ b/Node.hpp
@@ -0,0 +1,21 @@
+#ifndef __NODE_HPP__
+#define __NODE_HPP__
+
+#include "NNUtils.hpp"
+
+class Node
+{
+public:
+    virtual void setChromosome(const Chromosome &chromosome) = 0;
+    virtual Chromosome getChromosome() const = 0;
+    virtual unsigned getChromosomeSize() const = 0;
+
+    virtual float io(const std::vector<float> &inputs) = 0;
+
+    unsigned getInputCount() const { return m_inputCount; }
+
+protected:
+    unsigned m_inputCount = 0;
+};
+
+#endif // __NODE_HPP__
diff --git a/NodeLayer.cpp b/NodeLayer.cpp
new file mode 100644
index 0000000..b42cf11
--- /dev/null
+++ b/NodeLayer.cpp
@@ -0,0 +1,75 @@
+#include "NodeLayer.hpp"
+
+NodeLayer::NodeLayer(unsigned inputCount, unsigned nodeCount, NodeClass nodeClass, bool zeroed)
+: m_inputCount(inputCount), m_nodeCount(nodeCount), m_nodeClass(nodeClass)
+{
+    if (nodeClass == NEURON)
+    {
+        while (nodeCount--)
+        {
+            m_nodes.push_back(std::unique_ptr<Node>(new Neuron(inputCount, zeroed)));
+        }
+    }
+    else
+    {
+        while (nodeCount--)
+        {
+            m_nodes.push_back(std::unique_ptr<Node>(new MemCell(inputCount, zeroed)));
+        }
+    }
+}
+
+void NodeLayer::setChromosome(const Chromosome &chromosome)
+{
+    unsigned chrSize = getChromosomeSize();
+
+    if (chrSize != chromosome.size())
+    {
+        return;
+    }
+
+    chrSize /= m_nodeCount;
+
+    for (unsigned i = 0; i < m_nodeCount; ++i)
+    {
+        auto l1 = chromosome.begin() + i * chrSize;
+        auto l2 = l1 + chrSize;
+        Chromosome chrPart(l1, l2);
+        m_nodes[i]->setChromosome(chrPart);
+    }
+}
+
+Chromosome NodeLayer::getChromosome() const
+{
+    Chromosome chromosome;
+
+    for (auto &i : m_nodes)
+    {
+        Chromosome chrPart = i->getChromosome();
+        chromosome.insert(chromosome.end(), chrPart.begin(), chrPart.end());
+    }
+
+    return chromosome;
+}
+
+unsigned NodeLayer::getChromosomeSize() const
+{
+    return m_nodes.front()->getChromosomeSize() * m_nodeCount;
+}
+
+std::vector<float> NodeLayer::io(const std::vector<float> &inputs)
+{
+    std::vector<float> response;
+
+    if (inputs.size() != m_inputCount)
+    {
+        return response;
+    }
+
+    for (auto &i : m_nodes)
+    {
+        response.push_back(i->io(inputs));
+    }
+
+    return response;
+}
diff --git a/NodeLayer.hpp b/NodeLayer.hpp
new file mode 100644
index 0000000..8ff44c2
--- /dev/null
+++ b/NodeLayer.hpp
@@ -0,0 +1,30 @@
+#ifndef __NODELAYER_HPP__
+#define __NODELAYER_HPP__
+
+#include "Neuron.hpp"
+#include "MemCell.hpp"
+
+class NodeLayer
+{
+public:
+    NodeLayer(unsigned inputCount, unsigned nodeCount, NodeClass nodeClass, bool zeroed = false);
+
+    void setChromosome(const Chromosome &chromosome);
+    Chromosome getChromosome() const;
+    unsigned getChromosomeSize() const;
+
+    std::vector<float> io(const std::vector<float> &inputs);
+
+    unsigned getInputCount() const { return m_inputCount; }
+    unsigned getNodeCount() const { return m_nodeCount; }
+    NodeClass getNodeClass() const { return m_nodeClass; }
+
+private:
+    unsigned m_inputCount = 0;
+    unsigned m_nodeCount = 0;
+    NodeClass m_nodeClass;
+
+    std::vector<std::unique_ptr<Node>> m_nodes;
+};
+
+#endif // __NODELAYER_HPP__
diff --git a/Population.cpp b/Population.cpp
new file mode 100644
index 0000000..8c502b0
--- /dev/null
+++ b/Population.cpp
@@ -0,0 +1,95 @@
+#include "Population.hpp"
+
+Population::Population(unsigned popSize, unsigned eliteCount, unsigned chromosomeSize)
+: m_popSize(popSize), m_eliteCount(eliteCount), m_chromosomeSize(chromosomeSize)
+{
+    m_fitnesses.resize(m_popSize, 0.f);
+    m_chromosomes.resize(m_popSize);
+    for (auto &i : m_chromosomes)
+    {
+        i.resize(m_chromosomeSize, 0.f);
+        for (auto &j : i)
+        {
+            j = realRand(-1.f, 1.f);
+        }
+    }
+}
+
+void Population::roulleteWheel()
+{
+    std::vector<Chromosome> newGeneration;
+
+    std::vector<unsigned> eliteIndexes(m_eliteCount, 0);
+    for (auto &i : eliteIndexes)
+    {
+        for (unsigned j = 0; j < m_popSize; ++j)
+        {
+            bool picked = false;
+            for (auto &k : eliteIndexes)
+            {
+                if (k == j)
+                {
+                    picked = true;
+                    break;
+                }
+            }
+            if (picked)
+            {
+                continue;
+            }
+            if (m_fitnesses[j] > m_fitnesses[i])
+            {
+                i = j;
+            }
+        }
+    }
+
+    for (auto &i : eliteIndexes)
+    {
+        newGeneration.push_back(m_chromosomes[i]);
+    }
+
+    unsigned fitnessSum = 0;
+    for (auto &i : m_fitnesses)
+    {
+        fitnessSum += i;
+    }
+
+    for (unsigned i = m_eliteCount; i < m_popSize; i += 2)
+    {
+        unsigned parentIndex = 0;
+        unsigned motherIndex = 0;
+        int randomParent = intRand(0, fitnessSum);
+        int randomMother = intRand(0, fitnessSum);
+
+        while (randomParent > 0)
+        {
+            randomParent -= m_fitnesses[parentIndex];
+            ++parentIndex;
+        } --parentIndex;
+
+        while (randomMother > 0)
+        {
+            randomMother -= m_fitnesses[motherIndex];
+            ++motherIndex;
+        } --motherIndex;
+
+        Chromosome child1;
+        Chromosome child2;
+
+        crossover(m_chromosomes[parentIndex], m_chromosomes[motherIndex], child1, child2);
+        mutate(child1);
+        mutate(child2);
+
+        newGeneration.push_back(child1);
+        newGeneration.push_back(child2);
+    }
+
+    if (newGeneration.size() < m_chromosomes.size())
+    {
+        newGeneration.push_back(newGeneration.back());
+    }
+
+    m_chromosomes = newGeneration;
+    m_fitnesses = std::vector<unsigned>(m_popSize, 0);
+}
diff --git a/Population.hpp b/Population.hpp
new file mode 100644
index 0000000..075a08f
--- /dev/null
+++ b/Population.hpp
@@ -0,0 +1,30 @@
+#ifndef __POPULATION_HPP__
+#define __POPULATION_HPP__
+
+#include "NNUtils.hpp"
+
+class Population
+{
+public:
+    Population(unsigned popSize, unsigned eliteCount, unsigned chromosomeSize);
+
+    void roulleteWheel();
+
+    unsigned getPopSize() const { return m_popSize; }
+    unsigned getChromosomeSize() const { return m_chromosomeSize; }
+    unsigned getEliteCount() const { return m_eliteCount; }
+
+    void setFitness(unsigned i, unsigned fitness) { m_fitnesses[i] = fitness; }
+    Chromosome getChromosome(unsigned i) const { return m_chromosomes[i]; }
+    unsigned getFitness(unsigned i) const { return m_fitnesses[i]; }
+
+private:
+    unsigned m_popSize = 0;
+    unsigned m_eliteCount = 0;
+    unsigned m_chromosomeSize = 0;
+
+    std::vector<Chromosome> m_chromosomes;
+    std::vector<unsigned> m_fitnesses;
+};
+
+#endif // __POPULATION_HPP__
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..05a3b72
--- /dev/null
+++ b/README.md
@@ -0,0 +1,46 @@
+Neural Network library used for the Guppies project. The parameters that can
+be set through it are the following:
+
+## NETWORK STRUCTURE
+Defines the main topology of the Neural Network (i.e. the way that Nodes
+connect with each other). The four possible choices (in order of
+complexity) are:
+
+### Single MultiLayer Perceptron
+A FeedForward network with 1 hidden layer. Information flows
+unidirectionally from input nodes to output nodes (and not the
+other way around).
+
+### Dual MultiLayer Perceptron
+Same as above, but with two hidden layers instead of one.
+
+### Simple Recurrent Network (Elman Network)
+This type of recurrent network has a set of "context" units that
+store the output of the (single) hidden layer and feed it back
+to the input layer on the next time-step, giving it a kind of
+short-term memory.
+
+### Fully Recurrent Network
+All Nodes in this network are connected to each other. It's the
+most complex (and processor intensive) network of the four.
+
+## NODE STRUCTURE
+Defines the structure of each network node. The two choices are:
+
+### Neuron
+Each neuron computes its output from a given set of inputs: the weighted
+sum of the inputs, minus a bias, passed through a sigmoid function.
+
+### Memory Cell
+This kind of node is based on the Long Short-Term Memory recurrent
+network model. It contains 4 neurons, 3 of which act as "gates" that
+allow it to block input, store it and output it, so it can hold on
+to information or "memories" for a long time span. It's the
+most complex (and processor intensive) type of node.
+
+## NODES PER HIDDEN LAYER
+Number of nodes that reside in each hidden layer. SingleMLPs, SimpleRNs and
+FullyRNs have one hidden layer. DualMLPs have two (thus, their number of
+hidden nodes is "this value" x 2). The more nodes, the more complex the
+Neural Networks of the Guppies are (and the more time it'll take to evolve
+them).
\ No newline at end of file
diff --git a/SimpleRN.cpp b/SimpleRN.cpp
new file mode 100644
index 0000000..0e10c22
--- /dev/null
+++ b/SimpleRN.cpp
@@ -0,0 +1,35 @@
+#include "SimpleRN.hpp"
+
+SimpleRN::SimpleRN(unsigned inputCount, unsigned hiddenNodeCount, unsigned outputCount, NodeClass nodeClass, bool zeroed)
+: SingleMLP(0, 0, 0, NEURON, true)
+{
+    m_context.resize(hiddenNodeCount, 0.f);
+
+    m_hiddenLayer = NodeLayer(inputCount + hiddenNodeCount, hiddenNodeCount, nodeClass, zeroed);
+    m_outputLayer = NodeLayer(hiddenNodeCount, outputCount, nodeClass, zeroed);
+
+    m_inputCount = inputCount;
+    m_hiddenNodeCount = hiddenNodeCount;
+    m_outputCount = outputCount;
+    m_nodeClass = nodeClass;
+}
+
+std::vector<float> SimpleRN::io(const std::vector<float> &inputs)
+{
+    std::vector<float> response;
+
+    if (inputs.size() != m_inputCount)
+    {
+        return response;
+    }
+
+    response.insert(response.end(), inputs.begin(), inputs.end());
+    response.insert(response.end(), m_context.begin(), m_context.end());
+
+    response = m_hiddenLayer.io(response);
+
+    m_context = response;
+    response = m_outputLayer.io(response);
+
+    return response;
+}
diff --git a/SimpleRN.hpp b/SimpleRN.hpp
new file mode 100644
index 0000000..6d0cc2c
--- /dev/null
+++ b/SimpleRN.hpp
@@ -0,0 +1,17 @@
+#ifndef __SIMPLERN_HPP__
+#define __SIMPLERN_HPP__
+
+#include "SingleMLP.hpp"
+
+class SimpleRN : public SingleMLP
+{
+public:
+    SimpleRN(unsigned inputCount, unsigned hiddenNodeCount, unsigned outputCount, NodeClass nodeClass, bool zeroed = false);
+
+    std::vector<float> io(const std::vector<float> &inputs);
+
+private:
+    std::vector<float> m_context;
+};
+
+#endif // __SIMPLERN_HPP__
diff --git a/SingleMLP.cpp b/SingleMLP.cpp
new file mode 100644
index 0000000..8cc90c3
--- /dev/null
+++ b/SingleMLP.cpp
@@ -0,0 +1,64 @@
+#include "SingleMLP.hpp"
+
+SingleMLP::SingleMLP(unsigned inputCount, unsigned hiddenNodeCount, unsigned outputCount, NodeClass nodeClass, bool zeroed)
+: m_hiddenLayer(inputCount, hiddenNodeCount, nodeClass, zeroed),
+  m_outputLayer(hiddenNodeCount, outputCount, nodeClass, zeroed)
+{
+    m_inputCount = inputCount;
+    m_hiddenNodeCount = hiddenNodeCount;
+    m_outputCount = outputCount;
+    m_nodeClass = nodeClass;
+}
+
+void SingleMLP::setChromosome(const Chromosome &chromosome)
+{
+    if (chromosome.size() != getChromosomeSize())
+    {
+        return;
+    }
+
+    auto l1 = chromosome.begin() + m_hiddenLayer.getChromosomeSize();
+
+    Chromosome chr1(chromosome.begin(), l1);
+    Chromosome chr2(l1, chromosome.end());
+
+    m_hiddenLayer.setChromosome(chr1);
+    m_outputLayer.setChromosome(chr2);
+}
+
+Chromosome SingleMLP::getChromosome() const
+{
+    Chromosome chromosome;
+
+    Chromosome chr1 = m_hiddenLayer.getChromosome();
+    Chromosome chr2 = m_outputLayer.getChromosome();
+
+    chromosome.insert(chromosome.end(), chr1.begin(), chr1.end());
+    chromosome.insert(chromosome.end(), chr2.begin(), chr2.end());
+
+    return chromosome;
+}
+
+unsigned SingleMLP::getChromosomeSize() const
+{
+    unsigned chrSize = 0;
+    chrSize += m_hiddenLayer.getChromosomeSize();
+    chrSize += m_outputLayer.getChromosomeSize();
+
+    return chrSize;
+}
+
+std::vector<float> SingleMLP::io(const std::vector<float> &inputs)
+{
+    std::vector<float> response;
+
+    if (inputs.size() != m_inputCount)
+    {
+        return response;
+    }
+
+    response = m_hiddenLayer.io(inputs);
+    response = m_outputLayer.io(response);
+
+    return response;
+}
diff --git a/SingleMLP.hpp b/SingleMLP.hpp
new file mode 100644
index 0000000..79395f8
--- /dev/null
+++ b/SingleMLP.hpp
@@ -0,0 +1,22 @@
+#ifndef __SINGLEMLP_HPP__
+#define __SINGLEMLP_HPP__
+
+#include "NeuralNet.hpp"
+
+class SingleMLP : public NeuralNet
+{
+public:
+    SingleMLP(unsigned inputCount, unsigned hiddenNodeCount, unsigned outputCount, NodeClass nodeClass, bool zeroed = false);
+
+    void setChromosome(const Chromosome &chromosome);
+    Chromosome getChromosome() const;
+    unsigned getChromosomeSize() const;
+
+    virtual std::vector<float> io(const std::vector<float> &inputs);
+
+protected:
+    NodeLayer m_hiddenLayer;
+    NodeLayer m_outputLayer;
+};
+
+#endif // __SINGLEMLP_HPP__
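
The commit ships no driver program, so here is a minimal usage sketch (a hypothetical main.cpp, not part of this commit) of how the pieces above are meant to fit together: a Population of random chromosomes is decoded into SingleMLP networks, each network is scored with a task-specific fitness (the one below is only a placeholder), and roulleteWheel() carries the elites over and breeds the rest with crossover and mutation.

// Hypothetical main.cpp -- not part of this commit.
#include "NeuralNetworks.hpp"

#include <cstdio>

int main()
{
    seedRand();                               // seed the shared RNG from NNUtils

    const unsigned inputs = 2, hidden = 4, outputs = 1;

    // A throwaway network is only used to query the chromosome length.
    SingleMLP proto(inputs, hidden, outputs, NEURON);
    Population pop(30 /*popSize*/, 4 /*eliteCount*/, proto.getChromosomeSize());

    for (unsigned gen = 0; gen < 100; ++gen)
    {
        for (unsigned i = 0; i < pop.getPopSize(); ++i)
        {
            // Decode chromosome i into a network (zeroed, then overwritten).
            SingleMLP net(inputs, hidden, outputs, NEURON, true);
            net.setChromosome(pop.getChromosome(i));

            // Placeholder fitness: reward nets whose output for (1, 0) is high.
            std::vector<float> out = net.io({1.f, 0.f});
            unsigned fitness = static_cast<unsigned>(out.front() * 100.f) + 1;
            pop.setFitness(i, fitness);
        }

        pop.roulleteWheel();                  // elitism + crossover + mutation
    }

    std::printf("evolution finished after 100 generations\n");
    return 0;
}

Since DualMLP, SimpleRN and FullyRN expose the same setChromosome/getChromosomeSize/io interface, any of them (and MEMORY_CELL instead of NEURON) can be dropped into this loop unchanged.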
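One detail worth spelling out, since every setChromosome() call is silently ignored when the length is wrong: with NEURON nodes, a Neuron's chromosome is one weight per input plus a bias, a NodeLayer multiplies that by its node count, and a SingleMLP concatenates its two layers, giving hidden*(inputs+1) + outputs*(hidden+1) genes. A small check (again hypothetical, not part of the commit):

// Hypothetical check of the chromosome-length arithmetic; not part of this commit.
#include "NeuralNetworks.hpp"

#include <cassert>

int main()
{
    SingleMLP net(3, 4, 2, NEURON);   // 3 inputs, 4 hidden neurons, 2 outputs

    // hidden layer: 4 neurons * (3 inputs + 1 bias) = 16 genes
    // output layer: 2 neurons * (4 inputs + 1 bias) = 10 genes
    assert(net.getChromosomeSize() == 16 + 10);

    // A get/set round-trip keeps the same genes in the same order.
    Chromosome genes = net.getChromosome();
    net.setChromosome(genes);
    assert(net.getChromosome() == genes);

    return 0;
}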