13 NeuralNetwork::NeuralNetwork(
const int inputsize,
const int outputsize) :
14 AdaptiveFilter(inputsize, outputsize), alpha(0.0), mu(0.0), LocInput(
15 inputsize, 0.0), LocOutput(outputsize, 0.0), LocDesired(outputsize,
21 const double mu_,
const ttypeArray &Layersetup,
const double maxinit,
23 AdaptiveFilter(inputsize, outputsize), alpha(0.0), mu(mu_), LocInput(
24 inputsize, 0.0), LocOutput(outputsize, 0.0), LocDesired(outputsize,
36 const gplib::rvec &Desired)
38 if (Desired.size() != LocDesired.size())
40 copy(Desired.begin(), Desired.end(), LocDesired.begin());
45 void NeuralNetwork::CalcOutput(
const gplib::rvec &Input,
48 if (Input.size() != LocInput.size())
50 copy(Input.begin(), Input.end(), LocInput.begin());
52 if (Output.size() != LocOutput.size())
54 copy(LocOutput.begin(), LocOutput.end(), Output.begin());
61 for (
size_t i = 1; i < Layers.size(); ++i)
63 for (
size_t j = 0; j < Layers.at(i).size(); ++j)
64 size += Layers.at(i).at(j)->GetWeights().size();
66 gplib::rvec temp(size);
68 for (
size_t i = 1; i < Layers.size(); ++i)
70 for (
size_t j = 0; j < Layers.at(i).size(); ++j)
72 for (
size_t k = 0; k < Layers.at(i).at(j)->GetWeights().size(); ++k)
73 temp(currstart + k) = Layers.at(i).at(j)->GetWeights().at(k);
74 currstart += Layers.at(i).at(j)->GetWeights().size();
77 WeightsAsVector = temp;
78 return WeightsAsVector;
86 const size_t nlayers = typeArray.size();
87 LocOutput.assign(typeArray.back().size(), 0);
88 LocDesired.assign(typeArray.back().size(), 0);
89 for (
size_t i = 0; i < LocInput.size(); ++i)
90 CurrentLayer.push_back(boost::shared_ptr<GeneralNeuron>(
92 Layers.push_back(CurrentLayer);
94 cout <<
"Nlayers: " << nlayers << endl;
95 for (
size_t i = 0; i < nlayers; ++i)
97 for (
size_t j = 0; j < typeArray.at(i).size(); ++j)
99 CurrentLayer.push_back(boost::shared_ptr<GeneralNeuron>(
102 Layers.push_back(CurrentLayer);
103 CurrentLayer.clear();
105 for (
size_t i = 1; i < nlayers + 1; ++i)
108 const size_t prevsize = Layers.at(i - 1).size();
109 cout <<
"Previous layer size: " << prevsize << endl;
110 for (
size_t j = 0; j < prevsize; ++j)
112 inputvector.push_back(Layers.at(i - 1).at(j));
114 for (
size_t j = 0; j < Layers.at(i).size(); ++j)
116 Layers.at(i).at(j)->SetInput(inputvector);
117 Layers.at(i).at(j)->SetOldDelta().assign(prevsize, 0.0);
123 const double MaxBias)
126 for (
size_t i = 1; i < Layers.size(); ++i)
127 for (
size_t j = 0; j < Layers.at(i).size(); ++j)
129 for (
size_t k = 0; k < Layers.at(i).at(j)->GetWeights().size(); ++k)
131 Layers.at(i).at(j)->SetWeights().at(k) = Random.
GetNumber(
132 -MaxWeight, MaxWeight);
133 Layers.at(i).at(j)->SetOldDelta().at(k) = 0.0;
135 Layers.at(i).at(j)->SetBias(Random.
GetNumber(-MaxBias, MaxBias));
139 std::vector<double> &NeuralNetwork::CalcOutput()
141 gplib::rvec temp(LocOutput.size());
142 for (
size_t i = 0; i < Layers.back().size(); ++i)
144 LocOutput.at(i) = Layers.back().at(i)->GetOutput();
149 void NeuralNetwork::AdaptWeights()
151 tNeuralArray::iterator previouslayer = (Layers.end() - 2);
152 tNeuralArray::iterator currlayer = (Layers.end() - 1);
153 double correction = 0.0;
154 for (
size_t i = 0; i < previouslayer->size(); ++i)
155 previouslayer->at(i)->SetDelta(0.0);
156 for (
size_t j = 0; j < currlayer->size(); ++j)
159 currlayer->at(j)->SetDelta(Layers.back().at(j)->CalcDeriv(
160 Layers.back().at(j)->GetNet()) * (LocDesired.at(j)
163 for (
size_t i = 0; i < currlayer->at(j)->GetWeights().size(); ++i)
165 currlayer->at(j)->GetInput().at(i)->SetDelta(
166 currlayer->at(j)->GetInput().at(i)->GetDelta()
167 + currlayer->at(j)->GetWeights().at(i) * currlayer->at(
170 correction = alpha * currlayer->at(j)->GetOldDelta().at(i) + mu
171 * (previouslayer->at(i)->GetLastOutput())
172 * Layers.back().at(j)->GetDelta();
173 currlayer->at(j)->SetWeights().at(i) += correction;
174 currlayer->at(j)->SetOldDelta().at(i) = correction;
176 currlayer->at(j)->SetBias(Layers.back().at(j)->GetBias() + mu
177 * Layers.back().at(j)->GetDelta());
180 for (
size_t i = Layers.size() - 2; i >= 1; --i)
183 for (
size_t j = 0; j < Layers.at(i - 1).size(); ++j)
184 Layers.at(i - 1).at(j)->SetDelta(0.0);
186 for (tNeuralLayer::iterator currneuron = Layers.at(i).begin(); currneuron
187 < Layers.at(i).end(); ++currneuron)
190 currneuron->get()->SetDelta(currneuron->get()->GetDelta()
191 * currneuron->get()->CalcDeriv(currneuron->get()->GetNet()));
193 for (
size_t k = 0; k < currneuron->get()->GetWeights().size(); ++k)
196 currneuron->get()->GetInput().at(k)->SetDelta(
197 currneuron->get()->GetInput().at(k)->GetDelta()
198 + currneuron->get()->GetWeights().at(k)
199 * currneuron->get()->GetDelta());
202 = alpha * currneuron->get()->GetOldDelta().at(k)
204 * currneuron->get()->GetInput().at(k)->GetLastOutput()
205 * currneuron->get()->GetDelta();
207 currneuron->get()->SetWeights().at(k) += correction;
209 currneuron->get()->SetOldDelta().at(k) = correction;
212 currneuron->get()->SetBias(currneuron->get()->GetBias() + mu
213 * currneuron->get()->GetDelta());
221 for (
size_t i = 1; i < Layers.size(); ++i)
223 for (
size_t j = 0; j < Layers.at(i).size(); ++j)
225 copy(Layers.at(i).at(j)->GetWeights().begin(), Layers.at(i).at(
226 j)->GetWeights().end(), ostream_iterator<double> (output,
236 const double maxpower = std::abs(*max_element(WeightVector.begin(),
237 WeightVector.end(), gplib::absLess<double, double>()));
238 std::ofstream output(filename.c_str());
239 output <<
"digraph network {" << endl;
240 for (
size_t i = 0; i < Layers.front().size(); ++i)
242 output <<
"node [shape=point];" << std::endl;
243 output <<
"i" << i <<
" -> input" << i <<
";" << std::endl;
244 output <<
"input" << i <<
" [shape=circle];" << std::endl;
245 for (
size_t j = 0; j < Layers.at(1).size(); ++j)
247 output <<
"input" << i <<
" -> n1" << j;
249 output <<
"[color =\" 0.7 1.0 " << 0.1 + std::abs(
250 Layers.at(1).at(j)->GetWeights().at(i)) / maxpower
252 output <<
";" << std::endl;
255 for (
size_t i = 1; i < Layers.size() - 1; ++i)
257 output <<
"{ rank=same;" << std::endl;
258 for (
size_t j = 0; j < Layers.at(i).size(); ++j)
260 output <<
"n" << i << j <<
" [shape=circle];" << std::endl;
267 output <<
"};" << std::endl;
268 for (
size_t j = 0; j < Layers.at(i).size(); ++j)
269 for (
size_t k = 0; k < Layers.at(i + 1).size(); ++k)
271 output <<
"n" << i << j <<
" -> n" << i + 1 << k;
273 output <<
"[color =\" 0.7 1.0 " << 0.1 + std::abs(
274 Layers.at(i + 1).at(k)->GetWeights().at(j)) / maxpower
276 output <<
";" << std::endl;
279 output <<
"{ rank=same;" << std::endl;
280 for (
size_t i = 0; i < Layers.back().size(); ++i)
282 output <<
"n" << Layers.size() - 1 << i <<
" [shape=circle];"
289 output <<
"};" << std::endl;
290 for (
size_t i = 0; i < Layers.back().size(); ++i)
292 output <<
"output" << i <<
" [shape=point];" << std::endl;
293 for (
size_t j = 0; j < Layers.at(Layers.size() - 1).
size(); ++j)
295 output <<
"n" << Layers.size() - 1 << j <<
" -> output" << i
void InitWeights(const double MaxWeight, const double MaxBias)
Initialize the weights and biases with random values bounded by the specified maxima.
std::vector< boost::shared_ptr< GeneralNeuron > > tNeuralLayer
void SetLayers(ttypeArray typeArray, bool cachedoutput=false)
Configure the layers of the network according to the types in typeArray.
void SetEpsilon(const gplib::rvec &MyEps)
Possibility for derived classes to set estimation error.
virtual void PrintWeights(std::ostream &output)
Print the weights of the network to the specified output stream.
std::vector< ttypeVector > ttypeArray
virtual void AdaptFilter(const gplib::rvec &Input, const gplib::rvec &Desired)
Adapt the filter using the current input and the desired output.
A generic base class for all types of adaptive filters.
const gplib::rvec & GetFilterOutput() const
Access to the last calculated output (not sure if needed)
NeuralNetwork(const int inputsize, const int outputsize)
The minimum values for the network are the lengths of the input and the output.
virtual const gplib::rvec & GetWeightsAsVector()
Return the network weights as a single vector.
void SetOutput(const gplib::rvec &Out)
Possibility for derived classes to set output.
SigmoidalNeuron implements the main functionality of neurons in a neural network. ...
void PrintTopology(std::string filename)
Print the topology and weights of the network for plotting with the dot program.
std::vector< boost::shared_ptr< GeneralNeuron > > tinvector
The basic exception class for all errors that arise in gplib.