net.cpp

#include <iostream>
#include <cassert>
#include <cfloat>   // DBL_MAX, used as a sentinel when scanning for the maximum Q.
#include <cmath>    // pow()
#include <cstdlib>  // rand()
#include <memory>
#include <vector>
 
#include "net.h"
#include "layer.h"
#include "neuron.h"
#include "connection.h"
 
typedef std::shared_ptr<Connection> pConnectionX;
typedef std::vector<pConnectionX> pConnection;
 
/*
It has been shown that the error surface of a backpropagation network with one hidden layer and hidden units
has no local minima if the network is trained with an arbitrary set containing different inputs (Yu, 1992).
In practice, however, other features of the error surface, such as "ravines" and "plateaus" (Baldi and Hornik, 1988),
can present difficulty for optimisation. For example, Gori (1996) gives two error functions that both lack local
minima, yet one is expected to be much more difficult to optimise with gradient descent than the other.
For the purposes of this paper, the criterion of interest is "the best solution found in a given
practical time limit".
*/
 
 
Net::Net() :
discount_factor(0.4),
goal_amount(100.0),
learning_rate(0.5),
max_error_tollerance(0.1)
{
	layers.reserve(10);
}
 
 
// Example:
//   std::vector<unsigned int> myTopology;
//   myTopology.push_back(2);
//   myTopology.push_back(4);
//   myTopology.push_back(1);
//   Net myNet(myTopology);
Net::Net(const std::vector<unsigned int>& topology) :
discount_factor(0.4),
goal_amount(100.0),
learning_rate(0.5),
max_error_tollerance(0.1)
{
  assert(topology.size() > 0);
 
  layers.reserve(topology.size());
 
 
 
  for (unsigned int i = 0; i < topology.size(); i++)
  {
    pLayerX tmp(new Layer(topology[i]));
    tmp->setIndex(i);
    tmp->setGlobalBias(global_bias);
    layers.push_back(tmp);
  }
 
 
	std::cout << "layers size: " << layers.size() << std::endl;
  for (unsigned int i = 0; i < layers.size(); i++)
  {
    std::cout << "layers " << i << " neurons size: " << layers[i]->getSize() << std::endl;
  }
 
 
	//printOutput();
 
	// Add Bias to input and hidden layers.
	//layers[1]->addNeuron(
 
  //connectAll();
  //connectForward();
 
	//printOutput();
}
 
 
// Connects the "From" Neuron to the "To" Neuron.
void Net::connect(const std::vector< std::vector<double> > connections)
{
  int connection_idx = 0;
 
  for (unsigned int i = 0; i < connections.size(); i++)
  {
    int layerFrom = (int)connections[i][0];
    int neuronFrom = (int)connections[i][1];
    int layerTo = (int)connections[i][2];
    int neuronTo = (int)connections[i][3];
    double _R = connections[i][4];

    pConnectionX tmp(new Connection(layers[layerFrom]->getNeuron(neuronFrom), layers[layerTo]->getNeuron(neuronTo)));
    tmp->setIndex(connection_idx++);
    tmp->setQ(0);
    tmp->setR(_R);
    layers[layerFrom]->getNeuron(neuronFrom)->addConnectionOut(tmp);
    layers[layerTo]->getNeuron(neuronTo)->addConnectionIn(tmp);
  }
}
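
// Example (a sketch; assumes a net whose layers were built via the topology
// constructor, and uses goal_amount = 100.0 as the reward on the goal transition):
//   std::vector< std::vector<double> > myConnections;
//   // Each row is { layerFrom, neuronFrom, layerTo, neuronTo, R }.
//   std::vector<double> row;
//   row.push_back(0); row.push_back(0); row.push_back(1); row.push_back(0); row.push_back(0.0);
//   myConnections.push_back(row);
//   myNet.connect(myConnections);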
 
 
// Connects the "From" Neuron to the "To" Neuron.
void Net::connect(int layerFrom, int neuronFrom, int layerTo, int neuronTo, double _R, int connection_idx)
{
 
  pConnectionX tmp(new Connection(layers[layerFrom]->getNeuron(neuronFrom), layers[layerTo]->getNeuron(neuronTo)));
  tmp->setIndex(connection_idx);
  tmp->setQ(0);
  tmp->setR(_R);
  layers[layerFrom]->getNeuron(neuronFrom)->addConnectionOut(tmp);
  layers[layerTo]->getNeuron(neuronTo)->addConnectionIn(tmp);
}
 
 
 
// Connects every Neuron to every Neuron across all layers (including
// self-connections and connections within the same layer).
void Net::connectAll()
{
  // assert(layers.size() > 1); // There must be more than 1 layer to connect.
 
  int connection_idx = 0;
 
  for (unsigned int i = 0; i<layers.size(); i++) // For each Sending Layer.
  {
    for (unsigned int j = 0; j<layers[i]->getSize(); j++)  // For each neuron in Sending Layer.
    {
      for (unsigned int k = 0; k<layers.size(); k++)  // For each Receiving layer.
      {
        for (unsigned int l = 0; l < layers[k]->getSize(); l++)  // For each neuron in Receiving layer.
        {
          pConnectionX tmp(new Connection(layers[i]->getNeuron(j), layers[k]->getNeuron(l)));
          tmp->setIndex(connection_idx++);
          layers[i]->getNeuron(j)->addConnectionOut(tmp);
          layers[k]->getNeuron(l)->addConnectionIn(tmp);
 
          //std::cout << "I[" << j << "] connected to H[" << k << "] with Connection IDX=" << connection_idx-1 << std::endl;
        }
      }
    }
  }
}
 
 
// Connects all Neurons in a layer to all Neurons in the next layer.
void Net::connectForward()
{
  unsigned int connection_idx = 0;
 
  for (unsigned int i = 0; i<layers.size()-1; i++) // For each Sending Layer.
  {
    for (unsigned int j = 0; j<layers[i]->getSize(); j++)  // For each neuron in the sending layer.
    {
      for (unsigned int k = 0; k<layers[i+1]->getSize(); k++)  // For each neuron in the receiving layer.
      {
        pConnectionX tmp(new Connection(layers[i]->getNeuron(j), layers[i + 1]->getNeuron(k)));
        tmp->setIndex(connection_idx++);
        layers[i]->getNeuron(j)->addConnectionOut(tmp);
        layers[i + 1]->getNeuron(k)->addConnectionIn(tmp);
 
        //std::cout << "I[" << j << "] connected to H[" << k << "] with Connection IDX=" << connection_idx-1 << std::endl;
      }
    }
  }
}
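
// Example: with the topology {2, 4, 1} from the constructor example above,
// connectForward() creates 2*4 + 4*1 = 12 connections.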
 
 
// Same as connectForward(), but with the input-to-hidden and hidden-to-output
// cases written out separately per layer.
// Connects all Neurons to Neurons in the next layer.
void Net::connectForward2()
{
  unsigned int connection_idx = 0;
 
  // Create the input to hidden connections.
  // assert(layers.size() > 1); // There must be more than 1 layers to connect.
 
  for (unsigned int i = 0; i<layers.size() - 1; i++) // For each layer except the last.
  {
    // Create the input to hidden connections.
    if (i == 0)
    {
      if ((layers.size()) > 1)  // there are other layers
      {
        for (unsigned int j = 0; j<layers[0]->getSize(); j++)  // How many input neurons in input level.
        {
          for (unsigned int k = 0; k<layers[1]->getSize(); k++)  // How many neurons in next level.
          {
            pConnectionX tmp(new Connection(layers[0]->getNeuron(j), layers[1]->getNeuron(k)));
            tmp->setIndex(connection_idx++);
            layers[0]->getNeuron(j)->addConnectionOut(tmp);
            layers[1]->getNeuron(k)->addConnectionIn(tmp);
 
            //std::cout << "I[" << j << "] connected to H[" << k << "] with Connection IDX=" << connection_idx-1 << std::endl;
          }
        }
      }
      else // No other layers, so no connections are possible.
      {
      }
    }
 
 
    // Create the inside hidden connections...and hidden to output connection.
    if ((i>0) && (i <= layers.size() - 2))
    {
      for (unsigned int j = 0; j<layers[i]->getSize(); j++)  // How many input neurons.
      {
        for (unsigned int k = 0; k<layers[i + 1]->getSize(); k++)  // How many neurons in next level.
        {
          pConnectionX tmp(new Connection(layers[i]->getNeuron(j), layers[i + 1]->getNeuron(k)));
          tmp->setIndex(connection_idx++);
          layers[i]->getNeuron(j)->addConnectionOut(tmp);
          layers[i + 1]->getNeuron(k)->addConnectionIn(tmp);
 
          //std::cout << "HI[" << j << "] connected to O[" << k << "] with Connection IDX=" << connection_idx-1 << std::endl;
        }
      }
    }
  }
}
 
 
// Connects every Neuron in the given layer to every Neuron in the same layer
// (including self-connections).
void Net::connectAllInLayer(const pLayerX& layer)
{
  // assert(layer->getSize() > 1); // There must be more than 1 neuron to connect.
 
  unsigned int connection_idx = 0;
 
  for (unsigned int i = 0; i<layer->getSize(); i++)  // For each "from" neuron in Layer.
  {
    for (unsigned int j = 0; j<layer->getSize(); j++)  // For each "to" neuron in layer.
    {
      pConnectionX tmp(new Connection(layer->getNeuron(i), layer->getNeuron(j)));
      tmp->setIndex(connection_idx++);
      layer->getNeuron(i)->addConnectionOut(tmp);
      layer->getNeuron(j)->addConnectionIn(tmp);
 
        //std::cout << "I[" << j << "] connected to H[" << k << "] with Connection IDX=" << connection_idx-1 << std::endl;
    }
  }
}
 
 
 
void Net::DQN(void)
{
  int numLayers = layers.size();
  int numNeurons = 0;
  int numConnections = 0;
 
  // Determine how many layers, neurons (states) and connections (actions) we have.
  for (unsigned int i = 0; i < layers.size(); i++) // Get how many Layers.
  {
    numNeurons += layers[i]->getSize();
 
    for (unsigned int j = 0; j < layers[i]->getSize(); j++)  // For each neuron in the Sending Layer.
    {
      pNeuronX& currentNeuron = layers[i]->getNeuron(j);  // Get current neuron.
      if (!currentNeuron)
        continue;
 
      numConnections += currentNeuron->getSizeOut();  // Get how many connections from the current neuron.
    }
  }
 
 
  // Select a random initial neuron (state) from a randomly chosen layer.
  int rnd_layer = randomBetween(0, numLayers-1);
  int rnd_state = randomBetween(0, layers[rnd_layer]->getSize()-1); // The index must be valid within the chosen layer.
  pNeuronX currentState = layers[rnd_layer]->getNeuron(rnd_state);
 
  // Set initial reward.
  double R = -1;
 
  // Loop until a reward matching the goal_amount has been found.
  while (R != goal_amount)
  {
    // Select one amongst all possible actions (connections) for the current state (neuron).
    // TODO: Simply using random treats all possible actions as equal.
    // TODO: Should cater for giving different actions different odds of being chosen.
    int rnd_action = randomBetween(0, currentState->getSizeOut()-1); 
    pConnectionX currentAction = currentState->getConnectionOut(rnd_action);
 
    // Action outcome is set to deterministic in this example.
    // Transition probability is 1.
    pNeuronX nextState = currentAction->getTo();
 
    // Get reward.
    R = currentAction->getR();
 
    // Get Q.
    double Q = currentAction->getQ();
 
    // Determine the maximum Q over the next state's outgoing actions.
    // Note: DBL_MIN is the smallest *positive* double, so -DBL_MAX is used as the
    // sentinel here; with DBL_MIN, a state whose Q values are all negative would
    // incorrectly report a maximum of 0.
    double maxQ = -DBL_MAX;
    for (unsigned int i = 0; i<nextState->getSizeOut(); i++)
    {
      double tmpQ = nextState->getConnectionOut(i)->getQ();

      if (maxQ < tmpQ)
        maxQ = tmpQ;
    }
    if (maxQ == -DBL_MAX) // No outgoing connections.
      maxQ = 0;
 
    // Update the Q towards the TD target: Q <- Q + learning_rate * (R + discount_factor * maxQ - Q).
    double target = R + discount_factor * maxQ;
 
    //double error = R + discount_factor * maxQ - Q;
    double error = target - Q;
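
    // Worked example (using the constructor defaults learning_rate = 0.5 and
    // discount_factor = 0.4): if Q = 0, R = -1 and maxQ = 2, then
    // target = -1 + 0.4*2 = -0.2 and error = -0.2 - 0 = -0.2, so the update
    // below gives v = 0 + 0.5 * -0.2 = -0.1.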
 
 
    // Experience Replay Memory.
    // This is loosely inspired by the brain, and in particular the way it syncs memory traces in the hippocampus
    // with the cortex.
    // Instead of performing an update and then throwing away the experience tuple,
    // i.e. the original Q, we keep it around and effectively build up a training set of experiences.
    // Then, rather than learning from the new experience that comes in at time t, we sample random
    // experiences from the replay memory and perform an update on each sample.
    // This has the effect of removing correlations in the observed state/action/reward sequence and
    // reduces gradual drift and forgetting.
    // If the size of the memory pool grows beyond some threshold, start replacing old experiences:
    // the oldest, those furthest from the current Q, random ones, etc.
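    //
    // A minimal sketch of what a stored experience might look like (hypothetical:
    // Net currently has no replay-memory member; the names are illustrative only):
    //   struct Experience {
    //     pNeuronX     state;      // Where we were.
    //     pConnectionX action;     // The connection we took.
    //     double       reward;     // The R observed.
    //     pNeuronX     nextState;  // Where we ended up.
    //   };
    //   std::vector<Experience> replay_memory; // Sample uniformly from this when updating.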
    int rnd_replay_memory = randomBetween(0, 100);
    if (rnd_replay_memory > 99) // i.e. only when the roll is exactly 100 (roughly 1 time in 101).
    {
      //experience_add_every = 5; // number of time steps before we add another experience to replay memory.
      //experience_size = 10000;  // size of experience.
 
      // Record old Q value into array of stored memories.
      // Now select new Q value from randomly selecting one of the old Q memory values - perhaps by using odds.
      // i.e. most fresh Q value might have slightly greater chance of being selected etc.
    }
 
    // Clamping TD Error.
    // Clamp the TD Error gradient at some fixed maximum value.
    // If the error is greater in magnitude than some threshold (tderror_clamp), we cap it at that value.
    // This makes the learning more robust to outliers and has the interpretation of using Huber loss, which
    // is an L2 penalty in a small region around the target value and an L1 penalty further away.
//    double tderror_clamp = 1.0; // for robustness
//    if (error > tderror_clamp)
//      error = tderror_clamp;
//    else if (error < -tderror_clamp)
//      error = -tderror_clamp;
 
    // Periodic Target Q Value Updates.
    // Periodically freeze the Q network where it is.
    // Aims to reduce correlations between updates and the immediately undertaken behaviour.
    // The idea is to clone the Q network once in a while into a frozen copy, which is then used
    // only to compute the targets.
    // This target network is itself updated to match the actual, current network once in a while.
    int rnd_freeze = randomBetween(0, 100);
    if (rnd_freeze > 99)
    {
    }
 
    double v = Q + learning_rate * (error);
    currentAction->setQ(v);
 
    // Update the state.
    currentState = nextState;
  }
}
 
 
// Determine the maximum Q over all actions available from the given state.
double Net::getMaxQ(pNeuronX state)
{
  double maxQ = -DBL_MAX; // Sentinel; DBL_MIN is the smallest positive double, not the most negative.
  for (unsigned int i = 0; i<state->getSizeOut(); i++)
  {
    double tmpQ = state->getConnectionOut(i)->getQ();

    if (maxQ < tmpQ)
      maxQ = tmpQ;
  }

  if (maxQ == -DBL_MAX) // No outgoing connections.
    maxQ = 0;

  return maxQ;
}
 
 
// Get policy from state.
pNeuronX Net::getPolicy(pNeuronX currentState)
{
  double maxValue = -DBL_MAX; // Sentinel, so that negative Q values can still win.
  pNeuronX policyGotoState = currentState; // Default goto self if not found.
 
  for (unsigned int i = 0; i < currentState->getSizeOut(); i++)
  {
    pNeuronX nextState = currentState->getConnectionOut(i)->getTo();
    double value = currentState->getConnectionOut(i)->getQ();
 
    if (value > maxValue)
    {
      maxValue = value;
      policyGotoState = nextState;
    }
  }
 
  return policyGotoState;
}
 
 
// Policy is maxQ(states).
void Net::showPolicy(void) 
{
  for (unsigned int i = 0; i < layers.size(); i++)
  {
    for (unsigned int j = 0; j < layers[i]->getSize(); j++)
    {
      pNeuronX fromState = layers[i]->getNeuron(j);
      int from = fromState->getIndex(); 
 
      pNeuronX toState = getPolicy(fromState);
      int to = toState->getIndex(); 
 
      std::cout << "From " << from << " goto " << to << std::endl;
    }
  }
}
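
// Example (a sketch of the intended Q-learning workflow; assumes the R rewards
// have been set up via connect(), with goal_amount on the goal transition):
//   Net myNet(myTopology);
//   myNet.connect(myConnections); // Rows of { layerFrom, neuronFrom, layerTo, neuronTo, R }.
//   myNet.DQN();                  // Learn Q values until a goal_amount reward is reached.
//   myNet.showPolicy();           // Print "From x goto y" for the greedy policy.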
 
 
// This method has a bit of a bias towards the low end if the range of rand() isn't divisible
// by highestNumber - lowestNumber + 1.
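// For example, with RAND_MAX == 32767 and a range of 10, rand() % 10 yields
// 0..7 slightly more often (3277 times each) than 8..9 (3276 times each).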
int Net::randomBetween(int lowestNumber, int highestNumber)
{
  assert(highestNumber >= lowestNumber);
 
  return rand() % (highestNumber - lowestNumber + 1) + lowestNumber;
}
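
// A less biased alternative (a sketch using the C++11 <random> facilities
// instead of rand()):
//   #include <random>
//   int randomBetween(int lowestNumber, int highestNumber)
//   {
//     static std::mt19937 engine{ std::random_device{}() };
//     std::uniform_int_distribution<int> dist(lowestNumber, highestNumber);
//     return dist(engine);
//   }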
 
 
double Net::getDiscountFactor(void)
{
  return discount_factor;
}
 
 
// Discount factor.
//
// The discount_factor parameter has a range of 0 to 1 (0 <= discount_factor <= 1).
//
// If discount_factor is closer to zero, the agent will tend to consider only immediate rewards.
//
// If discount_factor is closer to one, the agent will consider future rewards with greater weight, 
// willing to delay the reward.
void Net::setDiscountFactor(const double& _discount_factor)
{
  discount_factor = _discount_factor;
}
 
 
double Net::getGoalAmount(void)
{
  return goal_amount;
}
 
 
void Net::setGoalAmount(const double& _goal_amount)
{
  this->goal_amount = _goal_amount;
}
 
 
 
// Controls how much the weights are changed during a weight update.
// The larger the value, the more the weights are changed.
// This must be a real value between 0.0 and 1.0.
// These values are commonly set from 0.5 to 0.7.
//
// The learning_rate parameter has a range of 0 to 1 (0 <= learning_rate <= 1).
//
// Set this by trial and error.  That's pretty much the best guidance we have.
double Net::getLearningRate(void)
{
	return learning_rate;
}
 
 
void Net::setLearningRate(const double& learning_rate)
{
	this->learning_rate = learning_rate;
}
 
 
double Net::getMaxErrorTollerance(void)
{
	return max_error_tollerance;
}
 
 
void Net::setMaxErrorTollerance(const double& max_error_tollerance)
{
	this->max_error_tollerance = max_error_tollerance;
}
 
 
 
 
 
 
/*
void Net::load_data(char *arg)
{
int x, y;
ifstream in(arg);
if(!in)
{
cout << endl << "failed to load data file" << endl; file_loaded = 0;
return;
}
 
in >> input_array_size;
in >> hidden_array_size;
in >> output_array_size;
in >> learning_rate;
in >> number_of_input_patterns;
bias_array_size = hidden_array_size + output_array_size;
initialize_net();
for (x=0; x<bias_array_size; x++)
in >> bias[x];
for(x=0; x<input_array_size; x++)
{
for(y=0; y<hidden_array_size; y++)
in >> weight_i_h[x][y];
}
for(x=0; x<hidden_array_size; x++)
{
for(y=0; y<output_array_size; y++) in >> weight_h_o[x][y];
}
for(x=0; x<number_of_input_patterns; x++)
{
for(y=0; y<input_array_size; y++)
in >> input[x][y];
}
for(x=0; x<number_of_input_patterns; x++)
{
for(y=0; y<output_array_size; y++)
in >> target[x][y];
}
in.close();
cout << endl << "data loaded" << endl;
return;
}
 
 
 
void Net::save_data(char *argres)
{
int x, y;
ofstream out;
out.open(argres);
if(!out)
{
cout << endl << "failed to save file" << endl;
return;
}
out << input_array_size << endl;
out << hidden_array_size << endl;
out << output_array_size << endl;
out << learning_rate << endl;
out << number_of_input_patterns << endl << endl;
for(x=0; x<bias_array_size; x++)
out << bias[x] << ' ';
out << endl << endl;
for(x=0; x<input_array_size; x++)
{
for(y=0; y<hidden_array_size; y++)
out << weight_i_h[x][y] << ' ';
}
out << endl << endl;
for(x=0; x<hidden_array_size; x++)
{
for(y=0; y<output_array_size; y++) out << weight_h_o[x][y] << ' ';
}
out << endl << endl;
for(x=0; x<number_of_input_patterns; x++)
{
for(y=0; y<input_array_size; y++)
out << input[x][y] << ' ';
out << endl;
}
out << endl;
for(x=0; x<number_of_input_patterns; x++)
{
for(y=0; y<output_array_size; y++)
out << target[x][y] << ' ';
out << endl;
}
out.close();
cout << endl << "data saved" << endl;
return;
}
 
*/
 
void Net::setTest(void)
{
	layers[0]->getNeuron(0)->setValue(1);
	layers[0]->getNeuron(1)->setValue(1);
	//layers[0]->getNeuron(2)->setValue(1);
 
	layers[1]->getNeuron(0)->setValue(123);
	layers[1]->getNeuron(1)->setValue(456);
 
	layers[2]->getNeuron(0)->setValue(0);
 
	layers[0]->getNeuron(0)->getConnectionOut(0)->setWeight(0.1);
	layers[0]->getNeuron(0)->getConnectionOut(1)->setWeight(0.2);
	layers[0]->getNeuron(1)->getConnectionOut(0)->setWeight(0.3);
	layers[0]->getNeuron(1)->getConnectionOut(1)->setWeight(0.4);
	layers[1]->getNeuron(0)->getConnectionOut(0)->setWeight(0.5);
	layers[1]->getNeuron(1)->getConnectionOut(0)->setWeight(0.6);
 
	// Add a connection between two neurons in the same layer (layer 1).
	//pConnectionX tmp(new Connection(layers[1]->getNeuron(0), layers[1]->getNeuron(1)));    
	//tmp->setIndex(100);
	//layers[1]->getNeuron(0)->addConnectionOut(tmp);
 
 
	printOutput();
 
	//layers[1]->getNeuron(0)->removeConnectionOut(0)->setWeight(0.5);
	//layers[1]->getNeuron(0)->getConnectionOut(0) = nullptr;
 
	//layers[1]->getNeuron(0)->getConnectionIn(0) = nullptr;
	//layers[0]->getNeuron(0)->getConnectionOut(0) = nullptr;
	//layers[0]->getNeuron(0)->getConnectionOut(1) = nullptr;
	//layers[0]->getNeuron(0) = nullptr;
 
	printOutput();
	std::cout << "**************************************************************" << std::endl;
}
 
 
// TODO: Only works with 3 or more layers.  Perhaps we should cater for zero hidden layers?
void Net::feedForward(const std::vector<double>& inputVals)
{
	//  std::cout << "inputVals.size=" << inputVals.size() << std::endl;
	//  std::cout << "layers[0]->getSize()=" << layers[0]->getSize() << std::endl;
 
 
	assert(layers[0]->getSize() == inputVals.size());
 
	//std::cout << "inputVals.size=" << inputVals.size() << std::endl;
 
	// Setting input vals to input layer.
	for (unsigned int i = 0; i<inputVals.size(); i++)
	{
		if (!layers[0]->getNeuron(i))
			continue;
 
		layers[0]->getNeuron(i)->setValue(inputVals[i]); // layers[0] is the input layer.
	}
 
 
	// Updating hidden layers.
	for (unsigned int i = 1; i<layers.size() - 1; i++)
	{
		layers[i]->feedForward(layers[i - 1]); // Updating the neurons output based on the neurons of the previous layer.
	}
 
 
	// Updating output layer.
	// Note: feedForward() operates on the whole layer, so it only needs to be
	// called once, not once per output neuron as an earlier version did.
	pLayerX& prevLayer = layers[layers.size() - 2];
	layers.back()->feedForward(prevLayer); // Updating the output neurons based on the neurons of the previous layer.
}
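
// Example:
//   std::vector<double> inputVals;
//   inputVals.push_back(1.0);
//   inputVals.push_back(0.0);
//   myNet.feedForward(inputVals); // Assumes an input layer of 2 neurons.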
 
 
/*
void backward_pass(int pattern)
{
register int x, y;
register double temp = 0;
 
// COMPUTE ERRORSIGNAL FOR OUTPUT UNITS
for(x=0; x<output_array_size; x++) {
errorsignal_output[x] = (target[pattern][x] - output[pattern][x]);
}
 
// COMPUTE ERRORSIGNAL FOR HIDDEN UNITS
for(x=0; x<hidden_array_size; x++) {
for(y=0; y<output_array_size; y++) {
temp += (errorsignal_output[y] * weight_h_o[x][y]);
}
errorsignal_hidden[x] = hidden[x] * (1-hidden[x]) * temp;
temp = 0.0;
}
 
// ADJUST WEIGHTS OF CONNECTIONS FROM HIDDEN TO OUTPUT UNITS
double length = 0.0;
for (x=0; x<hidden_array_size; x++) {
length += hidden[x]*hidden[x];
}
if (length<=0.1) length = 0.1;
for(x=0; x<hidden_array_size; x++) {
for(y=0; y<output_array_size; y++) {
weight_h_o[x][y] += (learning_rate * errorsignal_output[y] *
hidden[x]/length);
}
}
 
// ADJUST BIASES OF HIDDEN UNITS
for(x=hidden_array_size; x<bias_array_size; x++) {
bias[x] += (learning_rate * errorsignal_output[x] / length);
}
 
// ADJUST WEIGHTS OF CONNECTIONS FROM INPUT TO HIDDEN UNITS
length = 0.0;
for (x=0; x<input_array_size; x++) {
length += input[pattern][x]*input[pattern][x];
}
if (length<=0.1) length = 0.1;
for(x=0; x<input_array_size; x++) {
for(y=0; y<hidden_array_size; y++) {
weight_i_h[x][y] += (learning_rate * errorsignal_hidden[y] *
input[pattern][x]/length);
}
}
 
// ADJUST BIASES FOR OUTPUT UNITS
for(x=0; x<hidden_array_size; x++) {
bias[x] += (learning_rate * errorsignal_hidden[x] / length);
}
return;
}
*/
 
void Net::backPropagate(const std::vector<double>& targetVals)
{
	// COMPUTE ERRORSIGNAL FOR OUTPUT UNITS
	pLayerX& outputLayer = layers.back();
	assert(targetVals.size() == outputLayer->getSize());
 
	// Traversing output layer.
	for (unsigned int i = 0; i<outputLayer->getSize(); i++) // For every output Neuron.
	{
		if (!outputLayer->getNeuron(i))
			continue;
 
    double outputValue = outputLayer->getNeuron(i)->getValue();
    double gradient = (targetVals[i] - outputValue) * outputValue * (1.0 - outputValue);
		outputLayer->getNeuron(i)->setGradient(gradient);
	}
 
	// COMPUTE ERRORSIGNAL FOR HIDDEN UNITS
	for (unsigned int i = layers.size() - 2; i>0; i--) // for every hidden layer
	{
		pLayerX& currentLayer = layers[i];
		pLayerX& nextLayer = layers[i + 1];
 
		for (unsigned int j = 0; j<currentLayer->getSize(); j++) // for every neuron
		{
			double temp = 0.0;
			pNeuronX& currentNeuron = layers[i]->getNeuron(j);  // current neuron.
			if (!currentNeuron)
				continue;
 
			for (unsigned int k = 0; k<currentNeuron->getSizeOut(); k++)  // for every connection in current layer.
			{
				pConnectionX &currentConnection = currentNeuron->getConnectionOut(k);
				if (!currentConnection)
					continue;
				if (!currentConnection->getTo())
					continue;
 
				int currentIndex = currentConnection->getTo()->getIndex(); // Index of the neuron this connection feeds into.
 
				for (unsigned int l = 0; l<nextLayer->getSize(); l++) // for every neuron in next layer
				{
					pNeuronX& nextNeuron = nextLayer->getNeuron(l);  // next layers neuron.
					if (!nextNeuron)
						continue;
 
					int nextIndex = nextNeuron->getIndex();
 
					if (currentIndex == nextIndex)
					{
						temp += (nextNeuron->getGradient() * currentConnection->getWeight()); // Next layer's gradient, weighted by the connecting weight.
					}
				}
			}
 
			currentNeuron->setGradient(currentNeuron->getValue() * (1.0 - currentNeuron->getValue()) * temp);
		}
	}
 
 
	// ADJUST WEIGHTS OF ALL FORWARD CONNECTIONS (INPUT TO HIDDEN, HIDDEN TO OUTPUT)
	for (unsigned int i = 0; i<layers.size() - 1; i++)  // for every layer.
	{
		for (unsigned int j = 0; j<layers[i]->getSize(); j++)  // for every neuron.
		{
			pNeuronX& currentNeuron = layers[i]->getNeuron(j);
			if (!currentNeuron)
				continue;
			double currentValue = currentNeuron->getValue();
 
			for (unsigned int k = 0; k<layers[i]->getNeuron(j)->getSizeOut(); k++) // for every connection.
			{
				pConnectionX& currentConnection = currentNeuron->getConnectionOut(k);
				if (!currentConnection)
					continue;
				pNeuronX& nextNeuron = currentConnection->getTo();
				if (!nextNeuron)
					continue;
 
				double nextGradient = nextNeuron->getGradient();
        //double delta = learning_rate * nextGradient * currentValue;
        double delta = (learning_rate * nextGradient - weight_decay) * currentValue;
        //TODO: Potentially add a learning_rate and weight_decay to each neuron or connection,
        //TODO:   then use something like currentConnection->getLearningRate()...
        //TODO: Can have global options and local options.
 
				//currentConnection->setWeight(currentConnection->getWeight() + delta + (0.4 * currentConnection->getDeltaWeight()));
				currentConnection->setWeight(currentConnection->getWeight() + delta + (currentConnection->getMomentum() * currentConnection->getDeltaWeight()));
				currentConnection->setDeltaWeight(delta);
			}
		}
	}
}
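
// Example (a sketch of one supervised training step):
//   std::vector<double> targetVals;
//   targetVals.push_back(1.0);       // Assumes a single output neuron.
//   myNet.feedForward(inputVals);    // Forward pass.
//   myNet.backPropagate(targetVals); // Backward pass: gradients, then weight updates.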
 
 
void Net::backPropagate2(const std::vector<double>& targetVals)
{
	pLayerX& outputLayer = layers.back();
	assert(targetVals.size() == outputLayer->getSize());
 
	// Starting with the output layer.
	for (unsigned int i = 0; i<outputLayer->getSize(); i++) // For every neuron in the output layer.
	{
		pNeuronX& currentNeuron = outputLayer->getNeuron(i);

		double output = currentNeuron->getValue();
		// COMPUTE ERRORSIGNAL FOR OUTPUT UNITS
		// Note: the signed difference (target - output) is used; squaring it, as an
		// earlier version did, would discard the sign of the error.
		double error = output * (1 - output) * (targetVals[i] - output);
		//std::cout << "Error Output=" << error << std::endl;
 
		// ADJUST WEIGHTS OF CONNECTIONS FROM HIDDEN TO OUTPUT UNITS
		for (unsigned int j = 0; j<outputLayer->getNeuron(i)->getSizeIn(); j++)
		{
			outputLayer->getNeuron(i)->getConnectionIn(j)->setError(error); // Set error against each connection into the output layer.
			double newWeight = outputLayer->getNeuron(i)->getConnectionIn(j)->getWeight();
			// TODO: Standard backpropagation would scale by the sending (upstream) neuron's
			// TODO:   output here, rather than by this neuron's own value.
			newWeight += (error * outputLayer->getNeuron(i)->getValue());
			outputLayer->getNeuron(i)->getConnectionIn(j)->setWeight(newWeight); // Setting new weight of each connection into the output layer.
		}
	}
 
	for (unsigned int i = layers.size() - 2; i>0; i--) // Traversing hidden layers all the way to input layer.
	{
		pLayerX& currentLayer = layers[i];
		pLayerX& nextLayer = layers[i + 1];
 
		// Traversing current layer.
		for (unsigned int j = 0; j<currentLayer->getSize(); j++)  // for every neuron in current layer.
		{
			const double& output = currentLayer->getNeuron(j)->getValue(); // get its value.
			double subSum = 0.0; // Initializing subsum.
 
			// Traversing next layer.
			for (unsigned int k = 0; k<nextLayer->getSize(); k++) // for every neuron in next layer.
			{
				double error = nextLayer->getNeuron(k)->getConnectionIn(k)->getError();
				double weight = nextLayer->getNeuron(k)->getConnectionIn(j)->getWeight();
 
 
				//subSum += pow(nextLayer.getNeuron(j).getConnectionIn(k).getError() * currentLayer.getNeuron(j).getConnectionIn(k).getWeight(),2); // Getting their backpropagated error and weight.
				subSum += pow(nextLayer->getNeuron(k)->getConnectionIn(k)->getError() * currentLayer->getNeuron(j)->getConnectionIn(k)->getWeight(), 2); // Getting their backpropagated error and weight.
				//subSum += pow(nextLayer.getNeuron(k).getConnectionIn(k).getError() * nextLayer.getNeuron(k).getConnectionIn(j).getWeight(),2); // Getting their backpropagated error and weight.
			}
 
			double error = output * (1 - output) * subSum;
 
			for (unsigned int k = 0; k<currentLayer->getNeuron(j)->getSizeIn(); k++)
			{
				currentLayer->getNeuron(j)->getConnectionIn(k)->setError(error);
				double newWeight = currentLayer->getNeuron(j)->getConnectionIn(k)->getWeight();
				newWeight += error * output;
				currentLayer->getNeuron(j)->getConnectionIn(k)->setWeight(newWeight);
			}
		}
	}
}
 
 
 
 
void Net::printOutput(void)
{
	std::cout << "***Net has [" << layers.size() << "] layers" << std::endl;
 
	for (unsigned int i = 0; i<layers.size(); i++)
	{
		pLayerX& currentLayer = layers[i];
		currentLayer->printOutput();
	}
}
 
 
void Net::printResult(void)
{
	pLayerX& outputLayer = layers.back();
 
	for (unsigned int i = 0; i<outputLayer->getSize(); i++)
	{
		std::cout << "Result=" << outputLayer->getNeuron(i)->getValue() << std::endl;
	}
}
 
 
void Net::setTarget(const std::vector<double>& targetVals)
{
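  // TODO: Not yet implemented.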
 
}