Example #1
	void DynamicsPlotterUtil::transferNeuronActivationToOutput(const QList<Neuron*> neurons) {

		//Set the output of every neuron to the transferred activation
		for(QListIterator<Neuron*> i(neurons); i.hasNext();) {
			Neuron *neuron = i.next();
			neuron->getOutputActivationValue().set(
				neuron->getTransferFunction()->transferActivation(
					neuron->getActivationValue().get(), neuron));
		}
	}
Example #2
void Neuron::makeSynapses(void){
   Neuron* n = new Neuron(4345);
   cout << "index of postSyn is: " << n->getIndex() << endl;
   
   Synapse syn1(1, n);
   axons.push_back(syn1);
   Synapse syn2(2, n);
   axons.push_back(syn2);
   Synapse syn3(3, n);
   axons.push_back(syn3);
   return;
}
Example #3
    Value NeuralNet::neuron_output(const Neuron& n, const ValueListPtr v) const
    {
        Neuron::const_iterator it;
        ValueList::const_iterator vit;
        Value retval = 0.0f;

        for(it = n.begin(), vit = v->begin(); it != n.end() && vit != v->end(); it++, vit++)
        {
            retval += (*it) * (*vit);
        }
        // std::cout << retval << std::endl;
        return retval;
    }
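The accumulation loop in Example #3 is simply a dot product between the neuron's weights and the incoming values. A minimal, self-contained sketch of the same idea using the standard library (assuming both ranges have equal length; the container types here are illustrative, not the ones used by this NeuralNet):

#include <numeric>
#include <vector>

// Dot product of weights and values, equivalent to the manual loop above
// when both ranges have the same length.
float dot(const std::vector<float>& weights, const std::vector<float>& values) {
    return std::inner_product(weights.begin(), weights.end(), values.begin(), 0.0f);
}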
Example #4
// Back propagate the errors to update the weights.
void NeuralNet::backPropagate(vector<double>* outputs, int teacher) {
  Layer *outputLayer = (*layers)[numHiddenLayers + 1];
  for (int i = 0; i < outputs->size(); i++) {
    Neuron *n = outputLayer->getNeuron(i);
    double adjusted = -n->getValue();
    if (i == teacher) {
      adjusted += 1;
    }
    n->setDelta(sigmoidPrime(n->getActivation()) * adjusted);
  }

  // Propagate deltas backward from output layer to input layer.
  for (int l = numHiddenLayers; l >= 0; l--) {
    Layer *curr = (*layers)[l], *downstream = (*layers)[l+1];

    for (int i = 0; i < curr->neuronCount(); i++) {
      double sum = 0;
      Neuron *n = curr->getNeuron(i);
      for (int j = 0; j < downstream->neuronCount(); j++) {
        sum += downstream->getNeuron(j)->getWeight(i)
            * downstream->getNeuron(j)->getDelta();
      }
      n->setDelta(sigmoidPrime(n->getActivation()) * sum);
      for (int j = 0; j < downstream->neuronCount(); j++) {
        downstream->getNeuron(j)->updateWeight(i,
            learningRate * sigmoid(n->getActivation())
            * downstream->getNeuron(j)->getDelta());
      }
    }
  }
}
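Example #4 relies on sigmoid and sigmoidPrime helpers that are not shown. A minimal sketch of what they would typically be, assuming the standard logistic activation (the real NeuralNet may use a different activation):

#include <cmath>

// Logistic activation: squashes any real input into (0, 1).
double sigmoid(double x) {
  return 1.0 / (1.0 + std::exp(-x));
}

// Its derivative, written in terms of the activation itself:
// d/dx sigmoid(x) = sigmoid(x) * (1 - sigmoid(x)).
double sigmoidPrime(double x) {
  double s = sigmoid(x);
  return s * (1.0 - s);
}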
Example #5
/**
 * Returns the strength of the owner synapse.
 * 
 * @param owner the owner of this SynapseFunction.
 * @return the strength of the owner.
 */
double ExampleSynapseFunction::calculate(Synapse *owner) {
	if(owner == 0) {
		return 0.0;
	}
	
	//Search for target neuron (if existing)
	Neuron *target = dynamic_cast<Neuron*>(owner->getTarget());
	if(target == 0) {
		return owner->getStrengthValue().get();
	}
	
	//Update history
	mHistory.enqueue(target->getLastActivation());
	while(!mHistory.empty() && mHistory.size() > mHistorySize->get()) {
		mHistory.dequeue();
	}
	
	
	//Calculate average
	double activationSum = 0.0;
	double average = 0.0;
	for(QListIterator<double> i(mHistory); i.hasNext();) {
		activationSum += i.next();
	}
	if(mHistory.size() > 0) {
		average = (activationSum / ((double) mHistory.size()));
	}
	else {
		average = 0.0;
	}
	mCurrentAverage->set(average);
	
	//Adapt synapse if required.
	Neuron *source = owner->getSource();
	if(source != 0) {
		double output = source->getLastOutputActivation();
		if(output != 0) {
			double change = 0.0;
			if(average > mDesiredActivationRange->getMax()) {
				change = (average - mDesiredActivationRange->getMax()) * output * -1.0;
			}
			else if(average < mDesiredActivationRange->getMin()) {
				change = (mDesiredActivationRange->getMin() - average) * output;
			}
			owner->getStrengthValue().set(owner->getStrengthValue().get() + (change * mChangeRate->get()));
		}
	}
	
	return owner->getStrengthValue().get();
}
Example #6
void testCopy()
{
#ifdef USE_MATRIX
  RNN_MATRIX(rnnOrig);
  RNN_VECTOR(rnnCopy);
#endif
#ifdef USE_VECTOR
  RNN_MATRIX(rnnOrig);
  RNN_VECTOR(rnnCopy);
#endif
  for(int i = 0; i < 1000; i++)
  {
    Neuron *n = rnnOrig->createNeuron();
    if(drand48() < 0.5)
    {
      n->setTransferfunction(transferfunction_tanh);
    }
    else
    {
      n->setTransferfunction(transferfunction_id);
    }
  }
  for(int j = 0; j < 999; j++) // chain: connect neuron j to neuron j+1
  {
    __REAL w = ((drand48() < 0.5)?-1:1) * (10 * drand48() + 0.1);
    rnnOrig->createSynapse(rnnOrig->getNeuron(j), rnnOrig->getNeuron(j+1), w);
  }
  for(int j = 0; j < 10000; j++) // random synapses
  {
    __REAL w = ((drand48() < 0.5)?-1:1) * (10 * drand48() + 0.1);
    int source = (int)(drand48() * 100 + 0.5);
    int destination = (int)(drand48() * 100 + 0.5);
    // TODO: check why this is required for the test not to fail
    while(source != destination - 1)
    {
      source = (int)(drand48() * 100 + 0.5);
      destination = (int)(drand48() * 100 + 0.5);
    }
    rnnOrig->createSynapse(rnnOrig->getNeuron(source),
        rnnOrig->getNeuron(destination),
        w);
  }
  startTiming();
  rnnCopy->copy(rnnOrig);
  unsigned long time = stopTiming();
  cout << "RecurrentNeuralNetwork copy:\t\t\t\t\t";
  printTime(time);
  delete rnnOrig;
  delete rnnCopy;
}
Example #7
void RungeKutta::calcMeanMembranePotential(Neuron& n_old, Neuron& n_new, double time, double timestep) {
    if (n_old.getType() == BRAINSTEM) return;
    
    double m = n_old.getM();
    double tau = n_old.getWeights()[0];
    double sigma = addWeightedNeighbors(n_old);
    double k1 = calcDerivative(time, m, sigma, tau);
    double k2 = calcDerivative(time + .5*timestep, m + (timestep/2)*k1, sigma, tau);
    double k3 = calcDerivative(time + .5*timestep, m + (timestep/2)*k2, sigma, tau);
    double k4 = calcDerivative(time + timestep, m + timestep*k3, sigma, tau);
    
    double newVal = m + (1.0/6)*timestep*(k1 + 2*k2 + 2*k3 + k4);
    n_new.setM(newVal);
    
}
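Example #7 applies classic fourth-order Runge-Kutta to the membrane potential but does not show calcDerivative. A standalone sketch of one plausible right-hand side, assuming a leaky-integrator model in which the potential decays toward the weighted neighbor input sigma with time constant tau (the real RungeKutta::calcDerivative may differ):

// Hypothetical right-hand side for the RK4 steps above: dm/dt = (-m + sigma) / tau.
double calcDerivativeSketch(double time, double m, double sigma, double tau) {
    (void)time; // this particular model has no explicit time dependence
    return (-m + sigma) / tau;
}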
Example #8
int main() {
  ScopedPass pass("Evolver [single-bit adder]");
  
  Network network;
  Evolver evolver(network);
  Neuron * input0 = new OrNeuron();
  Neuron * input1 = new OrNeuron();
  Neuron * output = new OrNeuron();
  network.AddNeuron(*input0);
  network.AddNeuron(*input1);
  network.AddNeuron(*output);
  
  while (true) {
    bool flag0 = RandomBool();
    bool flag1 = RandomBool();
    if (flag0) input0->Fire();
    else input0->Inhibit();
    if (flag1) input1->Fire();
    else input1->Inhibit();
    bool gotResponse = false;
    bool expectingResponse = (flag0 && !flag1) || (flag1 && !flag0);
    // Give it ten cycles to respond with the answer
    for (int i = 0; i < 10; ++i) {
      network.Cycle();
      if (output->IsFiring()) {
        gotResponse = true;
      }
      evolver.Prune();
      evolver.Grow();
    }
    if (gotResponse != expectingResponse) {
      std::cout << "wrong!" << std::endl;
      network.UpdateLivesWithPain(0.5);
    } else {
      std::cout << "right!" << std::endl;
    }
    // Give the network some time to flush out its activity
    for (int i = 0; i < 5; ++i) {
      network.Cycle();
      evolver.Prune();
      evolver.Grow();
    }
  }
  
  return 0;
}
Example #9
bool NeuralNetwork::AreNeuronsConnected(const Neuron& lhs,const Neuron & rhs) const {
	for (auto& connection : rhs.GetConnections()) {
		if (!connection.outGoing && &lhs == connection.neuron) {
			return true;
		}
	}
	return false;
}
Example #10
float RecSOM::train_one_sample(vector<float> sample, int nf) {
	Neuron *w   = NULL;
	float  qerr = 0.0;

	input_layer = &sample[0];

	w = activate_find_winner(output_layer);
	qerr = w->recursive_distance(alpha, beta, input_layer, context_layer);

	for(int i = 0; i < map_dim_x*map_dim_y; i++) {
		map_layer[i]->update_weights(gama, w, input_layer, context_layer, sigma, nf);
	}

	update_context(output_layer);

	return qerr;
}
void SelfOrganizingMaps::evaluateIndependentVector(vector<double> inputVector){
	Neuron *bmu = getBMU(inputVector);
	_matrix->getNeuron(bmu->getX(), bmu->getY())->setNeuronColor(0,0,0);
/*
	cout << "A _bmuTestCases size = " << _bmuTestCases.size() << endl;
	_bmuTestCases.insert ( pair<Neuron *,Neuron *>(getBMU(inputVector),getBMU(inputVector)));
	cout << "B _bmuTestCases size = " << _bmuTestCases.size() << endl;
	cout << "Input Vector:" << endl;
	cout << inputVector[0] << " " << inputVector[1] << " " << inputVector[2] << endl;
	cout << "BMU:" << endl;
	bmu->info();
	
	double distanceToBMU = bmu->distanceToInputVector(inputVector);
	cout << "BMU distance to input vector: " << distanceToBMU << endl;
	int bmuX = bmu->getX();
	int bmuY = bmu->getY();

	if(((bmuX + 1 < _size) && (bmuX - 1 >= 0)) && ((bmuY + 1 < _size) && (bmuY - 1 >= 0))){
		Neuron *upLeft = _matrix->getNeuron(bmuX - 1, bmuY - 1);
		Neuron *up = _matrix->getNeuron(bmuX, bmuY - 1);
		Neuron *upRight = _matrix->getNeuron(bmuX + 1, bmuY - 1);
		Neuron *left = _matrix->getNeuron(bmuX - 1, bmuY);
		Neuron *right = _matrix->getNeuron(bmuX + 1, bmuY);
		Neuron *downLeft = _matrix->getNeuron(bmuX - 1, bmuY + 1);
		Neuron *down = _matrix->getNeuron(bmuX, bmuY + 1);
		Neuron *downRight = _matrix->getNeuron(bmuX + 1, bmuY + 1);

		double distUpLeft =  upLeft->distanceToInputVector(inputVector);
		double distUp = up->distanceToInputVector(inputVector);
		double distUpRight = upRight->distanceToInputVector(inputVector);
		double distLeft = left->distanceToInputVector(inputVector);
		double distRight = right->distanceToInputVector(inputVector);
		double distDownLeft = downLeft->distanceToInputVector(inputVector);
		double distDown = down->distanceToInputVector(inputVector);
		double distDownRight = downRight->distanceToInputVector(inputVector);

		cout << "Distances to side Neurons of the BMU" << endl;
		cout << distUpLeft << " " << distUp << " " << distUpRight << endl;
		cout << distLeft << "--" << distanceToBMU << "-- " << distRight << endl;
		cout << distDownLeft << " " << distDown << " " << distDownRight << endl; 
	}else{
		cout << "The BMU is in the borders" << endl;
	}
*/
}
Example #12
 void train(std::array<T,N> const & x, T const & target, float learningRate) {
   output.error = target - (*this)(x);
   for (int i=0; i<M; ++i) middle.neurons[i].error = output.w[i]*output.error;
   for (int i=0; i<M; ++i) input.neurons[i].error = vzero;
   for (int j=0; j<M; ++j) for (int i=0; i<M; ++i) input.neurons[j].error += middle.neurons[i].w[j]*middle.neurons[i].error;
   input.updateWeight(learningRate);
   middle.updateWeight(learningRate);
   output.updateWeight(learningRate);
 }
Example #13
int main(int argc, char **argv) {

  struct arguments args;
  /* Default values. */
  // arguments.flag = true;
  // arguments.value = 0;

  argp_parse (&argp, argc, argv, 0, 0, &args);

  Neuron* neuronita = new Neuron(args.args[0]);

  for(int i = 0; i < neuronita->axon.size(); i++)
    flattenNeuronSegment(neuronita->axon[i]);
  for(int i = 0; i < neuronita->dendrites.size(); i++)
    flattenNeuronSegment(neuronita->dendrites[i]);

  neuronita->save(args.args[1]);
}
Example #14
void Neuron::collectInputs() {
	double collectedOut = 0;
	vector<Edge*> input = getInputEdges();
	for(int i = 0; i < input.size(); i++) {
		Neuron* start = input[i]->getStart();
		if(start == NULL) {
			//an edge with no source neuron acts as a bias: it contributes -1 * weight
			double w = input[i]->getWeight();
			collectedOut += -1 * w;
		}
		else {
			double w = input[i]->getWeight();
			double out = start->getOutput();
			collectedOut += w * out;
		}
	}

	output = collectedOut;
	updateOutput();
}
Example #15
 void print(void){ //print out synapse stats
    clog << "Syanapse connects to ";
    if(postSyn == NULL){ //this is to prevent crash by segfault
       clog << "a NULL neuron ";
    } else {
       clog << "neuron " << postSyn->getIndex();
    }
    clog << " with weight " << weight << endl;
    return;
 }
Example #16
vector<double> NeuralNetwork::get_output(vector<double> input) {
    queue<Neuron*> open_queue;
    set<Neuron*> closed_set;
    
    for (int i = 0; i < input_neurons_.size(); i++) {
        input_neurons_[i].output_ = input[i];
        
        for (int j = 0; j < input_neurons_[i].outputs_.size(); j++) {
            open_queue.push(input_neurons_[i].outputs_[j]);
            closed_set.insert(input_neurons_[i].outputs_[j]);
        }
    }
    
    for (int i = 0; i < output_neurons_.size(); i++) {
        closed_set.insert(&output_neurons_[i]);
    
        output_neurons_[i].output_ = 0.0;
    }
    
    while (!open_queue.empty()) {
        Neuron* neuron = open_queue.front();
        
        open_queue.pop();
        
        neuron->get_output();
        
        for (int i = 0; i < neuron->outputs_.size(); i++) {
            if (closed_set.find(neuron->outputs_[i]) == closed_set.end()) {
                open_queue.push(neuron->outputs_[i]);
                closed_set.insert(neuron->outputs_[i]);
            }
        }
    }
    
    vector<double> output(output_neurons_.size());
    for (int i = 0; i < output.size(); i++) {
        output_neurons_[i].get_output();
    
        output[i] = output_neurons_[i].output_;
    }
    
    return output;
}
Example #17
int main(int argc, char *argv[]) {

    if(argc < 2) {
        cout << "Usage: ./lab6 <epoch cnt> <inFile>" << endl;
        return 0;
    }
    //Read iris data into 2D vector
    vector <vector <string> > dataSet;
    ifstream inFile;
    inFile.open(argv[1]);
    while(inFile) {
        string temp;
        if(!getline( inFile, temp)) break; //error
        istringstream ss( temp );
        vector<string> tmp;
        while( ss ) {
            string s;
            if(!getline( ss, s, ',' )) break; //error
            tmp.push_back(s);
        }
        dataSet.push_back(tmp);
    }
    inFile.close();
    //Convert 2D string vector into doubles, split off the Yd
    vector< vector<double> > input(dataSet.size());
    vector<double> Yd;
    for( int i = 0; i < dataSet.size(); i++) {
        Yd.push_back(stod(dataSet[i][dataSet[i].size()-1]));
        for( int j = 0; j < dataSet[i].size()-1; j++) {
            input[i].push_back(stod(dataSet[i][j]));
        }
    }

    //Create neuron, test initial weights, train, test again
    Neuron* myNeuron = new Neuron();
    myNeuron->initializeWeights(4);
    test(myNeuron, input, Yd);
    train(myNeuron, input, Yd);
    test(myNeuron, input, Yd);

    return 0;
}
Example #18
void Net::load(ifstream & loadStream) {
	clear();

	name = loadString(loadStream);
	loadStream.read((char*)&data, sizeof(NetData));

	//load private data
	loadStream.read((char*)(&neuronIndex), sizeof(int));
	loadStream.read((char*)(&synapseIndex), sizeof(int));

	//load neurons
	int count;
	loadStream.read((char*)(&count), sizeof(int));
	while(count>0){
		Neuron * newNode = new Neuron;
		newNode->load(loadStream);
		neuron.push_back(newNode);
		count--;
	}

	//load synapses
	loadStream.read((char*)(&count), sizeof(int));
	while(count>0){
		Synapse newSynapse;
		loadStream.read((char*)(&newSynapse.index), sizeof(int));
		loadStream.read((char*)(&newSynapse.weight), sizeof(float));
		loadStream.read((char*)(&newSynapse.potentiation), sizeof(float));
		loadStream.read((char*)(&newSynapse.fire_frequency), sizeof(char));
		loadStream.read((char*)(&newSynapse.fire_timer), sizeof(char));

		int idx;
		loadStream.read((char*)(&idx), sizeof(int));
		newSynapse.from = getNeuron(idx);
		loadStream.read((char*)(&idx), sizeof(int));
		newSynapse.to = getNeuron(idx);

		//cout<<"loading synapse #"<<newSynapse.index<<endl;
		synapse.push_back(newSynapse);
		count--;
	}

}
Example #19
int main(int argc, char **argv) {

  struct arguments args;
  args.theta_file = "";
  args.phi_file = "";
  args.scale_file = "";
  args.cube_name = "";
  args.neuron_name = "";
  args.output_name = "output";
  args.min_width = 0;
  args.renderScale = 1;

  argp_parse(&argp, argc, argv, 0, 0, &args);

  printf("The arguments are:\n  cube_name: %s\n  neuron_name: %s\n"
         "  output_name: %s\n  theta_file: %s\n  phi_file: %s\n  scale_name: %s\n"
         "  min_width: %f\n renderScale = %f\n",
         args.cube_name.c_str(), args.neuron_name.c_str(), args.output_name.c_str(),
         args.theta_file.c_str(), args.phi_file.c_str(), args.scale_file.c_str(),
         args.min_width, args.renderScale);
  // exit(0);

  Neuron* neuron = new Neuron(args.neuron_name);

  Cube<uchar, ulong>* orig = new Cube<uchar,ulong>(args.cube_name);
  Cube<uchar,ulong>* rendered = orig->create_blank_cube_uchar(args.output_name);

  Cube<float, double>* theta = NULL;
  Cube<float, double>* phi   = NULL;
  Cube<float, double>* scale = NULL;

  if(args.theta_file != "")
    theta = orig->create_blank_cube(args.theta_file);
  if(args.phi_file != "")
    phi = orig->create_blank_cube(args.phi_file);
  if(args.scale_file != "")
    scale = orig->create_blank_cube(args.scale_file);

  neuron->renderInCube(rendered, theta, phi, scale, args.min_width, args.renderScale);
  // rendered->put_all(255);
  // neuron->renderSegmentInCube(neuron->dendrites[0],rendered, theta, phi, scale, args.min_width, args.renderScale);
}
Example #20
void Layer::addNeuron(int option, transfert trsf, double mu) // NEW NEURON !!!
{
	Neuron* neuron = new Neuron(this, trsf, mu);
	double weight = 0.0 ;

	if (!isLast()) //create bindings to the neurons of the next layer,
	{ //but only if that layer exists
		for (int i = 0; i < getNextLayer()->getSize(); i++)
		{
			if(option == RAND){
				weight = rand() % 1000;
				weight = (weight - 500) / 500;
			}
			if(option == ZERO)
				weight = 0.0;
			//weight is now a random number between -1 and 1
			//std::cout << weight << std::endl;
			Binding* bdg = new Binding(neuron, weight);
			getNextLayer()->getNeuron(i)->addBinding(bdg); //the neuron added to
			//layer n gets a weight array whose size equals the number of
			//neurons in layer n+1
		}
	}
	if (!isFirst()) //create bindings to the neurons of the previous layer,
	{ //but only if that layer exists
		for (int i = 0; i < getPreviousLayer()->getSize(); i++)
		{
			if(option == RAND){
				weight = rand() % 1000;
				weight = (weight - 500) / 500;
			}
			if(option == ZERO)
				weight = 0.0;
			//weight is now a random number between -1 and 1
			//std::cout << weight << std::endl;
			Binding* bdg = new Binding(getPreviousLayer()->getNeuron(i), weight);
			neuron->addBinding(bdg);
		}
	}
	m_neurons.push_back(neuron);
}
Example #21
NeuralNetwork::NeuralNetwork(NeuralNetwork* n)
{
   m_numInputNeurons = n->m_numInputNeurons;
   m_numHiddenNeurons = n->m_numHiddenNeurons;   
   m_numOutputNeurons = n->m_numOutputNeurons;

   m_numHiddenLayers = n->m_numHiddenLayers;

   CreateNeurons();
   ConnectNeurons();

   for(int layer = 0; layer < m_numHiddenLayers; layer++)
   {
      for(int row = 0; row < Network[layer].size(); row++)
      {
         Neuron* neuronToModify = GetNeuron(layer, row);
         Neuron* neuronToCopy   = n->GetNeuron(layer, row);
         neuronToModify->SetOutputWeights(neuronToCopy->GetOutputWeights());
      }
   }
}
Example #22
double			Network::getSigmaSum(Neuron &neuron)
{
  std::list<t_connection *>::iterator	it;
  std::list<t_connection *>::iterator	end;
  std::list<t_connection *>             &connections = neuron.getConnections();
  double				sigma = 0;
  
  end = connections.end();
  for (it = connections.begin(); it != end; ++it)
    sigma += ((*it)->neuron.getLastSigma() * (*it)->connnectionWeight);
  return sigma;
}
Example #23
        static FunctionWithParameter<double, 1, 1, 2>* createLayer(NeuronE nType, int nrInputs, int nrOutputs)
            {
                Neuron<eNeuronTanh> neuronTanh;
                Neuron<eNeuronSigmoid> neuronSigmoid;
                Neuron<eNeuronLinear> neuronLinear;

                NeuronLayerTanhT *pTanhLayer;
                NeuronLayerSigmoidT *pSigmoidLayer;
                NeuronLayerLinearT *pLinearLayer;

                vector<double> weights;
                Vector::setUniform(weights,nrInputs+1,-1,1);
                switch (nType)
                {
                case eNeuronTanh:
                    neuronTanh.setWeights(weights);
                    pTanhLayer = new NeuronLayerTanhT(nrOutputs, neuronTanh);
                    return pTanhLayer;
                    break;
                case eNeuronSigmoid:
                    neuronSigmoid.setWeights(weights);
                    pSigmoidLayer = new NeuronLayerSigmoidT(nrOutputs, neuronSigmoid);
                    return pSigmoidLayer;
                    break;
                case eNeuronLinear:
                    neuronLinear.setWeights(weights);
                    pLinearLayer = new NeuronLayerLinearT(nrOutputs, neuronLinear);
                    return pLinearLayer;
                    break;
                default:
                    cerr << "Unknown neuron type" << endl;
                    return NULL;
                }
            }
Example #24
	/**
	 * Returns the strength of the owner synapse.
	 * 
	 * @param owner the owner of this SynapseFunction.
	 * @return the strength of the owner.
	 */
	double CloneSimpleSynapseFunction::calculate(Synapse *owner) {
		SimpleSynapseFunction::calculate(owner);
		if(owner == 0) {
			return 0.0;
		}
		if(mTargetId->get() == 0) {
			//per default set the id to the own synapse.
			mTargetId->set(owner->getId());
		}
		if(mLastKnownTargetId != mTargetId->get()) {
			mTargetSynapse = 0;
		}
		mLastKnownTargetId = mTargetId->get();
		if(mTargetId->get() == owner->getId()) {
			mTargetSynapse = owner;
		}
		else {
			Neuron *neuron = owner->getSource();
			QList<Synapse*> synapses;
			if(neuron != 0) {
				NeuralNetwork *network = neuron->getOwnerNetwork();
				if(network != 0) {
					synapses = network->getSynapses();
				}
			}
			if(mTargetSynapse == 0) {
				mTargetSynapse = NeuralNetwork::selectSynapseById(mTargetId->get(), synapses);
			}
			if(mTargetSynapse != 0) {
				if(!synapses.contains(mTargetSynapse)) {
					mTargetSynapse = 0;
				}
			}
			if(mTargetSynapse != 0) {
				owner->getStrengthValue().set(mTargetSynapse->getStrengthValue().get());
			}
		}
		
		return SimpleSynapseFunction::calculate(owner);
	}
Example #25
File: main.cpp Project: Ludusamo/AI
void train(std::vector<Data> knowndata, Neuron &neuron) {
	int currentTrainingIndex = 0;
	int dataSize = knowndata.size();

	for (int numLeft = dataSize; numLeft != 0; numLeft--) {
		if (DEBUG) {
			std::cout << "===============================================" << std::endl;
			std::cout << "Currently Training: " << currentTrainingIndex << std::endl;
		}
		Data curr = knowndata[currentTrainingIndex];
		int classificationGuess = neuron.classify(curr.input);
		if (classificationGuess != curr.classification) {
			// Reweight based on data
			neuron.recalculateWeights(learningRate, classificationGuess, curr.classification, curr.input);
			numLeft = dataSize + 1; // Resets so that it cycles through to recheck everything
		}

		if (currentTrainingIndex < dataSize - 1) currentTrainingIndex++;
		else currentTrainingIndex = 0;
		std::cout << std::endl;
	}
}
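Example #25 calls Neuron::recalculateWeights without showing it. A standalone sketch of the perceptron-style update it presumably performs, nudging each weight in the direction that reduces the error on the current sample (names and container types here are illustrative, not the project's API):

#include <cstddef>
#include <vector>

// Hypothetical perceptron update: w_i += learningRate * (target - guess) * x_i.
void perceptronUpdate(std::vector<double>& weights, double learningRate,
                      int guess, int target, const std::vector<double>& input) {
    double err = static_cast<double>(target - guess);
    for (std::size_t i = 0; i < weights.size() && i < input.size(); ++i) {
        weights[i] += learningRate * err * input[i];
    }
}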
Example #26
double RungeKutta::addWeightedNeighbors(Neuron& n) {
    double sum = 0.0;
    vector<double> weights = n.getWeights();
    
    for (int i = 0; i < m_cur_network->size(); i++) {
        // there is a weight offset of 2 because the metadata we are
        // interested in starts 2 into the array
        // 0th and 1st are Tau and B
        sum += weights[i + WEIGHT_OFFSET] * (*m_cur_network)[i].getX();
    }
    
    return sum;
}
Example #27
bool ChangeActivationFunctionCommand::undoCommand() {
	if(mNewActivationFunction == 0 || mVisualizationContext == 0) 
	{
		return false;
	}	
	
	bool allowUndo = true;

	QMutexLocker guard(mVisualizationContext->getSelectionMutex());

	for(int i = 0; i < mNeurons.size() && i < mOldActivationFunctions.size(); ++i) {
		Neuron *neuron = mNeurons.at(i);
		ActivationFunction *af = mOldActivationFunctions.at(i);
		if(neuron == 0 || af == 0) {
			allowUndo = false;
			continue;
		}
		mNewActivationFunctions.append(neuron->getActivationFunction());
		neuron->setActivationFunction(af);
		neuron->getActivationFunction()->reset(neuron);
	}
	
	mOldActivationFunctions.clear();
	
// 	for(int i = 0; i < mNeurons.size() && i < mOldActivationFunctions.size(); ++i) {
// 		Neuron *neuron = mNeurons.at(i);
// 		ActivationFunction *af = mOldActivationFunctions.at(i);
// 		if(neuron == 0 || af == 0) {
// 			continue;
// 		}
// 		neuron->setActivationFunction(*af);
// 	}
	
	Neuro::getNeuralNetworkManager()->triggerNetworkStructureChangedEvent();
	//mVisualizationContext->notifyNeuralNetworkModified();

	return allowUndo;
}
Example #28
void			Network::adjustLayerConnectionWeight(double learning_ratio, Neuron &neuron)
{
  std::list<t_connection *>::iterator	it;
  std::list<t_connection *>::iterator	end;
  std::list<t_connection *>		&connections = neuron.getConnections();
  double					delta;
  double					sigma;
  int					i = 0;

  end = connections.end();
  for (it = connections.begin(); it != end; ++it)
    {
      sigma = this->getSigmaSum((*it)->neuron) * (*it)->neuron.getLastOut() * (1.f - (*it)->neuron.getLastOut());
      (*it)->neuron.setLastSigma(sigma);
      delta = learning_ratio * sigma * neuron.getLastOut() + ALPHA * (*it)->connnectionWeight;
      // std::cout << "delta = " << delta << std::endl;
      (*it)->connnectionWeight += delta;
      // if (delta != 0.f)
	// std::cout << "delta = " << delta<< std::endl;
      // std::cout << "last out : " << (*it)->neuron.getLastOut() << std::endl;
      ++i;
    }
  neuron.drawConnections();
}
Example #29
 NeuralNet::NeuralNet(const NetTopology& layers, float min_w, float max_w)
 {
     NetTopology::const_iterator it;
     if(layers.begin() == layers.end())
     {
         throw "NeuralNet::NeuralNet: too small net";
     }
     for(it = layers.begin() + 1; it != layers.end(); it++)
     {
         NeuronLayer nl;
         int num_neurons = *it;
         int num_neurons_prev_layer = *(it - 1);
         for(int i = 0; i < num_neurons; i++)
         {
             Neuron n;
             for(int j = 0; j < num_neurons_prev_layer; j++)
             {
                 n.push_back(addutil::general::rand_float(min_w, max_w));
             }
             nl.push_back(n);
         }
         m_layers.push_back(nl);
     }
 }
Example #30
void SelfOrganizingMaps::evaluateIndependentRGBDataSet(vector<RGB *> inputDataset, int sigmaMultiplier){
	vector<double> weights;
	vector<double> distances;
	vector<Neuron *> bmus;
	double distance;
	double totalDistance = 0.0;
	double average;
	double variance;
	double stdDeviation;
	double sigma;
	double lowerRange;
	double upperRange;
	int errors = 0;
	weights.resize(3);
	
	for(int i=0; i<inputDataset.size(); i++){
		weights[0] = inputDataset[i]->getRed();
		weights[1] = inputDataset[i]->getGreen();
		weights[2] = inputDataset[i]->getBlue();

		Neuron *bmu = getBMU(weights);
		distance = bmu->distanceToInputVector(weights);
		distances.push_back(distance);
		bmus.push_back(bmu);
		totalDistance += distance;
	}

	average = totalDistance/inputDataset.size();
	variance = Utils::getVariance(distances, average);
	stdDeviation = sqrt(variance);
	sigma = sigmaMultiplier*stdDeviation;
	lowerRange = average - sigma;
	upperRange = average + sigma;

	for(int i=0; i<inputDataset.size();i++){
		distance = distances[i];
		Neuron *bmu = bmus[i];
		if(distance < lowerRange || distance > upperRange){
			_matrix->getNeuron(bmu->getX(), bmu->getY())->setNeuronColor(255,0,0);
			errors++;
			cout << "Error out of " << sigmaMultiplier << " sigma" << endl;
		}else{
			_matrix->getNeuron(bmu->getX(), bmu->getY())->setNeuronColor(0,0,0);
			cout << "OK" << endl;
		}
	}
	cout << "Dataset results for " << sigmaMultiplier << " sigma " << errors;
	cout << "/" << inputDataset.size() << endl;
}