void FFNeuralNetTest::TestFFNetFeedForward()
    {
        std::map<std::string, double> params = {
            {"a", 0.0},
            {"z", 0.0},
            {"delta", 0.0},
            {"deltaUpdate", 0.0},
            {"bias", 1.0},
            {"weight", 1.0}
        };
        this->SetupGraph(this->_pNet, params);

        // every edge should be weighted 1.0, every bias should be 1.0.
        std::vector<double> inputs = {1.0, 1.0};

        double output = Sigmoid(1.0 + 3 * Sigmoid(3.0));

        std::vector<double> expectedOutputs = {output};
        EXPECT_EQ(expectedOutputs, this->_pNet->FeedForward(inputs));

        this->_pNet = std::make_shared<FFNeuralNet>(std::vector<int>{2, 3, 2});
        this->SetupGraph(this->_pNet, params);

        expectedOutputs = {output, output};
        EXPECT_EQ(expectedOutputs, this->_pNet->FeedForward(inputs));
    }
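The expected value in the test above follows from a 2-3-1 topology with unit weights and biases: each hidden neuron receives 1.0 + 1.0 + 1.0 (bias) = 3.0, so the output neuron receives 3 * Sigmoid(3.0) + 1.0. The simpler examples in this listing assume a plain scalar helper like the sketch below (the standard logistic; individual projects add steepness, threshold, or offset parameters, as later examples show).

#include <cmath>

// Minimal sketch of the scalar helper assumed by the simple feed-forward examples.
double Sigmoid(double x)
{
    return 1.0 / (1.0 + std::exp(-x));
}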
Example #2
//feed the network forward to produce the outputs
void RedNeuronal::FeedForward(void)
{
    // loop counters
    int i, j;
    // accumulator for the weighted sum of inputs
    double synapse_sum = 0.0;

    //feed forward through the hidden layer
    for ( i = 0; i < hidden_num; i++ ) {
        for ( j = 0; j < input_num; j++ ) {
            synapse_sum += InputPeso[j][i]*Input_nudo[j];
        }
        Hidden_nudo[i] = Sigmoid( synapse_sum + HBias[i]);

        //reset the accumulator for the next neuron
        synapse_sum = 0.0;
    }

    //feed forward through the output layer
    for ( i = 0; i < output_num; i++ ) {
        for ( j = 0; j < hidden_num; j++ ) {
            synapse_sum += HiddenPeso[j][i]*Hidden_nudo[j];
        }
        Output_nudo[i] = Sigmoid( synapse_sum + OBias[i] );

        //reset the accumulator
        synapse_sum = 0.0;
    }
}
Example #3
/* propagate the input through the neural network */
void ForwardPropagation(int* input, float* output)
{
	int layer, node, inNode;
	float tempO;

	/* propagate through first hidden layer */
	for(node = 0; node < neuronsPerLayer; node++)
	{
		tempO = 0;

		for(inNode = 0; inNode < neuronsPerLayer; inNode++)
		{
			tempO += input[inNode]*hiddenWeights[0][node][inNode];
		}

		hiddenNodeOutput[0][node] = Sigmoid(tempO + hiddenNodeBias[0][node]);

	}

	/* propagate through each next hidden layer */
	for(layer = 1; layer < numHiddenLayers; layer++)
	{
		for(node = 0; node < neuronsPerLayer; node++)
		{
			tempO = 0;

			for(inNode = 0; inNode < neuronsPerLayer; inNode++)
			{
				tempO += hiddenNodeOutput[layer-1][inNode]*hiddenWeights[layer][node][inNode];
			}

			hiddenNodeOutput[layer][node] = Sigmoid(tempO + hiddenNodeBias[layer][node]);

		}

	}

	layer = numHiddenLayers-1;
	/* propagate through output weights */
	for(node = 0; node < numOutputNodes; node++)
	{
		tempO = 0;

		for(inNode = 0; inNode < neuronsPerLayer; inNode++)
		{
			tempO += hiddenNodeOutput[layer][inNode]*outputWeights[node][inNode];
		}

		output[node] = Sigmoid(tempO + outputBias[node]);

	}

	return;
}
Example #4
double Neuron::run(std::vector<double> inputs)
{
    double sum = 0;
    int inputSize = inputs.size();
    int weightsSize = m_weights.size()-1;
    //bad size of inputs
    if(inputSize != weightsSize)
    {
        return 0;
    }
    m_lastInputs = inputs;
    //sums the weights * inputs
#ifndef USE_INTEL_IPP
    for(int i=0; i< weightsSize; i++)
    {
        sum += m_weights[i] * inputs[i];
    }
#else
    std::vector<Ipp64f> productVector(weightsSize); // avoid a variable-length array, which is not standard C++
    ippsMul_64f(m_weights.data(),inputs.data(),productVector.data(),weightsSize); // multiply input and weight into productVector
    ippsSum_64f(productVector.data(),weightsSize,&sum); // sum product vector elements
#endif
    //adds in the bias
    sum += m_weights[weightsSize];
    //pass the sum through the sigmoid and return
    m_lastOutput = Sigmoid(sum);
    return m_lastOutput;
}
Example #5
void NeuralNet::ForwardPass1Bunch(int bunchOccupation, float *pInp, float *pOut)
{
//   DumpMatrix(mpInputPatterns16, bunchOccupation, mNInp16, mNInp16);

   // normalize features
   if(mUseNorms)
   {
      Normalize(bunchOccupation, pInp, mNInp16, mpNormMeans16, mpNormDevs16);
   }
   
   // forward pass to hidden layer
   PrepareBiases(bunchOccupation, mNHid16, mpHiddenBiases16, mpHiddenPatterns16);
   MatrixMultiplyAndAdd(bunchOccupation, mNInp16, mNHid16, 
                        mpInputPatterns16, mpInpHidMatrix16, mpHiddenPatterns16);  
      
   Sigmoid(bunchOccupation, mpHiddenPatterns16, mNHid, mNHid16);

   // forward pass to output
   PrepareBiases(bunchOccupation, mNOut16, mpOutputBiases16, mpOutputPatterns16);
   MatrixMultiplyAndAdd(bunchOccupation, mNHid16, mNOut16, 
                        mpHiddenPatterns16, mpHidOutMatrix16, mpOutputPatterns16);  

//   puts("xxx - out");
//   DumpMatrix(mpOutputPatterns16, bunchOccupation, mNOut, mNOut16);
   SoftMax(bunchOccupation, mpOutputPatterns16, mNOut, mNOut16);
//   puts("xxx - out - softmax");
//   DumpMatrix(mpOutputPatterns16, bunchOccupation, mNOut, mNOut16);
}
Example #6
//-------------------------------Update-----------------------------------
//
//	given an input vector this function calculates the output vector
//
//------------------------------------------------------------------------
vector<double> CNeuralNet::Update(vector<double> &inputs)
{
	
	if(numOperacion >=4)
		numOperacion = 0;

	int cWeight = 0;

	//first check that we have the correct amount of inputs
	if (inputs.size() != m_NumInputs)
	{
		//just return an empty vector if incorrect.
		vector<double>	p;
		return p;
	}

	//For each layer....
	for (int i=0; i<m_NumHiddenLayers + 1; ++i)
	{		
		if ( i > 0 )
		{
			inputs = outputs[numOperacion][i-1];
		}


		cWeight = 0;

		//for each neuron, sum the (inputs * corresponding weights). Throw
		//the total at our sigmoid function to get the output.
		for (int j=0; j<m_vecLayers[i].m_NumNeurons; ++j)
		{
			double netinput = 0;

			int	NumInputs = m_vecLayers[i].m_vecNeurons[j].m_NumInputs;

			//for each weight
			for (int k=0; k<NumInputs - 1; ++k)
			{
				//sum the weights x inputs
				netinput += m_vecLayers[i].m_vecNeurons[j].m_vecWeight[k] * 
					inputs[cWeight++];
			}

			//add in the bias
			netinput += m_vecLayers[i].m_vecNeurons[j].m_vecWeight[NumInputs-1] * 
				CParams::dBias;

			//we can store the outputs from each layer as we generate them. 
			//The combined activation is first filtered through the sigmoid 
			//function
			outputs[numOperacion][i][j] = Sigmoid(netinput,
				CParams::dActivationResponse);

			cWeight = 0;
		}
	}

	return outputs[numOperacion++][m_NumHiddenLayers];
}
Example #7
// Firing law: probability 1. Standard neurodynamics of the cell
// Lots to explore here. 
data_type SAM_Unit::FiringProbability( data_type pot, data_type thresh ) 
{
#if 0
	return( ( pot > thresh ) ? 1.0 : 0.0 );
#else
	return Sigmoid( c, pot - thresh );
#endif
}
Example #8
//-------------------------------Update-----------------------------------
//
//	given an input vector this function calculates the output vector
//
//------------------------------------------------------------------------
vector<double> CNeuralNet::Update(vector<double> &inputs)
{
	//stores the resultant outputs from each layer
	vector<double> outputs;

	int cWeight = 0;
	
	//first check that we have the correct amount of inputs
	if (inputs.size() != m_NumInputs)
  {
		//just return an empty vector if incorrect.
		return outputs;
  }
	
	//For each layer....
	for (int i=0; i<m_NumHiddenLayers + 1; ++i)
	{		
		if ( i > 0 )
    {
			inputs = outputs;
    }

		outputs.clear();
		
		cWeight = 0;

		//for each neuron, sum the (inputs * corresponding weights). Throw
		//the total at our sigmoid function to get the output.
		for (int j=0; j<m_vecLayers[i].m_NumNeurons; ++j)
		{
			double netinput = 0;

			int	NumInputs = m_vecLayers[i].m_vecNeurons[j].m_NumInputs;
			
			//for each weight
			for (int k=0; k<NumInputs - 1; ++k)
			{
				//sum the weights x inputs
				netinput += m_vecLayers[i].m_vecNeurons[j].m_vecWeight[k] * 
                    inputs[cWeight++];
			}

			//add in the bias
			netinput += m_vecLayers[i].m_vecNeurons[j].m_vecWeight[NumInputs-1] * 
                  D_BIAS;

			//we can store the outputs from each layer as we generate them. 
      //The combined activation is first filtered through the sigmoid 
      //function
			outputs.push_back(Sigmoid(netinput,
                                D_ACTIVE_RESPONSE));

			cWeight = 0;
		}
	}

	return outputs;
}
Example #9
TracePoint::TracePoint(const AircraftState &state):
  SearchPoint(state.location),
  time((unsigned)state.time),
  altitude(state.altitude),
  vario(state.netto_vario),
  engine_noise_level(0),
  drift_factor(Sigmoid(state.altitude_agl / 100) * 256)
{
}
Example #10
void SparseAutoencoder<OptimizerType>::GetNewFeatures(arma::mat& data,
                                                      arma::mat& features)
{
  const size_t l1 = hiddenSize;
  const size_t l2 = visibleSize;

  Sigmoid(parameters.submat(0, 0, l1 - 1, l2 - 1) * data +
      arma::repmat(parameters.submat(0, l2, l1 - 1, l2), 1, data.n_cols),
      features);
}
Example #11
double NetWorkLayer::AddUp(int n, const vector<double> &input) {
  double result = 0;
  // the last entry of input is a check value,
  // so only the first inputNum entries are summed
  for(int i = 0; i < inputNum; ++i) {
    result += weights[n][i] * input[i];
  }
  //cout << "compute " << Sigmoid(result) << endl;
  return Sigmoid(result - bias);
}
Example #12
TracePoint::TracePoint(const MoreData &basic)
  :SearchPoint(basic.location),
   time((unsigned)basic.time),
   altitude(basic.nav_altitude),
   vario(basic.netto_vario),
   engine_noise_level(basic.engine_noise_level_available
                      ? basic.engine_noise_level
                      : 0u),
   drift_factor(Sigmoid(basic.nav_altitude / 100) * 256)
{
}
Example #13
double LogisticRegression::CalcFuncOutByFeaVec(vector<FeaValNode> & FeaValNodeVec)
{
	double dX = 0.0;
	vector<FeaValNode>::iterator p = FeaValNodeVec.begin();
	while (p != FeaValNodeVec.end())
	{
		if (p->iFeatureId < (int)ThetaVec.size())	// guard against out-of-range feature ids: never trust the input
			dX += ThetaVec[p->iFeatureId] * p->dValue;			
		p++;
	}
	double dY = Sigmoid (dX);
	return dY;
}
Example #14
double VarRTM::PredictAUC(RTMC &m, Mat &z_bar) {
  VReal real,pre;
  for (int d = 0; d < held_out_net_.cols(); d++) {
    for (SpMatInIt it(held_out_net_, d); it; ++it) {
      double label = it.value();
      Vec pi = z_bar.col(d).cwiseProduct(z_bar.col(it.index()));
      double prob = Sigmoid(pi.dot(m.eta));
      real.push_back(label);
      pre.push_back(prob);
    }
  }
  return AUC(real,pre);
}
Example #15
float* SA_Layer::Forward_Propagate_Layer(float* input_layers)      // f( sigma(weights * inputs) + bias )
{
    this->input_layer=input_layers;
    for(int j = 0 ; j < num_current_node ; j++)
    {
        float net= 0;
        for(int i = 0 ; i < num_previous_node ; i++)
        {
            net += input_layer[i] * weight[j*num_previous_node+i];
        }
        net+=bias_weights[j];
        
        output_layer[j] = Sigmoid(net);
    }
    return output_layer;
}
Example #16
void CNeuralNet::feedForward(std::vector<double> inputs)
{
	std::vector<double> _inputs = inputs;	//copy the inputs into a local working vector
	

	vector<double> outputs; //store the result of the outputs from each layer

	//For both layers (hidden and output)  
	for (int i = 0; i < 2; ++i)
	{

		if (i > 0)				//checks which layer you're on
		{
			_inputs = outputs;	//sets inputs to outputs if you're past the 'first' layer
		}

		outputs.clear();

	//iterate through neurons and get the sum of weights * inputs  
		for (int n = 0; n < m_vecLayer[i].m_iNumNeurons; ++n)
		{
			double netinput = 0;

			int NumInputs = m_vecLayer[i].m_vecNeurons[n].m_iNumInputs;

			//for each weight   
			for (int k = 0; k < NumInputs; ++k)
			{
				//sum the weights x inputs   
				netinput += m_vecLayer[i].m_vecNeurons[n].m_vecWeight[k] * _inputs[k];
			}

			//The combined activation is first filtered through the sigmoid    
			//function and a record is kept for each neuron    
			m_vecLayer[i].m_vecNeurons[n].m_Activation = Sigmoid(netinput);

			//store the outputs from each layer as we generate them.   
			outputs.push_back(m_vecLayer[i].m_vecNeurons[n].m_Activation);
		}
	}
	_outputActivation = outputs; //save what was in the output layer for later reference
	//std::cout << "Feedforward complete" << std::endl;
}
Example #17
    std::vector<std::string> TermFactory::available() const {
        std::vector<std::string> result;
        result.push_back(Discrete().className());
        result.push_back(Bell().className());
        result.push_back(Gaussian().className());
        result.push_back(GaussianProduct().className());

        result.push_back(PiShape().className());
        result.push_back(Ramp().className());
        result.push_back(Rectangle().className());
        result.push_back(SShape().className());
        result.push_back(Sigmoid().className());

        result.push_back(SigmoidDifference().className());
        result.push_back(SigmoidProduct().className());
        result.push_back(Trapezoid().className());
        result.push_back(Triangle().className());
        result.push_back(ZShape().className());
        return result;
    }
Example #18
// Computes the activation and output of the neuron if not fresh
// by pulling the outputs of all fan-in neurons
void Neuron::FeedForward() {
  if (!frwd_dirty_ ) {
    return;
  }
  // nothing to do for input nodes: just pass the input to the o/p
  // otherwise, pull the output of all fan-in neurons
  if (node_type_ != Input) {
    int fan_in_cnt = fan_in_.size();
    // accumulate the activation, starting from the negative bias
    activation_ = -bias_;
    for (int in = 0; in < fan_in_cnt; in++) {
      if (fan_in_[in]->frwd_dirty_) {
        fan_in_[in]->FeedForward();
      }
      activation_ += ((*(fan_in_weights_[in])) * fan_in_[in]->output_);
    }
    // sigmoid it
    output_ = Sigmoid(activation_);
  }
  frwd_dirty_ = false;
}
Example #19
void LogisticRegression::Test()
{
	cout << "Test ......." << endl;
	
	ifstream fin(this->str_test_file_.c_str());
	if(!fin)
	{
		cerr << "Cannot read test data file!" << endl;
		exit(0);
	}

	string line;
	while(getline(fin, line))
	{
		vector<double> vd_data = split2double(line, "\t");
		int label = (int)vd_data.back();
		vd_data.pop_back();
		this->vec_test_label_.push_back(label);
		this->vec_test_data_.push_back(vd_data);
	}
	fin.close();

	ScaleData(this->vec_test_data_);

	//test
	int i_error_count = 0;
	for(int i = 0; i < this->vec_test_data_.size(); i++)
	{
		double dou_prob = Sigmoid(this->vec_test_data_[i]);	
		int i_ret_label = dou_prob > 0.5 ? 1 : 0;
		if(i_ret_label != this->vec_test_label_[i])
		{
			i_error_count ++;
		}
	}
	
	cout << "the error rate of this test is " << 1.0 * i_error_count / this->vec_test_data_.size() << endl;

}
Example #20
/****
phi: indexed by doc_id; each matrix is topic * doc_size
z_bar: topic * doc_id
****/
void VarRTM::Infer(CorpusC &cor, RTMC &m, RTMVar* var) const {
  var->Init(cor, m);
  for(int it = 1; it < var_max_iter_; ++it) {
    for (size_t d = 0; d < cor.Len(); d++) {
      for(int it2 = 1; it2 < doc_var_max_iter_; ++it2) {
        Vec vec(m.TopicNum());
        vec.setZero();
        for (SpMatInIt it(net, d); it; ++it) {
          Vec pi = var->z_bar.col(d).cwiseProduct(var->z_bar.col(it.index()));
          vec += (1 - Sigmoid(m.eta.dot(pi)))*var->z_bar.col(it.index());
        }
        Vec gradient = vec.cwiseProduct(m.eta);
        gradient /= cor.TLen(d); //every word is considered to be different
        Vec expect_theta;
        ExpectTheta(var->gamma.col(d), &expect_theta);
        for (size_t n = 0; n < cor.ULen(d); n++) {
          for (int k = 0; k < m.TopicNum(); k++) {
            (var->phi)[d](k, n) = expect_theta[k] + 
                                m.ln_w(k, cor.Word(d, n)) + gradient[k];
          }
          double ln_phi_sum = LogSum((var->phi)[d].col(n));
          for (int k = 0; k < m.TopicNum(); k++) { //normalize phi
            (var->phi)[d](k, n) = exp((var->phi)[d](k, n) - ln_phi_sum);
          }
        }
        var->gamma.setConstant(m.alpha);
        for (size_t n = 0; n < cor.ULen(d); n++) {
          for (int k = 0; k < m.TopicNum(); k++) {
            var->gamma(k, d) += cor.Count(d, n) * (var->phi)[d](k, n);
          }
        }
        var->z_bar.col(d) = ZBar(cor.docs[d], var->phi[d]);
      }
    }
  }
}
Example #21
 TermFactory::TermFactory() : ConstructionFactory<Term*>("Term") {
     registerConstructor("", fl::null);
     registerConstructor(Bell().className(), &(Bell::constructor));
     registerConstructor(Binary().className(), &(Binary::constructor));
     registerConstructor(Concave().className(), &(Concave::constructor));
     registerConstructor(Constant().className(), &(Constant::constructor));
     registerConstructor(Cosine().className(), &(Cosine::constructor));
     registerConstructor(Discrete().className(), &(Discrete::constructor));
     registerConstructor(Function().className(), &(Function::constructor));
     registerConstructor(Gaussian().className(), &(Gaussian::constructor));
     registerConstructor(GaussianProduct().className(), &(GaussianProduct::constructor));
     registerConstructor(Linear().className(), &(Linear::constructor));
     registerConstructor(PiShape().className(), &(PiShape::constructor));
     registerConstructor(Ramp().className(), &(Ramp::constructor));
     registerConstructor(Rectangle().className(), &(Rectangle::constructor));
     registerConstructor(SShape().className(), &(SShape::constructor));
     registerConstructor(Sigmoid().className(), &(Sigmoid::constructor));
     registerConstructor(SigmoidDifference().className(), &(SigmoidDifference::constructor));
     registerConstructor(SigmoidProduct().className(), &(SigmoidProduct::constructor));
     registerConstructor(Spike().className(), &(Spike::constructor));
     registerConstructor(Trapezoid().className(), &(Trapezoid::constructor));
     registerConstructor(Triangle().className(), &(Triangle::constructor));
     registerConstructor(ZShape().className(), &(ZShape::constructor));
 }
Example #22
    Term* TermFactory::create(const std::string& className,
            const std::vector<scalar>& params) const {
        int requiredParams = -1;
        if (className == Discrete().className()) {
            if ((int)params.size() % 2 == 0) {
                Discrete* term = new Discrete();
                for (int i = 0; i < (int)params.size() - 1; i += 2) {
                    term->x.push_back(params.at(i));
                    term->y.push_back(params.at(i+1));
                }
                return term;
            } else {
                std::ostringstream ex;
                ex << "[syntax error] a discrete term requires an even list of values, "
                        "but found <" << params.size() << "> values";
                throw fl::Exception(ex.str(), FL_AT);
            }
        }

        if (className == Bell().className()) {
            if ((int)params.size() >= (requiredParams = 3)) {
                return new Bell("", params.at(0), params.at(1), params.at(2));
            }
        }

        if (className == Gaussian().className()) {
            if ((int)params.size() >= (requiredParams = 2)) {
                return new Gaussian("", params.at(0), params.at(1));
            }
        }

        if (className == GaussianProduct().className()) {
            if ((int)params.size() >= (requiredParams = 4)) {
                return new GaussianProduct("", params.at(0), params.at(1), params.at(2), params.at(3));
            }
        }

        if (className == PiShape().className()) {
            if ((int)params.size() >= (requiredParams = 4)) {
                return new PiShape("", params.at(0), params.at(1), params.at(2), params.at(3));
            }
        }

        if (className == Ramp().className()) {
            if ((int)params.size() >= (requiredParams = 2)) {
                return new Ramp("", params.at(0), params.at(1));
            }
        }


        if (className == Rectangle().className()) {
            if ((int)params.size() >= (requiredParams = 2)) {
                return new Rectangle("", params.at(0), params.at(1));
            }
        }

        if (className == SShape().className()) {
            if ((int)params.size() >= (requiredParams = 2)) {
                return new SShape("", params.at(0), params.at(1));
            }
        }

        if (className == Sigmoid().className()) {
            if ((int)params.size() >= (requiredParams = 2)) {
                return new Sigmoid("", params.at(0), params.at(1));
            }
        }

        if (className == SigmoidDifference().className()) {
            if ((int)params.size() >= (requiredParams = 4)) {
                return new SigmoidDifference("", params.at(0), params.at(1), params.at(2), params.at(3));
            }
        }

        if (className == SigmoidProduct().className()) {
            if ((int)params.size() >= (requiredParams = 4)) {
                return new SigmoidProduct("", params.at(0), params.at(1), params.at(2), params.at(3));
            }
        }

        if (className == Trapezoid().className()) {
            if ((int)params.size() >= (requiredParams = 4))
                return new Trapezoid("", params.at(0), params.at(1), params.at(2), params.at(3));
        }

        if (className == Triangle().className()) {
            if ((int)params.size() >= (requiredParams = 3))
                return new Triangle("", params.at(0), params.at(1), params.at(2));
        }

        if (className == ZShape().className()) {
            if ((int)params.size() >= (requiredParams = 2)) {
                return new ZShape("", params.at(0), params.at(1));
            }
        }

        if (requiredParams >= 0) {
            std::ostringstream ex;
            ex << "[factory error] Term of class<" + className + "> "
                    "requires " << requiredParams << " parameters";
            throw fl::Exception(ex.str(), FL_AT);
        }
        throw fl::Exception("[factory error] Term of class <" + className + "> not recognized", FL_AT);
    }
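A hypothetical usage sketch for the factory method above; the fl:: namespace and the (inflection, slope) parameter order are assumptions based on the two-argument Sigmoid constructor call shown in create().

// Hypothetical usage sketch; parameter meanings are assumptions.
fl::TermFactory factory;
std::vector<fl::scalar> params;
params.push_back(-1.0); // assumed inflection point
params.push_back(4.0);  // assumed slope
fl::Term* term = factory.create(fl::Sigmoid().className(), params); // caller owns the returned Term*
delete term;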
Example #23
		void AnnSigmoid(const float * src, size_t size, const float * slope, float * dst)
		{
			float s = slope[0];
			for (size_t i = 0; i < size; ++i)
				dst[i] = Sigmoid(src[i] * s);
		}
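A small usage sketch for the helper above; the array contents are made up for illustration.

void AnnSigmoidExample()
{
	const float src[4] = { -2.0f, -0.5f, 0.5f, 2.0f };
	const float slope[1] = { 1.0f };
	float dst[4];
	AnnSigmoid(src, 4, slope, dst); // dst[i] = Sigmoid(src[i] * slope[0])
}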
Example #24
double VarMGCTM::Infer(DocC &doc, MGCTMC &m, MGVar* para) const {
  double c = 1;
  InitVar(doc, m, para);
  MGVar &p = *para;
  for(int it = 1; (c > converged_.var_converged_) && (it < 
                       converged_.var_max_iter_); ++it) {
    //indicator variable eta
    Vec g_theta_ep;
    GThetaEp(p.g_theta, &g_theta_ep);
    Mat l_theta_ep;
    LThetaEp(p.l_theta, &l_theta_ep);
    for (int j = 0; j < m.LTopicNum1(); j++) {
      p.eta[j] = log(m.pi[j]);
      int l_topic_num = m.LTopicNum2();
      p.eta[j] += lgamma(l_topic_num*m.l_alpha[j]);
      p.eta[j] -= l_topic_num*lgamma(m.l_alpha[j]);
      for (int k = 0; k < m.LTopicNum2(); k++) {
        p.eta[j] += (m.l_alpha[j] - 1)*l_theta_ep(k, j);
      }

      for (size_t n = 0; n < doc.ULen(); n++) {
        double a = 0;
        for (int k = 0; k < m.LTopicNum2(); k++) {
          a += p.l_z[j](k, n) * l_theta_ep(k, j);
          a += p.l_z[j](k, n) * m.l_ln_w[j](k, doc.Word(n));
        }
        p.eta[j] += p.delta[n]*a*doc.Count(n);
      }
    }

    double ln_eta_sum = LogSum(p.eta);
    for (int j = 0; j < m.LTopicNum1(); j++) { //normalize eta
      p.eta[j] = exp(p.eta[j] - ln_eta_sum);
    }

    //omega
    p.omega[1] = m.gamma[1];
    for (size_t n = 0; n < doc.ULen(); n++) {
      p.omega[1] += p.delta(n)*doc.Count(n);
    }

    p.omega[0] = m.gamma[0];
    for (size_t n = 0; n < doc.ULen(); n++) {
      p.omega[0] += (1 - p.delta(n))*doc.Count(n);
    }

    //local theta
    for (int j = 0; j < m.LTopicNum1(); j++) {
      for (int k = 0; k < m.LTopicNum2(); k++) {
        p.l_theta(k, j) = p.eta[j] * m.l_alpha[j];
      }
    }
    for (int j = 0; j < m.LTopicNum1(); j++) {
      for (int k = 0; k < m.LTopicNum2(); k++) {
        for (size_t n = 0; n < doc.ULen(); n++) {
          p.l_theta(k, j) += doc.Count(n)* p.delta[n]*p.eta[j]* p.l_z[j](k, n);
        }
        p.l_theta(k, j) += 1 - p.eta[j];
      }
    }

    //global theta
    p.g_theta.setConstant(m.g_alpha);
    for (size_t n = 0; n < doc.ULen(); n++) {
      for (int k = 0; k < m.GTopicNum(); k++) {
        p.g_theta[k] += doc.Count(n) * p.g_z(k, n) * (1 - p.delta[n]);
      }
    }
 
    for (size_t n = 0; n < doc.ULen(); n++) {
      //variable delta
      double tmp = DiGamma(m.gamma[1]) - DiGamma(m.gamma[0]);
      for (int j = 0; j < m.LTopicNum1(); j++) {
        for (int k = 0; k < m.LTopicNum2(); k++) {
          tmp += p.eta[j]*p.l_z[j](k, n)*l_theta_ep(k,j); 
          tmp += p.eta[j]*p.l_z[j](k, n)*m.l_ln_w[j](k, doc.Word(n));
        }
      }
      for (int k = 0; k < m.GTopicNum(); k++) {
        tmp -= p.g_z(k, n) * g_theta_ep[k];
        tmp -= p.g_z(k, n) * m.g_ln_w(k, doc.Word(n));
      }
      p.delta[n] = Sigmoid(tmp);

      //local z
      for (int j = 0; j < m.LTopicNum1(); j++) {
        for (int k = 0; k < m.LTopicNum2(); k++) {
          p.l_z[j](k, n) = p.delta[n]*p.eta[j]*(l_theta_ep(k, j) +
                           m.l_ln_w[j](k, doc.Word(n)));
        }
        double ln_local_z_sum = LogSum(p.l_z[j].col(n));
        for (int k = 0; k < m.LTopicNum2(); k++) {
          p.l_z[j](k, n) = exp(p.l_z[j](k, n) - ln_local_z_sum);
        }
      }

      //global z
      for (int k = 0; k < m.GTopicNum(); k++) {
        p.g_z(k, n) = (1 - p.delta[n])*(g_theta_ep[k] + m.g_ln_w(k, doc.Word(n)));
      }
      double ln_z_sum = LogSum(p.g_z.col(n));
      for (int k = 0; k < m.GTopicNum(); k++) { //normalize g_z
        p.g_z(k, n) = exp(p.g_z(k, n) - ln_z_sum);
      }
    }

  }
  return Likelihood(doc, p, m);
}
Example #25
double cMathUtil::Sigmoid(double x)
{
	return Sigmoid(x, 1, 0);
}
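The single-argument overload above delegates to a three-argument version. One plausible form, consistent with Sigmoid(x, 1, 0) reducing to the standard logistic, is sketched below; this is an assumption, not the project's actual definition.

#include <cmath>

// Assumed sketch: gamma scales the input, bias shifts it; Sigmoid(x, 1, 0) is the plain logistic.
double cMathUtil::Sigmoid(double x, double gamma, double bias)
{
	return 1.0 / (1.0 + std::exp(-gamma * x + bias));
}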
Example #26
density Smooth::transition(filling disk, filling ring) const {
  double t1=birth_1*(1.0-Sigmoid(disk,0.5,smoothing_disk))+death_1*Sigmoid(disk,0.5,smoothing_disk);
  double t2=birth_2*(1.0-Sigmoid(disk,0.5,smoothing_disk))+death_2*Sigmoid(disk,0.5,smoothing_disk);
  return Sigmoid(ring,t1,smoothing_ring)*(1.0-Sigmoid(ring,t2,smoothing_ring));
}
Example #27
void LogisticRegression::TrainModel()
{
	cout << "Train ...." << endl;
	//load data
	string line;
	ifstream fin(this->str_train_data_file_.c_str());
	if(!fin)
	{
		cerr << "Cannot read train data file!" << endl;
		exit(0);
	}

	while(getline(fin, line))
	{
		vector<double> vd_data = split2double(line, "\t");
		int label = (int)vd_data.back();
		vd_data.pop_back();
		this->vec_train_label_.push_back(label);
		this->vec_train_data_.push_back(vd_data);
	}
	fin.close();

	//data scale
	InitScale(this->vec_train_data_);
	ScaleData(this->vec_train_data_);

	//train
	size_t i_round = 0;
	while(i_round < this->i_max_round)
	{
		
		cout << "Round [" << i_round << "]....." << endl;
		vector<double> douVec_gradient(this->vec_train_data_[0].size(), 0);
		double dou_bias_gradient = 0;
		double dou_cost = 0;
		for(int i = 0; i < this->vec_train_data_.size(); i++)
		{
			
			double dou_h = Sigmoid(this->vec_train_data_[i]);
			
			//calculate cost for each data
			dou_cost += (this->vec_train_label_[i]*log(dou_h) + (1-this->vec_train_label_[i])*log(1-dou_h));

			//calculate gradient for each data
			for(int j = 0; j < douVec_gradient.size(); j++)
			{
				douVec_gradient[j] += (this->vec_train_label_[i] - dou_h)*this->vec_train_data_[i][j];

			}

			dou_bias_gradient += (this->vec_train_label_[i] - dou_h);
			
		}

		//L2 regularization
		double dou_regularization = 0;
		for(int i = 0; i < this->douVec_weights_.size(); i++)
		{
			dou_regularization += pow(this->douVec_weights_[i], 2);
		}

		//final cost 
		dou_cost = -1.0 * dou_cost / this->douVec_weights_.size() + this->dou_lambda_*dou_regularization/(2*this->douVec_weights_.size());
		cout << "Cost J = " << dou_cost << endl;
		
		//update
		string str_new_weights("new weights is [ ");
		string str_gradient("gradient is [");
		for(int k = 0; k < douVec_gradient.size(); k++)
		{
			this->douVec_weights_[k] += this->dou_step_*(douVec_gradient[k]+this->dou_lambda_*this->douVec_weights_[k])/this->vec_train_data_.size();
			str_new_weights = str_new_weights + double2string(this->douVec_weights_[k]) + " ";
			str_gradient =  str_gradient + double2string(douVec_gradient[k]) + " ";
		}
		
		this->dou_bias_ += this->dou_step_*dou_bias_gradient/this->vec_train_data_.size();
		str_new_weights = str_new_weights +  double2string(this->dou_bias_) + "]";
		str_gradient = str_gradient + double2string(dou_bias_gradient) +  "]";
		cout << str_new_weights << endl;
		cout << str_gradient << endl;

		i_round += 1;
	}


	//save model
	cout << "save model ..." << endl;
	ofstream fout("./logreg_model.txt");
	cout << this->douVec_weights_.size() << endl;
	for(int i = 0;  i < this->douVec_weights_.size(); i++)
	{
		fout << this->douVec_weights_[i] << " ";
	}
	fout << this->dou_bias_ << endl;
	fout.close();

}
Example #28
vector<double> NeuronalNet::Evaluate(const Sample &sample) {

	vector<double> inputs; // 95 Inputs // 71 inputs
	for (int i = 0; i < Parameter::subImageWidth; i++) {
		inputs.push_back(sample.gusts[i]);
		inputs.push_back(sample.windSpeeds[i]);
		// ignore direction:
		// inputs.push_back(sample.directions[i]);
	}
	for (int i = 0; i < Parameter::subImageWidth - 1; i++) {
		inputs.push_back(sample.changeDirections[i]);
	}

	vector<double> hiddenInputs; // 25 inputs

	// for inputs 1-48, combine every two into one hidden neuron
	double netInput = 0;
	for (int i = 0; i < 48; i++) {
		netInput += inputs[i] * inputWeights[i];
		if (i % 2 == 1) {
			// add bias
			// netInput += hiddenBias[(i - 2) / 3] * (-1);
			netInput += hiddenBias[(i - 1) / 2] * (-1);
			netInput = Sigmoid(netInput, Parameter::sigmoidResponse);
			hiddenInputs.push_back(netInput);
			netInput = 0;
		}
	}
	// for inputs 48-71 (23 change directions)
	netInput = 0;
	for (int i = 48; i < 71; i++) {
		netInput += inputs[i] * inputWeights[i];
	}
	netInput += hiddenBias[24] * (-1);
	netInput = Sigmoid(netInput, Parameter::sigmoidResponse);
	hiddenInputs.push_back(netInput);

	// Work With Hidden Layer:
	vector<double> output; // 1 outputs

	for (int i = 0; i < 1; i++) {
		netInput = 0;
		for (int j = 0; j < 25; j++) {
			netInput += hiddenInputs[i * 25 + j] * hiddenWeights[i * 25 + j];
		}
		netInput += outputBias[i] * (-1);

		// special sigmoid:
		
		double deltaX;
		double deltaY;
		if (netInput < 0) {
			// shift left:
			deltaX = 2.297;
			deltaY = 0;
		} else {
			deltaX = -2.297;
			deltaY = 0.5;
		}
		double x = (netInput * 2.0) + deltaX;
		netInput = Sigmoid(x, Parameter::sigmoidResponse);
		netInput = netInput * 0.5 + deltaY;
		// */
		/*
		more or less the same:

		double deltaX = 2.297;
		double x = (netInput * 2.0);
		netInput = 0.5 * Sigmoid(x + deltaX, Parameter::sigmoidResponse) + 0.5 * Sigmoid(x - deltaX, Parameter::sigmoidResponse);
		// */

		output.push_back(netInput);
	}

	return output;

	/*
	double no = output[0];
	double maybe = output[1];
	double yes = output[2];

	// cout << "No:\t" << no << endl;
	// cout << "Maybe:\t" << maybe << endl;
	// cout << "Yes:\t" << yes << endl;

	ResponseState result = no > maybe ? ResponseState::No : ResponseState::Maybe;
	if (yes > no || yes > maybe) {
		result = ResponseState::Yes;
	}
	return result; */
}