Code example #1
File: Committee.cpp Project: goodchong/rl_pursuit
Committee::Committee(const std::vector<Feature> &features, bool caching, const std::vector<SubClassifier> &classifiers, const Params &p):
  Classifier(features,caching),
  classifiers(classifiers),
  p(p)
{
  normalizeWeights();
}
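All of the call sites collected on this page expect normalizeWeights() to rescale a set of weights so that they sum to 1. The projects' own implementations are not reproduced here; the snippet below is only a minimal sketch of the usual in-place member-function pattern, and the class and member names (WeightedModel, weights_) are placeholders for illustration.

// Illustrative sketch only; not taken from any of the projects above.
#include <numeric>
#include <vector>

class WeightedModel {
  std::vector<double> weights_;   // hypothetical member holding the weights

public:
  // Rescale the weights in place so they sum to 1; do nothing if the sum is not positive.
  void normalizeWeights() {
    const double sum = std::accumulate(weights_.begin(), weights_.end(), 0.0);
    if (sum <= 0.0)
      return;
    for (double &w : weights_)
      w /= sum;
  }
};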
Code example #2
File: loadbalancer.cpp Project: songhtdo/vespa
void
LoadBalancer::received(uint32_t nodeIndex, bool busy) {
    if (busy) {
        NodeInfo& info = _nodeInfo[nodeIndex];

        info.weight = info.weight - 0.01;
        normalizeWeights();
    }
}
Code example #3
File: sensor_model.cpp Project: mattcl/cs225b
void SensorModel::scResample(sensor_msgs::LaserScan *scan, vector<Pose>& poses) {
	if(likelihoodField == NULL)
	  return;
	for(int i = 0; i < poses.size(); i++) {
//		ROS_INFO("Computing weights for pose[%d]", i);
		if(!computeWeight(poses[i], scan))
			return;
	}
	normalizeWeights(poses);
}
Code example #4
File: gmm.hpp Project: buotex/praktikum
 bool  
   updateGM(unsigned int index, double weight, const arma::vec & mu, const arma::mat & sigma) {
     if (arma::det(sigma) <= 0) {
       //mixture_.erase(mixture_.begin() + index);
       weights_(index) = 0.;
       normalizeWeights();
       return false;
     }
     weights_[index] = weight;
     mixture_[index] = GM(mu, sigma);
     return true;
   }
Code example #5
//TODO: is it worth caching the results as long as weights don't change?
std::vector<std::vector<float> > PhraseDictionaryMultiModel::getWeights(size_t numWeights, bool normalize) const
{
  const std::vector<float>* weights_ptr;
  std::vector<float> raw_weights;

  weights_ptr = GetTemporaryMultiModelWeightsVector();

  // HIEU - uninitialised variable.
  //checking weights passed to mosesserver; only valid for this sentence; *don't* raise exception if client weights are malformed
  if (weights_ptr == NULL || weights_ptr->size() == 0) {
    weights_ptr = &m_multimodelweights; //fall back to weights defined in config
  } else if(weights_ptr->size() != m_numModels && weights_ptr->size() != m_numModels * numWeights) {
    //TODO: can we pass error message to client if weights are malformed?
    std::cerr << "Must have either one multimodel weight per model (" << m_numModels << "), or one per weighted feature and model (" << numWeights << "*" << m_numModels << "). You have " << weights_ptr->size() << ". Reverting to weights in config";
    weights_ptr = &m_multimodelweights; //fall back to weights defined in config
  }

  //checking weights defined in config; only valid for this sentence; raise exception if config weights are malformed
  if (weights_ptr == NULL || weights_ptr->size() == 0) {
    for (size_t i=0; i < m_numModels; i++) {
      raw_weights.push_back(1.0/m_numModels); //uniform weights created online
    }
  } else if(weights_ptr->size() != m_numModels && weights_ptr->size() != m_numModels * numWeights) {
    std::stringstream strme;
    strme << "Must have either one multimodel weight per model (" << m_numModels << "), or one per weighted feature and model (" << numWeights << "*" << m_numModels << "). You have " << weights_ptr->size() << ".";
    UTIL_THROW(util::Exception, strme.str());
  } else {
    raw_weights = *weights_ptr;
  }

  std::vector<std::vector<float> > multimodelweights (numWeights);

  for (size_t i=0; i < numWeights; i++) {
    std::vector<float> weights_onefeature (m_numModels);
    if(raw_weights.size() == m_numModels) {
      weights_onefeature = raw_weights;
    } else {
      std::copy(raw_weights.begin()+i*m_numModels, raw_weights.begin()+(i+1)*m_numModels, weights_onefeature.begin());
    }
    if(normalize) {
      multimodelweights[i] = normalizeWeights(weights_onefeature);
    } else {
      multimodelweights[i] = weights_onefeature;
    }
  }

  return multimodelweights;
}
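Unlike the in-place member function sketched after code example #1, the Moses examples (#5 here, and #10 and #12 below) call a value-returning normalizeWeights that takes a weight vector and returns a normalized copy. The actual PhraseDictionaryMultiModel implementation is not shown on this page; the following is only an illustrative sketch, with the fallback to uniform weights for a non-positive total being an assumption:

// Illustrative sketch only; not the Moses implementation.
#include <vector>

std::vector<float> normalizeWeights(const std::vector<float> &raw)
{
  float total = 0.0f;
  for (float w : raw) total += w;
  std::vector<float> normalized(raw.size());
  for (size_t i = 0; i < raw.size(); ++i)
    normalized[i] = (total > 0.0f) ? raw[i] / total : 1.0f / raw.size();  // assumed uniform fallback
  return normalized;
}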
Code example #6
void PFilterSIRCUDA::update(fvec measurement) {
    float neff=0.0;
    float* measurementDev;
    cudaMalloc( &measurementDev, (size_t) measurement.n_rows * sizeof(float)) ;
    cudaMemcpy(measurementDev,measurement.memptr(),(size_t) measurement.n_rows * sizeof(float), cudaMemcpyHostToDevice);

    //fmat virtualMeasurementOfParticles;
    //fmat differences = zeros<fmat>(measurement.n_rows,particles.samples.n_cols);
    frowvec evals;

    //virtualMeasurementOfParticles = process->hfun(&particles.samples);
    measurementOnGPU = process->hfun_gpu(samplesOnGPU, particles.samples.n_cols, particles.samples.n_rows);

    // calculate differences
    //differences = virtualMeasurementOfParticles - inputMatrix;
    /*for (unsigned int i=0; i< virtualMeasurementOfParticles.n_cols; ++i)
    {
    	for(unsigned int j=0; j<virtualMeasurementOfParticles.n_rows;++j)
    	{
    		differences(j,i) = virtualMeasurementOfParticles(j,i) - measurement(j);
    	}
    }*/

    callDeviationKernel(measurementOnGPU, measurementDev, particles.samples.n_rows,
                        particles.samples.n_cols, deviationsOnGPU);

    evals = process->eval_gpu(deviationsOnGPU, particles.samples.n_cols);

    // % is the Schur product (elementwise vector multiplication)
    particles.weights = particles.weights % evals;

    // get samples from graphics card
    cudaMemcpy(particles.samples.memptr(),samplesOnGPU, particles.samples.n_elem * sizeof(float), cudaMemcpyDeviceToHost);

    normalizeWeights();

    neff=calculateNeff();

    if (neff <= nthr) {
#ifdef VERBOSE
        printf("too few particles. 1/N for all particles\n");
#endif
        particles.weights = ones<frowvec>(particles.weights.n_cols) / (float)particles.weights.n_cols;
    }
    else {
        particles = resampler->resample(&particles);
    }

    // release the device-side copy of the measurement allocated at the start of update()
    cudaFree(measurementDev);
}
Code example #7
/**
  * processing update step including resampling if possible
  */
void BFilterCUDA::update(fvec measurement) {

    float neff=0.0;
    fmat virtualMeasurementOfParticles;
    fmat differences = zeros<fmat>(particles.samples.n_rows,particles.samples.n_cols);
    fvec evals;

    // generate simulated measurements
    virtualMeasurementOfParticles = process->hfun(&particles.samples);

    // calculate differences
    //differences = virtualMeasurementOfParticles - inputMatrix;
    for (unsigned int i=0; i< virtualMeasurementOfParticles.n_cols; ++i)
    {
        for(unsigned int j=0; j<virtualMeasurementOfParticles.n_rows; ++j)
        {
            differences(j,i) = virtualMeasurementOfParticles(j,i) - measurement(j);
        }
    }

    // evaluation of particles
    evals = process->eval(&differences);
    // calculating new particle weights
    particles.weights = particles.weights * evals;

    normalizeWeights();

    neff=calculateNeff();
    if (neff <= nthr) {
        // Neff is too low so all particles are weighted with standard values
#ifdef VERBOSE
        printf("too few particles. 1/N for all particles\n");
#endif
        standardWeighting();
    }
    else {
        // Neff is high enough to resample particles
        particles = resampler->resample(&particles);
    }
}
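Both particle-filter update() methods above follow normalizeWeights() with a call to calculateNeff() and compare the result against the threshold nthr. calculateNeff itself is not shown in these excerpts; the quantity it conventionally estimates is the effective sample size Neff = 1 / sum(w_i^2), which equals the number of particles for uniform weights and drops toward 1 as the weight mass concentrates on a few particles. A minimal Armadillo sketch, assuming the weights have already been normalized:

// Illustrative sketch; not the project's calculateNeff implementation.
#include <armadillo>

float calculateNeff(const arma::frowvec &weights) {
    // Neff = 1 / sum(w_i^2) for normalized weights.
    return 1.0f / arma::accu(arma::square(weights));
}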
Code example #8
/**
  * processing prediction step including a temporary estimation
  */
void BFilterCUDA::predict() {
    // step forward
    particles.samples = process->ffun(&particles.samples);

    normalizeWeights();
}
Code example #9
void PFilterSIRCUDA::predict() {

    samplesOnGPU = process->ffun_gpu(&particles.samples);

    normalizeWeights();
}
Code example #10
vector<float> PhraseDictionaryMultiModelCounts::MinimizePerplexity(vector<pair<string, string> > &phrase_pair_vector)
{

  const StaticData &staticData = StaticData::Instance();
  const string& factorDelimiter = staticData.GetFactorDelimiter();

  map<pair<string, string>, size_t> phrase_pair_map;

  for ( vector<pair<string, string> >::const_iterator iter = phrase_pair_vector.begin(); iter != phrase_pair_vector.end(); ++iter ) {
    phrase_pair_map[*iter] += 1;
  }

  vector<multiModelCountsStatisticsOptimization*> optimizerStats;

  for ( map<pair<string, string>, size_t>::iterator iter = phrase_pair_map.begin(); iter != phrase_pair_map.end(); ++iter ) {

    pair<string, string> phrase_pair = iter->first;
    string source_string = phrase_pair.first;
    string target_string = phrase_pair.second;

    vector<float> fs(m_numModels);
    map<string,multiModelCountsStatistics*>* allStats = new(map<string,multiModelCountsStatistics*>);

    Phrase sourcePhrase(0);
    sourcePhrase.CreateFromString(Input, m_input, source_string, factorDelimiter, NULL);

    CollectSufficientStatistics(sourcePhrase, fs, allStats); //optimization potential: only call this once per source phrase

    //phrase pair not found; leave cache empty
    if (allStats->find(target_string) == allStats->end()) {
      RemoveAllInMap(*allStats);
      delete allStats;
      continue;
    }

    multiModelCountsStatisticsOptimization * targetStatistics = new multiModelCountsStatisticsOptimization();
    targetStatistics->targetPhrase = new TargetPhrase(*(*allStats)[target_string]->targetPhrase);
    targetStatistics->fs = fs;
    targetStatistics->fst = (*allStats)[target_string]->fst;
    targetStatistics->ft = (*allStats)[target_string]->ft;
    targetStatistics->f = iter->second;

    try {
      pair<vector< set<size_t> >, vector< set<size_t> > > alignment = GetAlignmentsForLexWeights(sourcePhrase, static_cast<const Phrase&>(*targetStatistics->targetPhrase), targetStatistics->targetPhrase->GetAlignTerm());
      targetStatistics->lexCachee2f = CacheLexicalStatistics(static_cast<const Phrase&>(*targetStatistics->targetPhrase), sourcePhrase, alignment.second, m_lexTable_e2f, false );
      targetStatistics->lexCachef2e = CacheLexicalStatistics(sourcePhrase, static_cast<const Phrase&>(*targetStatistics->targetPhrase), alignment.first, m_lexTable_f2e, true );

      optimizerStats.push_back(targetStatistics);
    } catch (AlignmentException& e) {}

    RemoveAllInMap(*allStats);
    delete allStats;
  }

  Sentence sentence;
  CleanUpAfterSentenceProcessing(sentence); // free memory used by compact phrase tables

  vector<float> ret (m_numModels*4);
  for (size_t iFeature=0; iFeature < 4; iFeature++) {

    CrossEntropyCounts * ObjectiveFunction = new CrossEntropyCounts(optimizerStats, this, iFeature);

    vector<float> weight_vector = Optimize(ObjectiveFunction, m_numModels);

    if (m_mode == "interpolate") {
      weight_vector = normalizeWeights(weight_vector);
    } else if (m_mode == "instance_weighting") {
      float first_value = weight_vector[0];
      for (size_t i=0; i < m_numModels; i++) {
        weight_vector[i] = weight_vector[i]/first_value;
      }
    }
    cerr << "Weight vector for feature " << iFeature << ": ";
    for (size_t i=0; i < m_numModels; i++) {
      ret[(iFeature*m_numModels)+i] = weight_vector[i];
      cerr << weight_vector[i] << " ";
    }
    cerr << endl;
    delete ObjectiveFunction;
  }

  RemoveAllInColl(optimizerStats);
  return ret;

}
Code example #11
File: boost.cpp Project: ArkaJU/opencv
    void updateWeightsAndTrim( int treeidx, vector<int>& sidx )
    {
        int i, n = (int)w->sidx.size();
        int nvars = (int)varIdx.size();
        double sumw = 0., C = 1.;
        cv::AutoBuffer<double> buf(n + nvars);
        double* result = buf.data();
        float* sbuf = (float*)(result + n);
        Mat sample(1, nvars, CV_32F, sbuf);
        int predictFlags = bparams.boostType == Boost::DISCRETE ? (PREDICT_MAX_VOTE | RAW_OUTPUT) : PREDICT_SUM;
        predictFlags |= COMPRESSED_INPUT;

        for( i = 0; i < n; i++ )
        {
            w->data->getSample(varIdx, w->sidx[i], sbuf );
            result[i] = predictTrees(Range(treeidx, treeidx+1), sample, predictFlags);
        }

        // now update weights and other parameters for each type of boosting
        if( bparams.boostType == Boost::DISCRETE )
        {
            // Discrete AdaBoost:
            //   weak_eval[i] (=f(x_i)) is in {-1,1}
            //   err = sum(w_i*(f(x_i) != y_i))/sum(w_i)
            //   C = log((1-err)/err)
            //   w_i *= exp(C*(f(x_i) != y_i))
            double err = 0.;

            for( i = 0; i < n; i++ )
            {
                int si = w->sidx[i];
                double wval = w->sample_weights[si];
                sumw += wval;
                err += wval*(result[i] != w->cat_responses[si]);
            }

            if( sumw != 0 )
                err /= sumw;
            C = -log_ratio( err );
            double scale = std::exp(C);

            sumw = 0;
            for( i = 0; i < n; i++ )
            {
                int si = w->sidx[i];
                double wval = w->sample_weights[si];
                if( result[i] != w->cat_responses[si] )
                    wval *= scale;
                sumw += wval;
                w->sample_weights[si] = wval;
            }

            scaleTree(roots[treeidx], C);
        }
        else if( bparams.boostType == Boost::REAL || bparams.boostType == Boost::GENTLE )
        {
            // Real AdaBoost:
            //   weak_eval[i] = f(x_i) = 0.5*log(p(x_i)/(1-p(x_i))), p(x_i)=P(y=1|x_i)
            //   w_i *= exp(-y_i*f(x_i))

            // Gentle AdaBoost:
            //   weak_eval[i] = f(x_i) in [-1,1]
            //   w_i *= exp(-y_i*f(x_i))
            for( i = 0; i < n; i++ )
            {
                int si = w->sidx[i];
                CV_Assert( std::abs(w->ord_responses[si]) == 1 );
                double wval = w->sample_weights[si]*std::exp(-result[i]*w->ord_responses[si]);
                sumw += wval;
                w->sample_weights[si] = wval;
            }
        }
        else if( bparams.boostType == Boost::LOGIT )
        {
            // LogitBoost:
            //   weak_eval[i] = f(x_i) in [-z_max,z_max]
            //   sum_response = F(x_i).
            //   F(x_i) += 0.5*f(x_i)
            //   p(x_i) = exp(F(x_i))/(exp(F(x_i)) + exp(-F(x_i))) = 1/(1+exp(-2*F(x_i)))
            //   reuse weak_eval: weak_eval[i] <- p(x_i)
            //   w_i = p(x_i)*(1 - p(x_i))
            //   z_i = ((y_i+1)/2 - p(x_i))/(p(x_i)*(1 - p(x_i)))
            //   store z_i to the data->data_root as the new target responses
            const double lb_weight_thresh = FLT_EPSILON;
            const double lb_z_max = 10.;

            for( i = 0; i < n; i++ )
            {
                int si = w->sidx[i];
                sumResult[i] += 0.5*result[i];
                double p = 1./(1 + std::exp(-2*sumResult[i]));
                double wval = std::max( p*(1 - p), lb_weight_thresh ), z;
                w->sample_weights[si] = wval;
                sumw += wval;
                if( w->ord_responses[si] > 0 )
                {
                    z = 1./p;
                    w->ord_responses[si] = std::min(z, lb_z_max);
                }
                else
                {
                    z = 1./(1-p);
                    w->ord_responses[si] = -std::min(z, lb_z_max);
                }
            }
        }
        else
            CV_Error(CV_StsNotImplemented, "Unknown boosting type");

        /*if( bparams.boostType != Boost::LOGIT )
        {
            double err = 0;
            for( i = 0; i < n; i++ )
            {
                sumResult[i] += result[i]*C;
                if( bparams.boostType != Boost::DISCRETE )
                    err += sumResult[i]*w->ord_responses[w->sidx[i]] < 0;
                else
                    err += sumResult[i]*w->cat_responses[w->sidx[i]] < 0;
            }
            printf("%d trees. C=%.2f, training error=%.1f%%, working set size=%d (out of %d)\n", (int)roots.size(), C, err*100./n, (int)sidx.size(), n);
        }*/

        // renormalize weights
        if( sumw > FLT_EPSILON )
            normalizeWeights();

        if( bparams.weightTrimRate <= 0. || bparams.weightTrimRate >= 1. )
            return;

        for( i = 0; i < n; i++ )
            result[i] = w->sample_weights[w->sidx[i]];
        std::sort(result, result + n);

        // as weight trimming occurs immediately after updating the weights,
        // where they are renormalized, we assume that the weight sum = 1.
        sumw = 1. - bparams.weightTrimRate;

        for( i = 0; i < n; i++ )
        {
            double wval = result[i];
            if( sumw <= 0 )
                break;
            sumw -= wval;
        }

        double threshold = i < n ? result[i] : DBL_MAX;
        sidx.clear();

        for( i = 0; i < n; i++ )
        {
            int si = w->sidx[i];
            if( w->sample_weights[si] >= threshold )
                sidx.push_back(si);
        }
    }
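The trimming step at the end of updateWeightsAndTrim relies on the weights having just been renormalized to sum to 1: it scans the sorted weights from smallest to largest, spends a budget of (1 - weightTrimRate) of the total weight mass, and keeps only the samples whose weight is at or above the resulting threshold. The following self-contained toy sketch (not OpenCV code) reproduces that threshold selection on four normalized weights:

#include <algorithm>
#include <cstdio>
#include <limits>
#include <vector>

int main() {
    // Toy normalized sample weights and a 90% trim rate, for illustration only.
    std::vector<double> w = {0.4, 0.3, 0.2, 0.1};
    std::sort(w.begin(), w.end());                 // ascending: 0.1 0.2 0.3 0.4
    const double weightTrimRate = 0.9;
    double budget = 1.0 - weightTrimRate;          // weight mass we may discard
    size_t i = 0;
    for (; i < w.size(); ++i) {
        if (budget <= 0)
            break;
        budget -= w[i];
    }
    const double threshold =
        i < w.size() ? w[i] : std::numeric_limits<double>::max();
    // threshold == 0.2 here: only the lightest sample (weight 0.1) is trimmed,
    // and the kept samples still carry 90% of the total weight.
    std::printf("trim threshold = %g\n", threshold);
    return 0;
}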
Code example #12
vector<float> PhraseDictionaryMultiModel::MinimizePerplexity(vector<pair<string, string> > &phrase_pair_vector)
{

  map<pair<string, string>, size_t> phrase_pair_map;

  for ( vector<pair<string, string> >::const_iterator iter = phrase_pair_vector.begin(); iter != phrase_pair_vector.end(); ++iter ) {
    phrase_pair_map[*iter] += 1;
  }

  vector<multiModelStatisticsOptimization*> optimizerStats;

  for ( map<pair<string, string>, size_t>::iterator iter = phrase_pair_map.begin(); iter != phrase_pair_map.end(); ++iter ) {

    pair<string, string> phrase_pair = iter->first;
    string source_string = phrase_pair.first;
    string target_string = phrase_pair.second;

    vector<float> fs(m_numModels);
    map<string,multiModelStatistics*>* allStats = new(map<string,multiModelStatistics*>);

    Phrase sourcePhrase(0);
    sourcePhrase.CreateFromString(Input, m_input, source_string, NULL);

    CollectSufficientStatistics(sourcePhrase, allStats); //optimization potential: only call this once per source phrase

    //phrase pair not found; leave cache empty
    if (allStats->find(target_string) == allStats->end()) {
      RemoveAllInMap(*allStats);
      delete allStats;
      continue;
    }

    multiModelStatisticsOptimization* targetStatistics = new multiModelStatisticsOptimization();
    targetStatistics->targetPhrase = new TargetPhrase(*(*allStats)[target_string]->targetPhrase);
    targetStatistics->p = (*allStats)[target_string]->p;
    targetStatistics->f = iter->second;
    optimizerStats.push_back(targetStatistics);

    RemoveAllInMap(*allStats);
    delete allStats;
  }

  Sentence sentence;
  CleanUpAfterSentenceProcessing(sentence); // free memory used by compact phrase tables

  size_t numWeights = m_numScoreComponents;

  vector<float> ret (m_numModels*numWeights);
  for (size_t iFeature=0; iFeature < numWeights; iFeature++) {

    CrossEntropy * ObjectiveFunction = new CrossEntropy(optimizerStats, this, iFeature);

    vector<float> weight_vector = Optimize(ObjectiveFunction, m_numModels);

    if (m_mode == "interpolate") {
      weight_vector = normalizeWeights(weight_vector);
    }

    cerr << "Weight vector for feature " << iFeature << ": ";
    for (size_t i=0; i < m_numModels; i++) {
      ret[(iFeature*m_numModels)+i] = weight_vector[i];
      cerr << weight_vector[i] << " ";
    }
    cerr << endl;
    delete ObjectiveFunction;
  }

  RemoveAllInColl(optimizerStats);
  return ret;

}