bool VariableAccessData::shouldUseDoubleFormatAccordingToVote()
{
    // We don't support this facility for arguments, yet.
    // FIXME: make this work for arguments.
    if (local().isArgument())
        return false;
        
    // If the variable is not a number prediction, then this doesn't
    // make any sense.
    if (!isFullNumberSpeculation(prediction())) {
        // FIXME: we may end up forcing a local in inlined argument position to be a double even
        // if it is sometimes not even numeric, since this never signals the fact that it doesn't
        // want doubles. https://bugs.webkit.org/show_bug.cgi?id=109511
        return false;
    }
        
    // If the variable is predicted to hold only doubles, then it's a
    // no-brainer: it should be formatted as a double.
    if (isDoubleSpeculation(prediction()))
        return true;
        
    // If the variable is known to be used as an integer, then be safe -
    // don't force it to be a double.
    if (flags() & NodeBytecodeUsesAsInt)
        return false;
        
    // If the variable has been voted to become a double, then make it a
    // double.
    if (voteRatio() >= Options::doubleVoteRatioForDoubleFormat())
        return true;
        
    return false;
}
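A minimal standalone sketch of the voting idea behind voteRatio(): each observed use of a variable votes either for double format or for plain value format, and the variable flips to double format once the double votes dominate by a configurable ratio. The counter fields, the voteDouble()/voteValue() helpers and the 0.75 threshold below are hypothetical stand-ins, not WebKit's actual bookkeeping (the real threshold comes from Options::doubleVoteRatioForDoubleFormat()).

struct DoubleFormatVote {
    double votesForDouble = 0;   // hypothetical counters, not WebKit's fields
    double votesForValue = 0;

    void voteDouble(double weight = 1) { votesForDouble += weight; }
    void voteValue(double weight = 1)  { votesForValue += weight; }

    // Fraction of the total vote weight that asked for double format.
    double voteRatio() const
    {
        double total = votesForDouble + votesForValue;
        return total > 0 ? votesForDouble / total : 0;
    }

    // Mirrors the last check in shouldUseDoubleFormatAccordingToVote(): switch to
    // double format only once the ratio clears a tunable threshold.
    bool shouldUseDoubleFormat() const { return voteRatio() >= 0.75; }
};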
/*!

  Do the filtering and prediction of the measured signal.

  \param z : Measures \f${\bf z}_k\f$ used to initialise and update the filter. The
  dimension of this vector is equal to the number of signals to filter (given by
  getNumberOfSignal()) multiplied by the size of the measure vector
  (given by getMeasureSize()).

  \exception vpException::notInitialized : If the filter is not
  initialized. To initialize the filter see initFilter().

*/
void
vpLinearKalmanFilterInstantiation::filter(vpColVector &z)
{
    if (nsignal < 1) {
        vpERROR_TRACE("Bad signal number. You need to initialize the Kalman filter") ;
        throw(vpException(vpException::notInitialized,
                          "Bad signal number")) ;
    }

    // Specific initialization of the filter that depends on the state model
    if (iter == 0) {
        Xest = 0;
        switch (model) {
        case stateConstVel_MeasurePos:
        case stateConstVelWithColoredNoise_MeasureVel:
        case stateConstAccWithColoredNoise_MeasureVel:
            for (unsigned int i=0;  i < size_measure*nsignal ;  i++ ) {
                Xest[size_state*i] = z[i] ;
            }
            prediction();
            //      init_done = true;
            break;
        case unknown:
            vpERROR_TRACE("Kalman state model is not set") ;
            throw(vpException(vpException::notInitialized, "Kalman state model is not set")) ;
            break;
        }
        iter ++;

        return;
    }
    else if (iter == 1) {
        if (model == stateConstVel_MeasurePos) {
            for (unsigned int i=0;  i < size_measure*nsignal ;  i++ ) {
                double z_prev = Xest[size_state*i]; // Previous measured position
                //	std::cout << "Measure prev: " << z_prev << std::endl;
                Xest[size_state*i]   = z[i] ;
                Xest[size_state*i+1] = (z[i] - z_prev) / dt ;
            }
            prediction();
            iter ++;

            return;
        }
    }

    filtering(z);
    prediction();

}
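A hedged usage sketch for the measurement layout described in the doc comment above: with two signals and a scalar measure, z is a vpColVector of length getNumberOfSignal() * getMeasureSize(), ordered signal by signal, and filter(z) is called once per time step (the first call seeds Xest, the second also seeds the velocity for the stateConstVel_MeasurePos model). The setStateModel() call and the measureSignal0()/measureSignal1() sources are assumptions for illustration; the real initialization entry point is initFilter(), as the doc comment notes.

vpLinearKalmanFilterInstantiation kalman;
kalman.setStateModel(vpLinearKalmanFilterInstantiation::stateConstVel_MeasurePos); // assumed setter
// ... initFilter(...) with the number of signals, noise levels and sampling time dt ...

vpColVector z(2);                    // nsignal * size_measure = 2 * 1 entries
while (acquisitionRunning()) {       // hypothetical acquisition loop
    z[0] = measureSignal0();         // hypothetical measurement sources
    z[1] = measureSignal1();
    kalman.filter(z);                // bootstraps on the first iterations, then filters + predicts
}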
int main(int argc, char* argv[]) {

	store_map();
	init_position(0);
	display();

	printf("prediction\n");
	prediction(1);// we move from one meter to the right
	display();
	printf("estimation\n");
	estimation(wall,wall);
	normalization();
	display();
	  
	printf("prediction\n");
	prediction(1);// we move from one meter to the right
	display();
	printf("estimation\n");
	estimation(wall,wall);
	normalization();
	display();
	
	printf("prediction\n");
	prediction(1);// we move from one meter to the right
	display();
	printf("estimation\n");
	estimation(wall,wall);
	normalization();
	display();
	
	printf("prediction\n");
	prediction(1);// we move from one meter to the right
	display();
	printf("estimation\n");
	estimation(wall,wall);
	normalization();
	display();
	
	printf("prediction\n");
	prediction(1);// we move from one meter to the right
	display();
	printf("estimation\n");
	estimation(wall,wall);
	normalization();
	display();
	
	return 0;

}// main
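The loop above is the standard discrete Bayes-filter cycle for 1-D grid localization: prediction() shifts/convolves the belief with the motion model, estimation() multiplies each cell by the measurement likelihood of the observed feature (here wall), and normalization() rescales the belief so it sums to one. Assuming a belief vector over map cells, one cycle is roughly

\[
\overline{bel}(x) = \sum_{x'} p(x \mid u, x')\, bel(x'), \qquad
bel(x) = \eta\, p(z \mid x)\, \overline{bel}(x),
\]

with \(u\) the commanded motion (one meter to the right), \(z\) the observation and \(\eta\) the normalizer computed by normalization().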
void NavEstimator::imuCallback(const sensor_msgs::Imu &msg)
{
    ros::Time now = ros::Time::now();
    float Ts = (float)(now.sec - old.sec) + (float)(((int32_t)now.nsec - (int32_t)old.nsec)/1e9);
    old = now;

    if(Ts > 0.5)
        ROS_WARN_STREAM("estimation time step was too big: " << Ts);
    else
    {
        alpha = exp(-lpf_a_*Ts);

        phat = alpha*phat + (1-alpha)*msg.angular_velocity.x;
        qhat = alpha*qhat + (1-alpha)*msg.angular_velocity.y;
        rhat = alpha*rhat + (1-alpha)*msg.angular_velocity.z;

        double phi, theta, psi;
        tf::Quaternion q(msg.orientation.x, msg.orientation.y, msg.orientation.z, msg.orientation.w);
        tf::Matrix3x3 m(q);
        m.getRPY(phi, theta, psi);
        phihat = phi;
        thetahat = theta;
        psihat = psi;

        prediction(Ts);

        if(gps_initialized_)
            publish();
    }
}
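The three angular-rate estimates above are a first-order exponential low-pass filter of the raw gyro measurements: with cutoff parameter \(a = \) lpf_a_ and time step \(T_s\),

\[
\alpha = e^{-a T_s}, \qquad \hat{p}_k = \alpha\, \hat{p}_{k-1} + (1-\alpha)\, p^{\mathrm{gyro}}_k,
\]

which is the discrete equivalent of a continuous first-order filter with time constant \(1/a\); roll, pitch and yaw are instead taken directly from the IMU quaternion via getRPY().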
Example #5
SEXP CatBoostPrepareEval_R(SEXP approxParam, SEXP typeParam, SEXP columnCountParam, SEXP threadCountParam) {
    SEXP result = NULL;
    R_API_BEGIN();
    SEXP dataDim = getAttrib(approxParam, R_DimSymbol);
    size_t dataRows = static_cast<size_t>(INTEGER(dataDim)[0]) / asInteger(columnCountParam);
    TVector<TVector<double>> prediction(asInteger(columnCountParam), TVector<double>(dataRows));
    for (size_t i = 0, k = 0; i < dataRows; ++i) {
        for (size_t j = 0; j < prediction.size(); ++j) {
            prediction[j][i] = static_cast<double>(REAL(approxParam)[k++]);
        }
    }

    NPar::TLocalExecutor executor;
    executor.RunAdditionalThreads(asInteger(threadCountParam) - 1);
    EPredictionType predictionType;
    CB_ENSURE(TryFromString<EPredictionType>(CHAR(asChar(typeParam)), predictionType),
              "unsupported prediction type: 'Probability', 'Class' or 'RawFormulaVal' was expected");
    prediction = PrepareEval(predictionType, prediction, &executor);

    size_t predictionSize = prediction.size() * dataRows;
    result = PROTECT(allocVector(REALSXP, predictionSize));
    for (size_t i = 0, k = 0; i < dataRows; ++i) {
        for (size_t j = 0; j < prediction.size(); ++j) {
            REAL(result)[k++] = prediction[j][i];
        }
    }
    R_API_END();
    UNPROTECT(1);
    return result;
}
Example #6
double* Perceptron::getPrediction(const Instance& instance) {
	if (!init) {
		init = true;
		numberClasses = instance.getNumberClasses();
		numberInputAttributes = instance.getNumberInputAttributes();
		weightAttributes.resize(numberClasses);
		classPrediction = new double[numberClasses];
		initWeightAttributes();
		instancesSeen = 0;
		mLearningRatio = 1.0;

		for (int i = 0; i < numberClasses; i++) {
			classPrediction[i] = 0.0;
		}
		return classPrediction;
	}

	vector<double> value(numberInputAttributes);
	for (int i = 0; i < numberInputAttributes; i++) {
		value[i] = instance.getInputAttributeValue(i);
	}
	int actualClass = (int) instance.getLabel();
	for (int i = 0; i < numberClasses; i++) {
		classPrediction[i] = prediction(actualClass, value, i);
	}
	return classPrediction;
}
/**
 * Predict class and return confidence
 */
pair<int, float> MondrianForest::predict_class_confident(Sample& sample) {
    pair<int, float> prediction (0, 0.0);
    
    /* Distance value that influences the prediction */
    /* Init confidence values */
    mondrian_confidence m_conf;

    /* Go through all trees and calculate probability */
    arma::fvec pred_prob = predict_probability(sample, m_conf);

    int pred_class = -1;  /* Predicted class of Mondrian forest */
    float tmp_value = 0.;
    for (int i = 0; i < int(pred_prob.size()); i++) {
        if (pred_prob[i] > tmp_value) {
            tmp_value = pred_prob[i];
            pred_class = i;
        }
    }
    prediction.first = pred_class;

    /* Calculate confidence */
    float confidence = confidence_prediction(pred_prob, m_conf);
    prediction.second = confidence;
    return prediction;
}
Example #8
double BackProp::measureAccuracy(Matrix& validationSet, Matrix& validationSetLabels, bool showNetwork)
{
	if(showNetwork)
	{
		std::cout << "Network... " << std::endl;
		BackPropLayer* itr = &_layers[0];
		while(itr != NULL)
		{
			std::cout << itr->toString() << std::endl;
			itr = itr->getNextLayer();
		}
	}

	double nRight = 0;
	double total = validationSet.rows();
	std::vector<double> prediction(1);
	for(size_t i = 0; i < validationSet.rows(); i++)
	{
		double actualLabel = validationSetLabels.row(i)[0];
		predict(validationSet.row(i), prediction);

		if(actualLabel == prediction[0])
			nRight += 1;
	}

	double percentRight = nRight / total;
	return percentRight;
}
fmat ModelCircle2D::ffun(fmat *current){

    fmat pNoiseSample = pNoise.sample(current->n_cols);

    fmat prediction = pNoiseSample;
    float phi = 0;

    for (unsigned int i = 0; i < current->n_cols; ++i)
    {
        phi = std::atan2(current->at(1,i), current->at(0,i));
        phi += phiStep;
        prediction(0,i) += radius * std::cos(phi);
        prediction(1,i) += radius * std::sin(phi);
    }

    return prediction;
}
Example #10
cv::Mat kalmanPredict(cv::KalmanFilter &kf, cv::Mat control) //prediction = kf.predict(control);
{
	cv::Mat prediction(6, 1, CV_32F);
	prediction = kf.predict(control);
	//prediction = kf.predict();
	// No correct() step follows here, so copy the predicted state and covariance back
	// into the *Post members; otherwise repeated predict() calls would keep starting
	// from the same stale statePost.
	kf.statePre.copyTo(kf.statePost);
	kf.errorCovPre.copyTo(kf.errorCovPost);
	return prediction;
}
    // TODO needs some love
    void run() {
        while(ros::ok()) {
            current_time = ros::Time::now();
            double dt = (current_time - last_time).toSec();
            // TODO these predict and measure variables are not something that I fully understand
            if (predict) {
                prediction(dt);
                last_time = current_time;
                predict = false;

                if (measure) {
                    update_estimated_measurement();
                    weighting();
                    measure = false;
                }

                Normalization();
                resample();
            }

            if (visualization_enabled) {
                if (publish_particles) {
                    particles_pub.publish(loc_viz.get_particle_marker(particle_state));
                }
                if (publish_robot) {
                    robot_pub.publish(
                        loc_viz.get_robot_marker(std::vector<ras::sensor_info>(), x, y, theta, visualization_use_distance));
                }
                if (publish_thickened_walls) {
                    wall_cell_pub.publish(loc_viz.get_thick_wall_grid_cells());
                }
                if (publish_walls) {
                    grid_cell_pub.publish(loc_viz.get_wall_grid_cells());
                }
                if (publish_path) {
                    point_path_pub.publish(loc_viz.add_to_path(x, y));
                }
            }

            //Publish Geometry msg of the predicted position
            geometry_msgs::Pose2D msg;
            msg.x = x;
            msg.y = y;
            msg.theta = theta;
            position_pub.publish(msg);

            //Needed if a subscription is added (kept in as a good measure)
            ros::spinOnce();
            //Delays until it is time to send another message
            rate->sleep();
            last_time = current_time;
        }
    }
Example #12
static void 
xsens_mti_message_handler(carmen_xsens_global_quat_message *xsens_mti)
{
	if (!xsens_handler.initial_state_initialized)
	{
		initialize_states(xsens_mti);
		return;
	}

	// Must check if timestamp has changed due to log file jump on log playback

	if (reinitialized_gps)//if (check_time_difference(xsens_mti->timestamp))
	{
		carmen_fused_odometry_initialize(fused_odometry_parameters);
		initialize_states(xsens_mti);
		reinitialized_gps = 0;

		return;
	}

	xsens_sensor_vector_index = (xsens_sensor_vector_index + 1) % XSENS_SENSOR_VECTOR_SIZE;
	if (xsens_sensor_vector[xsens_sensor_vector_index] != NULL)
		destroy_sensor_vector_xsens_xyz(xsens_sensor_vector[xsens_sensor_vector_index]);

	sensor_vector_xsens_xyz *sensor_vector = create_sensor_vector_xsens_mti(xsens_mti, &xsens_handler.gps_xyz);
	xsens_sensor_vector[xsens_sensor_vector_index] = sensor_vector;
	
	set_best_yaw_estimate(xsens_sensor_vector, xsens_sensor_vector_index);

	xsens_handler.orientation = sensor_vector->orientation; 	// Used by create_car_odometry_control()
	xsens_handler.ang_velocity = sensor_vector->ang_velocity; 	// Used by create_car_odometry_control()

	if (!kalman_filter)
	{
		if (xsens_handler.gps_performance_changed)
		{
			xsens_handler.gps_performance_degradation = 50.0;
			xsens_handler.gps_performance_changed = 0;
		}
		if (xsens_handler.gps_orientation_performance_changed)
		{
			xsens_handler.gps_orientation_performance_degradation = 40.0;
			xsens_handler.gps_orientation_performance_changed = 0;
		}
		prediction(sensor_vector->timestamp, fused_odometry_parameters);
		correction(measure_weight_orientation, sensor_vector, fused_odometry_parameters);
		publish_fused_odometry();
		xsens_handler.gps_performance_degradation *= 0.98; // decay factor
		xsens_handler.gps_orientation_performance_degradation *= 0.98; // decay factor
	}
	xsens_handler.last_xsens_message_timestamp = xsens_mti->timestamp;
}
Example #13
MoveParameters LinearMoveEstimator::predict() {
  MoveParameters prediction(0, 0, 0, 0, 0, 0);
  if(last_measurements.size() != 0) {
    float factor = 0;
    float weight = 1;
    for(boost::circular_buffer<MoveParameters>::iterator m = last_measurements.begin();
            m < last_measurements.end(); m++, weight += 1.0) {
      prediction += (*m) * weight;
      factor += weight;
    }
    prediction /= factor;
  }
  return prediction;
}
Example #14
int accuracy_one()
{
    //this function calculates the accuracy of method 1 (number of matching predictions)
    int count=0,i;
    float acc;
    int n1,n2;
    char fname[16];
    for(i=0; i<SIZE; i++)
    {
        sprintf(fname,"data/%d.txt",i);
        n1=prediction(fname);
        n2=digitrec(fname);
        if(n1==n2)
            count++;
    }
    return count;
}
Example #15
  double ConditionalBayesProcess::negativeCrossValidation()
  {
    // This is a highly inefficient implementation, kept for comparison purposes.
    Dataset data(mData);

    size_t n = data.getNSamples();
    double sum = 0.0;

    matrixd tempF(mMean.mFeatM);


    // We take the first element, use it for validation and then paste
    // it at the end. Thus, after every iteration, the first element
    // is different and, at the end, all the elements should have
    // rotated.
    for(size_t i = 0; i<n; ++i)
      {
	// Take the first element
	const double y = data.getSampleY(0);
	const vectord x = data.getSampleX(0);

	// Remove it for cross validation
	data.mX.erase(data.mX.begin()); 
	utils::erase(data.mY,data.mY.begin());
	utils::erase_column(mMean.mFeatM,0);

	// Compute the cross validation
	computeCholeskyCorrelation();
	precomputePrediction(); 
	ProbabilityDistribution* pd = prediction(x);
	sum += std::log(pd->pdf(y));

	//Paste it back at the end
	data.addSample(x,y);
	mMean.mFeatM.resize(mMean.mFeatM.size1(),mMean.mFeatM.size2()+1);  
	mMean.mFeatM = tempF;
      }
    std::cout << "End" << data.getNSamples();
    return -sum;   //Because we are minimizing.
  }
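What the rotation above computes is the negative leave-one-out log predictive likelihood: each sample is held out in turn, the Cholesky factorization and prediction are recomputed on the remaining data, and the log density of the held-out observation under the resulting predictive distribution is accumulated,

\[
-\sum_{i=1}^{n} \log p\!\left(y_i \mid x_i, \mathcal{D}_{\setminus i}\right),
\]

returned negated because the caller minimizes this criterion.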
Example #16
File: state4d.cpp  Project: CRAVA/crava
std::vector<FFTGrid*>
State4D::doRockPhysicsInversion(TimeLine                               & time_line,
                                const std::vector<DistributionsRock *>   rock_distributions,
                                TimeEvolution                          & timeEvolution)
{
  LogKit::WriteHeader("Start 4D rock physics inversion");
  bool debug=true; // triggers printouts
  // inverse transform posterior
  iFFT();

  // Note rockSample contains rock sample for all time steps.
  int nSim=10000; // NBNB OK 100000->10000 for speed during debug

  LogKit::LogFormatted(LogKit::Low,"\nSampling rock physics distribution...");
  std::vector<std::vector<std::vector<double> > > rockSample = timeEvolution.returnCorrelatedSample(nSim,time_line, rock_distributions);
  LogKit::LogFormatted(LogKit::Low,"done\n\n");

  int nTimeSteps = static_cast<int>(rockSample.size());
  //nSim=rockSample[0].size();
  int nParam = static_cast<int>(rockSample[0][0].size());
  int nM = 3;  // number of seismic parameters.
  int nRockProperties = nParam-nM; // number of rock parameters



  //write rockSamples to check ok
  if(debug)
  {
    NRLib::Matrix rockSamples(nSim,nParam*nTimeSteps);

    for(int i = 0;i<nSim;i++)
      for(int j=0;j < nTimeSteps;j++ )
        for(int k=0;k<nParam;k++)
        {
          int ind=k+j*nParam;
          rockSamples(i,ind)=rockSample[j][i][k];
        }

    NRLib::WriteMatrixToFile("rockSample.dat", rockSamples);
  }

  std::vector<std::vector<double> > mSamp = makeSeismicParamsFromrockSample(rockSample);

  //write seismic parameters to check ok
  if(debug)
  {
    NRLib::Matrix seisPar(nSim,6);

    for(int i = 0;i<nSim;i++)
      for(int j=0;j < 6;j++ )
        {
          seisPar(i,j)=mSamp[j][i];
        }

    NRLib::WriteMatrixToFile("seisParSample.dat", seisPar);
  }


  NRLib::Vector fullPriorMean    = timeEvolution.computePriorMeanStaticAndDynamicLastTimeStep();
  NRLib::Matrix fullPriorCov     = timeEvolution.computePriorCovStaticAndDynamicLastTimeStep();
  NRLib::Matrix fullPosteriorCov = GetFullCov();


  if(debug)
  {
    NRLib::WriteMatrixToFile("priorCov.dat", fullPriorCov );
    NRLib::WriteMatrixToFile("posteriorCov.dat", fullPosteriorCov);
    NRLib::WriteVectorToFile("priorMean.dat",fullPriorMean );
  }

  LogKit::LogFormatted(LogKit::Low,"\nMaking rock-physics lookup tables, table 1 of %d\n",nRockProperties+1);

  RockPhysicsInversion4D* rockPhysicsInv = new RockPhysicsInversion4D(fullPriorMean,fullPriorCov,fullPosteriorCov,mSamp);
  //LogKit::LogFormatted(LogKit::Low,"done\n\n");

  std::vector<FFTGrid*> prediction(nRockProperties);

  LogKit::LogFormatted(LogKit::Low,"\nDoing rock-physics predictions...");

  for(int i=0;i<nRockProperties;i++)
  {
    LogKit::LogFormatted(LogKit::Low,"\nMaking rock-physics lookup tables, table %d of %d\n",i+2,nRockProperties+1);
    std::vector<double> rSamp = getRockPropertiesFromRockSample(rockSample,i);
    rockPhysicsInv->makeNewPredictionTable(mSamp,rSamp);
    prediction[i] = rockPhysicsInv->makePredictions(mu_static_, mu_dynamic_ );
  }
  LogKit::LogFormatted(LogKit::Low,"done\n\n");

  delete rockPhysicsInv;

  return prediction;
}
Example #17
File: ltp.c  Project: Arcen/faac
static int pitch(double *sb_samples, double *x_buffer, int flen, int lag0, int lag1,
          double *predicted_samples, double *gain, int *cb_idx)
{
    int i, j, delay;
    double corr1, corr2, lag_corr;
    double p_max, energy, lag_energy;

    /*
     * Below is a figure illustrating how the lag and the
     * samples in the buffer relate to each other.
     *
     * ------------------------------------------------------------------
     * |              |               |                |                 |
     * |    slot 1    |      2        |       3        |       4         |
     * |              |               |                |                 |
     * ------------------------------------------------------------------
     *
     * lag = 0 refers to the end of slot 4 and lag = DELAY refers to the end
     * of slot 2. The start of the predicted frame is then obtained by
     * adding the length of the frame to the lag. Remember that slot 4 doesn't
     * actually exist, since it is always filled with zeros.
     *
     * The above short explanation was for long blocks. For short blocks the
     * zero lag doesn't refer to the end of slot 4 but to the start of slot
     * 4 - the frame length of a short block.
     *
     * Some extra code is then needed to handle those lag values that refer
     * to slot 4.
     */

    p_max = 0.0;
    lag_corr = lag_energy = 0.0;
    delay = lag0;


	for (i = lag0; i<lag1; i++)
	{
		energy	= 0.0;
		corr1	= 0.0;
		for (j=0; j < flen; j++)
		{
			if (j < i+BLOCK_LEN_LONG)
			{
				corr1  += sb_samples[j] * _MDCT_SCALE * x_buffer[NOK_LT_BLEN - flen/2 - i + j];
				energy += _MDCT_SCALE * x_buffer[NOK_LT_BLEN - flen/2 - i + j] * _MDCT_SCALE * x_buffer[NOK_LT_BLEN - flen/2 - i + j];
			}
		}
        if (energy != 0.0)
            corr2 = corr1 / sqrt(energy);
        else
            corr2 = 0.0;
		
        if (p_max < corr2)
        {
            p_max = corr2;
            delay = i;
            lag_corr = corr1;
            lag_energy = energy;
        }
	}		
    /* Compute the gain. */
    if(lag_energy != 0.0)
        *gain =  lag_corr / (1.010 * lag_energy);
    else
        *gain = 0.0;

    /* Quantize the gain. */
    w_quantize(gain, cb_idx);
//  printf("Delay: %d, Coeff: %f", delay, *gain);
	
    /* Get the predicted signal. */
    prediction(x_buffer, predicted_samples, gain, delay, flen);

	
    return (delay);
}
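The candidate lags are scored with a normalized correlation, and the long-term-prediction gain for the winning lag is the (slightly damped) least-squares gain: writing \(c\) for corr1 (correlation between the current frame and the lagged, scaled history buffer) and \(E\) for energy (the lagged buffer's energy),

\[
\mathrm{score}(d) = \frac{c}{\sqrt{E}}, \qquad g = \frac{c}{1.01\, E},
\]

which matches corr2 and *gain above; the 1.01 factor shrinks the gain a little before w_quantize() maps it to a codebook index.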
Example #18
LabelImage RandomForestImage::predict(const RGBDImage& image,
         cuv::ndarray<float, cuv::host_memory_space>* probabilities, const bool onGPU, bool useDepthImages) const {

    LabelImage prediction(image.getWidth(), image.getHeight());

    const LabelType numClasses = getNumClasses();

    if (treeData.size() != ensemble.size()) {
        throw std::runtime_error((boost::format("tree data size: %d, ensemble size: %d. histograms normalized?")
                % treeData.size() % ensemble.size()).str());
    }

    cuv::ndarray<float, cuv::host_memory_space> hostProbabilities(
            cuv::extents[numClasses][image.getHeight()][image.getWidth()],
            m_predictionAllocator);

    if (onGPU) {
        cuv::ndarray<float, cuv::dev_memory_space> deviceProbabilities(
                cuv::extents[numClasses][image.getHeight()][image.getWidth()],
                m_predictionAllocator);
        cudaSafeCall(cudaMemset(deviceProbabilities.ptr(), 0, static_cast<size_t>(deviceProbabilities.size() * sizeof(float))));

        {
            utils::Profile profile("classifyImagesGPU");
            for (const boost::shared_ptr<const TreeNodes>& data : treeData) {
                classifyImage(treeData.size(), deviceProbabilities, image, numClasses, data, useDepthImages);
            }
        }

        normalizeProbabilities(deviceProbabilities);

        cuv::ndarray<LabelType, cuv::dev_memory_space> output(image.getHeight(), image.getWidth(),
                m_predictionAllocator);
        determineMaxProbabilities(deviceProbabilities, output);

        hostProbabilities = deviceProbabilities;
        cuv::ndarray<LabelType, cuv::host_memory_space> outputHost(image.getHeight(), image.getWidth(),
                m_predictionAllocator);

        outputHost = output;

        {
            utils::Profile profile("setLabels");
            for (int y = 0; y < image.getHeight(); ++y) {
                for (int x = 0; x < image.getWidth(); ++x) {
                    prediction.setLabel(x, y, static_cast<LabelType>(outputHost(y, x)));
                }
            }
        }
    } else {
        utils::Profile profile("classifyImagesCPU");

        tbb::parallel_for(tbb::blocked_range<size_t>(0, image.getHeight()),
                [&](const tbb::blocked_range<size_t>& range) {
                    for(size_t y = range.begin(); y != range.end(); y++) {
                        for(int x=0; x < image.getWidth(); x++) {

                            for (LabelType label = 0; label < numClasses; label++) {
                                hostProbabilities(label, y, x) = 0.0f;
                            }

                            for (const auto& tree : ensemble) {
                                const auto& t = tree->getTree();
                                PixelInstance pixel(&image, 0, x, y);
                                const auto& hist = t->classifySoft(pixel);
                                assert(hist.size() == numClasses);
                                for(LabelType label = 0; label<hist.size(); label++) {
                                    hostProbabilities(label, y, x) += hist[label];
                                }
                            }

                            double sum = 0.0f;
                            for (LabelType label = 0; label < numClasses; label++) {
                                sum += hostProbabilities(label, y, x);
                            }
                            float bestProb = -1.0f;
                            for (LabelType label = 0; label < numClasses; label++) {
                                hostProbabilities(label, y, x) /= sum;
                                float prob = hostProbabilities(label, y, x);
                                if (prob > bestProb) {
                                    prediction.setLabel(x, y, label);
                                    bestProb = prob;
                                }
                            }
                        }
                    }
                });
    }

    if (probabilities) {
        *probabilities = hostProbabilities;
    }

    return prediction;
}
fmat ModelWPAMGPU::ffun(fmat *current)
{
    fmat prediction(current->n_rows,current->n_cols);
    fmat pNoiseSample = pNoise.sample(current->n_cols);
    fmat u = U.sample(current->n_cols);
    float* lastState_dev;
    float* F_dev;
	float* U_dev;
	float* pNoise_dev;
    int stateDimension = current->n_rows;
    int numberOfSamples = current->n_cols;
    float* newState_dev;

	//allocate memory on gpu
    cudaMalloc( &lastState_dev, (size_t) current->n_elem * sizeof(float)) ;
	cudaMalloc( &F_dev, (size_t) F.n_elem * sizeof(float)) ;
	cudaMalloc( &U_dev, (size_t) u.n_elem * sizeof(float)) ;
	cudaMalloc( &pNoise_dev, (size_t) pNoiseSample.n_elem * sizeof(float)) ;
	cudaMalloc( &newState_dev, (size_t) prediction.n_elem * sizeof(float)) ;

	//Copy particles and weights to the gpu
    cudaMemcpy(lastState_dev,current->memptr(),(size_t) current->n_elem * sizeof(float), cudaMemcpyHostToDevice);
	cudaMemcpy(F_dev,F.memptr(),(size_t) F.n_elem * sizeof(float), cudaMemcpyHostToDevice);
    //cudaMemcpy(U_dev,u.memptr(),(size_t) u.n_elem * sizeof(float), cudaMemcpyHostToDevice);
    //cudaMemcpy(pNoise_dev,pNoiseSample.memptr(),(size_t) pNoiseSample.n_elem * sizeof(float), cudaMemcpyHostToDevice);

    //pNoise
    curandGenerateNormal(gen, pNoise_dev, numberOfSamples, 0.0f, 50.0e-6f);
    curandGenerateNormal(gen, pNoise_dev+numberOfSamples, numberOfSamples, 0.0f, 50.0e-6f);
    curandGenerateNormal(gen, pNoise_dev+2*numberOfSamples, numberOfSamples, 0.0f, 50.0e-6f);
    curandGenerateNormal(gen, pNoise_dev+3*numberOfSamples, numberOfSamples, 0.0f, 10.0e-6f);
    curandGenerateNormal(gen, pNoise_dev+4*numberOfSamples, numberOfSamples, 0.0f, 10.0e-6f);
    curandGenerateNormal(gen, pNoise_dev+5*numberOfSamples, numberOfSamples, 0.0f, 10.0e-6f);
    curandGenerateNormal(gen, pNoise_dev+6*numberOfSamples, numberOfSamples, 0.0f, 100.0e-6f);
    curandGenerateNormal(gen, pNoise_dev+7*numberOfSamples, numberOfSamples, 0.0f, 100.0e-6f);
    curandGenerateNormal(gen, pNoise_dev+8*numberOfSamples, numberOfSamples, 0.0f, 100.0e-6f);

    // U
    U.batch.at(0);
    for (unsigned int i=0; i< 9 ;++i)
    {
        curandGenerateNormal(gen, U_dev+ i*numberOfSamples, numberOfSamples, U.batch.at(i)->a, U.batch.at(i)->b);
    }
    /*curandGenerateNormal(gen, oNoise_dev, numberOfSamples, 0.0f, 50.0e-6f);
    curandGenerateNormal(gen, oNoise_dev+numberOfSamples, numberOfSamples, 0.0f, 50.0e-6f);
    curandGenerateNormal(gen, oNoise_dev+2*numberOfSamples, numberOfSamples, 0.0f, 50.0e-6f);
    curandGenerateNormal(gen, oNoise_dev+3*numberOfSamples, numberOfSamples, 0.0f, 10.0e-6f);
    curandGenerateNormal(gen, oNoise_dev+4*numberOfSamples, numberOfSamples, 0.0f, 10.0e-6f);
    curandGenerateNormal(gen, oNoise_dev+5*numberOfSamples, numberOfSamples, 0.0f, 10.0e-6f);
    curandGenerateNormal(gen, oNoise_dev+6*numberOfSamples, numberOfSamples, 0.0f, 100.0e-6f);
    curandGenerateNormal(gen, oNoise_dev+7*numberOfSamples, numberOfSamples, 0.0f, 100.0e-6f);
    curandGenerateNormal(gen, oNoise_dev+8*numberOfSamples, numberOfSamples, 0.0f, 100.0e-6f);*/

    //prediction = F * current + pNoiseSample + u ;
    callFfunKernel(lastState_dev, F_dev, U_dev, pNoise_dev, stateDimension ,numberOfSamples,newState_dev);
    //printf("%s\n",cudaGetErrorString(cudaGetLastError()));

	//get estimation from gpu
    cudaMemcpy(prediction.memptr(),newState_dev,current->n_elem * sizeof(float), cudaMemcpyDeviceToHost);

	// clean up the graphics card
    cudaFree(lastState_dev);
	cudaFree(newState_dev);
	cudaFree(F_dev);
	cudaFree(U_dev);
	cudaFree(pNoise_dev);

    return prediction;
}
Example #20
int main() {
	std::mt19937 generator(time(nullptr));

	sys::ComputeSystem cs;

	cs.create(sys::ComputeSystem::_gpu);

	sys::ComputeProgram prog;

	prog.loadFromFile("resources/neoKernels.cl", cs);

	// --------------------------- Create the Sparse Coder ---------------------------

	cl::Image2D inputImage = cl::Image2D(cs.getContext(), CL_MEM_READ_WRITE, cl::ImageFormat(CL_R, CL_FLOAT), 64, 64);

	std::ifstream fromFile("resources/train-images.idx3-ubyte", std::ios::binary | std::ios::in);

	if (!fromFile.is_open()) {
		std::cerr << "Could not open train-images.idx3-ubyte!" << std::endl;

		return 1;
	}

	std::vector<neo::PredictiveHierarchy::LayerDesc> layerDescs(4);

	layerDescs[0]._size = { 64, 64 };
	layerDescs[0]._feedForwardRadius = 8;

	layerDescs[1]._size = { 48, 48 };

	layerDescs[2]._size = { 32, 32 };

	layerDescs[3]._size = { 24, 24 };

	neo::PredictiveHierarchy ph;

	ph.createRandom(cs, prog, { 64, 64 }, layerDescs, { -0.01f, 0.01f }, { 0.01f, 0.05f }, 0.1f, generator);

	float avgError = 1.0f;

	float avgErrorDecay = 0.1f;

	sf::RenderWindow window;

	window.create(sf::VideoMode(1024, 512), "MNIST Video Test");

	vis::Plot plot;

	plot._curves.push_back(vis::Curve());

	plot._curves[0]._name = "Squared Error";

	std::uniform_int_distribution<int> digitDist(0, 59999);
	std::uniform_real_distribution<float> dist01(0.0f, 1.0f);

	sf::RenderTexture rt;

	rt.create(64, 64);

	sf::Image digit0;
	sf::Texture digit0Tex;
	sf::Image digit1;
	sf::Texture digit1Tex;

	sf::Image pred;
	sf::Texture predTex;

	digit0.create(28, 28);
	digit1.create(28, 28);

	pred.create(rt.getSize().x, rt.getSize().y);

	const float boundingSize = (64 - 28) / 2;
	const float center = 32;
	const float minimum = center - boundingSize;
	const float maximum = center + boundingSize;

	float avgError2 = 1.0f;

	const float avgError2Decay = 0.01f;

	std::vector<float> prediction(64 * 64, 0.0f);

	for (int iter = 0; iter < 10000; iter++) {
		// Select digit indices
		int d0 = digitDist(generator);
		int d1 = digitDist(generator);

		// Load digits
		Image img0, img1;

		loadMNISTimage(fromFile, d0, img0);
		loadMNISTimage(fromFile, d1, img1);

		for (int x = 0; x < digit0.getSize().x; x++)
			for (int y = 0; y < digit0.getSize().y; y++) {
				int index = x + y * digit0.getSize().x;

				sf::Color c = sf::Color::White;

				c.a = img0._intensities[index];

				digit0.setPixel(x, y, c);
			}

		digit0Tex.loadFromImage(digit0);

		for (int x = 0; x < digit1.getSize().x; x++)
			for (int y = 0; y < digit1.getSize().y; y++) {
				int index = x + y * digit1.getSize().x;

				sf::Color c = sf::Color::White;

				c.a = img1._intensities[index];

				digit1.setPixel(x, y, c);
			}

		digit1Tex.loadFromImage(digit1);

		sf::Vector2f vel0(dist01(generator) * 2.0f - 1.0f, dist01(generator) * 2.0f - 1.0f);
		sf::Vector2f vel1(dist01(generator) * 2.0f - 1.0f, dist01(generator) * 2.0f - 1.0f);

		sf::Vector2f pos0(dist01(generator) * (maximum - minimum) + minimum, dist01(generator) * (maximum - minimum) + minimum);
		sf::Vector2f pos1(dist01(generator) * (maximum - minimum) + minimum, dist01(generator) * (maximum - minimum) + minimum);

		float vel0mul = dist01(generator) * 6.0f / std::max(1.0f, std::sqrt(vel0.x * vel0.x + vel0.y * vel0.y));

		vel0 *= vel0mul;

		float vel1mul = dist01(generator) * 6.0f / std::max(1.0f, std::sqrt(vel1.x * vel1.x + vel1.y * vel1.y));

		vel1 *= vel1mul;

		// Render video
		for (int f = 0; f < 20; f++) {
			sf::Event windowEvent;

			while (window.pollEvent(windowEvent)) {
				switch (windowEvent.type) {
				case sf::Event::Closed:
					return 0;
				}
			}

			pos0 += vel0;

			pos1 += vel1;

			if (pos0.x < minimum) {
				pos0.x = minimum;

				vel0.x *= -1.0f;
			}
			else if (pos0.x > maximum) {
				pos0.x = maximum;

				vel0.x *= -1.0f;
			}

			if (pos0.y < minimum) {
				pos0.y = minimum;

				vel0.y *= -1.0f;
			}
			else if (pos0.y > maximum) {
				pos0.y = maximum;

				vel0.y *= -1.0f;
			}

			if (pos1.x < minimum) {
				pos1.x = minimum;

				vel1.x *= -1.0f;
			}
			else if (pos1.x > maximum) {
				pos1.x = maximum;

				vel1.x *= -1.0f;
			}

			if (pos1.y < minimum) {
				pos1.y = minimum;

				vel1.y *= -1.0f;
			}
			else if (pos1.y > maximum) {
				pos1.y = maximum;

				vel1.y *= -1.0f;
			}

			window.clear();
			rt.clear(sf::Color::Black);

			sf::Sprite s0;

			s0.setTexture(digit0Tex);

			s0.setOrigin(28 / 2, 28 / 2);

			s0.setPosition(pos0);

			rt.draw(s0);

			sf::Sprite s1;

			s1.setTexture(digit1Tex);

			s1.setOrigin(28 / 2, 28 / 2);

			s1.setPosition(pos1);

			rt.draw(s1);

			rt.display();

			// Get input image
			sf::Image res = rt.getTexture().copyToImage();

			// Show RT
			const float scale = 4.0f;

			sf::Sprite s;

			s.setScale(scale, scale);

			s.setTexture(rt.getTexture());

			window.draw(s);

			std::vector<float> input(64 * 64);

			// Train
			if (sf::Keyboard::isKeyPressed(sf::Keyboard::T)) {
				for (int x = 0; x < res.getSize().x; x++)
					for (int y = 0; y < res.getSize().y; y++) {
						input[x + y * 64] = prediction[x + y * 64];
					}
			}
			else {
				const float predictionIncorporateRatio = 0.1f;

				for (int x = 0; x < res.getSize().x; x++)
					for (int y = 0; y < res.getSize().y; y++) {
						input[x + y * 64] = (1.0f - predictionIncorporateRatio) * res.getPixel(x, y).r / 255.0f + predictionIncorporateRatio * prediction[x + y * 64];
					}
			}

			// Error
			float error = 0.0f;

			for (int x = 0; x < res.getSize().x; x++)
				for (int y = 0; y < res.getSize().y; y++) {
					error += std::pow(res.getPixel(x, y).r / 255.0f - prediction[x + y * 64], 2);
				}

			error /= res.getSize().x * res.getSize().y;

			avgError2 = (1.0f - avgError2Decay) * avgError2 + avgError2Decay * error;

			std::cout << "Squared Error: " << avgError2 << std::endl;

			cs.getQueue().enqueueWriteImage(inputImage, CL_TRUE, { 0, 0, 0 }, { 64, 64, 1 }, 0, 0, input.data());

			ph.simStep(cs, inputImage);

			cs.getQueue().enqueueReadImage(ph.getPrediction(), CL_TRUE, { 0, 0, 0 }, { 64, 64, 1 }, 0, 0, prediction.data());

			// Show prediction
			for (int x = 0; x < rt.getSize().x; x++)
				for (int y = 0; y < rt.getSize().y; y++) {
					sf::Color c = sf::Color::White;

					c.r = c.b = c.g = std::min(1.0f, std::max(0.0f, prediction[x + y * 64])) * 255.0f;

					pred.setPixel(x, y, c);
				}

			predTex.loadFromImage(pred);

			sf::Sprite sp;

			sp.setTexture(predTex);

			sp.setScale(scale, scale);

			sp.setPosition(window.getSize().x - scale * rt.getSize().x, 0);

			window.draw(sp);

			/*sf::Image sdr;

			sdr.create(prsdr.getLayerDescs().front()._width, prsdr.getLayerDescs().front()._height);

			for (int x = 0; x < sdr.getSize().x; x++)
				for (int y = 0; y < sdr.getSize().y; y++) {
					sf::Color c = sf::Color::White;

					c.r = c.g = c.b = prsdr.getLayers().front()._sdr.getHiddenState(x, y) * 255.0f;

					sdr.setPixel(x, y, c);
				}

			sf::Texture sdrTex;

			sdrTex.loadFromImage(sdr);

			sf::Sprite sdrS;

			sdrS.setTexture(sdrTex);

			sdrS.setPosition(0.0f, window.getSize().y - sdrTex.getSize().y * scale);

			sdrS.setScale(scale, scale);

			window.draw(sdrS);*/

			window.display();
	
			if (sf::Keyboard::isKeyPressed(sf::Keyboard::Escape))
				return 0;
		}
	}

	/*sf::RenderTexture rt;

	rt.create(1024, 1024);

	sf::Texture lineGradientTexture;

	lineGradientTexture.loadFromFile("resources/lineGradient.png");

	sf::Font tickFont;

	tickFont.loadFromFile("resources/arial.ttf");

	plot.draw(rt, lineGradientTexture, tickFont, 1.0f, sf::Vector2f(0.0f, step), sf::Vector2f(0.0f, 1.0f), sf::Vector2f(128.0f, 128.0f), sf::Vector2f(500.0f, 0.1f), 2.0f, 3.0f, 1.5f, 3.0f, 20.0f, 6);

	rt.display();

	rt.getTexture().copyToImage().saveToFile("plot.png");*/

	return 0;
}
Example #21
double Perceptron::getInstanceMultiplier(int actualClass, vector<double>& value, int i) {
	double pred = prediction(actualClass, value, i);
	double actual = (i == actualClass) ? 1.0 : 0.0;
	double delta = (actual - pred) * pred * (1 - pred);
	return mLearningRatio * delta;
}
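The multiplier above is the usual gradient step for a sigmoid output unit trained with squared error: assuming prediction(...) returns \(\hat{y} = \sigma(w^{\top} v)\) and the target is \(y \in \{0, 1\}\),

\[
\frac{\partial}{\partial w}\,\tfrac{1}{2}(y - \hat{y})^2 = -(y - \hat{y})\,\hat{y}\,(1 - \hat{y})\, v,
\]

so (actual - pred) * pred * (1 - pred), scaled by mLearningRatio, is the per-class factor that the caller multiplies by each attribute value when updating the weights. The sigmoid/squared-error reading is inferred from the formula, not stated in the snippet.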
Example #22
void test(RandomForestImage& randomForest, const std::string& folderTesting,
        const std::string& folderPrediction, const bool useDepthFilling,
        const bool writeProbabilityImages) {

    auto filenames = listImageFilenames(folderTesting);
    if (filenames.empty()) {
        throw std::runtime_error(std::string("found no files in ") + folderTesting);
    }

    CURFIL_INFO("got " << filenames.size() << " files for prediction");

    CURFIL_INFO("label/color map:");
    const auto labelColorMap = randomForest.getLabelColorMap();
    for (const auto& labelColor : labelColorMap) {
        const auto color = LabelImage::decodeLabel(labelColor.first);
        CURFIL_INFO("label: " << static_cast<int>(labelColor.first) << ", color: RGB(" << color << ")");
    }

    tbb::mutex totalMutex;
    utils::Average averageAccuracy;
    utils::Average averageAccuracyWithoutVoid;

    const LabelType numClasses = randomForest.getNumClasses();
    ConfusionMatrix totalConfusionMatrix(numClasses);

    size_t i = 0;

    const bool useCIELab = randomForest.getConfiguration().isUseCIELab();
    CURFIL_INFO("CIELab: " << useCIELab);
    CURFIL_INFO("DepthFilling: " << useDepthFilling);

    bool onGPU = randomForest.getConfiguration().getAccelerationMode() == GPU_ONLY;

    size_t grainSize = 1;
    if (!onGPU) {
        grainSize = filenames.size();
    }

    bool writeImages = true;
    if (folderPrediction.empty()) {
        CURFIL_WARNING("no prediction folder given. will not write images");
        writeImages = false;
    }

    tbb::parallel_for(tbb::blocked_range<size_t>(0, filenames.size(), grainSize),
            [&](const tbb::blocked_range<size_t>& range) {
                for(size_t fileNr = range.begin(); fileNr != range.end(); fileNr++) {
                    const std::string& filename = filenames[fileNr];
                    const auto imageLabelPair = loadImagePair(filename, useCIELab, useDepthFilling);
                    const RGBDImage& testImage = imageLabelPair.getRGBDImage();
                    const LabelImage& groundTruth = imageLabelPair.getLabelImage();
                    LabelImage prediction(testImage.getWidth(), testImage.getHeight());

                    for(int y = 0; y < groundTruth.getHeight(); y++) {
                        for(int x = 0; x < groundTruth.getWidth(); x++) {
                            const LabelType label = groundTruth.getLabel(x, y);
                            if (label >= numClasses) {
                                const auto msg = (boost::format("illegal label in ground truth image '%s' at pixel (%d,%d): %d RGB(%3d,%3d,%3d) (numClasses: %d)")
                                        % filename
                                        % x % y
                                        % static_cast<int>(label)
                                        % LabelImage::decodeLabel(label)[0]
                                        % LabelImage::decodeLabel(label)[1]
                                        % LabelImage::decodeLabel(label)[2]
                                        % static_cast<int>(numClasses)
                                ).str();
                                throw std::runtime_error(msg);
                            }
                        }
                    }

                    boost::filesystem::path fn(testImage.getFilename());
                    const std::string basepath = folderPrediction + "/" + boost::filesystem::basename(fn);

                    cuv::ndarray<float, cuv::host_memory_space> probabilities;

                    prediction = randomForest.predict(testImage, &probabilities, onGPU);

#ifndef NDEBUG
            for(LabelType label = 0; label < randomForest.getNumClasses(); label++) {
                if (!randomForest.shouldIgnoreLabel(label)) {
                    continue;
                }

                // ignored classes must not be predicted as we did not sample them
                for(size_t y = 0; y < probabilities.shape(0); y++) {
                    for(size_t x = 0; x < probabilities.shape(1); x++) {
                        const float& probability = probabilities(label, y, x);
                        assert(probability == 0.0);
                    }
                }
            }
#endif

            if (writeImages && writeProbabilityImages) {
                utils::Profile profile("writeProbabilityImages");
                RGBDImage probabilityImage(testImage.getWidth(), testImage.getHeight());
                for(LabelType label = 0; label< randomForest.getNumClasses(); label++) {

                    if (randomForest.shouldIgnoreLabel(label)) {
                        continue;
                    }

                    for(int y = 0; y < probabilityImage.getHeight(); y++) {
                        for(int x = 0; x < probabilityImage.getWidth(); x++) {
                            const float& probability = probabilities(label, y, x);
                            for(int c=0; c<3; c++) {
                                probabilityImage.setColor(x, y, c, probability);
                            }
                        }
                    }
                    const std::string filename = (boost::format("%s_label_%d.png") % basepath % static_cast<int>(label)).str();
                    probabilityImage.saveColor(filename);
                }
            }

            int thisNumber;

            {
                tbb::mutex::scoped_lock total(totalMutex);
                thisNumber = i++;
            }

            if (writeImages) {
                utils::Profile profile("writeImages");
                testImage.saveColor(basepath + ".png");
                testImage.saveDepth(basepath + "_depth.png");
                groundTruth.save(basepath + "_ground_truth.png");
                prediction.save(basepath + "_prediction.png");
            }

            ConfusionMatrix confusionMatrix(numClasses);
            double accuracy = calculatePixelAccuracy(prediction, groundTruth, true, &confusionMatrix);
            double accuracyWithoutVoid = calculatePixelAccuracy(prediction, groundTruth, false);

            tbb::mutex::scoped_lock lock(totalMutex);

            CURFIL_INFO("prediction " << (thisNumber + 1) << "/" << filenames.size()
                    << " (" << testImage.getFilename() << "): pixel accuracy (without void): " << 100 * accuracy
                    << " (" << 100 * accuracyWithoutVoid << ")");

            averageAccuracy.addValue(accuracy);
            averageAccuracyWithoutVoid.addValue(accuracyWithoutVoid);

            totalConfusionMatrix += confusionMatrix;
        }

    });

    tbb::mutex::scoped_lock lock(totalMutex);
    double accuracy = averageAccuracy.getAverage();
    double accuracyWithoutVoid = averageAccuracyWithoutVoid.getAverage();

    totalConfusionMatrix.normalize();

    CURFIL_INFO(totalConfusionMatrix);

    CURFIL_INFO("pixel accuracy: " << 100 * accuracy);
    CURFIL_INFO("pixel accuracy without void: " << 100 * accuracyWithoutVoid);
}
Example #23
/**
   Starts the particle filter by getting all particles sampled.

*/
void start_particle_filter(){

   int message, message_delivered, done;
  
   //printf("\nStarte Particle Filter\n");

   // get first measurement
   get_new_measurement();

   #ifndef ONLYPC
   // send messages to sampling hw/sw message box
   for (message=1; message<=number_of_blocks; message++){

           
	   message_delivered = FALSE;
	   while (message_delivered == FALSE){
      
	      done = cyg_mbox_tryput( mb_sampling_handle[0], ( void * ) message );

	      if (done > 0){
	          //printf("\n[Sampling Switch] Nachricht %d an Sampling weitergeleitet", message);
                  message_delivered = TRUE;   
             }
	      
	  }
      }
    #else
	int i;
	int iteration = 0;
	
	while (42){

		iteration++;
		//printf("\n--------------------------------");	
		//printf("\n--  I T E R A T I O N   %d  --", iteration);	
		//printf("\n--------------------------------");		

		// (1) [SAMPLING]: sample particles
		for (i=0; i<N; i++) prediction(&particles[i]);
		//printf("\n(1) sampling");

		// (2) receive new measurement
		get_new_measurement();
		if (end_of_particle_filter==TRUE) return;
		//printf("\n(2) get new measurement");

		// (3) observation:
		for (i=0; i<N; i++) extract_observation(&particles[i], &observations[i]);
		//printf("\n(3) observation");

		// (4) [IMPORTANCE]: weight particle according to likelihood
		for (i=0; i<N; i++) particles[i].w = likelihood (&particles[i], &observations[i], ref_data);
		//printf("\n(4) importance");

		// (5) iteration done: estimate, output, etc.
		iteration_done(particles, observations, ref_data, N);
		//printf("\n(5) iteration done");

		if (iteration%1000 == 999){
		     // (6) [RESAMPLING]: resample particle according to weights
		     resampling_pc();
		     //printf("\n(6) resampling");

		     // (7) presampling
		     prepare_sampling();
		     //printf("\n(7) prepare sampling\n");
		}
	}


    #endif

   // printf("\nParticle Filter gestartet\n");

   

}
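A condensed C++ sketch of one iteration of the sampling-importance-resampling loop that the #else branch above spells out step by step: sample every particle from the motion model, weight it by the measurement likelihood, normalize the weights, then resample proportionally to them. The Particle struct and the motion/likelihood callables are hypothetical placeholders, not this project's types.

#include <random>
#include <vector>

struct Particle { double x; double w; };   // hypothetical minimal particle

// One SIR iteration over the particle set for a scalar measurement z.
template <class Motion, class Likelihood>
void sirIteration(std::vector<Particle>& p, double z,
                  Motion motion, Likelihood likelihood, std::mt19937& rng)
{
    double wsum = 0.0;
    for (Particle& pi : p) {                      // (1) sampling / prediction
        pi.x = motion(pi.x, rng);
        pi.w = likelihood(pi.x, z);               // (4) importance weighting
        wsum += pi.w;
    }

    std::vector<double> cdf(p.size());            // normalized cumulative weights
    double acc = 0.0;
    for (std::size_t i = 0; i < p.size(); ++i) {
        acc += p[i].w / wsum;
        cdf[i] = acc;
    }

    std::uniform_real_distribution<double> u01(0.0, 1.0);
    std::vector<Particle> resampled(p.size());    // (6) multinomial resampling
    for (std::size_t i = 0; i < p.size(); ++i) {
        double u = u01(rng);
        std::size_t j = 0;
        while (j + 1 < cdf.size() && cdf[j] < u)
            ++j;
        resampled[i] = p[j];
        resampled[i].w = 1.0 / p.size();
    }
    p.swap(resampled);
}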
Example #24
/**
   Sampling software thread.

    @param data: input data for the software thread
*/
void sampling_sw_thread (cyg_addrword_t data){

  //unsigned int thread_number = (unsigned int) data;
    int from, to;
    int done;
    int i;
    int new_message = FALSE;
    int message = 1;
    int message_to_deliver = FALSE;
    timing_t time_start_s = 0, time_stop_s = 0, time_sampling_sw = 0;
    int number_of_measurements_s = 0;
    
    while (42) {

      // 1) if there is no message to deliver, check for a new message
      while (message_to_deliver == FALSE && new_message == FALSE){

            message = (int) cyg_mbox_get( mb_sampling_handle[0] );
            if (message > 0 && message <= number_of_blocks){
 
	      //printf("\n[Sampling Thread No. %d] Nachricht %d erhalten", (int) thread_number, message);
	          new_message = TRUE;
		  time_start_s = gettime();
            }
      }

 #ifdef USE_CACHE  
       XCache_EnableDCache( 0xF0000000 );
#endif     

      // 2) if a new message has arrived, sample the particles
      while (new_message == TRUE){

	  new_message = FALSE;

    	  from = (message - 1) * block_size;
          to   = from + block_size - 1;
          if ((N - 1) < to)
	       to = N - 1; 
      
          // sample particles
          for (i=from; i<=to; i++){
          
                  // predict particle
                  prediction (&particles[i]);
          }

          message_to_deliver = TRUE;
      }

#ifdef USE_CACHE  
       XCache_EnableDCache( 0xF0000000 );
#endif
  
      time_stop_s = gettime();
    
      // 3) if a message should be delivered, deliver it
      while ( message_to_deliver == TRUE){

            done = cyg_mbox_tryput( mb_sampling_done_handle[0], ( void * ) message );
           if (done > 0){
	     //printf("\n[Sampling Thread No. %d] Nachricht %d geschickt", (int) thread_number, message);
              message_to_deliver = FALSE;
              time_sampling_sw = calc_timediff( time_start_s, time_stop_s );
              number_of_measurements_s++;
              //diag_printf("\nSampling SW: %d, \tmessage %d, \ttime: %d", number_of_measurements_s, message, time_sampling_sw);
           }
      }
          
   }
}
Example #25
int main(int argc,char *argv[])
{
	int n1=0,n2=0; // #anay: bad variable names. 
	int a1=0,a2=0;
	int i,chk=0;
	char fname[16];
	
	if(argc==2)
	{
		if(strcmp(argv[1],"create_tr_data")==0)
		create_training_data();

		else if(strcmp(argv[1],"break_digitwise")==0)
		data_set_divide();
		
		else if(strcmp(argv[1],"break_samplewise")==0)
		training_data_divide();
		
		else if(strcmp(argv[1],"ave_construct")==0)
		ave_construct();
		
		else
		printf("Read the instructions and write again :(");
	}
	else if(argc==3)
	{
		// getting output by method 1
		if(strcmp(argv[1],"m1")==0)
		{
			if(strcmp(argv[2],"acc")==0)
			{
				n1=accuracy_one();
				printf("accuracy for this method is %d/1500 \n",n1);
			}
			else
			{
				for(i=0;i<1499;i++)
				{
					sprintf(fname,"data/%d.txt",i);
					if(strcmp(fname,argv[2])==0)
					chk++;
				}
				if(chk!=0)
				{
				n1=prediction(argv[2]);
				printf("the predicted value by this method is %d \n",n1);
				}
				else
				printf("Such file does not exist \n");
			}
		}
		//getting output by method 2
		else if(strcmp(argv[1],"m2")==0)
		{
			printf("%s",argv[2]);
			if(strcmp(argv[2],"acc")==0)
			{
				a2=accuracy_two();
				printf("accuracy for this method is %d/1500  \n",a2);
			}
			else
			{
				for(i=0;i<1499;i++)
				{
					sprintf(fname,"data/%d.txt",i);
					if(strcmp(fname,argv[2])==0)
					chk++;
				}
				if(chk!=0)
				{
				n2=predict(argv[2]);
				printf("the predicted value by this method is %d \n",n2);
				}
				else
				printf("Such file does not exist \n");
			}
		}
		else
		{
			printf("Check the name of method you wrote!!! \n");
		}
	}
	else
	{
		printf("check the number of arguments");
	}
}
Example #26
LabelImage RandomForestImage::improveHistograms(const RGBDImage& image, const LabelImage& labelImage, const bool onGPU, bool useDepthImages) const {

    LabelImage prediction(image.getWidth(), image.getHeight());

    const LabelType numClasses = getNumClasses();

    if (treeData.size() != ensemble.size()) {
        throw std::runtime_error((boost::format("tree data size: %d, ensemble size: %d. histograms normalized?")
                % treeData.size() % ensemble.size()).str());
    }

    cuv::ndarray<float, cuv::host_memory_space> hostProbabilities(
            cuv::extents[numClasses][image.getHeight()][image.getWidth()],
            m_predictionAllocator);

    //These offsets should have been used instead of traversing to the leaf again
/*	cuv::ndarray<unsigned int, cuv::dev_memory_space> nodeOffsets(
			cuv::extents[image.getHeight()][image.getWidth()],
			m_predictionAllocator);
*/

    if (onGPU) {
        cuv::ndarray<float, cuv::dev_memory_space> deviceProbabilities(
                cuv::extents[numClasses][image.getHeight()][image.getWidth()],
                m_predictionAllocator);
        cudaSafeCall(cudaMemset(deviceProbabilities.ptr(), 0, static_cast<size_t>(deviceProbabilities.size() * sizeof(float))));

        {
            utils::Profile profile("classifyImagesGPU");
            for (const boost::shared_ptr<const TreeNodes>& data : treeData) {
                classifyImage(treeData.size(), deviceProbabilities, image, numClasses, data, useDepthImages);
                bool found_tree = false;
				// should be changed to a parallel for with a lock added
				for (size_t treeNr = 0; treeNr < ensemble.size(); treeNr++) {
					if (data->getTreeId() == ensemble[treeNr]->getId()) {
						found_tree = true;
						const boost::shared_ptr<RandomTree<PixelInstance, ImageFeatureFunction> >& tree = ensemble[treeNr]->getTree();
						//this should have been used and done before trying to classify the images, since it doesn't change
						//std::vector<size_t> leafSet;
						//tree->collectLeafNodes(leafSet);
						for (int y = 0; y < image.getHeight(); y++)
							for (int x = 0; x < image.getWidth(); x++) {
								LabelType label = labelImage.getLabel(x,y);
									if (!shouldIgnoreLabel(label)) {
										PixelInstance pixel(&image, label, x, y);
										//This should be changed. When classifying the image, the nodeoffsets should be returned and those used directly
										//instead of traversing again to the leaves. As a test, can check if the nodeoffset is the same as the one returned
										//by travertoleaf
										tree->setAllPixelsHistogram(pixel);
					                }
							}
					}
					if (found_tree)
						break;
				}
            }
        }

    }
    //should also add the CPU code!
    return prediction;
}