int main() {
	FileHeap file_heap("DATA_1.dat");

	FilePointer *small_pointer = file_heap.alloc(16);
	FilePointer *big_pointer = file_heap.alloc(16 * 4);

	char* small_data = random_generator(10);
	file_heap.setValue(small_pointer, small_data, 10);

	char* small_data_retrieved = file_heap.getValue(small_pointer);
	cout << small_data_retrieved << endl;

	char* big_data = random_generator(50);
	file_heap.setValue(big_pointer, big_data, 50);

	char* big_data_retrieved = file_heap.getValue(big_pointer);
	cout << big_data_retrieved << endl;

	delete [] small_data;
	delete [] big_data;

	file_heap.free(small_pointer);

	delete small_pointer;
	delete big_pointer;

	return 0;
}
template<typename Kernel, typename Scorer>
typename Kernel::Model MaxConsensus
(
  const Kernel &kernel,
  const Scorer &scorer,
  std::vector<uint32_t> *best_inliers = nullptr,
  uint32_t max_iteration = 1024
)
{
  const uint32_t min_samples = Kernel::MINIMUM_SAMPLES;
  const uint32_t total_samples = kernel.NumSamples();

  size_t best_num_inliers = 0;
  typename Kernel::Model best_model;

  // Test if we have sufficient points for the kernel.
  if (total_samples < min_samples) {
    if (best_inliers) {
      best_inliers->resize(0);
    }
    return best_model;
  }

  // In this robust estimator, the scorer always works on all the data points
  // at once. So precompute the list ahead of time.
  std::vector<uint32_t> all_samples(total_samples);
  std::iota(all_samples.begin(), all_samples.end(), 0);

  // Random number generator configuration
  std::mt19937 random_generator(std::mt19937::default_seed);

  std::vector<uint32_t> sample;
  for (uint32_t iteration = 0; iteration < max_iteration; ++iteration) {
    UniformSample(min_samples, random_generator, &all_samples, &sample);

    std::vector<typename Kernel::Model> models;
    kernel.Fit(sample, &models);

    // Compute costs for each fit.
    for (const auto& model_it : models) {
      std::vector<uint32_t> inliers;
      scorer.Score(kernel, model_it, all_samples, &inliers);

      if (best_num_inliers < inliers.size()) {
        best_num_inliers = inliers.size();
        best_model = model_it;
        if (best_inliers) {
          best_inliers->swap(inliers);
        }
      }
    }
  }
  return best_model;
}
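MaxConsensus is templated on Kernel and Scorer and only assumes the members it actually calls: Kernel::Model, Kernel::MINIMUM_SAMPLES, NumSamples(), Fit(), and Scorer::Score(). Below is a minimal illustrative sketch of a 2D line-fitting kernel and a threshold scorer that satisfy this contract; LineModel, LineKernel, and ThresholdScorer are hypothetical names, not types from the original library, and UniformSample is still assumed to be provided by the surrounding code.

#include <cmath>
#include <cstdint>
#include <vector>

// Hypothetical model: a 2D line y = a*x + b.
struct LineModel { double a = 0.0, b = 0.0; };

// Hypothetical kernel exposing exactly what MaxConsensus uses.
struct LineKernel {
  using Model = LineModel;
  static constexpr uint32_t MINIMUM_SAMPLES = 2;

  std::vector<double> xs, ys;  // the data points

  uint32_t NumSamples() const { return static_cast<uint32_t>(xs.size()); }

  // Fit the exact line through the two sampled indices (skip degenerate pairs).
  void Fit(const std::vector<uint32_t> &sample, std::vector<Model> *models) const {
    const uint32_t i = sample[0], j = sample[1];
    if (xs[i] == xs[j]) return;
    Model m;
    m.a = (ys[j] - ys[i]) / (xs[j] - xs[i]);
    m.b = ys[i] - m.a * xs[i];
    models->push_back(m);
  }
};

// Hypothetical scorer: an index is an inlier if its vertical residual is small.
struct ThresholdScorer {
  double threshold = 0.1;

  void Score(const LineKernel &kernel, const LineModel &model,
             const std::vector<uint32_t> &samples,
             std::vector<uint32_t> *inliers) const {
    inliers->clear();
    for (const uint32_t idx : samples) {
      const double residual =
          std::abs(kernel.ys[idx] - (model.a * kernel.xs[idx] + model.b));
      if (residual < threshold) inliers->push_back(idx);
    }
  }
};

With such types in place, a call like MaxConsensus(kernel, scorer, &inliers) deduces Kernel and Scorer and returns the model with the largest consensus set.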
Example #3
/**Function********************************************************************

  Synopsis [Returns the DD to the best position encountered during
  sifting if there was improvement.]

  Description [Otherwise, "tosses a coin" to decide whether to keep
  the current configuration or return the DD to the original
  one. Returns 1 in case of success; 0 otherwise.]

  SideEffects [None]

  SeeAlso     []

******************************************************************************/
static int
siftBackwardProb(
  DdManager * table,
  Move * moves,
  int  size,
  double  temp)
{
    Move   *move;
    int    res;
    int    best_size = size;
    double coin, threshold;

    /* Look for best size during the last sifting */
    for (move = moves; move != NULL; move = move->next) {
	if (move->size < best_size) {
	    best_size = move->size;
	}
    }
    
    /* If best_size equals size, the last sifting did not produce any
    ** improvement. We now toss a coin to decide whether to retain
    ** this change or not.
    */
    if (best_size == size) {
	coin = random_generator();
#ifdef DD_STATS
	tosses++;
#endif
	threshold = exp(-((double)(table->keys - table->isolated - size))/temp);
	if (coin < threshold) {
#ifdef DD_STATS
	    acceptances++;
#endif
	    return(1);
	}
    }

    /* Either there was improvement, or we have decided not to
    ** accept the uphill move. Go to best position.
    */
    res = table->keys - table->isolated;
    for (move = moves; move != NULL; move = move->next) {
	if (res == best_size) return(1);
	res = cuddSwapInPlace(table,(int)move->x,(int)move->y);
	if (!res) return(0);
    }

    return(1);

} /* end of siftBackwardProb */
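Written out, the coin toss above is a Metropolis-style acceptance test: when the last sifting pass found no smaller size, the (possibly larger) current configuration is kept with probability

P(\text{accept}) = \min\!\Bigl(1,\; \exp\!\bigl(-\tfrac{\Delta}{\text{temp}}\bigr)\Bigr),
\qquad
\Delta = (\texttt{table->keys} - \texttt{table->isolated}) - \texttt{size},

where Delta is the growth of the DD relative to the size passed in; coin is uniform in [0,1), so a threshold above 1 simply always accepts.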
#include <chrono>
#include <cstdint>
#include <random>

unsigned long long compute_pi_by_random(unsigned long long number_of_tosses){
    unsigned int seed = std::chrono::system_clock::now().time_since_epoch().count();
    std::mt19937 random_generator(seed);
    uint32_t random_generator_size = random_generator.max() - random_generator.min();
    uint32_t random_generator_min = random_generator.min();

    unsigned long long number_in_circle = 0;
    for( unsigned long long toss = 0; toss < number_of_tosses; toss++ ){
        /* 2 tosses for the x and y values in the square [-1, 1] x [-1, 1],
         * then compute length^2 of (0, 0) -> (x, y) */
        uint32_t random1 = random_generator();
        uint32_t random2 = random_generator();
        /* map the raw generator output onto [-1, 1] */
        double x = 2.0 * (double)(random1 - random_generator_min) / random_generator_size - 1.0;
        double y = 2.0 * (double)(random2 - random_generator_min) / random_generator_size - 1.0;
        double length_square = x*x + y*y;

        if( length_square <= 1 * 1 ){
            /* in circle at (0, 0) and radius 1 */
            number_in_circle++;
        }
    }
    return number_in_circle;
}
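compute_pi_by_random returns only the raw in-circle count, so the caller still scales by 4 over the number of tosses (the area ratio of the unit circle to the [-1, 1]^2 square is pi/4). A minimal usage sketch; the main below is not part of the original snippet:

#include <iostream>

int main() {
    const unsigned long long tosses = 10000000ULL;
    const unsigned long long in_circle = compute_pi_by_random(tosses);
    // pi/4 of the square's area lies inside the unit circle.
    const double pi_estimate = 4.0 * static_cast<double>(in_circle)
                                   / static_cast<double>(tosses);
    std::cout << "pi ~= " << pi_estimate << std::endl;
    return 0;
}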
RenderThread::RenderThread(boost::shared_ptr<Scene> _scene_ptr, boost::shared_ptr<Settings> _settings_ptr, boost::shared_ptr<Image> _image_ptr, boost::shared_ptr<TaskStack> _task_stack_ptr, boost::shared_ptr<IntegratorInterface> _integrator)
{
  m_scene_ptr = _scene_ptr;
  m_settings_ptr = _settings_ptr;
  m_image_ptr = _image_ptr;
  m_task_stack_ptr = _task_stack_ptr;
  m_integrator = _integrator;

  m_integrator->limitDepth(m_settings_ptr->depth);
  m_integrator->absorption(m_settings_ptr->absorption);

  //Construct uniform sampler to avoid scene mutation when rendering across multiple threads
  boost::shared_ptr<RandomGenerator> random_generator(new RandomGenerator());
  m_random_generator = random_generator;
}
Example #6
/* random polyhedron */
ap_abstract0_t* random_poly(ap_manager_t* man,int dim)
{
  int i;
  ap_abstract0_t* p;
  ap_interval_t** t = ap_interval_array_alloc(dim);
  ap_generator0_array_t ar = ap_generator0_array_make(dim);
  for (i=0;i<dim;i++)
    random_interval(t[i]);
  for (i=0;i<dim;i++)
    ar.p[i] = random_generator(dim,AP_GEN_RAY);
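  /* NOTE: intdim is not declared in this snippet; it is presumably a flag
     defined elsewhere in the original test harness. */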
  if (intdim)
    p = ap_abstract0_of_box(man,dim/2,dim-dim/2,(ap_interval_t**)t);
  else
    p = ap_abstract0_of_box(man,0,dim,(ap_interval_t**)t);
    
  ap_abstract0_add_ray_array(man,true,p,&ar);
  ap_generator0_array_clear(&ar);
  ap_interval_array_free(t,dim);
  return p;
}
Example #7
void test_add_ray(void)
{
  printf("\nadd rays\n");
  LOOP {
    size_t i, dim = 4, nb = 4;
    ap_abstract0_t* pka,*pkr, *ppla,*pplr;
    ap_generator0_array_t ar = ap_generator0_array_make(nb);
    pka = random_poly(pk,dim);
    ppla = convert(ppl,pka);
    for (i=0;i<nb;i++)
      ar.p[i] = random_generator(dim,(rand()%100>=80)?AP_GEN_LINE:AP_GEN_RAY);
    pkr = ap_abstract0_add_ray_array(pk,false,pka,&ar);
    pplr = ap_abstract0_add_ray_array(ppl,false,ppla,&ar);
    RESULT('*');
    if (!is_eq(pkr,pplr)) {
      ERROR("different results");
      ap_generator0_array_fprint(stderr,&ar,NULL);
      print_poly("pka",pka); print_poly("pkr",pkr); print_poly("pplr",pplr);
    }
    ap_abstract0_free(pk,pka); ap_abstract0_free(ppl,ppla);
    ap_abstract0_free(pk,pkr); ap_abstract0_free(ppl,pplr);
    ap_generator0_array_clear(&ar);
  } ENDLOOP;
}
Example #8
int main (int argc, char** argv) {
	FloorPlanner fp;

	samples_data_type temp_samples;
	samples_data_type power_samples;

	double avg_power, avg_temp;
	double std_dev_power, std_dev_temp;
	double cur_power_dev, cur_temp_dev;

	double cov;

	double corr;
	double avg_corr;
	int count_corr;

	// construct a trivial random generator engine from a time-based seed:
	unsigned seed = std::chrono::system_clock::now().time_since_epoch().count();
	std::default_random_engine random_generator(seed);

	std::cout << std::endl;
	std::cout << "Thermal Side-Channel Leakage Verification: Determine Entropy and Correlation of Power and Thermal Maps" << std::endl;
	std::cout << "------------------------------------------------------------------------------------------------------" << std::endl;
	std::cout << "WARNING: File handling implicitly assumes that the dimensions of power and thermal maps are all the same, both within HotSpot and Corblivar; parsing and calculation will most likely fail if there are dimension mismatches!" << std:: endl;
	std::cout << std::endl;

	// parse program parameter, config file, and further files
	IO::parseParametersFiles(fp, argc, argv);
	// parse blocks
	IO::parseBlocks(fp);
	// parse nets
	IO::parseNets(fp);

	// generate DAG (directed acyclic graph) for SL-STA (system-level static timing analysis)
	fp.initTimingPowerAnalyser();

	// init Corblivar core
	CorblivarCore corb = CorblivarCore(fp.getLayers(), fp.getBlocks().size());

	// parse alignment request
	IO::parseAlignmentRequests(fp, corb.editAlignments());

	// init thermal analyzer, only reasonable after parsing config file
	fp.initThermalAnalyzer();

	// init routing-utilization analyzer
	fp.initRoutingUtilAnalyzer();

	// no solution file found; error
	if (!fp.inputSolutionFileOpen()) {
		std::cout << "Corblivar> ";
		std::cout << "ERROR: Solution file required for call of " << argv[0] << std::endl << std::endl;
		exit(1);
	}

	// required solution file found; parse from file, and generate layout and all data such as power and thermal maps
	//
	// read from file
	IO::parseCorblivarFile(fp, corb);

	// assume read in data as currently best solution
	corb.storeBestCBLs();

	// overall cost is not determined; cost cannot be determined since no
	// normalization during SA search was performed
	//
	// generates also all required files
	fp.finalize(corb, false);
	std::cout << std::endl;

	// allocate vectors
	for (int layer = 0; layer < fp.getLayers(); layer++) {

		power_samples.emplace_back(samples_data_layer_type());
		temp_samples.emplace_back(samples_data_layer_type());
	}

	// generate power data and gather related HotSpot simulation temperature data
	//
	for (unsigned sampling_iter = 0; sampling_iter < SAMPLING_ITERATIONS; sampling_iter++) {

		std::cout << std::endl;
		std::cout << "Sampling iteration: " << (sampling_iter + 1) << "/" << SAMPLING_ITERATIONS << std::endl;
		std::cout << "------------------------------" << std::endl;

		// first, randomly vary power densities in blocks
		//
		for (Block const& b : fp.getBlocks()) {

			// restore original value, used as mean for Gaussian distribution of power densities
			b.power_density_unscaled = b.power_density_unscaled_back;

			// calculate new power value, based on Gaussian distribution
			std::normal_distribution<double> gaussian(b.power_density_unscaled, b.power_density_unscaled * MEAN_TO_STD_DEV_FACTOR);

			b.power_density_unscaled = gaussian(random_generator);

			if (DBG) {
				std::cout << "Block " << b.id << ":" << std::endl;
				std::cout << " Original power = " << b.power_density_unscaled_back << std::endl;
				std::cout << " New random power = " << b.power_density_unscaled << std::endl;
			}
		}

		// second, generate new power maps
		//
		fp.editThermalAnalyzer().generatePowerMaps(fp.getLayers(), fp.getBlocks(), fp.getOutline(), fp.getPowerBlurringParameters());

		// copy data from Corblivar power maps into local data structure power_samples
		//
		for (int layer = 0; layer < fp.getLayers(); layer++) {
			for (unsigned x = 0; x < ThermalAnalyzer::THERMAL_MAP_DIM; x++) {
				for (unsigned y = 0; y < ThermalAnalyzer::THERMAL_MAP_DIM; y++) {

					power_samples[layer][x][y][sampling_iter] = fp.getThermalAnalyzer().getPowerMapsOrig()[layer][x][y].power_density;
				}
			}
		}

		// third, run HotSpot on this new map
		//
		// generate new ptrace file first
		writeHotSpotPtrace(fp);
		// HotSpot.sh system call
		system(std::string("./HotSpot.sh " + fp.getBenchmark() + " " + std::to_string(fp.getLayers())).c_str());

		// fourth, read in the new HotSpot results into local data structure temp_samples
		//
		parseHotSpotFiles(fp, sampling_iter, temp_samples);


		if (DBG) {
			std::cout << "Printing gathered power/temperature data for sampling iteration " << sampling_iter << std::endl;
			std::cout << std::endl;

			for (int layer = 0; layer < fp.getLayers(); layer++) {
				std::cout << " Layer " << layer << std::endl;
				std::cout << std::endl;

				for (unsigned x = 0; x < ThermalAnalyzer::THERMAL_MAP_DIM; x++) {
					for (unsigned y = 0; y < ThermalAnalyzer::THERMAL_MAP_DIM; y++) {

						std::cout << "  Power[" << x << "][" << y << "]: " << power_samples[layer][x][y][sampling_iter] << std::endl;
						std::cout << "  Temp [" << x << "][" << y << "]: " << temp_samples[layer][x][y][sampling_iter] << std::endl;
					}
				}
			}
		}
	}

	// calculate avg Pearson correlation over all bins
	//
	std::cout << std::endl;
	std::cout << "Sampling results" << std::endl;
	std::cout << "----------------" << std::endl;

	for (int layer = 0; layer < fp.getLayers(); layer++) {

		// dbg output
		if (DBG) {
			std::cout << std::endl;
			std::cout << "Pearson correlations on layer " << layer << std::endl;
			std::cout << std::endl;
		}

		avg_corr = 0.0;
		count_corr = 0;

		for (unsigned x = 0; x < ThermalAnalyzer::THERMAL_MAP_DIM; x++) {
			for (unsigned y = 0; y < ThermalAnalyzer::THERMAL_MAP_DIM; y++) {

				avg_power = avg_temp = 0.0;
				cov = std_dev_power = std_dev_temp = 0.0;
				corr = 0.0;

				// first pass: determine avg values
				//
				for (unsigned sampling_iter = 0; sampling_iter < SAMPLING_ITERATIONS; sampling_iter++) {

					avg_power += power_samples[layer][x][y][sampling_iter];
					avg_temp += temp_samples[layer][x][y][sampling_iter];
				}
				avg_power /= SAMPLING_ITERATIONS;
				avg_temp /= SAMPLING_ITERATIONS;

				// dbg output
				if (DBG) {
					std::cout << "Bin: " << x << ", " << y << std::endl;
					std::cout << " Avg power: " << avg_power << std::endl;
					std::cout << " Avg temp: " << avg_temp << std::endl;
				}
				
				// second pass: determine covariance and standard deviations
				//
				for (unsigned sampling_iter = 0; sampling_iter < SAMPLING_ITERATIONS; sampling_iter++) {

					// deviations of current values from avg values
					cur_power_dev = power_samples[layer][x][y][sampling_iter] - avg_power;
					cur_temp_dev = temp_samples[layer][x][y][sampling_iter] - avg_temp;

					// covariance
					cov += cur_power_dev * cur_temp_dev;

					// standard deviation, calculate its sqrt later on
					std_dev_power += std::pow(cur_power_dev, 2.0);
					std_dev_temp += std::pow(cur_temp_dev, 2.0);
				}
				cov /= SAMPLING_ITERATIONS;
				std_dev_power /= SAMPLING_ITERATIONS;
				std_dev_temp /= SAMPLING_ITERATIONS;

				std_dev_power = std::sqrt(std_dev_power);
				std_dev_temp = std::sqrt(std_dev_temp);

				// calculate Pearson correlation: covariance over product of standard deviations
				//
				corr = cov / (std_dev_power * std_dev_temp);

				// consider only valid correlations values
				if (!std::isnan(corr)) {
					avg_corr += corr;
					count_corr++;
				}

				// dbg output
				if (DBG) {
					std::cout << " Correlation: " << corr << std::endl;
					if (std::isnan(corr)) {
						std::cout << "  NAN, because of zero power; to be skipped" << std::endl;
					}
				}
			}
		}
		avg_corr /= count_corr;

		std::cout << "Avg Pearson correlations over all bins on layer " << layer << ": " << avg_corr << std::endl;
	}
}
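In formula form, the per-bin statistic computed above is the sample Pearson correlation between the N = SAMPLING_ITERATIONS power and temperature samples of one map bin:

r = \frac{\operatorname{cov}(P, T)}{\sigma_P \, \sigma_T}
  = \frac{\frac{1}{N}\sum_{k=1}^{N} (P_k - \bar{P})(T_k - \bar{T})}
         {\sqrt{\frac{1}{N}\sum_{k=1}^{N} (P_k - \bar{P})^2}\;
          \sqrt{\frac{1}{N}\sum_{k=1}^{N} (T_k - \bar{T})^2}}

Bins with zero variance (e.g., zero power in every sampling iteration) produce NaN and are excluded from the per-layer average.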
void DisplayBitCodeStimulus::drawInUnitSquare(shared_ptr<StimulusDisplay> display){

    
    int n = (int)(n_markers_variable->getValue());
    double sep_ratio = (double)(separation_ratio_variable->getValue());
    double marker_width = 1.0 / ((double)n + (double)(n+1)*sep_ratio);
    double sep_width = sep_ratio * marker_width;
    double marker_height = 1.0 / (1.0 + 2.0*sep_ratio);
    double sep_height = marker_height * sep_ratio;
    
    double bg_lum = (double)(bg_luminance_variable->getValue());
    double fg_lum = (double)(fg_luminance_variable->getValue());
    
    
    glBindTexture(GL_TEXTURE_2D, 0);
    glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
    glEnable(GL_BLEND);
    glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
    glTexEnvi(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_ADD);
    
    GLfloat z = 0.01;
    
    // draw background
    glBegin(GL_QUADS);
    glColor4f(bg_lum, bg_lum, bg_lum, *alpha_multiplier);
    glVertex3f(0.0,0.0,z);
    glVertex3f(1.0,0.0,z);
    glVertex3f(1.0,1.0,z);
    glVertex3f(0.0,1.0,z);
    glEnd();
    
    
    std::cerr << display->getCurrentContextIndex() << "!" << std::endl;
    if(display->getCurrentContextIndex() == 0){
        // generate a unique code
        int old_code = (int)(code_variable->getValue());
        int new_code = old_code;
        while(new_code == old_code){
            new_code = random_generator();
        }
        
        code_variable->setValue(Datum((long)new_code));
    }
    
    z = 0.05;
    
    int current_code = (int)(code_variable->getValue());
    
    int counter = 0;
    for(double x = sep_width; x <= 1.0; x += marker_width+sep_width){
        
        int bitmask = 1 << counter;
        counter++;
        
        if((current_code & bitmask) == bitmask){
            
            glBegin(GL_QUADS);
            glColor4f(fg_lum, fg_lum, fg_lum, *alpha_multiplier);
            glVertex3f(x,sep_height,z);
            glVertex3f(x+marker_width,sep_height,z);
            glVertex3f(x+marker_width,sep_height+marker_height,z);
            glVertex3f(x,sep_height+marker_height,z);
            glEnd();
        }
    }
    
    glDisable(GL_BLEND);
    
}
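The marker geometry at the top of drawInUnitSquare follows from tiling the unit square: horizontally, n markers of width w separated (and bordered) by n+1 gaps of width s*w, with s = sep_ratio; vertically, one marker of height h with a gap of s*h above and below:

n\,w + (n+1)\,s\,w = 1 \;\Rightarrow\; w = \frac{1}{n + (n+1)\,s},
\qquad
h + 2\,s\,h = 1 \;\Rightarrow\; h = \frac{1}{1 + 2\,s}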
Example #10
void StageSelectScene::update_stage_list()
{
	clear_stage_list();

	std::list< string_t > stage_name_list;

	WIN32_FIND_DATA find_data;

	string_t file_pattern;
	
	if ( page_ < get_max_story_page() )
	{
		file_pattern = get_stage_dir_name_by_page( page_ ) + common::serialize( page_ ) + "-*.stage";
	}
	else
	{
		file_pattern = get_stage_dir_name_by_page( page_ ) + "*.stage";
	}
	
	HANDLE find_handle = FindFirstFile( file_pattern.c_str(), & find_data );

	if ( find_handle  != INVALID_HANDLE_VALUE )
	{
		while ( true )
		{
			stage_name_list.push_back( find_data.cFileName );

			if ( ! FindNextFile( find_handle, & find_data ) )
			{
				break;
			}
		}

		FindClose( find_handle );
	}

	stage_count_ = stage_name_list.size();

	int n = 0;
	string_t last_stage_name;

	for ( auto i = stage_name_list.begin(); i != stage_name_list.end(); ++i )
	{
		auto stage_name = *i;
		stage_name.resize( stage_name.find_first_of( "." ) );

		// ストーリー用ステージでは、前のステージをクリアしていないと、このステージは一覧に含まない
		if ( is_story_page() && ! last_stage_name.empty() )
		{
			if ( get_save_data()->get( ( get_stage_prefix_by_page( page_ ) + "." + last_stage_name ).c_str(), 0 ) == 0 )
			{
				break;
			}
		}
		if ( stage_name == common::serialize( get_max_story_page() - 1 ) + "-" + common::serialize( get_max_stage_per_page() ) )
		{
			// final stage
			if ( ! is_final_stage_open() )
			{
				continue;
			}
		}

		last_stage_name = stage_name;

		Stage* stage = new Stage();
		stage->name = stage_name;
		stage->rect = get_stage_dst_rect( stage, n );
		stage->cleared = get_save_data()->get( ( get_stage_prefix_by_page( page_ ) + "." + stage->name ).c_str(), 0 ) != 0;
		stage->completed = get_save_data()->get( ( get_stage_prefix_by_page( page_ ) + "." + stage->name ).c_str(), 0 ) == 2;

		try
		{
			stage->texture = get_graphics_manager()->load_named_texture( stage->name.c_str(), ( get_stage_dir_name_by_page( page_ ) + stage->name + ".png" ).c_str() );
		}
		catch ( ... )
		{
			stage->texture = get_graphics_manager()->load_named_texture( stage->name.c_str(), "media/stage/default.png" );
		}

		stage_list_.push_back( stage );

		n++;

		if ( n >= get_max_stage_per_page() )
		{
			break;
		}
	}

	std::random_device seed_generator;
	std::mt19937 random_generator( seed_generator() );

	std::shuffle( circle_src_rect_list_.begin(), circle_src_rect_list_.end(), random_generator );
	std::shuffle( face_src_rect_list_.begin(), face_src_rect_list_.end(), random_generator );
}
Example #11
/**Function********************************************************************

  Synopsis    [Get new variable-order by simulated annealing algorithm.]

  Description [Get x, y by random selection. Choose either
  exchange or jump randomly. In case of jump, choose between jump_up
  and jump_down randomly. Do exchange or jump and get optimal case.
  Loop until there is no improvement or temperature reaches
  minimum. Returns 1 in case of success; 0 otherwise.]

  SideEffects [None]

  SeeAlso     []

******************************************************************************/
int
cuddAnnealing(
  DdManager * table,
  int  lower,
  int  upper)
{
    int         nvars;
    int         size;
    int         x,y;
    int         result;
    int		c1, c2, c3, c4;
    int		BestCost;
    int		*BestOrder;
    double	NewTemp, temp;
    double	rand1;
    int         innerloop, maxGen;
    int         ecount, ucount, dcount;
   
    nvars = upper - lower + 1;

    result = cuddSifting(table,lower,upper);
#ifdef DD_STATS
    (void) fprintf(table->out,"\n");
#endif
    if (result == 0) return(0);

    size = table->keys - table->isolated;

    /* Keep track of the best order. */
    BestCost = size;
    BestOrder = ALLOC(int,nvars);
    if (BestOrder == NULL) {
	table->errorCode = CUDD_MEMORY_OUT;
	return(0);
    }
    copyOrder(table,BestOrder,lower,upper);

    temp = BETA * size;
    maxGen = (int) (MAXGEN_RATIO * nvars);

    c1 = size + 10;
    c2 = c1 + 10;
    c3 = size;
    c4 = c2 + 10;
    ecount = ucount = dcount = 0;
 
    while (!stopping_criterion(c1, c2, c3, c4, temp)) {
#ifdef DD_STATS
	(void) fprintf(table->out,"temp=%f\tsize=%d\tgen=%d\t",
		       temp,size,maxGen);
	tosses = acceptances = 0;
#endif
	for (innerloop = 0; innerloop < maxGen; innerloop++) {
	    /* Choose x, y  randomly. */
	    x = (int) Cudd_Random() % nvars;
	    do {
		y = (int) Cudd_Random() % nvars;
	    } while (x == y);
	    x += lower;
	    y += lower;
	    if (x > y) {
		int tmp = x;
		x = y;
		y = tmp;
	    }

	    /* Choose move with roulette wheel. */
	    rand1 = random_generator();
	    if (rand1 < EXC_PROB) {
		result = ddExchange(table,x,y,temp);       /* exchange */
		ecount++;
#if 0
		(void) fprintf(table->out,
			       "Exchange of %d and %d: size = %d\n",
			       x,y,table->keys - table->isolated);
#endif
	    } else if (rand1 < EXC_PROB + JUMP_UP_PROB) {
		result = ddJumpingAux(table,y,x,y,temp); /* jumping_up */
		ucount++;
#if 0
		(void) fprintf(table->out,
			       "Jump up of %d to %d: size = %d\n",
			       y,x,table->keys - table->isolated);
#endif
	    } else {
		result = ddJumpingAux(table,x,x,y,temp); /* jumping_down */
		dcount++;
#if 0
		(void) fprintf(table->out,
			       "Jump down of %d to %d: size = %d\n",
			       x,y,table->keys - table->isolated);
#endif
	    }

	    if (!result) {
		FREE(BestOrder);
		return(0);
	    }

	    size = table->keys - table->isolated;	/* keep current size */
	    if (size < BestCost) {			/* update best order */
		BestCost = size;
		copyOrder(table,BestOrder,lower,upper);
	    }
	}
	c1 = c2;
	c2 = c3;
	c3 = c4;
	c4 = size;
	NewTemp = ALPHA * temp;
	if (NewTemp >= 1.0) {
	    maxGen = (int)(log(NewTemp) / log(temp) * maxGen);
	}
	temp = NewTemp;	                /* control variable */
#ifdef DD_STATS
	(void) fprintf(table->out,"uphill = %d\taccepted = %d\n",
		       tosses,acceptances);
	fflush(table->out);
#endif
    }

    result = restoreOrder(table,BestOrder,lower,upper);
    FREE(BestOrder);
    if (!result) return(0);
#ifdef DD_STATS
    fprintf(table->out,"#:N_EXCHANGE %8d : total exchanges\n",ecount);
    fprintf(table->out,"#:N_JUMPUP   %8d : total jumps up\n",ucount);
    fprintf(table->out,"#:N_JUMPDOWN %8d : total jumps down",dcount);
#endif
    return(1);

} /* end of cuddAnnealing */
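The outer loop of cuddAnnealing uses a geometric cooling schedule, and while the temperature stays at or above 1 the inner-loop length maxGen is rescaled by the ratio of log-temperatures:

T_{k+1} = \alpha\,T_k,
\qquad
\text{maxGen}_{k+1} = \Bigl\lfloor \frac{\ln T_{k+1}}{\ln T_k}\,\text{maxGen}_k \Bigr\rfloor
\quad (\text{only while } T_{k+1} \ge 1),

with alpha = ALPHA, the initial temperature BETA times the starting DD size, and the initial maxGen equal to MAXGEN_RATIO times the number of variables; stopping_criterion (not shown here) terminates the search based on the last few recorded sizes c1..c4 and the temperature.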
template<typename Kernel, typename Scorer>
typename Kernel::Model RANSAC(
  const Kernel &kernel,
  const Scorer &scorer,
  std::vector<uint32_t> *best_inliers = nullptr ,
  size_t *best_score = nullptr , // Found number of inliers
  double outliers_probability = 1e-2)
{
  assert(outliers_probability < 1.0);
  assert(outliers_probability > 0.0);
  uint32_t iteration = 0;
  const uint32_t min_samples = Kernel::MINIMUM_SAMPLES;
  const uint32_t total_samples = kernel.NumSamples();

  uint32_t max_iterations = 100;
  const uint32_t really_max_iterations = 4096;

  uint32_t best_num_inliers = 0;
  double best_inlier_ratio = 0.0;
  typename Kernel::Model best_model;

  // Test if we have sufficient points for the kernel.
  if (total_samples < min_samples) {
    if (best_inliers) {
      best_inliers->resize(0);
    }
    return best_model;
  }

  // In this robust estimator, the scorer always works on all the data points
  // at once. So precompute the list ahead of time [0,..,total_samples].
  std::vector<uint32_t> all_samples(total_samples);
  std::iota(all_samples.begin(), all_samples.end(), 0);

  //--
  // Random number generation
  std::mt19937 random_generator(std::mt19937::default_seed);

  std::vector<uint32_t> sample;
  for (iteration = 0;
    iteration < max_iterations &&
    iteration < really_max_iterations; ++iteration) {
      UniformSample(min_samples, random_generator, &all_samples, &sample);

      std::vector<typename Kernel::Model> models;
      kernel.Fit(sample, &models);

      // Compute the inlier list for each fit.
      for (const auto& model_it : models) {
        std::vector<uint32_t> inliers;
        scorer.Score(kernel, model_it, all_samples, &inliers);

        if (best_num_inliers < inliers.size()) {
          best_num_inliers = inliers.size();
          best_inlier_ratio = inliers.size() / double(total_samples);
          best_model = model_it;
          if (best_inliers) {
            best_inliers->swap(inliers);
          }
        }
        if (best_inlier_ratio) {
          max_iterations = IterationsRequired(min_samples,
            outliers_probability,
            best_inlier_ratio);
        }
      }
  }
  if (best_score)
    *best_score = best_num_inliers;
  return best_model;
}
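IterationsRequired is not shown in this snippet. A helper consistent with the arguments passed above would compute the standard adaptive RANSAC bound N = ceil(log(eps) / log(1 - w^s)), where eps is outliers_probability (the tolerated chance of never drawing an all-inlier minimal sample), w is the current best inlier ratio, and s is min_samples. The sketch below is an assumption about its behaviour, not the library's actual implementation:

#include <cmath>
#include <cstdint>
#include <limits>

// Hypothetical stand-in for the library's IterationsRequired.
inline uint32_t IterationsRequiredSketch(uint32_t min_samples,
                                         double outliers_probability,
                                         double inlier_ratio) {
  // Probability that one random minimal sample contains only inliers.
  const double p_all_inliers =
      std::pow(inlier_ratio, static_cast<double>(min_samples));
  if (p_all_inliers <= 0.0) return std::numeric_limits<uint32_t>::max();
  if (p_all_inliers >= 1.0) return 1;
  return static_cast<uint32_t>(
      std::ceil(std::log(outliers_probability) / std::log(1.0 - p_all_inliers)));
}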
Example #13
vector<Mat_<double> > FernCascade::Train(const vector<Mat_<uchar> >& images,
                                    const vector<Mat_<double> >& current_shapes,
                                    const vector<Mat_<double> >& ground_truth_shapes,
                                    const vector<BoundingBox> & bounding_box,
                                    const Mat_<double>& mean_shape,
                                    int second_level_num,
                                    int candidate_pixel_num,
                                    int fern_pixel_num){
    Mat_<double> candidate_pixel_locations(candidate_pixel_num,2);
    Mat_<int> nearest_landmark_index(candidate_pixel_num,1);
    vector<Mat_<double> > regression_targets;
    RNG random_generator(getTickCount());
    second_level_num_ = second_level_num;
    
    // calculate regression targets: the difference between ground truth shapes and current shapes
    // candidate_pixel_locations: the locations of candidate pixels, indexed relative to its nearest landmark on mean shape 
    regression_targets.resize(current_shapes.size()); 
    for(int i = 0;i < current_shapes.size();i++){
        regression_targets[i] = ProjectShape(ground_truth_shapes[i],bounding_box[i]) 
                                - ProjectShape(current_shapes[i],bounding_box[i]);
        Mat_<double> rotation;
        double scale;
        SimilarityTransform(mean_shape,ProjectShape(current_shapes[i],bounding_box[i]),rotation,scale);
        transpose(rotation,rotation);

        regression_targets[i] = scale * regression_targets[i] * rotation;
    }

    
    // get candidate pixel locations, please refer to 'shape-indexed features'
    for(int i = 0;i < candidate_pixel_num;i++){
        double x = random_generator.uniform(-1.0,1.0);
        double y = random_generator.uniform(-1.0,1.0);
        if(x*x + y*y > 1.0){
            i--;
            continue;
        }
        // find nearest landmark index
        double min_dist = 1e10;
        int min_index = 0;
        for(int j = 0;j < mean_shape.rows;j++){
            double temp = pow(mean_shape(j,0)-x,2.0) + pow(mean_shape(j,1)-y,2.0);
            if(temp < min_dist){
                min_dist = temp;
                min_index = j;
            }
        }
        candidate_pixel_locations(i,0) = x - mean_shape(min_index,0);
        candidate_pixel_locations(i,1) = y - mean_shape(min_index,1);
        nearest_landmark_index(i) = min_index;   
    }

    // get densities of candidate pixels for each image
    // for densities: each row is the pixel densities at each candidate pixels for an image 
    // Mat_<double> densities(images.size(), candidate_pixel_num);
    vector<vector<double> > densities;
    densities.resize(candidate_pixel_num);
    for(int i = 0;i < images.size();i++){
        Mat_<double> rotation;
        double scale;
        Mat_<double> temp = ProjectShape(current_shapes[i],bounding_box[i]);
        SimilarityTransform(temp,mean_shape,rotation,scale);
        for(int j = 0;j < candidate_pixel_num;j++){
            double project_x = rotation(0,0) * candidate_pixel_locations(j,0) + rotation(0,1) * candidate_pixel_locations(j,1);
            double project_y = rotation(1,0) * candidate_pixel_locations(j,0) + rotation(1,1) * candidate_pixel_locations(j,1);
            project_x = scale * project_x * bounding_box[i].width / 2.0;
            project_y = scale * project_y * bounding_box[i].height / 2.0;
            int index = nearest_landmark_index(j);
            int real_x = project_x + current_shapes[i](index,0);
            int real_y = project_y + current_shapes[i](index,1); 
            real_x = std::max(0.0,std::min((double)real_x,images[i].cols-1.0));
            real_y = std::max(0.0,std::min((double)real_y,images[i].rows-1.0));
            densities[j].push_back((int)images[i](real_y,real_x));
        }
    }
        
    // calculate the covariance between densities at each candidate pixels 
    Mat_<double> covariance(candidate_pixel_num,candidate_pixel_num);
    Mat_<double> mean;
    for(int i = 0;i < candidate_pixel_num;i++){
        for(int j = i;j< candidate_pixel_num;j++){
            double correlation_result = calculate_covariance(densities[i],densities[j]);
            covariance(i,j) = correlation_result;
            covariance(j,i) = correlation_result;
        }
    } 


    // train ferns
    vector<Mat_<double> > prediction;
    prediction.resize(regression_targets.size());
    for(int i = 0;i < regression_targets.size();i++){
        prediction[i] = Mat::zeros(mean_shape.rows,2,CV_64FC1); 
    } 
    ferns_.resize(second_level_num);
    for(int i = 0;i < second_level_num;i++){
        cout<<"Training ferns: "<<i+1<<" out of "<<second_level_num<<endl;
        vector<Mat_<double> > temp = ferns_[i].Train(densities,covariance,candidate_pixel_locations,nearest_landmark_index,regression_targets,fern_pixel_num);     
        // update regression targets
        for(int j = 0;j < temp.size();j++){
            prediction[j] = prediction[j] + temp[j];
            regression_targets[j] = regression_targets[j] - temp[j];
        }  
    }
    
    for(int i = 0;i < prediction.size();i++){
        Mat_<double> rotation;
        double scale;
        SimilarityTransform(ProjectShape(current_shapes[i],bounding_box[i]),mean_shape,rotation,scale);
        transpose(rotation,rotation);
        prediction[i] = scale * prediction[i] * rotation; 
    } 
    return prediction;    
}
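calculate_covariance is not shown here; from the way its result is stored symmetrically into the covariance matrix, it presumably computes the sample covariance of the two density vectors,

\operatorname{cov}(u, v) = \frac{1}{M}\sum_{m=1}^{M} (u_m - \bar{u})(v_m - \bar{v}),

where M is the number of training images (one density sample per image per candidate pixel), up to the usual 1/M versus 1/(M-1) normalization choice. ferns_[i].Train then uses this matrix together with the regression targets to select fern features.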
Example #14
	random_generator rnd::get_random_generator(unsigned int seed)
	{
		return random_generator(seed);
	}
Example #15
/**
 * Train a fern cascade.
 * @param images training images in gray scale
 * @param normalize_matrix similarity matrix
 * @param target_shapes target shapes of each face image
 * @param mean_shape mean shape
 * @param second_level_num level number for second level regression 
 * @param current_shapes current shapes of training images
 * @param pixel_pair_num number of pair of pixels to be selected
 * @param normalized_targets (target - current) * normalize_matrix
 */
void FernCascade::train(const vector<Mat_<uchar> >& images,
        const vector<Mat_<double> >& target_shapes,
        int second_level_num,
        vector<Mat_<double> >& current_shapes,
        int pixel_pair_num,
        vector<Mat_<double> >& normalized_targets,
        int pixel_pair_in_fern,
        const Mat_<double>& mean_shape,
        const vector<Bbox>& target_bounding_box){

    second_level_num_ = second_level_num;
	// coordinates of selected pixels
    Mat_<double> pixel_coordinates(pixel_pair_num,2);
	Mat_<int> nearest_keypoint_index(pixel_pair_num,1);
    RNG random_generator(getTickCount());
    int landmark_num = current_shapes[0].rows;
    int training_num = images.size();
    int image_width = images[0].cols;
    int image_height = images[0].rows;

    /* vector<Mat_<double> > normalized_curr_shape;  */
    // get bounding box of target shapes
   
    vector<Mat_<double> > normalized_shapes;
    /* vector<Mat_<double> > normalized_ground_truth; */


    normalized_shapes = project_shape(current_shapes,target_bounding_box);
    /* normalized_ground_truth = project_shape(target_shapes,target_bounding_box); */
    
    /*
    vector<SimilarityTransform> curr_to_mean_shape;
    vector<SimilarityTransform> ground_to_mean_shape;
    
    curr_to_mean_shape = get_similarity_transform(mean_shape,normalized_shapes);
    ground_to_mean_shape = get_similarity_transform(mean_shape,normalized_ground_truth);
     
    vector<Mat_<double> > curr_project_to_mean;
    vector<Mat_<double> > ground_project_to_mean;
    
    for(int i = 0;i < current_shapes.size();i++){
        // get normalized_targets
        Mat_<double> temp = curr_to_mean_shape[i].apply_similarity_transform(normalized_shapes[i]);
        curr_project_to_mean.push_back(temp.clone());

        temp = ground_to_mean_shape[i].apply_similarity_transform(normalized_ground_truth[i]);
        ground_to_mean_shape.push_back(temp.clone()); 
    }
    */


    // calculate normalized targets
    normalized_targets = inverse_shape(current_shapes,target_bounding_box);
    normalized_targets = compose_shape(normalized_targets,target_shapes,target_bounding_box); 
    
    for(int i = 0;i < normalized_targets.size();i++){
        Mat_<double> rotation;
        Mat_<double> translation;
        double scale;
        translate_scale_rotate(mean_shape,normalized_shapes[i],translation,scale,rotation);
        transpose(rotation,rotation);
        normalized_targets[i] = scale * normalized_targets[i] * rotation;
    } 

    
    // normalized_targets.clear();
    // for(int i = 0;i < curr_project_to_mean.size();i++){
        // normalized_targets.push_back(ground_project_to_mean[i] - curr_project_to_mean[i]);
    // }

    // generate feature pixel location 
    for(int i = 0;i < pixel_pair_num;i++){
        double x = random_generator.uniform(-1.0,1.0);
        double y = random_generator.uniform(-1.0,1.0);

        if(x*x + y*y > 1){
            i--;
            continue;
        }
        
        // get its nearest landmark
        double min_dist = 1e10;
        int min_index = 0;
        for(int j = 0;j < landmark_num;j++){
            double temp = pow(mean_shape(j,0) - x,2.0) + pow(mean_shape(j,1) - y,2.0);
            if(temp < min_dist){
                min_dist = temp;
                min_index = j;
            } 
        } 
        nearest_keypoint_index(i) = min_index;
        pixel_coordinates(i,0) = x - mean_shape(min_index,0);
        pixel_coordinates(i,1) = y - mean_shape(min_index,1);
    }

    // get feature pixel location for each image
    // for pixel_density, each vector in it stores the pixel value for each image on the corresponding pixel locations
    vector<vector<double> > pixel_density;
    pixel_density.resize(pixel_pair_num);
    for(int i = 0;i < normalized_shapes.size();i++){
        // similarity transform from normalized_shapes to mean shape     
        Mat_<double> rotation(2,2);
        Mat_<double> translation(landmark_num,2);
        double scale = 0;

        translate_scale_rotate(normalized_shapes[i],mean_shape,translation,scale,rotation); 
       
        for(int j = 0;j < pixel_pair_num;j++){
            double x = pixel_coordinates(j,0);
            double y = pixel_coordinates(j,1);
            double project_x = rotation(0,0) * x + rotation(0,1) * y;
            double project_y = rotation(1,0) * x + rotation(1,1) * y;
            project_x = project_x * scale;
            project_y = project_y * scale;
            
            // resize according to bounding_box
            project_x = project_x * target_bounding_box[i].width/2.0;
            project_y = project_y * target_bounding_box[i].height/2.0; 
            
            int index = nearest_keypoint_index(j); 
            int real_x = project_x + current_shapes[i](index,0);
            int real_y = project_y + current_shapes[i](index,1);
            
             
            if(real_x < 0){
                real_x = 0;
            } 
            if(real_y < 0){
                real_y = 0;
            }
            if(real_x > images[i].cols-1){
                real_x = images[i].cols-1;
            }
            if(real_y > images[i].rows - 1){
                real_y = images[i].rows - 1;
            }
            pixel_density[j].push_back(int(images[i](real_y,real_x)));    
        }
    }

   	// calculate the correlation between pixels 
    Mat_<double> correlation(pixel_pair_num,pixel_pair_num);
    for(int i = 0;i < pixel_pair_num;i++){
        for(int j = i;j< pixel_pair_num;j++){
            double correlation_result = calculate_covariance(pixel_density[i],pixel_density[j]);
            correlation(i,j) = correlation_result;
            correlation(j,i) = correlation_result;
        }
    }
	// train ferns
    primary_fern_.resize(second_level_num);

    // predications for each shape 
    vector<Mat_<double> > prediction;
    prediction.resize(current_shapes.size());
    for(int i = 0;i < current_shapes.size();i++){
        prediction[i] = Mat::zeros(landmark_num,2,CV_64FC1);
    }
    
    for(int i = 0;i < second_level_num;i++){
		cout<<"Training fern "<<i<<endl;
        primary_fern_[i].train(pixel_density,correlation,pixel_coordinates,nearest_keypoint_index, current_shapes,pixel_pair_in_fern,normalized_targets,
                prediction); 
    }
    for(int i = 0;i < prediction.size();i++){
        Mat_<double> rotation;
        Mat_<double> translation;
        double scale;
        translate_scale_rotate(normalized_shapes[i],mean_shape,translation,scale,rotation);
        transpose(rotation,rotation);
        prediction[i] = scale * prediction[i] * rotation; 
    }
    current_shapes = compose_shape(prediction, current_shapes, target_bounding_box); 
    current_shapes = reproject_shape(current_shapes, target_bounding_box);
   
    /* Mat_<uchar> test_image_1 = images[10].clone(); */
    // for(int i = 0;i < landmark_num;i++){
        // circle(test_image_1,Point2d(current_shapes[10](i,0),current_shapes[10](i,1)),3,Scalar(255,0,0),-1,8,0);
    // }
    // imshow("result",test_image_1);
    /* waitKey(0);  */

}