void
compute_weights(std::vector<particle_datmo_t> &particle_set, double total_dist)
{
	// BUG FIX: total_weight was declared but never initialized before being
	// accumulated with '+=' inside the loop, so the normalization below ran
	// against an indeterminate value. Start the accumulator at zero.
	double total_weight = 0.0;
	double inv_total_dist = 1.0 / total_dist; // multiplication >= 20x faster than division

	/* Pre-calculated parts of a normal-distribution PDF, kept from earlier
	 * experiments.  NOTE(review): the disabled inv_variance line had a
	 * precedence bug — 1.0/std_dev*std_dev == (1.0/std_dev)*std_dev == 1.0. */
//	double std_dev = 0.1;
//	double inv_variance = 1.0/(std_dev*std_dev);
//	double const_part = inv_variance*(1.0/sqrt(2*M_PI));

	/* Normalize distances and compute the (unnormalized) weight of each
	 * particle as exp(-dist); shorter distance => larger weight. */
	for (std::vector<particle_datmo_t>::iterator it = particle_set.begin(); it != particle_set.end(); ++it)
	{
		// Normalized distance, values in [0.0, 10.0] per the original scaling.
		it->norm_dist = 1000.0 * inv_total_dist * it->dist;
		it->weight = exp(-it->dist);
		total_weight += it->weight;
	}

	/* Normalize weights so the set forms a probability distribution. */
	normalize_weights(particle_set, total_weight);
}
std::vector<moving_objects3_particle_t>
algorithm_particle_filter(std::vector<moving_objects3_particle_t> particle_set_t_1,
		carmen_velodyne_projected_on_ground_message velodyne_projected_on_ground,
		double delta_time)
{
	std::vector<moving_objects3_particle_t> particle_set_t;

	double total_weight = 0.0;

	std::vector<moving_objects3_particle_t>::iterator it = particle_set_t_1.begin();
	std::vector<moving_objects3_particle_t>::iterator end = particle_set_t_1.end();
	for (; it != end; ++it)
	{
		moving_objects3_particle_t particle_t;
		// Motion Model
		particle_t = sample_motion_model((*it), delta_time);

		// Measurement Model -> RANSAC
		// cost = measurement_model(particle_t, velodyne_projected_on_ground);

		// Weighing particles
		particle_t.weight = get_particle_weight(particle_t, velodyne_projected_on_ground);
		total_weight += particle_t.weight;

		particle_set_t.push_back(particle_t);
	}

	// normalize particles weight
	normalize_weights(particle_set_t, total_weight);

	// resample
	particle_set_t = resample(particle_set_t);

	return particle_set_t;
}
  /// SECTION: Combo with RGB and Depth
  /// Feature combiner that stacks a depth-channel HOG with a gray
  /// (RGB-derived) channel HOG, then normalizes their relative weights.
  ComboComputer_RGBPlusDepth::ComboComputer_RGBPlusDepth(Size win_size, Size block_size, Size block_stride, Size cell_size): 
    FeatureBinCombiner(win_size, block_size, block_stride, cell_size)
  {
    // Discard whatever the base class registered; this combo defines its own set.
    computers.clear();
    
    // Depth-channel HOG.
    computers.push_back(shared_ptr<DepthFeatComputer>(
      new HOGComputer18p4_General(selectDepthFn,win_size,block_size,block_stride,cell_size)));
    // Gray-channel HOG (selector pulls the grayscale view of the RGBZ image).
    computers.push_back(shared_ptr<DepthFeatComputer>(
      new HOGComputer18p4_General([](const ImRGBZ&im)-> const Mat&{return im.gray();},win_size,block_size,block_stride,cell_size)));
    // Disabled alternatives kept for reference:
    //computers.push_back(shared_ptr<DepthFeatComputer>(
      //new HOGComputer_Area(win_size,block_size,block_stride,cell_size)));
    //computers.push_back(shared_ptr<DepthFeatComputer>(
      //new FaceFeature(win_size,block_size,block_stride,cell_size)));
    //computers.push_back(shared_ptr<DepthFeatComputer>(
      //new SkinFeatureComputer(win_size,block_size,block_stride,cell_size)));
    
    normalize_weights();
    
    //weights[1] *= HOA_IMPORTANCE; // double the weight of HoA
    //assert(std::dynamic_pointer_cast<HOGComputer_Area>(computers[1]));
    //weights[3] = 1; // don't normalize the face feature weight.
    
    // FIX: size_t index avoids the signed/unsigned comparison with
    // computers.size(); cast to int for the %d format specifier.
    for(size_t iter = 0; iter < computers.size(); ++iter)
      log_once(printfpp("ComboComputer_RGBPlusDepth:%d weight = %f",
			   (int)iter,weights[iter]));
  }
  /// Base combiner: registers a single depth-channel HOG feature and
  /// normalizes the per-computer weights.
  FeatureBinCombiner::FeatureBinCombiner(Size win_size, Size block_size, Size block_stride, Size cell_size)
  {
    // Selector that extracts the depth (Z) plane from an RGBZ image.
    auto selectZ = [](const ImRGBZ&im)->const Mat&{return im.Z;};

    computers.push_back(shared_ptr<DepthFeatComputer>(
      new HOGComputer18p4_General(selectZ,win_size,block_size,block_stride,cell_size)));
    // Other candidates (HOGComputer_Area, HistOfNormals, gray-channel HOG)
    // were tried here and are currently disabled.

    // Give every registered computer its normalized share of the total weight.
    normalize_weights();
  }
void localization_node::resample()
{
    normalize_weights();

    mypoint new_all_particles_[N_PARTICLES];

    int index = rand()%N_PARTICLES; //Random number from one to 1000

    double beta = 0;

    double max = 0;

    for (int i=0;i<=N_PARTICLES-1;i++)
    {
        if(weights_[i]>max)
        {
            max=weights_[i];
        }

    }


    for (int i=0;i<=N_PARTICLES-1;i++)
    {

        double rand_val = ((double)std::rand() / (double)RAND_MAX);

        beta = beta + rand_val*2.0*max ;

        while (beta > weights_[index])
        {
            beta = beta - weights_[index];
            index = (index + 1)%N_PARTICLES ;
        }

        new_all_particles_[i] = all_particles_[index];
    }

    for(int i=0;i<=N_PARTICLES-1;i++)
    {
        all_particles_[i] = new_all_particles_[i];
    }
}
  /// Depth-only combiner: a single depth-channel HOG feature.
  ComboComputer_Depth::ComboComputer_Depth(Size win_size, Size block_size, Size block_stride, Size cell_size): FeatureBinCombiner(win_size, block_size, block_stride, cell_size)
  {
    // Discard whatever the base class registered; this combo defines its own set.
    computers.clear();
    
    // Depth HOG.
    computers.push_back(shared_ptr<DepthFeatComputer>(
      new HOGComputer18p4_General(selectDepthFn,win_size,block_size,block_stride,cell_size)));
    // HoA (disabled):
    //computers.push_back(shared_ptr<DepthFeatComputer>(
      //new HOGComputer_Area(win_size,block_size,block_stride,cell_size)));
    
    normalize_weights();
    
    // update the normalized weights
    //weights[1] *= HOA_IMPORTANCE; 
    //assert(std::dynamic_pointer_cast<HOGComputer_Area>(computers[1]));
    
    // FIX: size_t index avoids the signed/unsigned comparison with
    // computers.size(); cast to int for the %d format specifier.
    for(size_t iter = 0; iter < computers.size(); ++iter)
      log_once(printfpp("ComboComputer_Depth:%d weight = %f",
			   (int)iter,weights[iter]));
  }
/*
 * One Monte-Carlo (particle filter) iteration for a tracked moving object:
 * predict each particle with the motion model, score it against the point
 * cloud with the measurement model, weight it with a normal-distribution PDF,
 * normalize, and resample in place.
 *
 * NOTE(review): 'sigma' (used below) is not declared in this function — the
 * local declaration is commented out, so it presumably refers to a
 * file/global-scope variable; confirm.
 * NOTE(review): 'num_association' is passed by value, so the increment at the
 * end of this function has no effect visible to the caller.
 */
std::vector<particle_datmo_t>
algorithm_monte_carlo(std::vector<particle_datmo_t> particle_set_t_1, double x, double y, double delta_time,
		pcl::PointCloud<pcl::PointXYZ> &pcl_cloud, object_geometry_t obj_geometry, carmen_vector_3D_t car_global_position, int num_association, particle_datmo_t mean_particle)
{
	std::vector<particle_datmo_t> particle_set_t;
	particle_datmo_t particle_t;
	double total_weight = 0.0;
	//double sigma = 3.0;	// TODO: verify the most appropriate variance
	//double total_dist = 0.0;

//	double sum_dist = 0;
	double mean_dist = 0;
//	double sum_var = 0;
//	double var, tmp;
	// disabled experiment: compute the mean of the measurement distances
//	for (std::vector<particle_datmo_t>::iterator it = particle_set_t_1.begin(); it != particle_set_t_1.end(); ++it)
//	{
//		it->dist = measurement_model(*it, x, y, pcl_cloud, obj_geometry, car_global_position);
//		sum_dist += it->dist;
//	}
//	mean_dist = sum_dist / num_of_particles;
//
//
//	// disabled experiment: compute the variance of the distances
//	for (std::vector<particle_datmo_t>::iterator it = particle_set_t_1.begin(); it != particle_set_t_1.end(); ++it)
//	{
//		tmp = mean_dist - it->dist;
//		sum_var += (tmp * tmp);
//	}
//	var = sum_var/num_of_particles;
//
//	sigma = sqrt(mean_dist);

	/* Prediction */
	for (std::vector<particle_datmo_t>::iterator it = particle_set_t_1.begin(); it != particle_set_t_1.end(); ++it)
	{
		// Motion Model: propagate the particle through delta_time.
		particle_t = sample_motion_model(delta_time, (*it), mean_dist, mean_particle);

		// Measurement Model: distance of the predicted particle to the observation.
		particle_t.dist = measurement_model(particle_t, x, y, pcl_cloud, obj_geometry, car_global_position);

		// Weighing particles (still not normalized) via a normal PDF of the distance.
//		total_dist += particle_t.dist;
//		particle_t.weight = -particle_t.dist * particle_t.dist * 100.0;
		//particle_t.weight = particle_weight_pose_reading_model(particle_t.dist);
		particle_t.weight = particle_weight_by_normal_distribution(particle_t.dist, sigma, 0.0);
		total_weight += particle_t.weight;
//		particle_t.weight = particle_weight_by_normal_distribution(particle_t.dist, 1.0, 0.0);
//		particle_t.weight = exp(-0.5 * (particle_t.dist) * (particle_t.dist));

		particle_set_t.push_back(particle_t);

//		if(num_association == 1) {
//			printf("%f %d\n", particle_t.dist, particle_t.class_id);
//		}

	}
	/* Weighing particles: normalize to a probability distribution. */
	normalize_weights(particle_set_t, total_weight);
//	compute_weights(particle_set_t, total_dist);

	/* Resample */
	// disabled test: only resample at high velocities
	//if (mean_particle.velocity > threshold_min_velocity)
		resample(particle_set_t);

	// NOTE(review): no effect — num_association is a by-value parameter.
	num_association = num_association + 1;

	return particle_set_t;
}
Example #8
0
/**
 * @brief Program entry point: particle-filter object tracking over a video file.
 * @param argc Number of program arguments (at least 4 are required)
 * @param argv argv[1] = video file path, argv[2] = particles per object, argv[3] = maximum number of objects
 * @return 0 on success, or a negative number on error
 */
int main( int argc, char** argv ) {
	
	if( argc < 4 )
		return -1;

	// Declaración de variables
	gsl_rng *rng;
	IplImage *frame, *hsv_frame;
	histogram **ref_histos, *histo_aux;
	CvCapture *video;
	particle **particles, **aux, **nuevas_particulas;
	CvScalar color_rojo = CV_RGB(255,0,0), color_azul = CV_RGB(0,0,255);
	CvRect *regions;
	int num_objects = 0;
	int i = 1, MAX_OBJECTS = atoi(argv[3]), PARTICLES = atoi(argv[2]);
	FILE *datos;
	char name[45], num[3], *p1, *p2;
	clock_t t_ini, t_fin;
	double ms;
	
	video = cvCaptureFromFile( argv[1] );
	if( !video ) {
		printf("No se pudo abrir el fichero de video %s\n", argv[1]);
		exit(-1);
	}

	first_frame = cvQueryFrame( video );
	num_objects = get_regions( &regions,  MAX_OBJECTS, argv[1] );
	if( num_objects == 0 )
		exit(-1);

	t_ini = clock();
	hsv_frame = bgr2hsv( first_frame );
	histo_aux = (histogram*) malloc( sizeof(histogram) );
	histo_aux->n = NH*NS + NV;
	nuevas_particulas = (particle**) malloc( num_objects * sizeof( particle* ) );
	for( int j = 0; j < num_objects; ++j )
		nuevas_particulas[j] = (particle*) malloc( PARTICLES * sizeof( particle ) );
			
	// Computamos los histogramas de referencia y distribuimos las partículas iniciales
	ref_histos = compute_ref_histos( hsv_frame, regions, num_objects );
	particles = init_distribution( regions, num_objects, PARTICLES );

	// Mostramos el tracking
	if( show_tracking ) {

		// Mostramos todas las partículas
		if( show_all )
			for( int k = 0; k < num_objects; ++k )
				for( int j = 0; j < PARTICLES; ++j )
					display_particle( first_frame, particles[k][j], color_azul );

		// Dibujamos la partícula más prometedora de cada objeto
		for( int k = 0; k < num_objects; ++k )
			display_particle( first_frame, particles[k][0], color_rojo );

		cvNamedWindow( "Video", 1 );
		cvShowImage( "Video", first_frame );
		cvWaitKey( 5 );
	}

	// Exportamos los histogramas de referencia y los frames
	if( exportar ) {
		export_ref_histos( ref_histos, num_objects );
		export_frame( first_frame, 1 );

		for( int k = 0; k < num_objects; ++k ) {
			sprintf( num, "%02d", k );
			strcpy( name, REGION_BASE);
			p1 = strrchr( argv[1], '/' );
			p2 = strrchr( argv[1], '.' );
			strncat( name, (++p1), p2-p1 );
			strcat( name, num );
			strcat( name, ".txt" );
			datos = fopen( name, "a+" );
			if( ! datos ) {
				printf("Error creando fichero para datos\n");
				return -1;
			}
			fprintf( datos, "%d\t%f\t%f\n", 0, particles[k][0].x, particles[k][0].y );
			fclose( datos );
		}
	}

	cvReleaseImage( &hsv_frame );
	
	// Inicializamos el generador de números aleatorios
	gsl_rng_env_setup();
	rng = gsl_rng_alloc( gsl_rng_mt19937 );
	gsl_rng_set(rng, (unsigned long) time(NULL));

	// Recordar que frame no se puede liberar debido al cvQueryFrame
	while( frame = cvQueryFrame( video ) ) {
		hsv_frame = bgr2hsv( frame );

		// Realizamos la predicción y medición de probabilidad para cada partícula
		for( int k = 0; k < num_objects; ++k )
			for( int j = 0; j < PARTICLES; ++j ) {
				transition( &particles[k][j], frame->width, frame->height, rng );
				particles[k][j].w = likelihood( hsv_frame, &particles[k][j], ref_histos[k], histo_aux );
			}
			
		// Normalizamos los pesos y remuestreamos un conjunto de partículas no ponderadas
		normalize_weights( particles, num_objects, PARTICLES );
		for (int k = 0; k < num_objects; ++k )
			resample( particles[k], PARTICLES, nuevas_particulas[k] );
		aux = particles;
		particles = nuevas_particulas;
		nuevas_particulas = aux;

		// Mostramos el tracking
		if( show_tracking ) {

			// Mostramos todas las partículas
			if( show_all )
				for( int k = 0; k < num_objects; ++k )
					for( int j = 0; j < PARTICLES; ++j )
						display_particle( frame, particles[k][j], color_azul );
		
			// Dibujamos la partícula más prometedora de cada objeto
			for( int k = 0; k < num_objects; ++k )
				display_particle( frame, particles[k][0], color_rojo );
			cvNamedWindow( "Video", 1 );
			cvShowImage( "Video", frame );
			cvWaitKey( 5 );
		}

		// Exportamos los histogramas de referencia y los frames
		if( exportar ) {
			export_frame( frame, i+1 );

			for( int k = 0; k < num_objects; ++k ) {
				sprintf( num, "%02d", k );
				strcpy( name, REGION_BASE);
				p1 = strrchr( argv[1], '/' );
				p2 = strrchr( argv[1], '.' );
				strncat( name, (++p1), p2-p1 );
				strcat( name, num );
				strcat( name, ".txt" );
				datos = fopen( name, "a+" );
				if( ! datos ) {
					printf("Error abriendo fichero para datos\n");
					return -1;
				}
				fprintf( datos, "%d\t%f\t%f\n", i, particles[k][0].x, particles[k][0].y );
				fclose( datos );
			}
		}

		cvReleaseImage( &hsv_frame );
		++i;
	}
	
	// Liberamos todos los recursos usados (mallocs, gsl y frames)
	cvReleaseCapture( &video );
	gsl_rng_free( rng );
	free( histo_aux );
	free( regions );

	for( int i = 0; i < num_objects; ++i ) {
		free( ref_histos[i] );
		free( particles[i] );
		free( nuevas_particulas[i] );
	}

	free( particles );
	free( nuevas_particulas );

	t_fin = clock();
	ms = ((double)(t_fin - t_ini) / CLOCKS_PER_SEC) * 1000.0;
	printf("%d\t%d\t%.10g\n", PARTICLES, num_objects, ms);
}