Example #1
0
// Initialize the filter by drawing every sample pose from a
// caller-supplied model function.  init_fn(init_data) is invoked once per
// sample; all samples start with equal weight 1/max_samples.
void pf_init_model(pf_t *pf, pf_init_model_fn_t init_fn, void *init_data)
{
  pf_sample_set_t *set = pf->sets + pf->current_set;

  // Start from an empty histogram for adaptive sampling
  pf_kdtree_clear(set->kdtree);

  // Use the full sample budget with uniform weights
  set->sample_count = pf->max_samples;
  const double uniform_weight = 1.0 / pf->max_samples;

  // Ask the model for each new sample pose
  for (int i = 0; i < set->sample_count; i++)
  {
    pf_sample_t *sample = &set->samples[i];
    sample->weight = uniform_weight;
    sample->pose = init_fn(init_data);

    // Record the sample in the histogram
    pf_kdtree_insert(set->kdtree, sample->pose, sample->weight);
  }

  // Re-compute cluster statistics
  pf_cluster_stats(pf, set);
}
Example #2
0
// Initialize the filter from a Gaussian distribution: every sample pose is
// drawn from N(mean, cov) and given equal weight 1/max_samples.
void pf_init(pf_t *pf, pf_vector_t mean, pf_matrix_t cov)
{
  pf_sample_set_t *set = pf->sets + pf->current_set;

  // Start from an empty histogram for adaptive sampling
  pf_kdtree_clear(set->kdtree);

  // Use the full sample budget with uniform weights
  set->sample_count = pf->max_samples;
  const double uniform_weight = 1.0 / pf->max_samples;

  // Temporary pdf object used only to draw the initial poses
  pf_pdf_gaussian_t *pdf = pf_pdf_gaussian_alloc(mean, cov);

  for (int i = 0; i < set->sample_count; i++)
  {
    pf_sample_t *sample = &set->samples[i];
    sample->weight = uniform_weight;
    sample->pose = pf_pdf_gaussian_sample(pdf);

    // Record the sample in the histogram
    pf_kdtree_insert(set->kdtree, sample->pose, sample->weight);
  }

  pf_pdf_gaussian_free(pdf);

  // Re-compute cluster statistics
  pf_cluster_stats(pf, set);
}
Example #3
0
// Rebuild the kd-tree histogram and cluster statistics for the current
// sample set.  Call this after sample poses have been modified in place,
// since the previously-inserted tree entries are then stale.
void pf_update_current_cluster_stats(pf_t *pf)
{
  pf_sample_set_t *set = pf->sets + pf->current_set;

  // Poses have changed: clear the kd-tree and reinsert every sample
  pf_kdtree_clear(set->kdtree);
  for (int i = 0; i < set->sample_count; i++)
  {
    pf_sample_t *sample = &set->samples[i];
    pf_kdtree_insert(set->kdtree, sample->pose, sample->weight);
  }

  pf_cluster_stats(pf, set);
}
Example #4
0
// Initialize the filter by drawing every sample pose from a
// caller-supplied model function, then reset the recovery averages and
// the convergence flag.  All samples start with weight 1/max_samples.
void pf_init_model(pf_t *pf, pf_init_model_fn_t init_fn, void *init_data)
{
  pf_sample_set_t *set = pf->sets + pf->current_set;

  // Start from an empty histogram for adaptive sampling
  pf_kdtree_clear(set->kdtree);

  // Use the full sample budget with uniform weights
  set->sample_count = pf->max_samples;
  const double uniform_weight = 1.0 / pf->max_samples;

  // Ask the model for each new sample pose
  for (int i = 0; i < set->sample_count; i++)
  {
    pf_sample_t *sample = &set->samples[i];
    sample->weight = uniform_weight;
    sample->pose = init_fn(init_data);

    // Record the sample in the histogram
    pf_kdtree_insert(set->kdtree, sample->pose, sample->weight);
  }

  // Reset the short- and long-term likelihood averages used for recovery
  pf->w_slow = pf->w_fast = 0.0;

  // Re-compute cluster statistics
  pf_cluster_stats(pf, set);

  // Mark the filter as not converged ...
  pf_init_converged(pf);

  // ... but allow it to report convergence immediately if the initial
  // distribution already satisfies the convergence criterion.
  pf_update_converged(pf);
}
Example #5
0
// Resample the distribution.
//
// Draws a new sample set (set_b) from the current one (set_a) using the
// low-variance (systematic) resampler, stopping early once enough samples
// have been drawn according to pf_resample_limit (presumably a KLD-style
// adaptive bound driven by kd-tree occupancy -- confirm against its
// definition).  The new set then becomes the current set.
void pf_update_resample(pf_t *pf)
{
  int i;
  double total;
  //double *randlist;
  pf_sample_set_t *set_a, *set_b;
  pf_sample_t *sample_a, *sample_b;
  //pf_pdf_discrete_t *pdf;

  // Low-variance sampler state: r = random offset in [0, 1/N),
  // U = current sampling point, c = running cumulative weight,
  // m = index of the current draw, count_inv = 1/N.
  double r,c,U;
  int m;
  double count_inv;

  // set_a: the set we resample from; set_b: the inactive set we fill.
  set_a = pf->sets + pf->current_set;
  set_b = pf->sets + (pf->current_set + 1) % 2;

  // Create the discrete distribution to sample from
  /*
  total = 0;
  randlist = calloc(set_a->sample_count, sizeof(double));
  for (i = 0; i < set_a->sample_count; i++)
  {
    total += set_a->samples[i].weight;
    randlist[i] = set_a->samples[i].weight;
  }
  */

  //printf("resample total %f\n", total);

  // Initialize the random number generator
  //pdf = pf_pdf_discrete_alloc(set_a->sample_count, randlist);

  // Create the kd tree for adaptive sampling
  pf_kdtree_clear(set_b->kdtree);
  
  // Draw samples from set a to create set b.
  total = 0;
  set_b->sample_count = 0;

  // Low-variance resampler, taken from Probabilistic Robotics, p110
  count_inv = 1.0/set_a->sample_count;
#if defined (WIN32)
  // TODO: this isn't quite the same behaviour: drand48 returns uniformly-distributed values
  r = ((double) rand() / (double) RAND_MAX) * count_inv;
#else
  r = drand48() * count_inv;
#endif
  c = set_a->samples[0].weight;
  i = 0;
  m = 0;
  while(set_b->sample_count < pf->max_samples)
  {
    // The m-th draw samples at U = r + m/N; the sample selected is the one
    // whose cumulative-weight interval contains U.
    U = r + m * count_inv;
    while(U>c)
    {
      i++;
      // Handle wrap-around by resetting counters and picking a new random
      // number
      if(i >= set_a->sample_count)
      {
#if defined (WIN32)
        // TODO: this isn't quite the same behaviour: drand48 returns uniformly-distributed values
        r = ((double) rand() / (double) RAND_MAX) * count_inv;
#else
        r = drand48() * count_inv;
#endif
        c = set_a->samples[0].weight;
        i = 0;
        m = 0;
        U = r + m * count_inv;
        continue;
      }
      c += set_a->samples[i].weight;
    }

    //i = pf_pdf_discrete_sample(pdf);    

    sample_a = set_a->samples + i;

    //printf("%d %f\n", i, sample_a->weight);
    assert(sample_a->weight > 0);

    // Add sample to list (uniform weight for now; normalized below)
    sample_b = set_b->samples + set_b->sample_count++;
    sample_b->pose = sample_a->pose;
    sample_b->weight = 1.0;
    total += sample_b->weight;

    // Add sample to histogram
    pf_kdtree_insert(set_b->kdtree, sample_b->pose, sample_b->weight);

    //fprintf(stderr, "resample %d %d %d\n", set_b->sample_count, set_b->kdtree->leaf_count,
            //pf_resample_limit(pf, set_b->kdtree->leaf_count));

    // See if we have enough samples yet
    if (set_b->sample_count > pf_resample_limit(pf, set_b->kdtree->leaf_count))
      break;

    m++;
  }

  //fprintf(stderr, "\n\n");

  //pf_pdf_discrete_free(pdf);
  //free(randlist);

  // Normalize weights
  for (i = 0; i < set_b->sample_count; i++)
  {
    sample_b = set_b->samples + i;
    sample_b->weight /= total;
  }

  // Re-compute cluster statistics
  pf_cluster_stats(pf, set_b);

  // Use the newly created sample set
  pf->current_set = (pf->current_set + 1) % 2;
  
  return;
}
Example #6
0
// Resample the distribution (augmented MCL: KLD-adaptive sample count plus
// random-pose injection for recovery from localization failure).
//
// Draws a new sample set (set_b) from the current one (set_a) using a
// cumulative-probability table, optionally injecting poses from
// pf->random_pose_fn in proportion to the short-/long-term likelihood
// ratio, then makes set_b the current set.
//
// Fixes over the previous revision:
//  * The 30% pose jitter is now applied to the COPY in set_b only; the old
//    code mutated the source sample in set_a through an aliased pointer,
//    corrupting set_a for subsequent draws of the same index.
//  * w_diff is guarded against division by zero / NaN when pf->w_slow is 0
//    (e.g., right after the averages have been reset).
void pf_update_resample(pf_t *pf)
{
  int i;
  double total;
  pf_sample_set_t *set_a, *set_b;
  pf_sample_t *sample_a, *sample_b;
  double *c;
  double w_diff;

  set_a = pf->sets + pf->current_set;
  set_b = pf->sets + (pf->current_set + 1) % 2;

  // Build up cumulative probability table for resampling.
  // TODO: Replace this with a more efficient procedure
  // (e.g., http://www.network-theory.co.uk/docs/gslref/GeneralDiscreteDistributions.html)
  c = (double*)malloc(sizeof(double)*(set_a->sample_count+1));
  c[0] = 0.0;
  for(i=0;i<set_a->sample_count;i++)
    c[i+1] = c[i]+set_a->samples[i].weight;

  // Create the kd tree for adaptive sampling
  pf_kdtree_clear(set_b->kdtree);

  // Draw samples from set a to create set b.
  total = 0;
  set_b->sample_count = 0;

  // Fraction of samples to draw from the random-pose distribution.
  // Guard w_slow == 0 to avoid a NaN/inf ratio after the averages reset.
  w_diff = 0.0;
  if(pf->w_slow > 0.0)
    w_diff = 1.0 - pf->w_fast / pf->w_slow;
  if(w_diff < 0.0)
    w_diff = 0.0;

  while(set_b->sample_count < pf->max_samples)
  {
    sample_b = set_b->samples + set_b->sample_count++;

    if(drand48() < w_diff)
    {
      // Recovery: inject a sample drawn from the random-pose distribution.
      sample_b->pose = (pf->random_pose_fn)(pf->random_pose_data);
    }
    else
    {
      // Naive discrete event sampler: find the cumulative-weight interval
      // containing r.  O(n) per draw; see the TODO above.
      double r = drand48();
      for(i=0;i<set_a->sample_count;i++)
      {
        if((c[i] <= r) && (r < c[i+1]))
          break;
      }
      assert(i<set_a->sample_count);

      sample_a = set_a->samples + i;
      assert(sample_a->weight > 0);

      // Copy the drawn pose into the new set; with 30% probability add a
      // small uniform jitter (+/- 0.01 per component) to fight sample
      // impoverishment.  Applied to the copy only -- set_a stays intact.
      sample_b->pose = sample_a->pose;
      if(drand48() < 0.3)
      {
        sample_b->pose.v[0] += 0.01 * (2.0*drand48() - 1.0);
        sample_b->pose.v[1] += 0.01 * (2.0*drand48() - 1.0);
        sample_b->pose.v[2] += 0.01 * (2.0*drand48() - 1.0);
      }
    }

    sample_b->weight = 1.0;
    total += sample_b->weight;

    // Add sample to histogram
    pf_kdtree_insert(set_b->kdtree, sample_b->pose, sample_b->weight);

    // See if we have enough samples yet
    if (set_b->sample_count > pf_resample_limit(pf, set_b->kdtree->leaf_count))
      break;
  }

  // Reset averages, to avoid spiraling off into complete randomness.
  if(w_diff > 0.0)
    pf->w_slow = pf->w_fast = 0.0;

  // Normalize weights
  for (i = 0; i < set_b->sample_count; i++)
  {
    sample_b = set_b->samples + i;
    sample_b->weight /= total;
  }

  // Re-compute cluster statistics
  pf_cluster_stats(pf, set_b);

  // Use the newly created sample set
  pf->current_set = (pf->current_set + 1) % 2;

  pf_update_converged(pf);

  free(c);
}