Code example #1
File: GLATPsfV3.cpp  Project: adonath/gammalib
/***********************************************************************//**
 * @brief Evaluate PSF for a specific set of parameters
 *
 * @param[in] offset Offset angle (radians).
 * @param[in] energy Energy (MeV).
 * @param[in] index Parameter array index.
 *
 * Evaluates the PSF as a function of offset angle and energy for a specific set
 * of PSF parameters. The parameter set that is used is specified by the
 * index parameter. The energy parameter only serves to scale the score and
 * stail parameters of the PSF.
 ***************************************************************************/
double GLATPsfV3::eval_psf(const double& offset, const double& energy,
                           const int& index)
{
    // Get energy scaling
    double scale = scale_factor(energy);

    // Get parameters
    double ncore(m_ncore[index]);
    double ntail(m_ntail[index]);
    double score(m_score[index] * scale);
    double stail(m_stail[index] * scale);
    double gcore(m_gcore[index]);
    double gtail(m_gtail[index]);

    // Compute argument
    double rc = offset / score;
    double uc = 0.5 * rc * rc;
    double rt = offset / stail;
    double ut = 0.5 * rt * rt;

    // Evaluate PSF
    double psf = ncore * (base_fct(uc, gcore) + ntail * base_fct(ut, gtail));
    
    // Return PSF
    return psf;
}
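
The base function base_fct() is not part of this excerpt. As a point of reference, the Fermi/LAT PSF is built from a King profile, so a minimal sketch of what base_fct(u, gamma) presumably computes is given below; treat the exact form as an assumption rather than the gammalib implementation.

// Hedged sketch (assumption): King-profile base function of the Fermi/LAT PSF,
//   base_fct(u, g) = (1 - 1/g) * (1 + u/g)^(-g),   with g > 1
#include <cmath>

static double base_fct(const double& u, const double& gamma)
{
    // Normalised King profile in the variable u = 0.5*(r/sigma)^2
    return (1.0 - 1.0/gamma) * std::pow(1.0 + u/gamma, -gamma);
}
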
Code example #2
File: tag_viewer.cpp  Project: clavicule/BBTag
void TagViewer::mouseReleaseEvent(
    QMouseEvent* e
)
{
    if( !tagging_ || e->button() != Qt::LeftButton ) {
        return;
    }

    tag_end_ = e->pos();
    enforce_boundary_conditions( tag_end_ );

    float scale_f = scale_factor();

    // make a valid rectangle
    QRect tag( tag_start_ / scale_f, tag_end_ / scale_f );
    int left = tag.left();
    int top = tag.top();
    if( left > tag.right() ) {
        tag.setLeft( tag.right() );
        tag.setRight( left );
    }
    if( top > tag.bottom() ) {
        tag.setTop( tag.bottom() );
        tag.setBottom( top );
    }

    emit( tagged( tag ) );

    tag_start_ = QPoint( 0, 0 );
    tag_end_ = QPoint( 0, 0 );
}
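
The helper scale_factor() is not included in this excerpt. A hypothetical sketch, assuming it returns the ratio between the widget size and the native pixmap size, so that dividing widget coordinates by it (as done above) yields image coordinates:

// Hypothetical sketch: display-to-image scaling ratio used by TagViewer.
// Assumes pix_ is the displayed pixmap and the widget tracks its aspect ratio.
float TagViewer::scale_factor() const
{
    if( pix_.isNull() || pix_.width() == 0 ) {
        return 1.f; // nothing to scale against
    }
    return static_cast<float>( size().width() ) / static_cast<float>( pix_.width() );
}
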
Code example #3
File: pango-markup.c  Project: soubok/libset
static gboolean
parse_absolute_size (OpenTag               *tag,
		     const char            *size)
{
  SizeLevel level = Medium;
  double factor;

  if (strcmp (size, "xx-small") == 0)
    level = XXSmall;
  else if (strcmp (size, "x-small") == 0)
    level = XSmall;
  else if (strcmp (size, "small") == 0)
    level = Small;
  else if (strcmp (size, "medium") == 0)
    level = Medium;
  else if (strcmp (size, "large") == 0)
    level = Large;
  else if (strcmp (size, "x-large") == 0)
    level = XLarge;
  else if (strcmp (size, "xx-large") == 0)
    level = XXLarge;
  else
    return FALSE;

  /* This is "absolute" in that it's relative to the base font,
   * but not to sizes created by any other tags
   */
  factor = scale_factor (level, 1.0);
  add_attribute (tag, pango_attr_scale_new (factor));
  if (tag)
    open_tag_set_absolute_font_scale (tag, factor);

  return TRUE;
}
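
The scale_factor() helper itself is not shown here. A plausible sketch, assuming the SizeLevel values are centred on Medium == 0 and that each step corresponds to the conventional 1.2 ratio used between CSS-style absolute font sizes:

/* Plausible sketch (assumption): each level above or below Medium scales the
 * base by a factor of 1.2. Assumes Medium == 0, XXSmall == -3, XXLarge == 3. */
static double
scale_factor (SizeLevel level, double base)
{
  double factor = base;
  int n = (int) level;

  while (n > 0) { factor *= 1.2; n--; }
  while (n < 0) { factor /= 1.2; n++; }

  return factor;
}
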
Code example #4
File: mvarfit.c  Project: kro/libmvar
static void set_intercept_vec_w(struct mvar_model *model, gsl_matrix *aug_A, gsl_vector *scale)
{
    gsl_vector_view vec_view = gsl_matrix_column(aug_A, 0);

    gsl_vector_memcpy(model->w, &vec_view.vector);
    gsl_vector_scale(model->w, scale_factor(scale));
}
Code example #5
File: groupies.c  Project: markushaider/generate_tree
void lightcone_set_scale(float *pos) {
  int64_t i;
  float ds=0, dx=0, z;
  for (i=0; i<3; i++) { ds = pos[i]-LIGHTCONE_ORIGIN[i]; dx+=ds*ds; }
  z = comoving_distance_h_to_redshift(sqrt(dx));
  SCALE_NOW = scale_factor(z);
  calc_mass_definition();
}
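
Here scale_factor(z) converts a redshift into a cosmological scale factor. A minimal sketch, assuming the standard convention a = 1/(1+z):

/* Minimal sketch (assumption): standard redshift-to-scale-factor relation,
 * a = 1 / (1 + z). */
static float scale_factor(float z) {
  return 1.0f / (1.0f + z);
}
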
Code example #6
File: GLATPsfV3.cpp  Project: adonath/gammalib
/***********************************************************************//**
 * @brief Integrates PSF for a specific set of parameters
 *
 * @param[in] energy Energy (MeV).
 * @param[in] index Parameter array index.
 *
 * Integrates PSF for a specific set of parameters.
 *
 * Compile option G_APPROXIMATE_PSF_INTEGRAL:
 * If defined, a numerical PSF integral is only performed for energies
 * < 120 MeV, while for larger energies the small angle approximation is
 * used. If not defined, a numerical PSF integral is performed for all
 * energies.
 * This option is kept for comparison with the Fermi/LAT ScienceTools, which
 * select the integration method based on the true photon energy. As the
 * normalization is only performed once upon loading of the PSF, CPU time
 * is not really an issue here, and we can afford the more precise numerical
 * integration. Note that the uncertainty of the approximation at energies
 * near 120 MeV reaches 0.1%.
 *
 * @todo Implement gcore and gtail checking
 ***************************************************************************/
double GLATPsfV3::integrate_psf(const double& energy, const int& index)
{
    // Initialise integral
    double psf = 0.0;

    // Get energy scaling
    double scale = scale_factor(energy);

    // Get parameters
    double ncore(m_ncore[index]);
    double ntail(m_ntail[index]);
    double score(m_score[index] * scale);
    double stail(m_stail[index] * scale);
    double gcore(m_gcore[index]);
    double gtail(m_gtail[index]);

    // Make sure that gcore and gtail are not negative
    //if (gcore < 0 || gtail < 0) {
    //}

    // Do we need an exact integral?
    #if defined(G_APPROXIMATE_PSF_INTEGRAL)
    if (energy < 120) {
    #endif

        // Allocate integrand
        GLATPsfV3::base_integrand integrand(ncore, ntail, score, stail, gcore, gtail);

        // Allocate integral
        GIntegral integral(&integrand);

        // Integrate radially from 0 to 90 degrees
        psf = integral.romb(0.0, pihalf) * twopi;
    
    #if defined(G_APPROXIMATE_PSF_INTEGRAL)
    } // endif: exact integral was performed

    // No, so we use the small angle approximation
    else {

        // Compute arguments
        double rc = pihalf / score;
        double uc = 0.5 * rc * rc;
        double sc = twopi * score * score;
        double rt = pihalf / stail;
        double ut = 0.5 * rt * rt;
        double st = twopi * stail * stail;

        // Evaluate PSF integral (from 0 to 90 degrees)
        psf = ncore * (base_int(uc, gcore) * sc +
                       base_int(ut, gtail) * st * ntail);
    
    }
    #endif

    // Return PSF integral
    return psf;
}
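
The small-angle branch above uses base_int(u, gamma), the analytic integral of the King base function from 0 to u. Its closed form is written out below as an assumption, since the implementation is not part of this excerpt:

// Hedged sketch (assumption): analytic integral of base_fct over [0, u],
//   int_0^u (1 - 1/g) (1 + t/g)^(-g) dt = 1 - (1 + u/g)^(1-g)
#include <cmath>

static double base_int(const double& u, const double& gamma)
{
    return 1.0 - std::pow(1.0 + u/gamma, 1.0 - gamma);
}
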
Code example #7
File: tag_viewer.cpp  Project: clavicule/BBTag
void TagViewer::paintEvent(
    QPaintEvent* /*e*/
)
{
    QPainter p( this );

    float scale_f = scale_factor();
    QRect drawing_area( 0, 0, size().width(), size().height() );

    if( !pix_.isNull() && !size().isNull() ) {
        p.drawPixmap( drawing_area, pix_ );

    } else {
        QTextOption options( Qt::AlignLeft );
        options.setWrapMode( QTextOption::WordWrap );
        p.setPen( QPen( Qt::red, 4 ) );
        p.drawText(
            QRectF( drawing_area ),
            "Image cannot be displayed. Check:\n"
            " - the same image is selected among the multiple selection (it's ok to select labels)\n"
            " - the image file is still at the same disk location when imported\n"
            " - the image format is valid and/or the file is not corrupted ",
            options
        );

        return;
    }

    QFont font;
    font.setPointSize( 10 );
    p.setFont( font );

    // draw bounding boxes
    for( QList<TagDisplayElement>::iterator tag_itr = elts_.begin(); tag_itr != elts_.end(); ++tag_itr ) {
        const TagDisplayElement& tag = *tag_itr;
        const QList<QRect>& bbox = tag._bbox;

        p.setPen( QPen( tag._color, 2 ) );

        for( QList<QRect>::const_iterator bbox_itr = bbox.begin(); bbox_itr != bbox.end(); ++bbox_itr ) {
            const QRect& box_rect = *bbox_itr;
            QRect scaled_box( scale_f * box_rect.topLeft(), scale_f * box_rect.bottomRight() );
            p.drawRect( scaled_box );
            p.drawText( scaled_box.x(), scaled_box.y(), tag._label );
        }
    }

    // draw current box being tagged
    p.setPen( QPen( current_color_, 2 ) );
    if( tagging_ && tag_start_ != tag_end_ ) {
        QRect current_rect( tag_start_, tag_end_ );
        p.drawRect( current_rect );
        p.drawText( current_rect.x(), current_rect.y(), current_label_ );
    }

}
Code example #8
File: GLATPsfV3.cpp  Project: adonath/gammalib
/***********************************************************************//**
 * @brief Normalize PSF for all parameters
 *
 * Makes sure that the PSF is normalized for all parameters. We ensure this by
 * looping over all parameter nodes, integrating the PSF for each set of
 * parameters, and dividing the NCORE parameter by the integral.
 *
 * Compile option G_CHECK_PSF_NORM:
 * If defined, checks that the PSF is normalized correctly.
 ***************************************************************************/
void GLATPsfV3::normalize_psf(void)
{
    // Loop over all energy bins
    for (int ie = 0; ie < m_rpsf_bins.nenergies(); ++ie) {

        // Extract energy value (in MeV)
        double energy = m_rpsf_bins.energy(ie);

        // Loop over all cos(theta) bins
        for (int ic = 0; ic < m_rpsf_bins.ncostheta(); ++ic) {

            // Get parameter index
            int index = m_rpsf_bins.index(ie, ic);

            // Integrate PSF
            double norm = integrate_psf(energy, index);

            // Normalize PSF
            m_ncore[index] /= norm;

            // Compile option: check PSF normalization
            #if defined(G_CHECK_PSF_NORM)
            double scale = scale_factor(energy);
            double ncore(m_ncore[index]);
            double ntail(m_ntail[index]);
            double score(m_score[index] * scale);
            double stail(m_stail[index] * scale);
            double gcore(m_gcore[index]);
            double gtail(m_gtail[index]);
            GLATPsfV3::base_integrand integrand(ncore, ntail, score, stail, gcore, gtail);
            GIntegral integral(&integrand);
            double sum = integral.romb(0.0, pihalf) * twopi;
            std::cout << "Energy=" << energy;
            std::cout << " cos(theta)=" << m_rpsf_bins.costheta_lo(ic);
            std::cout << " error=" << sum-1.0 << std::endl;
            #endif

        } // endfor: looped over cos(theta)

    } // endfor: looped over energies
    
    // Return
    return;
}
Code example #9
File: tag_viewer.cpp  Project: clavicule/BBTag
void TagViewer::mousePressEvent(
    QMouseEvent* e
)
{
    if( e->button() != Qt::LeftButton ) {
        return;
    }

    if( tagging_ ) {
        tag_start_ = e->pos();
        enforce_boundary_conditions( tag_start_ );
        tag_end_ = tag_start_;

    } else if( untagging_ ) {
        // searches the closest rectangle to the picked point
        // removes it only if point is deemed close enough
        // there won't be tons of labels per image, so a brute force search
        // is perfectly acceptable, no need to go into quad-tree
        float scale_f = scale_factor();
        QRect rect_found;
        QString label_found;
        int distance_min = 200. / scale_f;
        QPoint p = e->pos();
        enforce_boundary_conditions( p );
        p /= scale_f;

        for( QList<TagDisplayElement>::iterator tag_itr = elts_.begin(); tag_itr != elts_.end(); ++tag_itr ) {
            const TagDisplayElement& tag = *tag_itr;
            const QList<QRect>& bbox = tag._bbox;

            for( QList<QRect>::const_iterator bbox_itr = bbox.begin(); bbox_itr != bbox.end(); ++bbox_itr ) {
                int d = shortest_distance( p, *bbox_itr );
                if( d < distance_min ) {
                    distance_min = d;
                    label_found = tag._label;
                    rect_found = *bbox_itr;
                }
            }
        }
        if( !label_found.isEmpty() && rect_found.isValid() ) {
            emit( untagged( label_found, rect_found ) );
        }
    }
}
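
The distance helper shortest_distance() is not part of this excerpt. A hypothetical sketch, assuming it returns the distance from the picked point to the rectangle (zero when the point lies inside it):

// Hypothetical sketch: distance from a point to a rectangle, 0 if inside.
// Requires <cmath>; qMax comes from <QtGlobal>.
static int shortest_distance( const QPoint& p, const QRect& r )
{
    int dx = qMax( qMax( r.left() - p.x(), 0 ), p.x() - r.right() );
    int dy = qMax( qMax( r.top() - p.y(), 0 ), p.y() - r.bottom() );
    return static_cast<int>( std::sqrt( static_cast<double>( dx*dx + dy*dy ) ) );
}
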
Code example #10
File: subd_dice.cpp  Project: mik0001/Blender
void QuadDice::dice(SubPatch& sub, EdgeFactors& ef)
{
	/* compute inner grid size with scale factor */
	int Mu = max(ef.tu0, ef.tu1);
	int Mv = max(ef.tv0, ef.tv1);

	float S = scale_factor(sub, ef, Mu, Mv);
	Mu = max((int)ceil(S*Mu), 2); // XXX handle 0 & 1?
	Mv = max((int)ceil(S*Mv), 2); // XXX handle 0 & 1?

	/* reserve space for new verts */
	int offset = mesh->verts.size();
	reserve(ef, Mu, Mv);

	/* corners and inner grid */
	add_corners(sub);
	add_grid(sub, Mu, Mv, offset);

	/* bottom side */
	vector<int> outer, inner;

	add_side_u(sub, outer, inner, Mu, Mv, ef.tu0, 0, offset);
	stitch_triangles(outer, inner);

	/* top side */
	add_side_u(sub, outer, inner, Mu, Mv, ef.tu1, 1, offset);
	stitch_triangles(inner, outer);

	/* left side */
	add_side_v(sub, outer, inner, Mu, Mv, ef.tv0, 0, offset);
	stitch_triangles(inner, outer);

	/* right side */
	add_side_v(sub, outer, inner, Mu, Mv, ef.tv1, 1, offset);
	stitch_triangles(outer, inner);

	assert(vert_offset == mesh->verts.size());
}
Code example #11
File: pango-markup.c  Project: Distrotech/pango
static void
markup_data_close_tag (MarkupData *md)
{
  OpenTag *ot;
  GSList *tmp_list;

  if (md->attr_list == NULL)
    return;

  /* pop the stack */
  ot = md->tag_stack->data;
  md->tag_stack = g_slist_delete_link (md->tag_stack,
				       md->tag_stack);

  /* Adjust end indexes, and push each attr onto the front of the
   * to_apply list. This means that outermost tags are on the front of
   * that list; if we apply the list in order, then the innermost
   * tags will "win" which is correct.
   */
  tmp_list = ot->attrs;
  while (tmp_list != NULL)
    {
      PangoAttribute *a = tmp_list->data;

      a->start_index = ot->start_index;
      a->end_index = md->index;

      md->to_apply = g_slist_prepend (md->to_apply, a);

      tmp_list = g_slist_next (tmp_list);
    }

  if (ot->scale_level_delta != 0)
    {
      /* We affected relative font size; create an appropriate
       * attribute and reverse our effects on the current level
       */
      PangoAttribute *a;

      if (ot->has_base_font_size)
	{
	  /* Create a font using the absolute point size
	   * as the base size to be scaled from
	   */
	  a = pango_attr_size_new (scale_factor (ot->scale_level,
						 1.0) *
				   ot->base_font_size);
	}
      else
	{
	  /* Create a font using the current scale factor
	   * as the base size to be scaled from
	   */
	  a = pango_attr_scale_new (scale_factor (ot->scale_level,
						  ot->base_scale_factor));
	}

      a->start_index = ot->start_index;
      a->end_index = md->index;

      md->to_apply = g_slist_prepend (md->to_apply, a);
    }

  g_slist_free (ot->attrs);
  g_slice_free (OpenTag, ot);
}
Code example #12
File: meta_io.c  Project: markushaider/generate_tree
void read_particles(char *filename) {
  int64_t i, j, gadget = 0, gadget_internal = 0;
  int64_t p_start = num_p;
  float dx, ds, z, a, vel_mul;
  double *origin, origin_offset[3] = {0};
  if (!strcasecmp(FILE_FORMAT, "ASCII")) load_particles(filename, &p, &num_p);
  else if (!strncasecmp(FILE_FORMAT, "GADGET", 6)) {
    if (!strcasecmp(FILE_FORMAT, "GADGET_INTERNAL") ||
	!strcasecmp(FILE_FORMAT, "GADGET2_INTERNAL")) gadget_internal = 1;
    load_particles_gadget2(filename, &p, &num_p);
    gadget = 1;
  }
  else if (!strncasecmp(FILE_FORMAT, "ART", 3)) 
    load_particles_art(filename, &p, &num_p);
  else if (!strncasecmp(FILE_FORMAT, "INTERNAL", 8)) {
    load_particles_internal(filename, &p, &num_p);
  }
  else if (!strncasecmp(FILE_FORMAT, "GENERIC", 7)) {
    assert(load_particles_generic != NULL);
    load_particles_generic(filename, &p, &num_p);
  }
  else if (!strncasecmp(FILE_FORMAT, "TIPSY", 5)) {
    load_particles_internal(filename, &p, &num_p);
  }
  else {
    fprintf(stderr, "[Error] Unknown filetype %s!\n", FILE_FORMAT);
    exit(1);
  }

  if (LIMIT_RADIUS) {
    for (i=p_start; i<num_p; i++) {
      for (j=0, ds=0; j<3; j++) { dx = p[i].pos[j]-LIMIT_CENTER[j]; ds+=dx*dx; }
      if (ds > LIMIT_RADIUS*LIMIT_RADIUS) {
	num_p--;
	p[i] = p[num_p];
	i--;
      }
    }
  }

  if (LIGHTCONE) {
    init_cosmology();
    if (strlen(LIGHTCONE_ALT_SNAPS)) {
      for (i=0; i<3; i++)
	if (LIGHTCONE_ORIGIN[i] || LIGHTCONE_ALT_ORIGIN[i]) break;
      if (i<3) { //Same box coordinates, different intended locations
	if (LIGHTCONE == 1) {
	  for (i=0; i<3; i++) origin_offset[i] = LIGHTCONE_ORIGIN[i] - 
				LIGHTCONE_ALT_ORIGIN[i];
	}
      } else { //Offset everything
	for (i=0; i<3; i++) origin_offset[i] = -BOX_SIZE;
      }
      BOX_SIZE *= 2.0;
    }
    origin = (LIGHTCONE == 2) ? LIGHTCONE_ALT_ORIGIN : LIGHTCONE_ORIGIN;
    for (i=p_start; i<num_p; i++) {
      if (LIGHTCONE == 2) p[i].id = -p[i].id; //Make ids different
      for (j=0,dx=0; j<3; j++) {
	ds = p[i].pos[j] - origin[j];
	dx += ds*ds;
	p[i].pos[j] -= origin_offset[j];
      }
      if (!gadget) continue;
      dx = sqrt(dx);
      z = comoving_distance_h_to_redshift(dx);
      a = scale_factor(z);
      vel_mul = (gadget_internal) ? (1.0/a) : sqrt(a);
      for (j=0; j<3; j++) p[i].pos[j+3] *= vel_mul;
    }
  }
  output_config(NULL);
}
Code example #13
File: permutohedral.cpp  Project: VictorLamoine/pcl
void
pcl::Permutohedral::init (const std::vector<float> &feature, const int feature_dimension, const int N)
{
  N_ = N;
  d_ = feature_dimension;
  
  // Create hash table
  std::vector<std::vector<short> > keys;
  keys.reserve ((d_+1) * N_);
  std::multimap<size_t, int> hash_table;

  // reserve class memory
  if (offset_.size () > 0) 
    offset_.clear ();
  offset_.resize ((d_ + 1) * N_);

  if (barycentric_.size () > 0) 
    barycentric_.clear ();
  barycentric_.resize ((d_ + 1) * N_);

  // create vectors and matrices
  Eigen::VectorXf scale_factor = Eigen::VectorXf::Zero (d_);
  Eigen::VectorXf elevated = Eigen::VectorXf::Zero (d_ + 1);
  Eigen::VectorXf rem0 = Eigen::VectorXf::Zero (d_+1);
  Eigen::VectorXf barycentric = Eigen::VectorXf::Zero (d_+2);
  Eigen::VectorXi rank = Eigen::VectorXi::Zero (d_+1);
  Eigen::Matrix<int, Eigen::Dynamic, Eigen::Dynamic> canonical;
  canonical = Eigen::Matrix<int, Eigen::Dynamic, Eigen::Dynamic>::Zero (d_+1, d_+1);
  //short * key = new short[d_+1];
  std::vector<short> key (d_+1);

  // Compute the canonical simplex
  for (int i = 0; i <= d_; i++)
  {
    for (int j = 0; j <= (d_ - i); j++)
      canonical (j, i) = i;
    for (int j = (d_ - i + 1); j <= d_; j++)
      canonical (j, i) = i - (d_ + 1);
  }

  // Expected standard deviation of our filter (p.6 in [Adams et al. 2010])
  float inv_std_dev = std::sqrt (2.0f / 3.0f) * static_cast<float> (d_ + 1);
  
  // Compute the diagonal part of E (p.5 in [Adams et al. 2010])
  for (int i = 0; i < d_; i++)
    scale_factor (i) = 1.0f / std::sqrt (static_cast<float> (i + 2) * static_cast<float> (i + 1)) * inv_std_dev;

  // Compute the simplex each feature lies in
  for (int k = 0; k < N_; k++)
    //for (int k = 0; k < 5; k++)
  {

    // Elevate the feature (y = Ep, see p.5 in [Adams et al. 2010])
    int index = k * feature_dimension;
    // sm accumulates the running sum of our feature vector
    float sm = 0;
    for (int j = d_; j > 0; j--)
    {
      float cf = feature[index + j-1] * scale_factor (j-1);      
      elevated (j) = sm - static_cast<float> (j) * cf;
      sm += cf;
    }
    elevated (0) = sm;

    // Find the closest 0-colored simplex through rounding
    float down_factor = 1.0f / static_cast<float>(d_+1);
    float up_factor = static_cast<float>(d_+1);
    int sum = 0;
    for (int j = 0; j <= d_; j++){
      float rd = floorf (0.5f + (down_factor * elevated (j))) ;
      rem0 (j) = rd * up_factor;
      sum += static_cast<int> (rd);
    }
    
    // rank differential to find the permutation between this simplex and the canonical one.         
    // (See pg. 3-4 in paper.)    
    rank.setZero ();
    Eigen::VectorXf tmp = elevated - rem0;
    for (int i = 0; i < d_; i++){
      for (int j = i+1; j <= d_; j++)
        if (tmp (i) < tmp (j))
          rank (i)++;
        else
          rank (j)++;
    }

    // If the point doesn't lie on the plane (sum != 0) bring it back
    for (int j = 0; j <= d_; j++){
      rank (j) += sum;
      if (rank (j) < 0){
        rank (j) += d_+1;
        rem0 (j) += static_cast<float> (d_ + 1);
      }
      else if (rank (j) > d_){
        rank (j) -= d_+1;
        rem0 (j) -= static_cast<float> (d_ + 1);
      }
    }

    // Compute the barycentric coordinates (p.10 in [Adams et al. 2010])
    barycentric.setZero ();
    Eigen::VectorXf v = (elevated - rem0) * down_factor;
    for (int j = 0; j <= d_; j++){
      barycentric (d_ - rank (j)    ) += v (j);
      barycentric (d_ + 1 - rank (j)) -= v (j);
    }
    // Wrap around
    barycentric (0) += 1.0f + barycentric (d_+1);

    // Compute all vertices and their offset
    for (int remainder = 0; remainder <= d_; remainder++)
    {
      for (int j = 0; j < d_; j++)
        key[j] = static_cast<short> (rem0 (j) + static_cast<float> (canonical ( rank (j), remainder)));

      // insert key in hash table      
      size_t hash_key = generateHashKey (key);
      auto it = hash_table.find (hash_key);
      int key_index = -1;
      if (it != hash_table.end ())
      {
        key_index = it->second;
        
        // check if key is the right one
        int tmp_key_index = -1;
        //for (int ii = key_index; ii < keys.size (); ii++)
        for (; it != hash_table.end (); ++it)
        {
          int ii = it->second;
          bool same = true;
          std::vector<short> k = keys[ii];
          for (size_t i_k = 0; i_k < k.size (); i_k++)
          {
            if (key[i_k] != k[i_k])
            {
              same = false;
              break;
            }
          }

          if (same)
          {
            tmp_key_index = ii;
            break;
          }
        }
      
        if (tmp_key_index == -1)
        {
          key_index = static_cast<int> (keys.size ());
          keys.push_back (key);
          hash_table.insert (std::pair<size_t, int> (hash_key, key_index));
        }
        else
          key_index = tmp_key_index;
      }
      
      else
      {  
        key_index = static_cast<int> (keys.size ());
        keys.push_back (key);
        hash_table.insert (std::pair<size_t, int> (hash_key, key_index));
      }
      offset_[ k * (d_ + 1) + remainder ] = static_cast<float> (key_index);
      
      barycentric_[ k * (d_ + 1) + remainder ] = barycentric (remainder);
    }
  }

  // Find the Neighbors of each lattice point
		
  // Get the number of vertices in the lattice
  M_ = static_cast<int> (hash_table.size());
		
  // Create the neighborhood structure
  if (blur_neighbors_.size () > 0) 
    blur_neighbors_.clear ();
  blur_neighbors_.resize ((d_+1)*M_);

  std::vector<short> n1 (d_+1);
  std::vector<short> n2 (d_+1);

  // For each of d+1 axes,
  for (int j = 0; j <= d_; j++)
  {
    for (int i = 0; i < M_; i++)
    {
      std::vector<short> key = keys[i];

      for (int k=0; k<d_; k++){
        n1[k] = static_cast<short> (key[k] - 1);
        n2[k] = static_cast<short> (key[k] + 1);
      }
      n1[j] = static_cast<short> (key[j] + d_);
      n2[j] = static_cast<short> (key[j] - d_);

      std::multimap<size_t ,int>::iterator it;
      size_t hash_key;
      int key_index = -1;      
      hash_key = generateHashKey (n1);
      it = hash_table.find (hash_key);
      if (it != hash_table.end ())
        key_index = it->second;
      blur_neighbors_[j*M_+i].n1 = key_index;

      key_index = -1;
      hash_key = generateHashKey (n2);
      it = hash_table.find (hash_key);
      if (it != hash_table.end ())
        key_index = it->second;
      blur_neighbors_[j*M_+i].n2 = key_index;
    }
  }
}
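
The hash function generateHashKey() used above is not included in this excerpt. A purely illustrative sketch of the kind of mapping the multimap needs, i.e. a deterministic hash over the first d_ components of a lattice key (the real PCL implementation may differ):

// Illustrative sketch (assumption): simple multiplicative hash over the key.
// Collisions are acceptable because the caller re-checks the full key vector
// before reusing an index.
std::size_t
pcl::Permutohedral::generateHashKey (const std::vector<short> &key)
{
  std::size_t hash = 0;
  for (int i = 0; i < d_; ++i)
  {
    hash += static_cast<std::size_t> (key[i]);
    hash *= static_cast<std::size_t> (2531011);  // arbitrary odd multiplier
  }
  return hash;
}
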
Code example #14
// Compute the gradient of a^T K b
void Permutohedral::gradient ( float* df, const float * a, const float* b, int value_size ) const
{
    // Shift all values by 1 such that -1 -> 0 (used for blurring)
    float * values = new float[ (M_+2)*value_size ];
    float * new_values = new float[ (M_+2)*value_size ];

    // Set the results to 0
    std::fill( df, df+N_*d_, 0.f );

    // Initialize some constants
    std::vector<float> scale_factor( d_ );
    float inv_std_dev = sqrt(2.0 / 3.0)*(d_+1);
    for( int i=0; i<d_; i++ )
        scale_factor[i] = 1.0 / sqrt( double((i+2)*(i+1)) ) * inv_std_dev;

    // Alpha is a magic scaling constant multiplied by down_factor
    float alpha = 1.0f / (1+powf(2, -d_)) / (d_+1);

    for( int dir=0; dir<2; dir++ ) {
        for( int i=0; i<(M_+2)*value_size; i++ )
            values[i] = new_values[i] = 0;

        // Splatting
        for( int i=0;  i<N_; i++ ){
            for( int j=0; j<=d_; j++ ){
                int o = offset_[i*(d_+1)+j]+1;
                float w = barycentric_[i*(d_+1)+j];
                for( int k=0; k<value_size; k++ )
                    values[ o*value_size+k ] += w * (dir?b:a)[ i*value_size+k ];
            }
        }

        // BLUR
        for( int j=dir?d_:0; j<=d_ && j>=0; dir?j--:j++ ){
            for( int i=0; i<M_; i++ ){
                float * old_val = values + (i+1)*value_size;
                float * new_val = new_values + (i+1)*value_size;

                int n1 = blur_neighbors_[j*M_+i].n1+1;
                int n2 = blur_neighbors_[j*M_+i].n2+1;
                float * n1_val = values + n1*value_size;
                float * n2_val = values + n2*value_size;
                for( int k=0; k<value_size; k++ )
                    new_val[k] = old_val[k]+0.5*(n1_val[k] + n2_val[k]);
            }
            std::swap( values, new_values );
        }

        // Slicing gradient computation
        std::vector<float> r_a( (d_+1)*value_size ), sm( value_size );

        for( int i=0; i<N_; i++ ){
            // Rotate a
            std::fill( r_a.begin(), r_a.end(), 0.f );
            for( int j=0; j<=d_; j++ ){
                int r0 = d_ - rank_[i*(d_+1)+j];
                int r1 = r0+1>d_?0:r0+1;
                int o0 = offset_[i*(d_+1)+r0]+1;
                int o1 = offset_[i*(d_+1)+r1]+1;
                for( int k=0; k<value_size; k++ ) {
                    r_a[ j*value_size+k ] += alpha*values[ o0*value_size+k ];
                    r_a[ j*value_size+k ] -= alpha*values[ o1*value_size+k ];
                }
            }
            // Multiply by the elevation matrix
            std::copy( r_a.begin(), r_a.begin()+value_size, sm.begin() );
            for( int j=1; j<=d_; j++ ) {
                float grad = 0;
                for( int k=0; k<value_size; k++ ) {
                    // Elevate ...
                    float v = scale_factor[j-1]*(sm[k]-j*r_a[j*value_size+k]);
                    // ... and add
                    grad += (dir?a:b)[ i*value_size+k ]*v;

                    sm[k] += r_a[j*value_size+k];
                }
                // Store the gradient
                df[i*d_+j-1] += grad;
            }
        }
    }
    delete[] values;
    delete[] new_values;
}
Code example #15
File: mvarfit.c  Project: kro/libmvar
static void rescale_R11(gsl_matrix *R11, gsl_vector *scale)
{
    gsl_vector_view vec_view = gsl_matrix_column(R11, 0);
    gsl_vector_scale(&vec_view.vector, scale_factor(scale));
}