Example #1
int MonolingualModel::trainSentence(const string& sent, int sent_id) {
    auto nodes = getNodes(sent);  // same size as sent, OOV words are replaced by <UNK>

    // counts the number of words that are in the vocabulary
    int words = nodes.size() - count(nodes.begin(), nodes.end(), HuffmanNode::UNK);

    if (config.subsampling > 0) {
        subsample(nodes); // puts <UNK> tokens in place of the discarded tokens
    }

    if (nodes.empty()) {
        return words;
    }

    // remove <UNK> tokens
    nodes.erase(
        remove(nodes.begin(), nodes.end(), HuffmanNode::UNK),
        nodes.end());

    // Monolingual training
    for (int pos = 0; pos < nodes.size(); ++pos) {
        trainWord(nodes, pos, sent_id);
    }

    return words; // returns the number of words processed, for progress estimation
}
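Note: the subsample() helper called above is not shown in this example. As a rough, self-contained sketch only (not the project's actual code), word2vec-style frequency subsampling randomly discards frequent tokens; subsampleSketch, token_counts, total_words and the threshold parameter below are illustrative assumptions.

#include <cmath>
#include <random>
#include <vector>

// Hypothetical sketch of word2vec-style frequency subsampling, not this
// project's actual subsample(). Frequent tokens are randomly marked for
// replacement by <UNK> with a probability that grows with their frequency.
std::vector<bool> subsampleSketch(const std::vector<long long>& token_counts,
                                  long long total_words,   // corpus size (assumed)
                                  double threshold)        // e.g. config.subsampling
{
    static std::mt19937 gen{std::random_device{}()};
    std::uniform_real_distribution<double> uniform(0.0, 1.0);

    std::vector<bool> keep(token_counts.size(), true);
    for (std::size_t i = 0; i < token_counts.size(); ++i) {
        if (token_counts[i] <= 0) continue;                 // unknown token: leave as-is
        double freq = static_cast<double>(token_counts[i]) / total_words;
        double p_keep = std::sqrt(threshold / freq) + threshold / freq;  // word2vec formula
        if (uniform(gen) > p_keep)
            keep[i] = false;  // caller would then swap this token for <UNK>
    }
    return keep;
}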
Example #2
QVector<QPoint>
CurveGroup::push(QVector<QPoint> c, QPoint cen, int rad, bool closed)
{
  QVector<QPoint> newc;
  newc = c;
  int npts = c.count();
  for(int i=0; i<npts; i++)
    {
      QPoint v = newc[i] - cen;
      float len = qSqrt(QPoint::dotProduct(v,v));
      if (len <= rad)
	{
	  v /= qMax(1.0f, len); // guard against a zero-length vector (the original qMax(0.0f, len) could divide by zero)
	  for(int j=-rad; j<=rad; j++)
	    {
	      int idx = i+j;
	      if (idx < 0) idx = npts + idx;
	      else if (idx > npts-1) idx = idx - npts;
	      
	      QPoint v0 = newc[idx] - cen;
	      int v0len = qSqrt(QPoint::dotProduct(v0,v0));
	      if (v0len <= rad)
		{
		  float frc = (float)qAbs(qAbs(j)-rad)/(float)rad;
		  newc[idx] = newc[idx] + frc*(rad-len)*v;
		}
	    }
	}
    }
  
  QVector<QPoint> w;
  w = subsample(newc, 1.2, closed);
  
  return w;
}
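Note: CurveGroup::subsample(newc, 1.2, closed), used here and again in Examples #4 and #9, is not shown on this page. The sketch below (subsampleSketch is a hypothetical stand-in, not the actual routine) only illustrates the general idea of resampling a point curve with a minimum spacing.

#include <QPoint>
#include <QVector>
#include <QtMath>

// Hypothetical sketch of curve resampling: keep a point only if it lies at
// least `spacing` away from the last point kept. Illustration only, not
// CurveGroup::subsample itself.
QVector<QPoint> subsampleSketch(const QVector<QPoint>& c, float spacing, bool closed)
{
  QVector<QPoint> out;
  if (c.isEmpty())
    return out;

  out << c[0];
  for (int i = 1; i < c.count(); i++)
    {
      QPoint d = c[i] - out.last();
      if (qSqrt(QPoint::dotProduct(d, d)) >= spacing)
        out << c[i];
    }

  // for a closed curve, drop a final point that would crowd the first one
  if (closed && out.count() > 1)
    {
      QPoint d = out.last() - out.first();
      if (qSqrt(QPoint::dotProduct(d, d)) < spacing)
        out.pop_back();
    }

  return out;
}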
Example #3
File: tracker.hpp  Project: syjman/cuimg
 void tracker<F>::subsample_input(const I& in)
 {
   subsample(in, input_);
   fill_border_clamp(input_);
   if (upper_tracker_)
     upper_tracker_->subsample_input(input_);
 }
Example #4
void
CurveGroup::joinPolygonAt(int key, QVector<QPoint> pts)
{
  int npts = pts.count();

  int mc = getActiveMorphedCurve(key, pts[0].x(), pts[0].y());
  if (mc >= 0)
    {
      Curve c = m_mcg[mc].value(key);
      QVector<QPoint> w = c.pts;
      int ncpts = w.count();
      int start = -1;
      QPoint startPt = pts[0];
      QPoint endPt = pts[npts-1];
      for(int j=0; j<ncpts; j++)
	{
	  int ml = (startPt-w[j]).manhattanLength();
	  if (ml < 3)
	    {
	      start = j;
	      break;
	    }
	}
      int end = -1;
      for(int j=0; j<ncpts; j++)
	{
	  int ml = (endPt-w[j]).manhattanLength();
	  if (ml < 3)
	    {
	      end = j;
	      break;
	    }
	}

      if (start < 0 || end < 0)
	return;
      
      // insert pts into the curve
      QVector<QPoint> newc;
      newc = pts;
      int jend = ncpts;
      if (start > end)
	jend = start;
      for(int j=end; j<jend; j++)
	newc << w[j];
      if (start < end)
	{
	  for(int j=0; j<start; j++)
	    newc << w[j];
	}      

      w = subsample(newc, 1.2, c.closed);
      c.pts = w; // replace pts with the pushed version
      m_mcg[mc].insert(key, c);
    }
}
Example #5
File: inmo.c  Project: krushev36/src
void inmo_oper(int nx, const float *trace, float *trace2, void* user_data)
/*< operator to invert by GMRES >*/
{
    int it;
    
    /* forward operator */
    inmo(trace,dense2);
    subsample(dense2,sparse2);
    /* backward operator */
    interpolate(sparse2,dense2);
    nmostack(dense2,trace2);

    /* I + S (BF - I) */
    for (it=0; it < nt; it++) {
	trace2[it] -= trace[it];
    }
    bandpass(trace2);
    for (it=0; it < nt; it++) {
	trace2[it] += trace[it];
    }
}
Example #6
void RGBDCamera::update(const RawFrame* this_frame) {
  //Check the timestamp, and skip if we have already seen this frame
  if (this_frame->timestamp <= latest_stamp_) {
    return;
  } else {
    latest_stamp_ = this_frame->timestamp;
  }

  //Apply bilateral filter to incoming depth
  uint16_t* filtered_depth;
  cudaMalloc((void**)&filtered_depth, this_frame->width*this_frame->height*sizeof(uint16_t));
  bilateralFilter(this_frame->depth, filtered_depth, this_frame->width, this_frame->height);

  //Convert the input color data to intensity
  float* temp_intensity;
  cudaMalloc((void**)&temp_intensity, this_frame->width*this_frame->height*sizeof(float));
  colorToIntensity(this_frame->color, temp_intensity, this_frame->width*this_frame->height);

  //Create pyramids
  for (int i = 0; i < PYRAMID_DEPTH; i++) {
    //Fill in sizes the first two times through
    if (pass_ < 2) {
      current_icp_frame_[i] = new ICPFrame(this_frame->width/pow(2,i), this_frame->height/pow(2,i));
      current_rgbd_frame_[i] = new RGBDFrame(this_frame->width/pow(2,i), this_frame->height/pow(2,i));
    }

    //Add ICP data
    generateVertexMap(filtered_depth, current_icp_frame_[i]->vertex, current_icp_frame_[i]->width, current_icp_frame_[i]->height, focal_length_, make_int2(this_frame->width, this_frame->height));
    generateNormalMap(current_icp_frame_[i]->vertex, current_icp_frame_[i]->normal, current_icp_frame_[i]->width, current_icp_frame_[i]->height);

    //Add RGBD data
    cudaMemcpy(current_rgbd_frame_[i]->vertex, current_icp_frame_[i]->vertex, current_rgbd_frame_[i]->width*current_rgbd_frame_[i]->height*sizeof(glm::vec3), cudaMemcpyDeviceToDevice);
    cudaMemcpy(current_rgbd_frame_[i]->intensity, temp_intensity, current_rgbd_frame_[i]->width*current_rgbd_frame_[i]->height*sizeof(float), cudaMemcpyDeviceToDevice);

    //Downsample depth and color if not the last iteration
    if (i != (PYRAMID_DEPTH-1)) {
      subsampleDepth(filtered_depth, current_icp_frame_[i]->width, current_icp_frame_[i]->height);
      subsample(temp_intensity, current_rgbd_frame_[i]->width, current_rgbd_frame_[i]->height);
      cudaDeviceSynchronize();
    }
  }

  //Clear the filtered depth and temporary color since they are no longer needed
  cudaFree(filtered_depth);
  cudaFree(temp_intensity);

  if (pass_ >= 1) {
    glm::mat4 update_trans(1.0f);

    //Loop through pyramids backwards (coarse first)
    for (int i = PYRAMID_DEPTH - 1; i >= 0; i--) {

      //Get a copy of the ICP frame for this pyramid level
      ICPFrame icp_f(current_icp_frame_[i]->width, current_icp_frame_[i]->height);
      cudaMemcpy(icp_f.vertex, current_icp_frame_[i]->vertex, icp_f.width*icp_f.height*sizeof(glm::vec3), cudaMemcpyDeviceToDevice);
      cudaMemcpy(icp_f.normal, current_icp_frame_[i]->normal, icp_f.width*icp_f.height*sizeof(glm::vec3), cudaMemcpyDeviceToDevice);

      //Get a copy of the RGBD frame for this pyramid level
      //RGBDFrame rgbd_f(current_rgbd_frame_[i]->width, current_rgbd_frame_[i]->height);
      //cudaMemcpy(rgbd_f.vertex, current_rgbd_frame_[i]->vertex, rgbd_f.width*rgbd_f.height*sizeof(glm::vec3), cudaMemcpyDeviceToDevice);
      //cudaMemcpy(rgbd_f.intensity, current_rgbd_frame_[i]->intensity, rgbd_f.width*rgbd_f.height*sizeof(float), cudaMemcpyDeviceToDevice);

      //Apply the most recent update to the points/normals
      if (i < (PYRAMID_DEPTH-1)) {
        transformVertexMap(icp_f.vertex, update_trans, icp_f.width*icp_f.height);
        transformNormalMap(icp_f.normal, update_trans, icp_f.width*icp_f.height);
        cudaDeviceSynchronize();
      }

      //Loop through iterations
      for (int j = 0; j < PYRAMID_ITERS[i]; j++) {

        //Get the Geometric ICP cost values
        float A1[6 * 6];
        float b1[6];
        computeICPCost2(last_icp_frame_[i], icp_f, A1, b1);

        //Get the Photometric RGB-D cost values
        //float A2[6*6];
        //float b2[6];
        //compueRGBDCost(last_rgbd_frame_, rgbd_f, A2, b2);

        //Combine the two
        //for (size_t k = 0; k < 6; k++) {
          //for (size_t l = 0; l < 6; l++) {
            //A1[6 * k + l] += A2[6 * k + l];
          //}
          //b1[k] += b2[k];
        //}

        //Solve for the optimized camera transformation
        float x[6];
        solveCholesky(6, A1, b1, x);

        //Check for NaN/divergence
        if (isnan(x[0]) || isnan(x[1]) || isnan(x[2]) || isnan(x[3]) || isnan(x[4]) || isnan(x[5])) {
          printf("Camera tracking is lost.\n");
          break;
        }

        //Update position/orientation of the camera
        glm::mat4 this_trans = 
            glm::rotate(glm::mat4(1.0f), -x[2] * 180.0f / 3.14159f, glm::vec3(0.0f, 0.0f, 1.0f)) 
          * glm::rotate(glm::mat4(1.0f), -x[1] * 180.0f / 3.14159f, glm::vec3(0.0f, 1.0f, 0.0f))
          * glm::rotate(glm::mat4(1.0f), -x[0] * 180.0f / 3.14159f, glm::vec3(1.0f, 0.0f, 0.0f)) 
          * glm::translate(glm::mat4(1.0f), glm::vec3(x[3], x[4], x[5]));

        update_trans = this_trans * update_trans;
        
        //Apply the update to the points/normals
        if (j < (PYRAMID_ITERS[i] - 1)) {
          transformVertexMap(icp_f.vertex, this_trans, icp_f.width*icp_f.height);
          transformNormalMap(icp_f.normal, this_trans, icp_f.width*icp_f.height);
          cudaDeviceSynchronize();
        }

      }
    }
    //Update the global transform with the result
    position_ = glm::vec3(glm::vec4(position_, 1.0f) * update_trans);
    orientation_ = glm::mat3(glm::mat4(orientation_) * update_trans);
  }

  if (pass_ < 2) {
    pass_++;
  }

  //Swap current and last frames
  for (int i = 0; i < PYRAMID_DEPTH; i++) {
    ICPFrame* temp = current_icp_frame_[i];
    current_icp_frame_[i] = last_icp_frame_[i];
    last_icp_frame_[i] = temp;
    //TODO: Longterm, only RGBD should do this. ICP should not swap, as last_frame should be updated by a different function
    RGBDFrame* temp2 = current_rgbd_frame_[i];
    current_rgbd_frame_[i] = last_rgbd_frame_[i];
    last_rgbd_frame_[i] = temp2;
  }

}
Example #7
    typename EMSubpixelCorrelatorView<ImagePixelT>::prerasterize_type
    EMSubpixelCorrelatorView<ImagePixelT>::prerasterize(BBox2i const& bbox) const {
      vw_out(InfoMessage, "stereo") << "EMSubpixelCorrelatorView: rasterizing image block " << bbox << ".\n";

      // Find the range of disparity values for this patch.
      // int num_good; // not used
      BBox2i search_range;
      try {
        search_range = get_disparity_range(crop(m_course_disparity, bbox));
      }
      catch (const std::exception& e) {
        search_range = BBox2i();
      }


#ifdef USE_GRAPHICS
      ImageWindow window;
      if(debug_level >= 0) {
        window = vw_create_window("disparity");
      }
#endif

      // The area in the right image that we'll be searching is
      // determined by the bbox of the left image plus the search
      // range.
      BBox2i left_crop_bbox(bbox);
      BBox2i right_crop_bbox(bbox.min() + search_range.min(),
                             bbox.max() + search_range.max());

      // The correlator requires the images to be the same size. The
      // search bbox will always be larger than the given left image
      // bbox, so we just make the left bbox the same size as the
      // right bbox.
      left_crop_bbox.max() = left_crop_bbox.min() + Vector2i(right_crop_bbox.width(), right_crop_bbox.height());

      // Finally, we must adjust both bounding boxes to account for
      // the size of the kernel itself.
      right_crop_bbox.min() -= Vector2i(m_kernel_size[0], m_kernel_size[1]);
      right_crop_bbox.max() += Vector2i(m_kernel_size[0], m_kernel_size[1]);
      left_crop_bbox.min() -= Vector2i(m_kernel_size[0], m_kernel_size[1]);
      left_crop_bbox.max() += Vector2i(m_kernel_size[0], m_kernel_size[1]);

      // We crop the images to the expanded bounding box and edge
      // extend in case the new bbox extends past the image bounds.
      ImageView<ImagePixelT> left_image_patch, right_image_patch;
      ImageView<disparity_pixel> disparity_map_patch_in;
      ImageView<result_type> disparity_map_patch_out;


      left_image_patch = crop(edge_extend(m_left_image, ZeroEdgeExtension()),
                              left_crop_bbox);
      right_image_patch = crop(edge_extend(m_right_image, ZeroEdgeExtension()),
                               right_crop_bbox);
      disparity_map_patch_in = crop(edge_extend(m_course_disparity, ZeroEdgeExtension()),
                                    left_crop_bbox);
      disparity_map_patch_out.set_size(disparity_map_patch_in.cols(), disparity_map_patch_in.rows());


      // Adjust the disparities to be relative to the cropped
      // image pixel locations
      for (int v = 0; v < disparity_map_patch_in.rows(); ++v) {
        for (int u = 0; u < disparity_map_patch_in.cols(); ++u) {
          if (disparity_map_patch_in(u,v).valid())  {
            disparity_map_patch_in(u,v).child().x() -= search_range.min().x();
            disparity_map_patch_in(u,v).child().y() -= search_range.min().y();
          }
        }
      }


      double blur_sigma_progressive = .5; // 3*sigma = 1.5 pixels

      // create the pyramid first
      std::vector<ImageView<ImagePixelT> > left_pyramid(pyramid_levels), right_pyramid(pyramid_levels);
      std::vector<BBox2i> regions_of_interest(pyramid_levels);
      std::vector<ImageView<Matrix2x2> > warps(pyramid_levels);
      std::vector<ImageView<disparity_pixel> > disparity_map_pyramid(pyramid_levels);


      // initialize the pyramid at level 0
      left_pyramid[0] = channels_to_planes(left_image_patch);
      right_pyramid[0] = channels_to_planes(right_image_patch);
      disparity_map_pyramid[0] = disparity_map_patch_in;
      regions_of_interest[0] = BBox2i(m_kernel_size[0], m_kernel_size[1],
                                      bbox.width(),bbox.height());


      // downsample the disparity map and the image pair to initialize the intermediate levels
      for(int i = 1; i < pyramid_levels; i++) {
        left_pyramid[i] = subsample(gaussian_filter(left_pyramid[i-1], blur_sigma_progressive), 2);
        right_pyramid[i] = subsample(gaussian_filter(right_pyramid[i-1], blur_sigma_progressive), 2);

        disparity_map_pyramid[i] = detail::subsample_disp_map_by_two(disparity_map_pyramid[i-1]);
        regions_of_interest[i] = BBox2i(regions_of_interest[i-1].min()/2, regions_of_interest[i-1].max()/2);
      }

      // initialize warps at the lowest resolution level
      warps[pyramid_levels-1].set_size(left_pyramid[pyramid_levels-1].cols(),
                                       left_pyramid[pyramid_levels-1].rows());
      for(int y = 0; y < warps[pyramid_levels-1].rows(); y++) {
        for(int x = 0; x < warps[pyramid_levels-1].cols(); x++) {
          warps[pyramid_levels-1](x, y).set_identity();
        }
      }

#ifdef USE_GRAPHICS
      vw_initialize_graphics(0, NULL);
      if(debug_level >= 0) {
        for(int i = 0; i < pyramid_levels; i++) {
          vw_show_image(window, left_pyramid[i]);
          usleep((int)(.2*1000*1000));
        }
      }
#endif

      // go up the pyramid; first run refinement, then upsample result for the next level
      for(int i = pyramid_levels-1; i >=0; i--) {
        vw_out() << "processing pyramid level "
                  << i << " of " << pyramid_levels-1 << std::endl;

        if(debug_level >= 0) {
          std::stringstream stream;
          stream << "pyramid_level_" << i << ".tif";
          write_image(stream.str(), disparity_map_pyramid[i]);
        }

        ImageView<ImagePixelT> process_left_image = left_pyramid[i];
        ImageView<ImagePixelT> process_right_image = right_pyramid[i];

        if(i > 0) { // in this case refine the upsampled disparity map from the previous level,
          // then upsample the result for the next level
          m_subpixel_refine(edge_extend(process_left_image, ZeroEdgeExtension()), edge_extend(process_right_image, ZeroEdgeExtension()),
                            disparity_map_pyramid[i], disparity_map_pyramid[i], warps[i],
                            regions_of_interest[i], false, debug_level == i);

          // upsample the warps and the refined map for the next level of processing
          int up_width = left_pyramid[i-1].cols();
          int up_height = left_pyramid[i-1].rows();
          warps[i-1] = copy(resize(warps[i], up_width , up_height, ConstantEdgeExtension(), NearestPixelInterpolation())); //upsample affine transforms
          disparity_map_pyramid[i-1] = copy(detail::upsample_disp_map_by_two(disparity_map_pyramid[i], up_width, up_height));
        }
        else { // here there is no next level so we refine directly to the output patch
          m_subpixel_refine(edge_extend(process_left_image, ZeroEdgeExtension()), edge_extend(process_right_image, ZeroEdgeExtension()),
                            disparity_map_pyramid[i], disparity_map_patch_out, warps[i],
                            regions_of_interest[i], true, debug_level == i);
        }
      }

#ifdef USE_GRAPHICS
      if(debug_level >= 0) {
        vw_show_image(window, .5 + select_plane(channels_to_planes(disparity_map_patch_out)/6., 0));
        usleep(10*1000*1000);
      }
#endif

      // Undo the above adjustment
      for (int v = 0; v < disparity_map_patch_out.rows(); ++v) {
        for (int u = 0; u < disparity_map_patch_out.cols(); ++u) {
          if (disparity_map_patch_out(u,v).valid())  {
            disparity_map_patch_out(u,v).child().x() += search_range.min().x();
            disparity_map_patch_out(u,v).child().y() += search_range.min().y();
          }
        }
      }

#ifdef USE_GRAPHICS
      if(debug_level >= 0 ) {
        vw_destroy_window(window);
      }
#endif

      return crop(disparity_map_patch_out, BBox2i(m_kernel_size[0]-bbox.min().x(),
                                                  m_kernel_size[1]-bbox.min().y(),
                                                  m_left_image.cols(),
                                                  m_left_image.rows()));
    }
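Note: in this example, subsample(gaussian_filter(...), 2) produces each coarser pyramid level by blurring and then keeping every second pixel. The sketch below shows plain decimation on a row-major buffer as an illustration of that step; subsampleSketch is hypothetical and is not Vision Workbench's subsample(), which operates on lazy image views.

#include <cstddef>
#include <vector>

// Hypothetical sketch of decimation by an integer factor on a row-major image:
// keep every `factor`-th pixel in x and y. As in the example above, a Gaussian
// blur is normally applied first so the decimation does not alias.
std::vector<float> subsampleSketch(const std::vector<float>& img,
                                   int cols, int rows, int factor)
{
  const int out_cols = cols / factor;
  const int out_rows = rows / factor;
  std::vector<float> out(static_cast<std::size_t>(out_cols) * out_rows);

  for (int y = 0; y < out_rows; ++y)
    for (int x = 0; x < out_cols; ++x)
      out[static_cast<std::size_t>(y) * out_cols + x] =
          img[static_cast<std::size_t>(y) * factor * cols + x * factor];
  return out;
}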
Example #8
//---------------------------------------------------------------
// START FUNC DECL
int 
parsort1(
       char *tbl,
       char *f1,
       char *f2,
       char *up_or_down /* not used right now */
       )
// STOP FUNC DECL
{
  int status = 0;
  char *f1_X = NULL; size_t f1_nX = 0;
  char *op_X = NULL; size_t op_nX = 0;
  char *cnt_X = NULL; size_t cnt_nX = 0;
  char *t2f2_X = NULL; size_t t2f2_nX = 0;
  FLD_TYPE *f1_meta = NULL; 
  FLD_TYPE *f2_meta = NULL; 
  FLD_TYPE *t2f2_meta = NULL; 
  FLD_TYPE *cnt_meta = NULL; 
  long long nR, nR2;
  int tbl_id = INT_MIN, f1_id = INT_MIN, f2_id = INT_MIN, cnt_id = INT_MIN;
  int t2f2_id = INT_MIN;
  char str_meta_data[1024];
  FILE *ofp = NULL; char *opfile = NULL;
  FILE *tfp = NULL; char *tempfile = NULL;
  char str_rslt[32]; zero_string(str_rslt, 32);
  char t2[MAX_LEN_TBL_NAME]; 
  int itemp;
  int *xxx = NULL, *f1lb = NULL, *f1ub = NULL; 
  long long *count = NULL, *chk_count = NULL;
  int **offsets = NULL, **bak_offsets = NULL;
  int *inptr = NULL;
  // For multi-threading 
  int rc; // result code for thread create 
  pthread_t threads[MAX_NUM_THREADS];
  pthread_attr_t attr;
  void *thread_status;
  // START: For timing
  struct timeval Tps;
  struct timezone Tpf;
  void *Tzp = NULL;
  long long t_before_sec = 0, t_before_usec = 0, t_before = 0;
  long long t_after_sec, t_after_usec, t_after;
  long long t_delta_usec;
  // STOP : For timing
  //----------------------------------------------------------------
  if ( ( tbl == NULL ) || ( *tbl == '\0' ) ) { go_BYE(-1); }
  if ( ( f1 == NULL ) || ( *f1 == '\0' ) ) { go_BYE(-1); }
  if ( ( f2 == NULL ) || ( *f2 == '\0' ) ) { go_BYE(-1); }
  zero_string(str_meta_data, 1024);
  /* t2 is a temporary table */
  zero_string(t2, MAX_LEN_TBL_NAME);
  status = qd_uq_str(t2, MAX_LEN_TBL_NAME);
  strcpy(t2, "t2"); // TODO DELETE THIS 
  g_offsets = NULL;
  g_count = NULL;
  //--------------------------------------------------------
  status = is_tbl(tbl, &tbl_id); cBYE(status);
  chk_range(tbl_id, 0, g_n_tbl);
  nR = g_tbl[tbl_id].nR;
  status = is_fld(NULL, tbl_id, f1, &f1_id); cBYE(status);
  chk_range(f1_id, 0, g_n_fld);
  f1_meta = &(g_fld[f1_id]);
  status = rs_mmap(f1_meta->filename, &f1_X, &f1_nX, 0); 
  cBYE(status);
  // Not implemented for following cases 
  if ( g_fld[f1_id].nn_fld_id >= 0 ) { go_BYE(-1); }
  if ( strcmp(f1_meta->fldtype, "int") != 0 ) { go_BYE(-1); }
  if ( nR <= 1048576 ) { go_BYE(-1); }
  //---------------------------------------------
  status = gettimeofday(&Tps, &Tpf); cBYE(status);
  t_before_sec  = (long long)Tps.tv_sec;
  t_before_usec = (long long)Tps.tv_usec;
  t_before = t_before_sec * 1000000 + t_before_usec;

  int reduction_factor = (int)(sqrt((double)nR));
  sprintf(str_rslt, "%d", reduction_factor);
  status = subsample(tbl, f1, str_rslt, t2, "f2"); cBYE(status);

  status = gettimeofday(&Tps, &Tpf); cBYE(status);
  t_after_sec  = (long long)Tps.tv_sec;
  t_after_usec = (long long)Tps.tv_usec;
  t_after = t_after_sec * 1000000 + t_after_usec;
  fprintf(stderr, "TIME0 = %lld \n", t_after - t_before); 
  t_before = t_after;


  // Must have sufficient diversity of values
  status = f1opf2(t2, "f2", "op=shift:val=-1", "nextf2"); cBYE(status);
  status = drop_nn_fld(t2, "nextf2"); cBYE(status);
  status = f1f2opf3(t2, "f2", "nextf2", "==", "x"); cBYE(status);
  status = f_to_s(t2, "x", "sum", str_rslt);
  char *endptr;
  long long lltemp = strtoll(str_rslt, &endptr, 10);
  if ( lltemp != 0 ) { go_BYE(-1); }
  //-------------------------------------------------
  // Get range of values of f1 
  status = f_to_s(tbl, f1, "max", str_rslt);
  int f1max = strtoll(str_rslt, &endptr, 10);
  status = f_to_s(tbl, f1, "min", str_rslt);
  int f1min = strtoll(str_rslt, &endptr, 10);
  //-------------------------------------------------
  // Now we sort the values that we sampled
  status = fop(t2, "f2", "sortA"); cBYE(status);
  // status = pr_fld(t2, "f2", "", stdout);
  status = get_nR(t2, &nR2);
  // Now each thread selects a range to work on
  int nT;
  for ( int i = 0; i < MAX_NUM_THREADS; i++ ) { 
    g_thread_id[i] = i;
  }
  status = get_num_threads(&nT);
  cBYE(status);
  //--------------------------------------------
#define MIN_ROWS_FOR_PARSORT1 1048576
  if ( nR <= MIN_ROWS_FOR_PARSORT1 ) {
    nT = 1;
  }
  /* Don't create more threads than you can use */
  if ( nT > nR ) { nT = nR; }
  //--------------------------------------------

  double block_size = (double)nR2 / (double)nT;
  status = is_fld(t2, -1, "f2", &t2f2_id); cBYE(status);
  chk_range(t2f2_id, 0, g_n_fld);
  t2f2_meta = &(g_fld[t2f2_id]);
  status = rs_mmap(t2f2_meta->filename, &t2f2_X, &t2f2_nX, 0); 
  cBYE(status);
  int *iptr = (int *)t2f2_X;
  xxx = malloc(nT * sizeof(int)); return_if_malloc_failed(xxx);
  f1lb = malloc(nT * sizeof(int)); return_if_malloc_failed(f1lb);
  f1ub = malloc(nT * sizeof(int)); return_if_malloc_failed(f1ub);
  /* FOR OLD_WAY 
  count = malloc(nT * sizeof(long long)); return_if_malloc_failed(count);
  */
  chk_count = malloc(nT * sizeof(long long));
  return_if_malloc_failed(chk_count);
  g_count = malloc(nT * sizeof(long long)); return_if_malloc_failed(g_count);

  for ( int i = 0; i < nT; i++ ) { 
    // FOR OLD_WAY count[i]= 0;
    chk_count[i]= 0;
    int j = i+1;
    long long idx = j * block_size;
    if ( idx >= nR2 ) { idx = nR2 -1 ; }
    int y = iptr[idx];
    xxx[i] = y;
    // fprintf(stdout,"idx = %lld: j = %d: y = %d \n", idx, j, y);
  }
  for ( int i = 0; i < nT; i++ ) { 
    if ( ( i == 0 ) && ( i == (nT - 1 ) ) ) {
      f1lb[i] = f1min;
      f1ub[i] = f1max;
    }
    else if ( i == 0 ) { 
      f1lb[i] = f1min;
      f1ub[i] = xxx[i];
    }
    else if ( i == (nT -1 ) ) {
      f1lb[i] = xxx[i-1] + 1;
      f1ub[i] = f1max;
    }
    else {
      f1lb[i] = xxx[i-1] + 1;
      f1ub[i] = xxx[i];
    }
  }
  // STOP: Each thread has now a range to work on
  // Create a temporary table t3 to store ranges
  char t3[MAX_LEN_TBL_NAME]; int t3_id;
  zero_string(t3, MAX_LEN_TBL_NAME);
  status = qd_uq_str(t3, MAX_LEN_TBL_NAME);
  strcpy(t3, "t3"); // TODO DELETE THIS 
  sprintf(str_rslt, "%d", nT);
  status = add_tbl(t3, str_rslt, &t3_id);

  // Add lower bound to t3
  status = open_temp_file(&tfp, &tempfile, -1); cBYE(status);
  fclose_if_non_null(tfp);
  tfp = fopen(tempfile, "wb"); return_if_fopen_failed(tfp, tempfile, "wb");
  fwrite(f1lb, sizeof(int),  nT, tfp); 
  fclose_if_non_null(tfp);
  sprintf(str_meta_data, "fldtype=%s:n_sizeof=%d:filename=%s",
      f1_meta->fldtype, f1_meta->n_sizeof, tempfile);
  status = add_fld(t3, "lb", str_meta_data, &itemp); cBYE(status);
  free_if_non_null(tempfile);

  // Add upper bound to t3
  status = open_temp_file(&tfp, &tempfile, -1); cBYE(status);
  fclose_if_non_null(tfp);
  tfp = fopen(tempfile, "wb"); return_if_fopen_failed(tfp, tempfile, "wb");
  fwrite(f1ub, sizeof(int),  nT, tfp); 
  fclose_if_non_null(tfp);
  sprintf(str_meta_data, "fldtype=%s:n_sizeof=%d:filename=%s",
      f1_meta->fldtype, f1_meta->n_sizeof, tempfile);
  status = add_fld(t3, "ub", str_meta_data, &itemp); cBYE(status);
  free_if_non_null(tempfile);

#undef OLD_WAY
#ifdef OLD_WAY
  // Now we count how much there is in each range 
  inptr = (int *)f1_X;
  for ( long long i = 0; i < nR; i++ ) { 
    int ival = *inptr++;
    int range_idx = INT_MIN;
    // TODO: Improve sequential search
    for ( int j = 0; j < nT; j++ ) { 
      if ( ival >= f1lb[j] && ( ival <= f1ub[j] ) ) {
	range_idx = j;
	break;
      }
    }
    count[range_idx]++;
  }
  /*
  for ( int i = 0; i < nT; i++ ) { 
    fprintf(stdout,"%d: (%d, %d) = %lld \n", i, f1lb[i], f1ub[i], count[i]);
  }
  */
#else
  status = num_in_range(tbl, f1, t3, "lb", "ub", "cnt"); cBYE(status);
  // Get a pointer to the count field 
  status = is_tbl(t3, &t3_id);
  chk_range(t3_id, 0, g_n_tbl);
  status = is_fld(NULL, t3_id, "cnt", &cnt_id);
  chk_range(cnt_id, 0, g_n_fld);
  cnt_meta = &(g_fld[cnt_id]); 
  status = rs_mmap(cnt_meta->filename, &cnt_X, &cnt_nX, 0); cBYE(status);
  count = (long long *)cnt_X;
#endif
  status = gettimeofday(&Tps, &Tpf); cBYE(status);
  t_after_sec  = (long long)Tps.tv_sec;
  t_after_usec = (long long)Tps.tv_usec;
  t_after = t_after_sec * 1000000 + t_after_usec;
  fprintf(stderr, "TIME1 = %lld \n", t_after - t_before); 
  t_before = t_after;


  bak_offsets = malloc(nT * sizeof(int *)); return_if_malloc_failed(bak_offsets);
  g_offsets = malloc(nT * sizeof(int *)); return_if_malloc_failed(g_offsets);
#ifdef OLD_WAY
  // Make space for output 
  long long filesz = nR * f1_meta->n_sizeof;
  status = open_temp_file(&ofp, &opfile, filesz); cBYE(status);
  status = mk_file(opfile, filesz); cBYE(status);
  status = rs_mmap(opfile, &op_X, &op_nX, 1); cBYE(status);
  offsets = malloc(nT * sizeof(int *)); return_if_malloc_failed(offsets);
  long long cum_count = 0;
  for ( int i = 0; i < nT; i++ ) {
    bak_offsets[i] = offsets[i] = (int *)op_X;
    if ( i > 0 ) {
      cum_count += count[i-1];
      offsets[i] += cum_count;
      bak_offsets[i] = offsets[i];
    }
  }

  inptr = (int *)f1_X;
  // Now we place each item into its thread bucket
  for ( long long i = 0; i < nR; i++ ) { 
    int ival = *inptr++;
    int range_idx = INT_MIN;
    // TODO: Improve sequential search
    for ( int j = 0; j < nT; j++ ) { 
      if ( ival >= f1lb[j] && ( ival <= f1ub[j] ) ) {
	range_idx = j;
	break;
      }
    }
    int *xptr = offsets[range_idx];
    *xptr = ival;
    offsets[range_idx]++;
    chk_count[range_idx]++;
    if ( chk_count[range_idx] > count[range_idx] ) {
      go_BYE(-1);
    }
  }
  cum_count = 0;
  for ( int i = 0; i < nT-1; i++ ) { 
    if ( offsets[i] != bak_offsets[i+1] ) { 
      go_BYE(-1);
    }
  }
#else
  status = mv_range(tbl, f1, f2, t3, "lb", "ub", "cnt"); 
  cBYE(status);
  status = is_fld(NULL, tbl_id, f2, &f2_id);
  chk_range(f2_id, 0, g_n_fld);
  f2_meta = &(g_fld[f2_id]); 
  status = rs_mmap(f2_meta->filename, &op_X, &op_nX, 1); cBYE(status);
#endif

  long long cum_count = 0;
  for ( int i = 0; i < nT; i++ ) {
    bak_offsets[i] = (int *)op_X;
    if ( i > 0 ) {
      cum_count += count[i-1];
      bak_offsets[i] += cum_count;
    }
  }

  status = gettimeofday(&Tps, &Tpf); cBYE(status);
  t_after_sec  = (long long)Tps.tv_sec;
  t_after_usec = (long long)Tps.tv_usec;
  t_after = t_after_sec * 1000000 + t_after_usec;
  fprintf(stderr, "TIME2 = %lld \n", t_after - t_before); 
  t_before = t_after;

  // Set up global variables
  g_nT = nT;
  for ( int i = 0; i < nT; i++ ) { 
    g_offsets[i] = bak_offsets[i];
    g_count[i] = count[i];
  }
  if ( g_nT == 1 ) { 
    core_parsort1(&(g_thread_id[0]));
  }
  else {
    pthread_attr_init(&attr);
    pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
    for ( int t = 0; t < g_nT; t++ ) { 
      rc = pthread_create(&threads[t], NULL, core_parsort1,
	  &(g_thread_id[t]));
      if ( rc ) { go_BYE(-1); }
    }
    /* Free attribute and wait for the other threads */
    pthread_attr_destroy(&attr);
    for ( int t = 0; t < g_nT; t++ ) { 
      rc = pthread_join(threads[t], &thread_status);
      if ( rc ) { go_BYE(-1); }
    }
  }
  /* SEQUENTIAL CODE 
  for ( int i = 0; i < nT; i++ ) { 
    qsort_asc_int(bak_offsets[i], count[i], sizeof(int), NULL);
  }
  */
  status = gettimeofday(&Tps, &Tpf); cBYE(status);
  t_after_sec  = (long long)Tps.tv_sec;
  t_after_usec = (long long)Tps.tv_usec;
  t_after = t_after_sec * 1000000 + t_after_usec;
  fprintf(stderr, "TIME3 = %lld \n", t_after - t_before); 

  // Indicate the dst_fld is sorted ascending
  status = set_fld_info(tbl, f2, "sort=1");


  rs_munmap(op_X, op_nX);
  status = del_tbl(t2, -1); cBYE(status);
  status = del_tbl(t3, -1); cBYE(status);
BYE:
  rs_munmap(op_X, op_nX);
  rs_munmap(cnt_X, cnt_nX);
  free_if_non_null(xxx);
  free_if_non_null(f1lb);
  free_if_non_null(f1ub);
  // Do not delete unless using OLD_WAY free_if_non_null(count);
  free_if_non_null(g_count);
  free_if_non_null(g_offsets);
  free_if_non_null(offsets);
  free_if_non_null(bak_offsets);
  free_if_non_null(chk_count);

  fclose_if_non_null(ofp);
  g_write_to_temp_dir = false;
  rs_munmap(f1_X, f1_nX);
  rs_munmap(op_X, op_nX);
  free_if_non_null(opfile);
  return(status);
}
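Note: stripped of the table/field helpers, this routine follows a common pattern: sample the column to choose per-thread value ranges, scatter rows into contiguous buckets by range, then sort each bucket concurrently so the concatenation is globally sorted. The sketch below shows that pattern with standard C++ threads; parallelRangeSort, its names, and the sampling stride are illustrative assumptions, not part of this code base.

#include <algorithm>
#include <cstddef>
#include <thread>
#include <vector>

// Hypothetical sketch of the partition-then-sort pattern used above:
// 1) sample the input to choose nT-1 splitters,
// 2) scatter values into nT buckets by splitter range,
// 3) sort each bucket in its own thread; the concatenation is then sorted.
std::vector<int> parallelRangeSort(const std::vector<int>& in, int nT)
{
  if (in.empty() || nT < 1) return {};

  // 1) splitters taken from a sorted sample (stride of 1024 is illustrative)
  std::vector<int> sample;
  for (std::size_t i = 0; i < in.size(); i += 1024)
    sample.push_back(in[i]);
  std::sort(sample.begin(), sample.end());

  std::vector<int> splitters;
  for (int t = 1; t < nT; ++t)
    splitters.push_back(sample[sample.size() * t / nT]);

  // 2) scatter values into buckets by range
  std::vector<std::vector<int>> buckets(nT);
  for (int v : in) {
    std::size_t b = std::upper_bound(splitters.begin(), splitters.end(), v)
                    - splitters.begin();
    buckets[b].push_back(v);
  }

  // 3) sort each bucket concurrently, then concatenate in bucket order
  std::vector<std::thread> workers;
  for (auto& bucket : buckets)
    workers.emplace_back([&bucket] { std::sort(bucket.begin(), bucket.end()); });
  for (auto& w : workers)
    w.join();

  std::vector<int> out;
  out.reserve(in.size());
  for (const auto& bucket : buckets)
    out.insert(out.end(), bucket.begin(), bucket.end());
  return out;
}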
Example #9
QVector<QPoint>
CurveGroup::smooth(QVector<QPoint> c, QPoint cen, int rad, bool closed)
{
  QVector<QPoint> newc;
  newc = c;
  int npts = c.count();
  int sz = rad;

//  QMultiMap<int, int> inRad;
//  for(int i=1; i<npts-1; i++)
//    {
//      QPoint v = c[i] - cen;
//      if (v.manhattanLength() <= rad)
//	inRad.insert(v.manhattanLength(), i);
//    }
//
//  QList<int> keys = inRad.keys();
//  for(int k=0; k<keys.count(); k++)
//    {
//      QList<int> ipts = inRad.values(keys[k]);
//      for(int p=0; p<ipts.count(); p++)
//	{
//	  int i = ipts[p];
//	  QPoint v = c[i];
//	  for(int j=-sz; j<=sz; j++)
//	    {
//	      int idx = i+j;
//	      if (closed)
//		{
//		  if (idx < 0) idx = npts + idx;
//		  else if (idx > npts-1) idx = idx - npts;
//		}
//	      else
//		idx = qBound(0, idx, npts-1);
//	      v += newc[idx];
//	    }
//	  v /= (2*sz+2);
//	  newc[i] = v;
//	}
//    }

  for(int i=0; i<npts-1; i++)
    {
      QPoint v = c[i] - cen;
      if (v.manhattanLength() <= rad)
	{
	  //v = QPoint(0,0);
	  v = c[i];
	  for(int j=-sz; j<=sz; j++)
	    {
	      int idx = i+j;
	      if (closed)
		{
		  if (idx < 0) idx = npts + idx;
		  else if (idx > npts-1) idx = idx - npts;
		}
	      else
		idx = qBound(0, idx, npts-1);
	      v += c[idx];
	    }
	  v /= (2*sz+2);
	  newc[i] = v;
	}
    }

  QVector<QPoint> w;
  w = subsample(newc, 1.2, closed);

  return w;
}
Example #10
int main( int argc, char *argv[] )
{
    if ( (argc == 2) &&
         (   (GF2::console::find_switch(argc,argv,"--help"))
          || (GF2::console::find_switch(argc,argv,"-h"    ))
         )
       )
    {
        std::cout << "[Usage]:\n"
                  << "\t--generate\n"
                  << "\t--generate3D\n"
                  << "\t--formulate\n"
                  << "\t--formulate3D\n"
                  << "\t--solver mosek|bonmin|gurobi\n"
                  << "\t--solver3D mosek|bonmin|gurobi\n"
                  << "\t--merge\n"
                  << "\t--merge3D\n"
                  << "\t--datafit\n"
                  << "\t--corresp\n"
                  //<< "\t--show\n"
                  << std::endl;

        return EXIT_SUCCESS;
    }
    else if ( GF2::console::find_switch(argc,argv,"--segment") || GF2::console::find_switch(argc,argv,"--segment3D") )
    {
       return segment( argc, argv );
    }
    else if ( GF2::console::find_switch(argc,argv,"--generate") || GF2::console::find_switch(argc,argv,"--generate3D") )
    {
        return generate(argc,argv);
    }
    else if ( GF2::console::find_switch(argc,argv,"--formulate") || GF2::console::find_switch(argc,argv,"--formulate3D"))
    {
        return formulate( argc, argv );
        //return GF2::ProblemSetup::formulateCli<GF2::Solver::PrimitiveContainerT, GF2::Solver::PointContainerT>( argc, argv );
    }
    else if ( GF2::console::find_switch(argc,argv,"--solver") || GF2::console::find_switch(argc,argv,"--solver3D") ) // Note: "solver", not "solve" :-S
    {
        return solve( argc, argv );
        //return GF2::Solver::solve( argc, argv );
    }
    else if ( GF2::console::find_switch(argc,argv,"--datafit") || GF2::console::find_switch(argc,argv,"--datafit3D") )
    {
        return datafit( argc, argv );
        //return GF2::Solver::datafit( argc, argv );
    }
    else if ( GF2::console::find_switch(argc,argv,"--merge") || GF2::console::find_switch(argc,argv,"--merge3D") )
    {
        return merge(argc, argv);
    }
    else if ( GF2::console::find_switch(argc,argv,"--show") )
    {
        std::cerr << "[" << __func__ << "]: " << "the show option has been moved to a separate executable, please use that one" << std::endl;
        return 1;
        //return GF2::Solver::show( argc, argv );
    }
    else if ( GF2::console::find_switch(argc,argv,"--subsample") )
    {
        return subsample( argc, argv );
    }
//    else if ( GF2::console::find_switch(argc,argv,"--corresp") || GF2::console::find_switch(argc,argv,"--corresp3D") )
//    {
//        return corresp( argc, argv );
//    }

    std::cerr << "[" << __func__ << "]: " << "unrecognized option" << std::endl;
    return 1;

    // --show --dir . --cloud cloud.ply --scale 0.05f --assoc points_primitives.txt --use-tags --no-clusters --prims primitives.bonmin.txt

//    std::string img_path( "input2.png" );
//    pcl::console::parse_argument( argc, argv, "--img", img_path );
//    float scale = 0.1f;
//    pcl::console::parse_argument( argc, argv, "--scale", scale );

//    return GF2::Solver::run( img_path, scale, {0, M_PI_2, M_PI}, argc, argv );
}