Example #1
0
void energy::Fitting::cleanup() {
    kernel_cleanup(); ///< disposes of static resources

    render_color.cleanup();
    render_xyz.cleanup();
    render_normals.cleanup();
    sensor_depth.cleanup();
    distance_transform.cleanup();

    cudax::CublasHelper::cleanup();
    cudax::CudaHelper::cleanup();
}
Example #2
0
void energy::Fitting::init(Worker *worker)
{
    this->camera = worker->camera;
    this->offscreenrend = &(worker->offscreenrend);
    this->sensor_depth_texture = worker->sensor_depth_texture;
    this->skeleton = worker->skeleton;
    this->cylinders = worker->cylinders;
    this->handfinder = worker->handfinder;

    ///--- 3D fitting
    tw_settings->tw_add(settings->fit3D_enable,"E_3D (enable)","group=Fitting");
    tw_settings->tw_add(settings->fit3D_weight,"E_3D (weight)","group=Fitting");
    tw_settings->tw_add(settings->fit3D_reweight,"E_3D (l1nrm?)","group=Fitting");
    tw_settings->tw_add(settings->fit3D_backface_check,"E_3D (occlus?)","group=Fitting");
    tw_settings->tw_add(settings->fit3D_point2plane,"E_3D (p2P?)","group=Fitting");
    ///--- 2D fitting
    tw_settings->tw_add(settings->fit2D_enable,"E_2D (enable)","group=Fitting");
    tw_settings->tw_add(settings->fit2D_weight,"E_2D (weight)","group=Fitting");

#ifdef WITH_CUDA
    cudax::CudaHelper::init();
    cudax::CublasHelper::init();

    ///--- Run some tests before we get started
    kernel_memory_tests();

    ///--- Init worker for GPU computation of normals
    distance_transform.init(camera->width(), camera->height());

    ///--- init resource mapper for cuda
    render_color.init(offscreenrend->fb->color_tex_id());
    render_xyz.init(offscreenrend->fb->extra_tex_id());
    render_normals.init(offscreenrend->fb->norms_tex_id());
    sensor_depth.init(sensor_depth_texture->texid());

    // LOG(INFO ) << camera->inv_projection_matrix();
    kernel_init(this->settings, camera->width(), camera->height(), num_thetas, camera->focal_length_x(), camera->focal_length_y(), camera->inv_projection_matrix().data());
    CHECK_ERROR_GL();
#endif
}
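The render_color, render_xyz, render_normals, and sensor_depth objects initialized above expose OpenGL textures to CUDA. Their implementation is not part of this excerpt; the sketch below shows the standard CUDA–OpenGL interop sequence such a wrapper would typically follow. The class name GLTextureMapper and every detail of the sketch are illustrative assumptions, not the project's actual cudax code.

#include <GL/gl.h>
#include <cuda_runtime.h>
#include <cuda_gl_interop.h>

/// Hypothetical minimal mapper from a GL texture to a CUDA array; the real
/// wrappers used above (render_color, render_xyz, ...) are not shown here.
class GLTextureMapper {
    cudaGraphicsResource_t resource = nullptr;
public:
    void init(GLuint tex_id) {
        ///--- register the GL texture once; read-only access suffices for sampling it from CUDA
        cudaGraphicsGLRegisterImage(&resource, tex_id, GL_TEXTURE_2D,
                                    cudaGraphicsRegisterFlagsReadOnly);
    }
    cudaArray_t bind() {
        ///--- map the resource and fetch the underlying CUDA array for kernel access
        cudaGraphicsMapResources(1, &resource, 0);
        cudaArray_t array = nullptr;
        cudaGraphicsSubResourceGetMappedArray(&array, resource, 0, 0);
        return array;
    }
    void unbind()  { cudaGraphicsUnmapResources(1, &resource, 0); }
    void cleanup() { cudaGraphicsUnregisterResource(resource); resource = nullptr; }
};

A mapper of this shape would pair naturally with the init()/cleanup() calls of Examples #1 and #2: register in init(), map/unmap around each tracking iteration, and unregister in cleanup().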
Example #3
0
void mexFunction(int nlhs, mxArray * plhs[], int nrhs, const mxArray * prhs[]){
    mexPrintf("welcome to dtform!\n");
    if( nrhs!=1 )
        mexErrMsgTxt("This function requires 1 argument\n");
    if( !mxIsNumeric(prhs[0]) )
        mexErrMsgTxt("varargin{0} must be a numeric image\n");
    if( mxGetNumberOfDimensions(prhs[0])!=2 )
        mexErrMsgTxt("varargin{0} must be an image\n");
    if( !mxIsDouble(prhs[0]) )
        mexErrMsgTxt("varargin{0} must be a _double_ image\n");
    int num_rows = (int) mxGetM(prhs[0]);
    int num_cols = (int) mxGetN(prhs[0]);

    mexPrintf("Processing size(I)=[%dx%d]\n", num_rows, num_cols);
    Scalar* image = (Scalar*) mxGetPr(prhs[0]); ///< assumes Scalar aliases double (input verified above)
	    
    DistanceTransform dt;
    dt.init(num_rows, num_cols);
    dt.exec(image, Scalar(.5));
    
    ///--- allocate & copy output (could be improved)
    {
        Scalar* out_dt = dt.dsts_image_ptr();
        plhs[0] = mxCreateDoubleMatrix(num_rows, num_cols, mxREAL);
        Scalar* out = (Scalar*) mxGetPr(plhs[0]);
        for(int i=0; i<(num_rows*num_cols); i++)
            out[i] = out_dt[i];
    }
    
    ///--- Also save correspondences (indices)
    if(nlhs==2){        
        plhs[1] = mxCreateDoubleMatrix(num_rows, num_cols, mxREAL);
        Scalar* out = (Scalar*) mxGetPr(plhs[1]);
        int* out_dti = dt.idxs_image_ptr();
        for(int i=0; i<(num_rows*num_cols); i++)
            out[i] = out_dti[i];
    }
    
    dt.cleanup();    
}
Example #4
0
void energy::Fitting::track(DataFrame& frame, LinearSystem& sys, bool rigid_only, bool eval_error, float & push_error, float & pull_error) {
    // TICTOC_SCOPE(timer,"Energy::Fitting");

    ///--- Make sure sensor has necessary data
    assert( sensor_depth_texture->check_loaded(frame.id) );

    // TICTOC_BLOCK(timer,"Worker::track_cuda::(KinematicChainTransfer)")
    {
        kernel_upload_kinematic(skeleton->getJointTransformations(),skeleton->getKinematicChain());
        kernel_upload_cylinders(cylinders->serialize());
    }


    {
        cv::Mat& sensor_silhouette = handfinder->sensor_silhouette;
        static int last_uploaded_id=-1; ///< avoid multiple uploads
        static cv::Mat sensor_silhouette_flipped;
        if(last_uploaded_id!=frame.id){
            // TICTOC_SCOPE(t_dtform,"Energy::Fitting::dtform");
            cv::flip(sensor_silhouette, sensor_silhouette_flipped, 0 /*flip rows*/ );
            distance_transform.exec(sensor_silhouette_flipped.data, 125);
            kernel_upload_dtform_idxs(distance_transform.idxs_image_ptr());

            //---- WARNING THIS CORRUPTS DATA!!
            // cv::normalize(distance_transform.dsts_image(), distance_transform.dsts_image(), 0.0, 1.0, cv::NORM_MINMAX);
            // cv::imshow("dt", distance_transform.dsts_image());
            kernel_upload_sensor_data(sensor_silhouette_flipped.data);
            last_uploaded_id = frame.id;
        }
    }

    ///---------------------------------------------------
    ///---------------------------------------------------
    // cudaDeviceSynchronize();
    ///---------------------------------------------------
    ///---------------------------------------------------


    ///--- Map resources to CUDA context
    // TIMED_BLOCK(timer,"Worker::track_cuda::(bind+kernel)")
    {
        // TICTOC_BLOCK(timer,"Worker::track_cuda::(BindOpenGL)")
        {
            offscreenrend->fb->bind(); ///< with glFinish() only takes 20 microseconds
            cudax::render_color   = render_color.bind();
            cudax::render_points  = render_xyz.bind();
            cudax::sensor_depth   = sensor_depth.bind();
        }

        // TICTOC_BLOCK(timer,"Worker::track_cuda::(kernel)")
        {
            kernel_bind();
            bool reweight = settings->fit3D_reweight;
            if(rigid_only && settings->fit3D_reweight && !(settings->fit3D_reweight_rigid))
                reweight = false; ///< allows fast rigid motion (mostly visible on PrimeSense @60FPS)
            // std::cout << "rigid?" << rigid_only << "reweight?" << reweight << std::endl;
            kernel(sys.lhs.data(), sys.rhs.data(), push_error, pull_error, eval_error, reweight);
            kernel_unbind();
        }

        ///--- debug
        // std::ofstream("mat/JtJ_gpu.txt") << sys.lhs << std::endl;
        // std::ofstream("mat/Jte_gpu.txt") << sys.rhs << std::endl;

        if(settings->debug_show_constraints_image){
            int w = camera->height(), h = camera->width(); ///< cv::Mat takes (rows, cols), so "w" holds the image height here
            cv::Mat opencv_image = cv::Mat(w, h, CV_8UC3, cv::Scalar(0,0,0));
            kernel_constraint_type_image(opencv_image.data, w, h);
            cv::flip(opencv_image, opencv_image, 0);
            cv::imshow("constraint types", opencv_image);
        }

        // TICTOC_BLOCK(timer,"Worker::track_cuda::(unbind)")
        {
            render_color.unbind();
            render_xyz.unbind();
            sensor_depth.unbind();
            offscreenrend->fb->unbind();
        }
    }

    ///--- @note debug
    // cv::imshow("debug_image", debug_image);
}
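The kernel call above fills sys.lhs and sys.rhs with the accumulated normal equations (the commented-out debug dumps name them JtJ_gpu and Jte_gpu). The fragment below is a minimal sketch of how a downstream solver could consume them; the LinearSystem layout, the Eigen types, and the damping scheme are assumptions for illustration, not the project's actual solver.

#include <Eigen/Dense>

struct LinearSystem {              ///< assumed layout mirroring sys.lhs / sys.rhs above
    Eigen::MatrixXf lhs;           ///< J^T J accumulated on the GPU
    Eigen::VectorXf rhs;           ///< J^T e accumulated on the GPU
};

/// One damped Gauss-Newton (Levenberg-style) update from the accumulated system.
Eigen::VectorXf solve_damped(const LinearSystem &sys, float damping = 1.0f) {
    Eigen::MatrixXf A = sys.lhs;
    A.diagonal().array() += damping;   ///< damping keeps the step bounded when J^T J is ill-conditioned
    return A.ldlt().solve(sys.rhs);    ///< parameter update for the pose vector
}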
Example #5
0
template<class T, class T2>
DMultiDMatrix _DistanceTransform<T,T2>::do_transform_3d(const DMultiDMatrix &in_im, _DMatrix<T> &sigma, T s_z)
{
  int rows = in_im.rows();
  int cols = in_im.cols();
  int planes = in_im.planes();

  pair<DPlane, pair<DPlane, DPlane> > *result = 
    new pair<DPlane, pair<DPlane, DPlane> >[planes];
  DistanceTransform dt;

  for(int k=0; k<planes; k++)
    {
      result[k] = dt.do_transform_2d(in_im.get(k), sigma);
    }

  int width = in_im.cols();
  int height = in_im.rows();
  int *z, *v;
  float *vref;
  int k, v_k;
  float s, sp;
  int x, y;

  z = (int *)malloc(sizeof(int)*planes);
  v = (int *)malloc(sizeof(int)*planes);
  vref = (float *)malloc(sizeof(float)*planes);
  if ((z == NULL) || (v == NULL) || (vref == NULL)) {
    assert(0);
  }

  /* do z transform */
  for (int p = 0; p < planes; p++) {
    k = 0;  /* Number of boundaries between parabolas */
    z[0] = 0;   /* Indexes of locations of boundaries,
		   order by increasing x */
    z[1] = width;    
    v[0] = 0;     /* Indexes of locations of visible parabola bases,
		     ordered by increasing x */

    T *im_p=result.get(p);
    T2 *closest_rows_y = closest_rows.get(p);
    T2 *closest_cols_y = closest_cols.get(p);

    for(int i=0; i<height; i++)
      for(int j=0; j<width; j++)
	{
	  do {
	    /* compute Voronoi border: intersection of parabola at x
	       with rightmost currently visible parabola */
	    s = ((im_p[i][j] + scaling_z*p*p) - (im_y[v[k]] + scaling_z*v[k]*v[k])) /
	      (2 * scaling_z * (p - v[k]));
	
	    sp = ceil(s); // floor(ceil(s));
	
	    /* case one: intersection is to the right of the array, 
	       so this parabola is not visible (and nothing to do) */
	    if (sp >= width)
	      { 
		break;
	      }

	    /* case two: intersection is at larger x than rightmost current
	       intersection, so this parabola is visible on the right (add
	       it to the end) */
	    if (sp > z[k]) {
	      z[k+1] = int(sp);
	      z[k+2] = width;
	      v[k+1] = x;
	      k++; 
	      break;
	    }
	    
	    /* case three: intersection is at smaller x than the
	       rightmost current intersection, so this parabola hides the
	       rightmost parabola (remove the rightmost parabola, if there
	       are still remaining potentially visible parabolas iterate to
	       complete the addition of this parabola). */
	    
	    if (k == 0) {
	      v[k] = x;
	      break;
	    } else {
	      z[k] = width;
	      k--;
	    }
	  } while (1);
	  
	}
    
    /* compute transform values from visible parabolas */
    
    /* get value of input image at each parabola base */
    for (x = 0; x <= k; x++) {
      vref[x] = im_y[v[x]];
    }
    k = 0;
    
    /* iterate over pixels, calculating value for closest parabola */
    v_k=v[k];
    for (x = 0; x < width; x++) {
      if (x == z[k+1])
	k++, v_k=v[k];
      im_y[x] = vref[k] + (v_k-x)*(v_k-x) * scaling_x;

      closest_rows_y[x] = y;
      closest_cols_y[x] = v_k;
    }

  }

  free(z);
  free(v);
  free(vref);

  DMultiDMatrix result2(3, planes, rows, cols);

  /* combine the per-plane 2D transforms across planes; the offsets below are
     hard-coded for four planes with a cyclic plane distance of s_z */
  for(int i=0; i<rows; i++)
    for(int j=0; j<cols; j++)
      {
        // plane 0
        result2[0][i][j] = min(min(result[0].first[i][j],
                                   result[1].first[i][j] + s_z),
                               min(result[2].first[i][j] + 2*s_z,
                                   result[3].first[i][j] + s_z));
        // plane 1
        result2[1][i][j] = min(min(result[0].first[i][j] + s_z,
                                   result[1].first[i][j]),
                               min(result[2].first[i][j] + s_z,
                                   result[3].first[i][j] + 2*s_z));
        // plane 2
        result2[2][i][j] = min(min(result[0].first[i][j] + 2*s_z,
                                   result[1].first[i][j] + s_z),
                               min(result[2].first[i][j],
                                   result[3].first[i][j] + s_z));
        // plane 3
        result2[3][i][j] = min(min(result[0].first[i][j] + s_z,
                                   result[1].first[i][j] + 2*s_z),
                               min(result[2].first[i][j] + s_z,
                                   result[3].first[i][j]));
      }

  delete[] result;  

  return result2;
}
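The per-pixel do/while pass in Example #5 mirrors the lower-envelope construction used in Felzenszwalb–Huttenlocher-style distance transforms: z[] holds the breakpoints between visible parabolas, v[] the samples at which they are rooted, and vref[] the input values at those roots. For reference, here is a minimal self-contained sketch of the classic one-dimensional squared-distance version of that pass; the function name dt_1d, the std::vector interface, and the unit sample spacing are assumptions made for this sketch, not part of the original sources.

#include <vector>
#include <limits>

/// 1D squared-Euclidean distance transform of a sampled cost function f
/// (lower-envelope-of-parabolas construction).
static std::vector<float> dt_1d(const std::vector<float>& f) {
    const int n = static_cast<int>(f.size());
    if (n == 0) return {};
    std::vector<float> d(n);
    std::vector<int> v(n);            ///< roots of the visible parabolas
    std::vector<float> z(n + 1);      ///< breakpoints between visible parabolas
    int k = 0;
    v[0] = 0;
    z[0] = -std::numeric_limits<float>::infinity();
    z[1] = +std::numeric_limits<float>::infinity();

    for (int q = 1; q < n; ++q) {
        ///--- intersection of the parabola rooted at q with the rightmost visible one
        float s = ((f[q] + q * q) - (f[v[k]] + v[k] * v[k])) / (2.0f * (q - v[k]));
        while (s <= z[k]) {
            --k;                      ///< the new parabola hides the previous rightmost one
            s = ((f[q] + q * q) - (f[v[k]] + v[k] * v[k])) / (2.0f * (q - v[k]));
        }
        ++k;
        v[k] = q;
        z[k] = s;
        z[k + 1] = +std::numeric_limits<float>::infinity();
    }

    ///--- evaluate the lower envelope at every sample
    k = 0;
    for (int q = 0; q < n; ++q) {
        while (z[k + 1] < q) ++k;
        d[q] = (q - v[k]) * (q - v[k]) + f[v[k]];
    }
    return d;
}

Example #5 appears to run the same bookkeeping with an anisotropic weight (scaling_z) along the plane axis and additionally records the minimizing sample index in closest_rows / closest_cols alongside the distance value.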