// Solves a tridiagonal linear system with the Thomas algorithm. input_a,
// input_b, input_c and input_d hold the sub-diagonal, diagonal,
// super-diagonal and right-hand side; each must be a 1xN or Nx1 vector.
bool CubicSplineInterpolation::caltridiagonalMatrices(
    cv::Mat_<double> &input_a, 
    cv::Mat_<double> &input_b, 
    cv::Mat_<double> &input_c,
    cv::Mat_<double> &input_d,
    cv::Mat_<double> &output_x )
{
    int rows = input_a.rows;
    int cols = input_a.cols;

    if ( ( rows == 1 && cols > rows ) || 
        (cols == 1 && rows > cols ) )
    {
        const int count = ( rows > cols ? rows : cols ) - 1; // last valid index

        output_x = cv::Mat_<double>::zeros(rows, cols);

        // Work on copies so that the input vectors stay untouched
        cv::Mat_<double> cCopy, dCopy;
        input_c.copyTo(cCopy);
        input_d.copyTo(dCopy);

        if ( input_b(0) != 0 )
        {
            cCopy(0) /= input_b(0);
            dCopy(0) /= input_b(0);
        }
        else
        {
            return false;
        }

        // Forward elimination; the sweep must cover every row, including the last
        for ( int i = 1; i <= count; i++ )
        {
            double temp = input_b(i) - input_a(i) * cCopy(i-1);
            if ( temp == 0.0 )
            {
                return false;
            }

            cCopy(i) /= temp;
            dCopy(i) = ( dCopy(i) - dCopy(i-1)*input_a(i) ) / temp;
        }

        // Back substitution, from the last unknown down to the first
        output_x(count) = dCopy(count);
        for ( int i = count - 1; i >= 0; i-- )
        {
            output_x(i) = dCopy(i) - cCopy(i) * output_x(i+1);
        }
        return true;
    }
    else
    {
        return false;
    }
}
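A minimal usage sketch of the solver above (assuming CubicSplineInterpolation is default-constructible; values chosen so the expected solution is all ones):

void tridiagonalDemo()
{
    CubicSplineInterpolation spline;
    // System: a(i)*x(i-1) + b(i)*x(i) + c(i)*x(i+1) = d(i); a(0) and c(3) are unused.
    cv::Mat_<double> a = (cv::Mat_<double>(1, 4) << 0.0, 1.0, 1.0, 1.0); // sub-diagonal
    cv::Mat_<double> b = (cv::Mat_<double>(1, 4) << 2.0, 2.0, 2.0, 2.0); // main diagonal
    cv::Mat_<double> c = (cv::Mat_<double>(1, 4) << 1.0, 1.0, 1.0, 0.0); // super-diagonal
    cv::Mat_<double> d = (cv::Mat_<double>(1, 4) << 3.0, 4.0, 4.0, 3.0); // right-hand side
    cv::Mat_<double> x;
    if (spline.caltridiagonalMatrices(a, b, c, d, x))
        std::cout << "x = " << x << std::endl; // expected: [1, 1, 1, 1]
}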
cv::Mat_<uchar> ocmu_maxconnecteddomain(cv::Mat_<uchar> binImg)
{	
	cv::Mat_<uchar> maxRegion; // cropped region to return

	// Find the contours; each external contour corresponds to one connected component
	cv::Mat_<uchar> contourImg;
	binImg.copyTo(contourImg);
	std::vector<std::vector<cv::Point>> contourVecs;  
	cv::findContours(contourImg, contourVecs,CV_RETR_EXTERNAL, \
		CV_CHAIN_APPROX_NONE);
		 
	if (contourVecs.size() > 0) { // at least one connected component: find the largest
		double maxArea = 0;  
		std::vector<cv::Point> maxContour;  
		for(size_t i = 0; i < contourVecs.size(); i++) {  
			double area = cv::contourArea(contourVecs[i]);  
			if (area > maxArea) {  
				maxArea = area;  
				maxContour = contourVecs[i];  
			}  
		}  

		// Convert the contour to its bounding rectangle
		cv::Rect maxRect = cv::boundingRect(maxContour);  
		int xBegPos = maxRect.y;
		int yBegPos = maxRect.x;
		int xEndPos = xBegPos + maxRect.height;
		int yEndPos = yBegPos + maxRect.width;

		maxRegion = binImg(cv::Range(xBegPos, xEndPos), \
			cv::Range(yBegPos, yEndPos));
	}

	return maxRegion;
}
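A minimal usage sketch (the file names and threshold value are illustrative, not from the original source):

void largestRegionDemo()
{
	cv::Mat_<uchar> gray = cv::imread("input.png", CV_LOAD_IMAGE_GRAYSCALE);
	cv::Mat_<uchar> bin;
	cv::threshold(gray, bin, 128, 255, CV_THRESH_BINARY);
	// Crop the bounding box of the largest external contour
	cv::Mat_<uchar> roi = ocmu_maxconnecteddomain(bin);
	if (!roi.empty())
		cv::imwrite("largest_region.png", roi);
}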
Example #3
	void AddDescriptor(cv::Mat_<double>& descriptors, cv::Mat_<double> new_descriptor, int curr_frame, int num_frames_to_keep)
	{
		if(descriptors.empty())
		{
			descriptors = cv::Mat_<double>(num_frames_to_keep, new_descriptor.cols, 0.0);
		}

		// The descriptor matrix acts as a ring buffer indexed by frame number
		int row_to_change = curr_frame % num_frames_to_keep;

		new_descriptor.copyTo(descriptors.row(row_to_change));
	}	
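A minimal usage sketch: the function above keeps a fixed-size ring buffer of per-frame descriptors, so a caller can loop like this (the 64-D placeholder descriptor is illustrative):

void descriptorHistoryDemo()
{
	cv::Mat_<double> history; // allocated lazily on the first call
	const int num_frames_to_keep = 100;
	for (int frame = 0; frame < 250; ++frame)
	{
		cv::Mat_<double> descriptor(1, 64, 0.0); // placeholder descriptor
		AddDescriptor(history, descriptor, frame, num_frames_to_keep);
		// history.row(frame % num_frames_to_keep) now holds this frame's descriptor
	}
}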
Example #4
illumestimators::Illum OrthodoxTanAdapter::estimate(cv::Mat_<cv::Vec3b> the_image, cv::Mat_<unsigned char> mask)
{
	// Zero out masked pixels (mask == 255) so they do not influence the estimate
	assert((the_image.rows == mask.rows) && (the_image.cols == mask.cols));

	// Alter a copy, not the original image; convertTo (rather than copyTo)
	// matches the Vec3b source to the Vec3d working type.
	cv::Mat_<cv::Vec3d> use_this;
	the_image.convertTo(use_this, CV_64FC3);

	for (int y = 0; y < the_image.rows; ++y)
		for (int x = 0; x < the_image.cols; ++x)
			if (mask[y][x] == 255)
				use_this[y][x] = cv::Vec3d(0, 0, 0);

	return estimate(use_this);
}
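A minimal usage sketch (assuming OrthodoxTanAdapter is default-constructible; per the code above, mask value 255 marks pixels to exclude):

void tanAdapterDemo()
{
	OrthodoxTanAdapter adapter;
	cv::Mat_<cv::Vec3b> img = cv::imread("scene.png");
	cv::Mat_<unsigned char> mask(img.rows, img.cols, (unsigned char)0); // keep all pixels
	illumestimators::Illum illuminant = adapter.estimate(img, mask);
}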
Example #5
WTLF::FilteredImg::FilteredImg(int kernelSize, const cv::Mat_<float> &img)
: smoothingKernelSize(kernelSize), cumsum(img.rows*img.cols + 1, 0)
{
	if (smoothingKernelSize == 0)
		img.copyTo(filtImg);
	else
		cv::boxFilter(img, filtImg, -1 /* same depth as img */,
			cv::Size(smoothingKernelSize, smoothingKernelSize),
			cv::Point(-1,-1) /* anchor at kernel center */,
			true /* normalize */);

	// Calculate the cumulative sum of squared filter responses
	// (an unnormalized CDF over pixels)
	cumsum[0] = 0.0f;
	cv::Mat_<float>::const_iterator it = filtImg.begin(),
		itEnd = filtImg.end();
	for (int i = 0; it != itEnd; ++it, ++i)
	{
		cumsum[i+1] = cumsum[i] + (*it) * (*it); // add the squared response
	}
}
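A hedged sketch of how the cumulative sum could be consumed: drawing a flat pixel index with probability proportional to the squared filter response. samplePixel() is not part of the original class, and it assumes cumsum is a std::vector<float> with at least one nonzero response:

#include <algorithm>
#include <cstdlib>
#include <vector>

// Illustrative only: samplePixel is NOT part of WTLF::FilteredImg.
int samplePixel(const std::vector<float> &cumsum)
{
	// Draw u uniformly in [0, total energy)
	float u = cumsum.back() * (std::rand() / (RAND_MAX + 1.0f));
	// First entry strictly greater than u; pixel i owns [cumsum[i], cumsum[i+1])
	std::vector<float>::const_iterator it =
		std::upper_bound(cumsum.begin(), cumsum.end(), u);
	return (int)(it - cumsum.begin()) - 1;
}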
Example #6
// mex function call:
// x = mexFGS(input_image, guidance_image = NULL, sigma, lambda, fgs_iteration = 3, fgs_attenuation = 4);
void FGS(const cv::Mat_<float> &in, const cv::Mat_<cv::Vec3b> &color_guide, cv::Mat_<float> &out, double sigma, double lambda, int solver_iteration, int solver_attenuation)
{
	
	// image resolution (W, H, nChannels, nChannels_guide are defined at file scope)
	W = in.cols;
	H = in.rows;

	nChannels = 1;
	nChannels_guide = 3;
	
	cv::Mat_<cv::Vec3f> image_guidance;
	color_guide.convertTo(image_guidance,CV_32FC3);

	cv::Mat_<float> image_filtered;
	in.copyTo(image_filtered);

	// run FGS; sigma appears to be specified in normalized units and is
	// scaled here to the 8-bit intensity range
	sigma *= 255.0;

	FGS_simple(image_filtered, image_guidance, sigma, lambda, solver_iteration, solver_attenuation);
	image_filtered.copyTo(out);
}
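A minimal usage sketch (file names, sigma and lambda are illustrative assumptions; iteration = 3 and attenuation = 4 follow the mex comment above):

void fgsDemo()
{
	cv::Mat_<cv::Vec3b> guide = cv::imread("guide.png"); // assumed same resolution as input
	cv::Mat gray = cv::imread("input.png", CV_LOAD_IMAGE_GRAYSCALE);
	cv::Mat_<float> in, out;
	gray.convertTo(in, CV_32F);
	FGS(in, guide, out, 0.03, 900.0, 3, 4);
	cv::Mat out8u;
	out.convertTo(out8u, CV_8U);
	cv::imwrite("smoothed.png", out8u);
}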
cv::Mat_<cv::Vec3b> SuperpixelSegmentation::getSegmentedImage(cv::Mat_<cv::Vec3b> input_host, int options){

	if(options == Line){
		// Draw superpixel boundaries: whiten any pixel whose label differs
		// from the pixel below it or to its right.
		input_host.copyTo(SegmentedColor);

		for(int y=0; y<height-1; y++){
			for(int x=0; x<width-1; x++){
				if(Labels_Host[y*width+x] != Labels_Host[(y+1)*width+x]){
					SegmentedColor.at<cv::Vec3b>(y, x) = cv::Vec3b(255, 255, 255);
				}
				if(Labels_Host[y*width+x] != Labels_Host[y*width+x+1]){
					SegmentedColor.at<cv::Vec3b>(y, x) = cv::Vec3b(255, 255, 255);
				}
			}
		}
	}
	else{
		// Fill each superpixel with its mean color; the per-cluster means are
		// computed on the GPU and copied back here.
		cudaMemcpy(meanData_Host, meanData_Device, sizeof(superpixel)*ClusterNum.x*ClusterNum.y, cudaMemcpyDeviceToHost);
		for(int y=0; y<height; y++){
			for(int x=0; x<width; x++){
				int id = Labels_Host[y*width+x];
				if(id != -1){
					SegmentedColor.at<cv::Vec3b>(y, x).val[0] = meanData_Host[id].r;
					SegmentedColor.at<cv::Vec3b>(y, x).val[1] = meanData_Host[id].g;
					SegmentedColor.at<cv::Vec3b>(y, x).val[2] = meanData_Host[id].b;
				}
				else
					SegmentedColor.at<cv::Vec3b>(y, x) = cv::Vec3b(0, 0, 0);
			}
		}
	}

	return SegmentedColor;
}
Example #8
void markersCallback(const markers_msgs::Markers& msg)
{
  // This callback only makes sense if we have at least one marker
  if( msg.num_markers < 1 )
   return;

  /// Before using the markers information, we need to update the estimated
  /// pose considering the amount of time that elapsed since the last update
  /// from odometry.
  /// We will use the linear and angular velocity of the robot for that.

  /// Step 1 - Update particles with robot movement:
  if( odom_first_update == false )
  {
    double dt = msg.header.stamp.toSec() - last_step1_time;
    double distance = odo_lin_vel*dt; // Distance travelled
    double dtheta = odo_ang_vel*dt; // Rotation performed
    ParticleFilterStep1(distance, dtheta);
    last_step1_time = msg.header.stamp.toSec();

    // Store updated odometry pose
    odo_robot_pose.x += distance*cos(odo_robot_pose.theta);
    odo_robot_pose.y += distance*sin(odo_robot_pose.theta);
    odo_robot_pose.theta += dtheta;

  }

  /// Step 2 - Update the particle weights given the sensor model and map
  /// knowledge
#ifdef STEP_UPDATE
  // For now we only perform this step if there was any marker detected.
  // We could use the expected but not detected beacons for each particle,
  // but it would become too expensive.
  //
  // The weight for each particle will be:
  //   w(j) = mean(1/(1+sqrt((x_e(i)-x_r(i))^2+(y_e(i)-y_r(i))^2)*DISTANCE_ERROR_GAIN))
  // where x_e(i)/y_e(i) and x_r(i)/y_r(i) are the expected and obtained
  // x/y world coordinates of the detected marker, respectively. The gain is
  // a constant that can be tuned to favor smaller detection errors.
  //

  // Reset normalization factor
  double norm_factor = 0;
  for(int j = 0; j < NUM_PARTICLES; j++)
  {
    // Compute the weight for each particle
    // For each obtained beacon value
    particles_weight(j,0) = 0;
    for( uint n=0; n < msg.num_markers; n++ )
    {
      // Obtain beacon position in world coordinates
      geometry_msgs::Pose2D particle;
      particle.x = particles_x(j,0);
      particle.y = particles_y(j,0);
      particle.theta = particles_theta(j,0);

      point_2d marker_lpos = {msg.range[n]*cos(msg.bearing[n]),
                              msg.range[n]*sin(msg.bearing[n])};
      point_2d marker_wpos;
      local2World( particle, marker_lpos, &marker_wpos );

      // The gain applies to the whole Euclidean distance (see the formula above)
      particles_weight(j,0) +=
          1.0/(1+sqrt(pow(markers_wpos[msg.id[n]-1].x-marker_wpos.x,2)
                      +pow(markers_wpos[msg.id[n]-1].y-marker_wpos.y,2))*DISTANCE_ERROR_GAIN);
    }
    // Perform the mean. We summed all elements above and now divide by
    // the number of elements summed.
    particles_weight(j,0) /= msg.num_markers;
    // Update the normalization factor
    norm_factor += particles_weight(j,0);
  }

  // Normalize the weight
  max_weight = 0.0;
  for(int j = 0; j < particles_x.size().height; j++)
  {
    particles_weight(j,0) /= norm_factor;
    // Store the particle with the best weight as our posture estimate
    if( particles_weight(j,0) > max_weight )
    {
      posture_estimate.x = particles_x(j,0);
      posture_estimate.y = particles_y(j,0);
      posture_estimate.theta = particles_theta(j,0);
      // max_weight is only used for debugging, so that particles get more
      // distinguishable colors.
      max_weight = particles_weight(j,0);
    }

  }
#endif

  // Show debug information
  showDebugInformation();

  /// Step 3 - Resample
#ifdef STEP_RESAMPLE
  // The resampling step is the exact implementation of the algorithm
  // described in the theoretical classes, i.e., the "Importance resampling
  // algorithm".

  // Save the current values
  // The old particles will be needed in the resampling algorithm
  cv::Mat_<double> old_particles_weight(NUM_PARTICLES, 1, 1.0/NUM_PARTICLES);

  particles_x.copyTo(old_particles_x);
  particles_y.copyTo(old_particles_y);
  particles_theta.copyTo(old_particles_theta);
  particles_weight.copyTo(old_particles_weight);

  double delta = rng.uniform(0.0, 1.0/NUM_PARTICLES);
  double c = old_particles_weight(0,0);
  int i = 0;
  for(int j = 0; j < NUM_PARTICLES; j++)
  {
    double u = delta + j/(1.0*NUM_PARTICLES);
    while( u > c )
    {
      i++;
      c += old_particles_weight(i,0);
    }
    particles_x(j,0) = old_particles_x(i,0);
    particles_y(j,0) = old_particles_y(i,0);
    particles_theta(j,0) = old_particles_theta(i,0);
    // The weight is indicative only; it will be recomputed on the next marker detection
    particles_weight(j,0) = old_particles_weight(i,0);
  }
#endif
  ///
  /// Particle filter steps end here
  ///

  // Save map from time to time
  iteration++;
  if( iteration == DELTA_SAVE )
  {
    cv::imwrite("mapa.png", map);
    iteration = 0;
  }

}
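A self-contained sketch of the systematic resampling step used above, run on a toy 4-particle weight vector (names and values are illustrative only):

#include <opencv2/core/core.hpp>
#include <iostream>

int main()
{
  const int N = 4;
  cv::Mat_<double> w = (cv::Mat_<double>(N, 1) << 0.1, 0.6, 0.2, 0.1);
  cv::RNG rng;
  double delta = rng.uniform(0.0, 1.0/N);
  double c = w(0,0);
  int i = 0;
  for (int j = 0; j < N; j++)
  {
    double u = delta + j/(1.0*N);
    while (u > c)
      c += w(++i,0);
    // Heavier particles (here index 1, weight 0.6) are selected more often
    std::cout << "new particle " << j << " <- old particle " << i << std::endl;
  }
  return 0;
}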