Code example #1
File: ImageProcessorCV.cpp  Project: junaidnaseer/ivt
void ImageProcessorCV::CalculateGradientImageHSV(CByteImage *pInputImage, CByteImage *pOutputImage)
{
	if (pInputImage->width != pOutputImage->width || pInputImage->height != pOutputImage->height ||
		pInputImage->type != CByteImage::eRGB24 || pOutputImage->type != CByteImage::eGrayScale)
		return;

	IplImage *pIplInputImage = IplImageAdaptor::Adapt(pInputImage);
	IplImage *pIplOutputImage = IplImageAdaptor::Adapt(pOutputImage);

	// Determine gradient image (by Irina Wächter)
	// instead of the usual norm sqrt(x*x + y*y), use |x| + |y| because it is much faster
	IplImage *singleChannel0 = cvCreateImage(cvSize(pInputImage->width,pInputImage->height), IPL_DEPTH_8U, 1);
	IplImage *singleChannel1 = cvCreateImage(cvSize(pInputImage->width,pInputImage->height), IPL_DEPTH_8U, 1);
	IplImage *singleChannel2 = cvCreateImage(cvSize(pInputImage->width,pInputImage->height), IPL_DEPTH_8U, 1);
	IplImage *diff = cvCreateImage(cvSize(pInputImage->width, pInputImage->height), IPL_DEPTH_16S, 1);
	IplImage *abs = cvCreateImage(cvSize(pInputImage->width, pInputImage->height), IPL_DEPTH_8U, 1);
		
	cvCvtPixToPlane(pIplInputImage, singleChannel0, singleChannel1, singleChannel2, NULL);
	
	// calculate gradients on S-channel
	//cvSmooth(singleChannel1, singleChannel1, CV_GAUSSIAN, 3, 3);
	cvSobel(singleChannel1, diff, 1, 0, 3);
	cvConvertScaleAbs(diff, abs);
	cvSobel(singleChannel1, diff, 0, 1, 3);
	cvConvertScaleAbs(diff, pIplOutputImage);
	cvAdd(abs, pIplOutputImage, pIplOutputImage, 0);
	
	// threshold the S channel to create a mask for the H-channel gradients
	cvThreshold(singleChannel1, singleChannel1, 60, 255, CV_THRESH_BINARY);
	cvDilate(singleChannel1, singleChannel1);
	
	// calculate gradients on H-channel
	//cvSmooth(singleChannel0, singleChannel0, CV_GAUSSIAN, 3, 3);
	cvSobel(singleChannel0, diff, 1, 0, 3);
	cvConvertScaleAbs(diff, abs);
	cvSobel(singleChannel0, diff, 0, 1, 3);
	cvConvertScaleAbs(diff, singleChannel0);
	cvAdd(abs, singleChannel0, singleChannel0, 0);
	
	// filter gradients of H-channel with mask
	cvAnd(singleChannel0, singleChannel1, singleChannel0);
	
	// combine the two gradient images
	cvMax(pIplOutputImage, singleChannel0, pIplOutputImage);
	
	// free memory
	cvReleaseImage(&singleChannel0);
	cvReleaseImage(&singleChannel1);
	cvReleaseImage(&singleChannel2);
	cvReleaseImage(&diff);
	cvReleaseImage(&abs);
	
	cvReleaseImageHeader(&pIplInputImage);
	cvReleaseImageHeader(&pIplOutputImage);
}
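
The pattern above computes, per channel, a Sobel gradient magnitude approximated as |dx| + |dy| rather than the Euclidean norm. A minimal standalone sketch of that building block for one 8-bit channel (the helper name is ours; OpenCV 1.x C API as used throughout these examples):

#include <opencv/cv.h>

// Approximate gradient magnitude of a single 8-bit channel as |dx| + |dy|.
void sobelAbsGradient(const IplImage *channel, IplImage *gradient)
{
	// channel and gradient: single-channel, IPL_DEPTH_8U, same size
	IplImage *diff = cvCreateImage(cvGetSize(channel), IPL_DEPTH_16S, 1);
	IplImage *absX = cvCreateImage(cvGetSize(channel), IPL_DEPTH_8U, 1);

	cvSobel(channel, diff, 1, 0, 3);    // d/dx into 16-bit signed (avoids overflow)
	cvConvertScaleAbs(diff, absX);      // |dx|
	cvSobel(channel, diff, 0, 1, 3);    // d/dy
	cvConvertScaleAbs(diff, gradient);  // |dy|
	cvAdd(absX, gradient, gradient, 0); // |dx| + |dy|

	cvReleaseImage(&diff);
	cvReleaseImage(&absX);
}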
Code example #2
//--------------------------------------------------------------
void DepthHoleFiller::performProperClose		  ( ofxCvGrayscaleImage &input, int diameter){
	
	// http://homepages.inf.ed.ac.uk/rbf/HIPR2/close.htm 
	// Defined as Max(f, O(C(O(f))))
	
	ofxCv8uC1_Temp1 = input; // temp copy of original
	
	performMorphologicalOpen	(input, diameter);
	performMorphologicalClose	(input, diameter);
	performMorphologicalOpen	(input, diameter);
	
	cvMax(input.getCvImage(), 
		  ofxCv8uC1_Temp1.getCvImage(), 
		  input.getCvImage());
}
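
A plain OpenCV 1.x sketch of the same "proper closing", Max(f, O(C(O(f)))), without the openFrameworks wrapper (properClose and se are illustrative names, not from the source):

#include <opencv/cv.h>

void properClose(IplImage *img, IplConvKernel *se)
{
	IplImage *original = cvCloneImage(img); // keep f for the final max

	cvMorphologyEx(img, img, NULL, se, CV_MOP_OPEN,  1); // O(f)
	cvMorphologyEx(img, img, NULL, se, CV_MOP_CLOSE, 1); // C(O(f))
	cvMorphologyEx(img, img, NULL, se, CV_MOP_OPEN,  1); // O(C(O(f)))

	cvMax(img, original, img); // Max(f, ...) guarantees the result is >= f
	cvReleaseImage(&original);
}

Taking the max with the original means the filter can only brighten pixels, never darken them, which is what makes it safe for patching holes in a depth map.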
Code example #3
File: Morphology1.cpp  Project: CareShaw/OCR
// morphological geodesic erosion and reconstruction by erosion
void lhMorpRErode(const IplImage* src,  const IplImage* msk, IplImage* dst, IplConvKernel* se = NULL, int iterations=-1)
{

	assert(src != NULL  && msk != NULL && dst != NULL && src != dst );

	if(iterations < 0)
	{
		// reconstruction by erosion
		cvMax(src, msk, dst);
		cvErode(dst, dst, se);
		cvMax(dst, msk, dst);

		IplImage*  temp1 = cvCreateImage(cvGetSize(src), 8, 1);
		//IplImage*  temp2 = cvCreateImage(cvGetSize(src), 8, 1);

		do
		{
			//record last result
			cvCopy(dst, temp1);
			cvErode(dst, dst, se);
			cvMax(dst, msk, dst);
			//cvCmp(temp1, dst, temp2, CV_CMP_NE);

		}
		//while(cvSum(temp2).val[0] != 0);
		while(lhImageCmp(temp1, dst)!= 0);

		cvReleaseImage(&temp1);
		//cvReleaseImage(&temp2);

		return;	

	}
	else if (iterations == 0)
	{
		cvCopy(src, dst);
	}
	else
	{
		// plain geodesic erosion, p137 (6.2)
		cvMax(src, msk, dst);
		cvErode(dst, dst, se);
		cvMax(dst, msk, dst);

		for(int i=1; i<iterations; i++)
		{
			cvErode(dst, dst, se);
			cvMax(dst, msk, dst);
		}
	}
}
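
For reference, the dual operation, reconstruction by dilation, swaps cvErode/cvMax for cvDilate/cvMin. A sketch under the same conventions (lhMorpRDilate is a name we mirror from the above, and the stability test uses cvCmp/cvCountNonZero in place of the lhImageCmp helper, which is not shown):

void lhMorpRDilate(const IplImage* src, const IplImage* msk, IplImage* dst, IplConvKernel* se = NULL)
{
	assert(src != NULL && msk != NULL && dst != NULL && src != dst);

	// geodesic dilation: dilate the marker, then clamp it from above with the mask
	cvMin(src, msk, dst);
	cvDilate(dst, dst, se);
	cvMin(dst, msk, dst);

	IplImage* prev = cvCreateImage(cvGetSize(src), 8, 1);
	IplImage* diff = cvCreateImage(cvGetSize(src), 8, 1);

	do
	{
		cvCopy(dst, prev);
		cvDilate(dst, dst, se);
		cvMin(dst, msk, dst);
		cvCmp(prev, dst, diff, CV_CMP_NE); // nonzero wherever the image still changes
	}
	while (cvCountNonZero(diff) != 0);

	cvReleaseImage(&prev);
	cvReleaseImage(&diff);
}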
Code example #4
//--------------------------------------------------------------
void DepthHoleFiller::fillHolesUsingContourFinder (ofxCvGrayscaleImage &depthImage, 
												   int maxContourArea, 
												   int maxNContours ){
	
	// Find just the holes, as geometric contours.
	computeBlobContours (depthImage, maxContourArea, maxNContours);
	
	// Rasterize those holes as filled polys, white on a black background.
	computeBlobImage();
	
	// Re-color the blobs with graylevels interpolated from the depth image.
	fillBlobsWithInterpolatedData (depthImage);
	
	// Add the interpolated holes back into the depth image
	cvMax(depthImage.getCvImage(), 
		  ofxCv8uC1_Blobs.getCvImage(), 
		  depthImage.getCvImage());
	
	// Flag changes to the images. 
	ofxCv8uC1_Blobs.flagImageChanged();
	depthImage.flagImageChanged();
}
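
computeBlobContours() and computeBlobImage() are not shown; a minimal sketch of the rasterization step they imply, using the plain C API (the helper name and the 8-bit single-channel assumption are ours):

#include <opencv/cv.h>

// Rasterize the external contours of a binary hole mask as filled white blobs.
void drawFilledBlobs(const IplImage *binaryHoles, IplImage *blobImage)
{
	CvMemStorage *storage = cvCreateMemStorage(0);
	CvSeq *contours = NULL;

	// cvFindContours modifies its input, so work on a copy
	IplImage *work = cvCloneImage(binaryHoles);
	cvFindContours(work, storage, &contours, sizeof(CvContour),
	               CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, cvPoint(0, 0));

	cvSetZero(blobImage);
	for (CvSeq *c = contours; c != NULL; c = c->h_next)
		cvDrawContours(blobImage, c, cvScalarAll(255), cvScalarAll(255),
		               0, CV_FILLED, 8, cvPoint(0, 0));

	cvReleaseImage(&work);
	cvReleaseMemStorage(&storage);
}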
Code example #5
void MapMaker::image_callback(const sensor_msgs::ImageConstPtr& msg) {
//  printf("callback called\n");
  try
	{
	
	// if you want to work with color images, change from mono8 to bgr8
	  if(input_image==NULL){
		  input_image = cvCloneImage(bridge.imgMsgToCv(msg, "mono8"));
		  rotationImage=cvCloneImage(input_image);
		 // printf("cloned image\n");
		}
		else{
		  cvCopy(bridge.imgMsgToCv(msg, "mono8"),input_image);
		 // printf("copied image\n");
		}
	}
	catch (sensor_msgs::CvBridgeException& e)
	{
		ROS_ERROR("Could not convert from '%s' to 'mono8'.", msg->encoding.c_str());
		return;
	}
	
	if(input_image!=NULL) {
    //get tf transform here and put in map
    ros::Time acquisition_time = msg->header.stamp;
    geometry_msgs::PoseStamped basePose;
    geometry_msgs::PoseStamped mapPose;
    basePose.pose.orientation.w=1.0;
    ros::Duration timeout(3);
    basePose.header.frame_id="/base_link";
    mapPose.header.frame_id="/map";
    try {
      tf_listener_.waitForTransform("/base_link", "/map", acquisition_time, timeout);
       
      tf_listener_.transformPose("/map", acquisition_time,basePose,"/base_link",mapPose);
	    
	    printf("pose #%d %f %f %f\n",pic_number,mapPose.pose.position.x, mapPose.pose.position.y, tf::getYaw(mapPose.pose.orientation));
	    
	    
	    /*
	    char buffer [50];
	    sprintf (buffer, "/tmp/test%02d.jpg", pic_number);
			if(!cvSaveImage(buffer,input_image,0)) printf("Could not save: %s\n",buffer);
			else printf("picture taken!!!\n");
	    pic_number++;
	    */
	    
	    cv::Point_<double> center;
      center.x=input_image->width/2;
      center.y=input_image->height/2;
      double translation_arr[2][3];
      CvMat translation;
      cvInitMatHeader(&translation,2,3,CV_64F,translation_arr);
      
      cvSetZero(&translation);
      cv2DRotationMatrix(center, (tf::getYaw(mapPose.pose.orientation)*180/3.14159) -90,1.0,&translation);
      cvSetZero(rotationImage);
      cvWarpAffine(input_image,rotationImage,&translation,CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS,cvScalarAll(0));
      
      
      CvRect roi;
      roi.width=rotationImage->width;
      roi.height=rotationImage->height;
      
      if(init_zero_x==0){
        init_zero_x=(int)(mapPose.pose.position.x*(1.0/map_meters_per_pixel));
        init_zero_y=(int)(mapPose.pose.position.y*(-1.0/map_meters_per_pixel));
      }
      
      roi.x=(int)(mapPose.pose.position.x*(1.0/map_meters_per_pixel))-init_zero_x+map_zero_x-roi.width/2;
      roi.y=(int)(mapPose.pose.position.y*(-1.0/map_meters_per_pixel))-init_zero_y+map_zero_y-roi.height/2;
      
      printf("x %d, y %d, rot %f\n",roi.x,roi.y, (tf::getYaw(mapPose.pose.orientation)*180/3.14159) -90);
      
      cvSetImageROI(map,roi);
      
      cvMax(map,rotationImage,map);
      
      cvResetImageROI(map);
	    cvShowImage("map image",map);	    
    }
    catch (tf::TransformException& ex) {
      ROS_WARN("[map_maker] TF exception:\n%s", ex.what());
      printf("[map_maker] TF exception:\n%s", ex.what());
      return;
    }
    catch(...){
      printf("opencv shit itself cause our roi is bad\n");
    }
  }
}
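
The catch(...) at the end papers over cvSetImageROI/cvMax failing when the computed CvRect leaves the map image. A small clamp (hypothetical helper) would avoid relying on the exception path, at the cost of also having to crop the matching region out of rotationImage so the two ROIs keep the same size:

static CvRect clampRoiToImage(CvRect roi, const IplImage *img)
{
	if (roi.x < 0) { roi.width  += roi.x; roi.x = 0; }
	if (roi.y < 0) { roi.height += roi.y; roi.y = 0; }
	if (roi.x + roi.width  > img->width)  roi.width  = img->width  - roi.x;
	if (roi.y + roi.height > img->height) roi.height = img->height - roi.y;
	if (roi.width  < 0) roi.width  = 0;
	if (roi.height < 0) roi.height = 0;
	return roi;
}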
Code example #6
void testApp::doVision() {
	float startTime = ofGetElapsedTimef();
	float img[VISION_WIDTH*VISION_HEIGHT];
	float *distPix = kinect.getDistancePixels();
	
	
	float kinectRangeFar = 4000;
	float kinectRangeNear = 500;
	
	
	float denom = (kinectRangeFar - kinectRangeNear) * (maxWaterDepth - waterThreshold);
	if(denom==0) denom = 0.0001;
	float subtractor = kinectRangeFar - waterThreshold*(kinectRangeFar - kinectRangeNear);
	
	for(int i = 0; i < VISION_WIDTH*VISION_HEIGHT; i++) {
		
		int x = i % VISION_WIDTH;
		int y = i / VISION_WIDTH;
		
		int ii = x*2 + y * 2 * KINECT_WIDTH;
		
		// this is slow
		//img[i] = ofMap(ofMap(distPix[i], 500, 4000, 1, 0), maxWaterDepth, waterThreshold, 1, 0);
		
		img[i] = (subtractor - distPix[ii])/denom;
		if(img[i]>1) img[i] = 0;
		if(img[i]<0) img[i] = 0;
	}
	depthImg.setFromPixels(img,VISION_WIDTH,VISION_HEIGHT);
	
	if(flipX || flipY) depthImg.mirror(flipY, flipX);
	
	rangeScaledImg = depthImg;
	
	for(int i = 0; i < blurIterations; i++) {
		rangeScaledImg.blur(2*blurSize+1);
	}
	if(erode) {
		rangeScaledImg.erode();
	}
	if(dilate) {
		rangeScaledImg.dilate();
	}
	
	
	
	if(accumulateBackground) {
		cvMax(bgImg.getCvImage(), rangeScaledImg.getCvImage(), bgImg.getCvImage());
		bgImg.flagImageChanged();
		
		
		backgroundAccumulationCount++;
		if(backgroundAccumulationCount>100) {
			accumulateBackground = false;
			backgroundAccumulationCount = 0;
		}
	}
	
	
	float *fgPix = rangeScaledImg.getPixelsAsFloats();
	float *bgPix = bgImg.getPixelsAsFloats();
	
	int numPix = VISION_WIDTH * VISION_HEIGHT;
	
	for(int i = 0; i < numPix; i++) {
		if(fgPix[i]<=bgPix[i]+backgroundHysteresis) {
			fgPix[i] = 0;
		}
	}
	
	rangeScaledImg.setFromPixels(fgPix, VISION_WIDTH, VISION_HEIGHT);
	
	
	maskedImg = rangeScaledImg;
	
	CvPoint points[NUM_MASK_POINTS];
	for(int i = 0; i < NUM_MASK_POINTS; i++) {
		points[i] = cvPoint(mask[i].x, mask[i].y);
	}
	
	
	CvPoint fill[4];
	fill[0] = cvPoint(0, 0);
	fill[1] = cvPoint(VISION_WIDTH, 0);
	fill[2] = cvPoint(VISION_WIDTH, VISION_HEIGHT);
	fill[3] = cvPoint(0, VISION_HEIGHT);
	
	CvPoint *allPoints[2];
	allPoints[0] = points;
	allPoints[1] = fill;
	
	int numPoints[2];
	numPoints[0] = NUM_MASK_POINTS;
	numPoints[1] = 4;
	
	// mask out the bit we're interested in
	cvFillPoly(
			   maskedImg.getCvImage(), allPoints,
			   numPoints,
			   2, cvScalar(0)
			   );
	maskedImg.flagImageChanged();
	
	contourFinder.findContours(maskedImg, minBlobSize*minBlobSize, maxBlobSize*maxBlobSize, 20, false);
	
	
	// compare each blob against the others and eradicate any blobs that are too close to each other
	if(blobs.size()>0) {
		for(int i = 0; i < contourFinder.blobs.size(); i++) {
			for(int j = i+1; j < contourFinder.blobs.size(); j++) {
				float dist = tricks::math::getClosestDistanceBetweenTwoContours(
																				contourFinder.blobs[i].pts, 
																				contourFinder.blobs[j].pts, 3);
				
				// find which one is closest to any other blob and delete the other one
				if(dist<20) {
					float distToI = FLT_MAX;
					float distToJ = FLT_MAX;
					for(map<int,Blob>::iterator it = blobs.begin(); it!=blobs.end(); it++) {
						
						distToI = MIN(distToI, (*it).second.distanceSquared(ofVec2f(contourFinder.blobs[i].centroid)));
						distToJ = MIN(distToJ, (*it).second.distanceSquared(ofVec2f(contourFinder.blobs[j].centroid)));
					}
					if(distToI<distToJ) {
						// merge the jth into the ith, and delete the jth one
						mergeBlobIntoBlob(contourFinder.blobs[i], contourFinder.blobs[j]);
						contourFinder.blobs.erase(contourFinder.blobs.begin() + j);
						j--;
					} else {
						
						// merge the ith into the jth, and delete the ith one
						mergeBlobIntoBlob(contourFinder.blobs[j], contourFinder.blobs[i]);
						contourFinder.blobs.erase(contourFinder.blobs.begin() + i);
						i--;
						break;
					}
				}
			}
		}
		
	}
	blobTracker.trackBlobs(contourFinder.blobs);
	
	
}
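
The single expression (subtractor - distPix[ii]) / denom is the two nested ofMap() calls from the commented-out "slow" line folded into one linear map: ofMap(d, 500, 4000, 1, 0) is (far - d) / (far - near), feeding that through ofMap(v, maxWaterDepth, waterThreshold, 1, 0) gives (v - waterThreshold) / (maxWaterDepth - waterThreshold), and substituting yields exactly the precomputed subtractor and denom. A self-contained check (names and sample thresholds are ours):

#include <cassert>
#include <cmath>

// Linear remap of v from [inMin, inMax] to [outMin, outMax], like ofMap().
static float lerpMap(float v, float inMin, float inMax, float outMin, float outMax)
{
	return outMin + (v - inMin) * (outMax - outMin) / (inMax - inMin);
}

int main()
{
	const float nearMM = 500, farMM = 4000;
	const float maxWaterDepth = 0.8f, waterThreshold = 0.2f;

	const float denom = (farMM - nearMM) * (maxWaterDepth - waterThreshold);
	const float subtractor = farMM - waterThreshold * (farMM - nearMM);

	for (float d = nearMM; d <= farMM; d += 10.0f) {
		float slow = lerpMap(lerpMap(d, nearMM, farMM, 1, 0),
		                     maxWaterDepth, waterThreshold, 1, 0);
		float fast = (subtractor - d) / denom;
		assert(std::fabs(slow - fast) < 1e-4f); // the folded form matches
	}
	return 0;
}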
Code example #7
File: ImageProcessorCV.cpp  Project: junaidnaseer/ivt
void ImageProcessorCV::CalculateGradientImage(CByteImage *pInputImage, CByteImage *pOutputImage)
{
	if (pInputImage->width != pOutputImage->width || pInputImage->height != pOutputImage->height ||
		pOutputImage->type != CByteImage::eGrayScale)
		return;

	IplImage *pIplInputImage = IplImageAdaptor::Adapt(pInputImage);
	IplImage *pIplOutputImage = IplImageAdaptor::Adapt(pOutputImage);

	if (pInputImage->type == CByteImage::eGrayScale)
	{
		IplImage *diff = cvCreateImage(cvSize(pInputImage->width, pInputImage->height), IPL_DEPTH_16S, 1);
		IplImage *abs = cvCreateImage(cvSize(pInputImage->width, pInputImage->height), IPL_DEPTH_8U, 1);
		
		cvSmooth(pIplInputImage, abs, CV_GAUSSIAN, 3, 3);
		cvSobel(abs, diff, 1, 0, 3);
		cvConvertScaleAbs(diff, pIplOutputImage);
		cvSobel(abs, diff, 0, 1, 3);
		cvConvertScaleAbs(diff, abs);
		cvAdd(abs, pIplOutputImage, pIplOutputImage, 0);
		
		cvReleaseImage(&diff);
		cvReleaseImage(&abs);
	}
	else if (pInputImage->type == CByteImage::eRGB24)
	{
		// Determine gradient image (by Irina Wächter)
		// instead of the usual norm sqrt(x*x + y*y), use |x| + |y| because it is much faster
		IplImage *singleChannel0 = cvCreateImage(cvSize(pInputImage->width,pInputImage->height), IPL_DEPTH_8U, 1);
		IplImage *singleChannel1 = cvCreateImage(cvSize(pInputImage->width,pInputImage->height), IPL_DEPTH_8U, 1);
		IplImage *singleChannel2 = cvCreateImage(cvSize(pInputImage->width,pInputImage->height), IPL_DEPTH_8U, 1);
		IplImage *diff = cvCreateImage(cvSize(pInputImage->width, pInputImage->height), IPL_DEPTH_16S, 1);
		IplImage *abs = cvCreateImage(cvSize(pInputImage->width, pInputImage->height), IPL_DEPTH_8U, 1);
		
		cvCvtPixToPlane(pIplInputImage, singleChannel0, singleChannel1, singleChannel2, NULL);
	
		cvSmooth(singleChannel0, singleChannel0, CV_GAUSSIAN, 3, 3);
		cvSobel(singleChannel0, diff, 1, 0, 3);
		cvConvertScaleAbs(diff, abs);
		cvSobel(singleChannel0, diff, 0, 1, 3);
		cvConvertScaleAbs(diff, singleChannel0);
		cvAdd(abs, singleChannel0, pIplOutputImage, 0);
	
		cvSmooth(singleChannel1, singleChannel1, CV_GAUSSIAN, 3, 3);
		cvSobel(singleChannel1, diff, 1, 0, 3);
		cvConvertScaleAbs(diff, abs);
		cvSobel(singleChannel1, diff, 0, 1, 3);
		cvConvertScaleAbs(diff, singleChannel1);
		cvAdd(abs, singleChannel1, singleChannel1, 0);
		cvMax(pIplOutputImage, singleChannel1, pIplOutputImage);
	
		cvSmooth(singleChannel2, singleChannel2, CV_GAUSSIAN, 3, 3);
		cvSobel(singleChannel2, diff, 1, 0, 3);
		cvConvertScaleAbs(diff, abs);
		cvSobel(singleChannel2, diff, 0, 1, 3);
		cvConvertScaleAbs(diff, singleChannel2);
		cvAdd(abs, singleChannel2, singleChannel2, 0);
		cvMax(pIplOutputImage, singleChannel2, pIplOutputImage);
	
		cvReleaseImage(&singleChannel0);
		cvReleaseImage(&singleChannel1);
		cvReleaseImage(&singleChannel2);
		cvReleaseImage(&diff);
		cvReleaseImage(&abs);
	}
	
	cvReleaseImageHeader(&pIplInputImage);
	cvReleaseImageHeader(&pIplOutputImage);
}
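
A minimal usage sketch for the IVT function above, assuming IVT's usual headers and the CByteImage(width, height, type) constructor (the wrapper name is ours, not from the library):

#include "Image/ByteImage.h"
#include "Image/ImageProcessorCV.h"

void computeGradients(CByteImage *rgbInput)
{
	CByteImage gradient(rgbInput->width, rgbInput->height, CByteImage::eGrayScale);
	ImageProcessorCV::CalculateGradientImage(rgbInput, &gradient);
	// gradient now holds, per pixel, the max over the per-channel |dx| + |dy| responses
}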
Code example #8
void Utils::geoErode(IplImage* src, IplImage* mask, IplImage* dst)
{
    cvErode(src,dst);
    cvMax(dst, mask, dst);
}
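
Its dual, a single geodesic dilation step, clamps a dilation from above with the mask. A sketch (geoDilate is our name, mirroring the above, and is not necessarily part of Utils):

void geoDilate(IplImage* src, IplImage* mask, IplImage* dst)
{
    cvDilate(src, dst);
    cvMin(dst, mask, dst);
}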