예제 #1
0
//--------------------------------------------------------------------------------
// Extracts a single plane (0, 1 or 2) of this color image into grayImage,
// allocating the destination to this image's size if it is not yet allocated.
// Logs an error if this image is unallocated, if the ROIs mismatch, or if
// whichPlane is out of range (the last case previously failed silently).
void ofxCvColorImage::convertToGrayscalePlanarImage (ofxCvGrayscaleImage& grayImage, int whichPlane){
	if( !bAllocated ){
		ofLogError("ofxCvColorImage") << "convertToGrayscalePlanarImage(): image not allocated";	
		return;	
	}
	
	if( !grayImage.bAllocated ){
		grayImage.allocate(width, height);
	} 
		
	ofRectangle roi = getROI();
    ofRectangle grayRoi = grayImage.getROI();
   
	if( grayRoi.width == roi.width && grayRoi.height == roi.height ){

		// Pass the destination in the slot matching the requested plane;
		// the other output slots stay NULL so only that plane is extracted.
		switch (whichPlane){
				
			case 0:
				cvCvtPixToPlane(cvImage, grayImage.getCvImage(), NULL, NULL, NULL);
				grayImage.flagImageChanged();
				break;
			case 1:
				cvCvtPixToPlane(cvImage, NULL, grayImage.getCvImage(), NULL, NULL);
				grayImage.flagImageChanged();
				break;
			case 2:
				cvCvtPixToPlane(cvImage, NULL, NULL, grayImage.getCvImage(), NULL);
				grayImage.flagImageChanged();
				break;
			default:
				// FIX: an out-of-range plane index previously did nothing silently.
				ofLogError("ofxCvColorImage") << "convertToGrayscalePlanarImage(): plane index out of range";
				break;
		}
			
	} else {
		// FIX: the message previously named the plural sibling function,
		// which made the log misleading when debugging.
		ofLogError("ofxCvColorImage") << "convertToGrayscalePlanarImage(): image size or region of interest mismatch";
	}
}
예제 #2
0
//--------------------------------------------------------------------------------
// Copies one plane (0, 1 or 2) of this color image into grayImage.
// Requires the destination ROI to match this image's ROI; any other plane
// index is a no-op, matching the original switch with no default case.
void ofxCvColorImage::convertToGrayscalePlanarImage (ofxCvGrayscaleImage& grayImage, int whichPlane){
	
	ofRectangle srcRoi = getROI();
	ofRectangle dstRoi = grayImage.getROI();
	
	if( dstRoi.width != srcRoi.width || dstRoi.height != srcRoi.height ){
		ofLog(OF_LOG_ERROR, "in convertToGrayscalePlanarImages, ROI/size mismatch");
		return;
	}
	
	if( whichPlane >= 0 && whichPlane <= 2 ){
		// Put the destination into the slot for the requested plane and
		// leave the other output slots NULL.
		IplImage* planeSlots[3] = { NULL, NULL, NULL };
		planeSlots[whichPlane] = grayImage.getCvImage();
		cvCvtPixToPlane(cvImage, planeSlots[0], planeSlots[1], planeSlots[2], NULL);
		grayImage.flagImageChanged();
	}
}
예제 #3
0
// Tracks contour i from oimg (previous frame) into img (current frame) using
// CamShift on a 3-D color histogram back-projection, then writes the resulting
// rotation + translation into `rotation`/`shift` and transforms the contour
// and feature points accordingly.
// All reference parameters (planes, hist, backproject, rotation, shift) are
// lazily allocated here on first use and reused across calls; the caller owns
// and eventually frees them.  When oready is false the reference histogram is
// (re)built from oimg before tracking.
void CamShiftPlugin::ProcessStatic
( int i, ImagePlus *img, ImagePlus *oimg, int *hsizes, CvTermCriteria criteria,
IplImage** &planes, CvHistogram* &hist, IplImage* &backproject, CvRect &orect, CvPoint &ocenter, CvRect &searchwin, CvMat* &rotation, CvMat* &shift, bool oready){
	// Re-create the histogram if the requested bin count changed since last call.
	if (hist && hist->mat.dim[0].size!=hsizes[0])
		cvReleaseHist(&hist);
	if( !hist )
        hist = cvCreateHist( 3, hsizes, CV_HIST_ARRAY, NULL, 0);
    if( !backproject )
		backproject = cvCreateImage( cvGetSize(img->orig), IPL_DEPTH_8U, 1 );
	// One single-channel plane per color channel of img->orig.
	if( !planes ){
	    planes = (IplImage**) malloc(3 * sizeof(IplImage*));
        for (int p=0; p<3; p++)
			planes[p] = cvCreateImage( cvGetSize(img->orig), 8, 1 );
	}
	if (!rotation)
		rotation = cvCreateMat(2,3,CV_32FC1);
	if (!shift)
		shift = cvCreateMat(2,1,CV_32FC1);

	// Build the reference histogram from the previous frame's contour region.
	if (!oready){
		orect = cvBoundingRect(oimg->contourArray[i],1);
		cvCvtPixToPlane( oimg->orig, planes[0], planes[1], planes[2], 0 );
        for (int p=0; p<3; p++)
            cvSetImageROI(planes[p],orect);
        cvCalcHist( planes, hist, 0, NULL );
		cvNormalizeHist(hist, 255);
        for (int p=0; p<3; p++)
            cvResetImageROI(planes[p]);
		searchwin = orect; //cvRect(0,0,img->orig->width, img->orig->height);
		ocenter = cvPoint(orect.x+orect.width/2, orect.y+orect.height/2);
	}
	//The following checks shouldn't be needed.
	RestrictRect(searchwin, cvRect(0,0,backproject->width,backproject->height));

	// Back-project the reference histogram onto the current frame and run
	// CamShift from the previous search window.
	cvCvtPixToPlane( img->orig, planes[0], planes[1], planes[2], 0 );
    cvCalcBackProject( planes, backproject, hist );
	CvBox2D track_box;
	CvConnectedComp track_comp;
    cvCamShift( backproject, searchwin,
                criteria,
                &track_comp, &track_box );
	searchwin = track_comp.rect;
	// Translation of the tracked box center relative to the reference center.
	cvmSet(shift,0,0,track_box.center.x - ocenter.x);
	cvmSet(shift,1,0,track_box.center.y - ocenter.y);
//	shift->data.fl[0] = track_box.center.x - ocenter.x;
//	shift->data.fl[1] = track_box.center.y - ocenter.y;
	cv2DRotationMatrix(track_box.center, track_box.angle, 1.0, rotation);
	// Apply rotation + shift to the contour and its feature points.
	cvTransform(oimg->contourArray[i],img->contourArray[i],rotation,shift);
//	CvMat *ofm = FeatPointsToMat(oimg->feats[i]);
//	Cvmat *fm  = FeatPointsToMat(img->feats[i]);
//	cvTransform(ofm,img->contourArray[i],rotation,shift);
	TransformFeatPoints(oimg->feats[i], img->feats[i], rotation, shift);
}
예제 #4
0
// Back-projects the stored hue/saturation histogram onto rgbImage (BGR) and
// returns the result wrapped in a new Image-backed Mask.  Returns NULL when
// there is no histogram or no usable input image.
Mask* Histogram::calcBackProjection(const Image* rgbImage) const
{
    // Short-circuit keeps the NULL-pointer checks ordered as before.
    if (!m_histogram || !rgbImage || !rgbImage->cvImage())
        return NULL;

    // Convert the BGR input to HSV.
    IplImage* hsvImg = cvCreateImage(rgbImage->size(), 8, 3);
    cvCvtColor(rgbImage->cvImage(), hsvImg, CV_BGR2HSV);

    // Split into single-channel planes; only hue and saturation feed the
    // back-projection, the value plane is discarded.
    IplImage* huePlane = cvCreateImage(rgbImage->size(), 8, 1);
    IplImage* satPlane = cvCreateImage(rgbImage->size(), 8, 1);
    IplImage* valPlane = cvCreateImage(rgbImage->size(), 8, 1);
    cvCvtPixToPlane(hsvImg, huePlane, satPlane, valPlane, NULL);

    IplImage* hsPlanes[] = {huePlane, satPlane};

    IplImage* backProject = cvCreateImage(rgbImage->size(), 8, 1);
    cvCalcBackProject(hsPlanes, backProject, m_histogram);

    // Free the intermediates; backProject's ownership passes to the returned Image.
    cvReleaseImage(&valPlane);
    cvReleaseImage(&satPlane);
    cvReleaseImage(&huePlane);
    cvReleaseImage(&hsvImg);

    return new Image(backProject);
}
예제 #5
0
// Builds a 2-D hue/saturation histogram (sizeH x sizeS bins) from a BGR image,
// optionally restricted to the given mask.  The caller owns the returned
// Histogram object.
Histogram* Histogram::createHSHistogram(const Image* image, const Mask* mask, int sizeH, int sizeS)
{
    Histogram* result = new Histogram();

    // BGR -> HSV so hue and saturation can be histogrammed.
    IplImage* hsvImg = cvCreateImage(image->size(), 8, 3);
    cvCvtColor(image->cvImage(), hsvImg, CV_BGR2HSV);

    // Split the HSV image into its three single-channel planes.
    IplImage* huePlane = cvCreateImage(image->size(), 8, 1);
    IplImage* satPlane = cvCreateImage(image->size(), 8, 1);
    IplImage* valPlane = cvCreateImage(image->size(), 8, 1);
    cvCvtPixToPlane(hsvImg, huePlane, satPlane, valPlane, NULL);

    IplImage* hsPlanes[] = {huePlane, satPlane};

    // In OpenCV's 8-bit HSV, hue spans 0..180 and saturation spans 0..255.
    int binCounts[] = {sizeH, sizeS};
    float hueRange[] = { 0, 180 };
    float satRange[] = { 0, 255 };
    float* binRanges[] = {hueRange, satRange};

    result->m_histogram = cvCreateHist(2, binCounts, CV_HIST_ARRAY, binRanges, 1);
    cvCalcHist(hsPlanes, result->m_histogram, false, mask ? mask->cvImage() : NULL);

    cvReleaseImage(&valPlane);
    cvReleaseImage(&satPlane);
    cvReleaseImage(&huePlane);
    cvReleaseImage(&hsvImg);
    return result;
}
예제 #6
0
// Loads the configured mask image and normalizes it to a single-channel image
// stored in mMaskImage, then reloads the remaining configuration.
// Emits an error (and returns) if the file cannot be opened.
void THISCLASS::OnStart() {
	// Load mask image
	wxString filename_string = GetConfigurationString(wxT("MaskImage"), wxT(""));
	wxFileName filename = mCore->GetProjectFileName(filename_string);
	if (filename.IsOk()) {
		mMaskImage = cvLoadImage(filename.GetFullPath().mb_str(wxConvFile), CV_LOAD_IMAGE_ANYDEPTH | CV_LOAD_IMAGE_ANYCOLOR);
	}
	if (! mMaskImage) {
		AddError(wxT("Cannot open mask file."));
		return;
	}

	// Convert mask image
	if (mMaskImage->nChannels == 3) {
		// BGR case, we convert to gray
		IplImage *img = cvCreateImage(cvSize(mMaskImage->width, mMaskImage->height), mMaskImage->depth, 1);
		cvCvtColor(mMaskImage, img, CV_BGR2GRAY);
		cvReleaseImage(&mMaskImage);
		mMaskImage = img;
	} else if (mMaskImage->nChannels == 1) {
		// Already in gray, do nothing
	} else {
		// Other cases, we take the first channel
		// (cvCvtPixToPlane with one non-NULL destination extracts that plane only)
		IplImage *img = cvCreateImage(cvSize(mMaskImage->width, mMaskImage->height), mMaskImage->depth, 1);
		cvCvtPixToPlane(mMaskImage, img, NULL, NULL, NULL);
		cvReleaseImage(&mMaskImage);
		mMaskImage = img;
	}

	// load other parameters:
	OnReloadConfiguration();
	return;
}
예제 #7
0
//--------------------------------------------------------------------------------
// Splits this color image into three grayscale plane images (red, green,
// blue), allocating any unallocated destination to this image's size.
// Logs an error if this image is unallocated or if any destination ROI does
// not match this image's ROI.
void ofxCvColorImage::convertToGrayscalePlanarImages(ofxCvGrayscaleImage& red, ofxCvGrayscaleImage& green, ofxCvGrayscaleImage& blue){
	if( !bAllocated ){
		ofLogError("ofxCvColorImage") << "convertToGrayscalePlanarImages(): image not allocated";	
		return;	
	}
	
	// FIX: allocate any unallocated destination BEFORE reading the ROIs.
	// The original captured the ROIs first, so a just-allocated destination
	// was compared against its stale pre-allocation ROI and the conversion
	// was wrongly skipped with a mismatch error.
	if( !red.bAllocated ){
		red.allocate(width, height);
	} 
	if( !green.bAllocated ){
		green.allocate(width, height);
	} 
	if( !blue.bAllocated ){
		blue.allocate(width, height);
	} 

    ofRectangle roi = getROI();
    ofRectangle redRoi = red.getROI();
    ofRectangle greenRoi = green.getROI();
    ofRectangle blueRoi = blue.getROI();
			
	if( redRoi.width == roi.width && redRoi.height == roi.height &&
        greenRoi.width == roi.width && greenRoi.height == roi.height &&
        blueRoi.width == roi.width && blueRoi.height == roi.height )
    {
        // Split all three channels in a single call.
        cvCvtPixToPlane(cvImage, red.getCvImage(), green.getCvImage(), blue.getCvImage(), NULL);
        red.flagImageChanged();
        green.flagImageChanged();
        blue.flagImageChanged();
	} else {
        ofLogError("ofxCvColorImage") << "convertToGrayscalePlanarImages(): image size or region of interest mismatch";
	}
}
// Converts the current input image to a 3-channel BGR color image and
// publishes it in mDataStructureImageColor; updates the display if active.
void THISCLASS::OnStep() {
	IplImage *inputimage = mCore->mDataStructureInput.mImage;
	if (! inputimage) {
		return;
	}

	try {
		// We convert the input image to a color image
		if (inputimage->nChannels == 3) {
			// We already have a color image
			mCore->mDataStructureImageColor.mImage = inputimage;
		} else if (inputimage->nChannels == 1) {
			// Gray, convert to BGR
			PrepareOutputImage(inputimage);
			cvCvtColor(inputimage, mOutputImage, CV_GRAY2BGR);
			mCore->mDataStructureImageColor.mImage = mOutputImage;
		} else {
			// Other cases, we take the first channel and transform it in BGR
			// NOTE(review): cvCvtPixToPlane expects a single-channel destination;
			// this assumes PrepareOutputImage() yields a compatible mOutputImage
			// for the intermediate step - confirm against its implementation.
			PrepareOutputImage(inputimage);
			cvCvtPixToPlane(inputimage, mOutputImage, NULL, NULL, NULL);
			cvCvtColor(mOutputImage, mOutputImage, CV_GRAY2BGR);
			mCore->mDataStructureImageColor.mImage = mOutputImage;
		}
	} catch (...) {
		// FIX: the message was copy-pasted from the to-gray component; this
		// component converts to color.
		AddError(wxT("Conversion to color failed."));
	}

	// Set the display
	DisplayEditor de(&mDisplayOutput);
	if (de.IsActive()) {
		de.SetMainImage(mCore->mDataStructureImageColor.mImage);
	}
}
예제 #9
0
//default values h_bins=30,s_bins=32,scale=10
// Builds and normalizes a 2-D hue/saturation histogram (h_bins x s_bins) from
// a BGR image.  The caller owns the returned histogram.
CvHistogram * Histogram::getHShistogramFromRGB(IplImage* src){
	// Working images: an HSV copy of the source plus one 8-bit plane per channel.
	IplImage* hsvImg   = cvCreateImage( cvGetSize(src), 8, 3 );
	IplImage* huePlane = cvCreateImage( cvGetSize(src), 8, 1 );
	IplImage* satPlane = cvCreateImage( cvGetSize(src), 8, 1 );
	IplImage* valPlane = cvCreateImage( cvGetSize(src), 8, 1 );

	// BGR -> HSV, then split; only hue and saturation are histogrammed.
	cvCvtColor( src, hsvImg, CV_BGR2HSV );
	cvCvtPixToPlane( hsvImg, huePlane, satPlane, valPlane, 0 );

	IplImage* hsPlanes[] = { huePlane, satPlane };

	// Hue spans 0..180 in OpenCV 8-bit HSV; saturation spans 0..255.
	int binCounts[] = { this->h_bins, this->s_bins };
	float hueRange[] = { 0, 180 };
	float satRange[] = { 0, 255 };
	float* binRanges[] = { hueRange, satRange };

	CvHistogram* hist = cvCreateHist( 2, binCounts, CV_HIST_ARRAY, binRanges, 1 );
	cvCalcHist( hsPlanes, hist, 0, 0 );
	cvNormalizeHist( hist, 1.0 );

	// Intermediates are freed here; only the histogram escapes.
	cvReleaseImage(&valPlane);
	cvReleaseImage(&satPlane);
	cvReleaseImage(&huePlane);
	cvReleaseImage(&hsvImg);

	return hist;
}
예제 #10
0
// Produces a binary mask of pixels whose (equalized) HSV values fall inside
// the box [H..H+5*Scale, S..S+5*Scale_1, V..V+5*Scale_2], then erodes it to
// remove speckle.  The caller owns the returned single-channel image.
IplImage* Panoramic::GetHsvFeature(IplImage* src,int H,int S,int V,int Scale ,int Scale_1 ,int Scale_2)
{
	IplImage *colorImg	  = cvCreateImage(cvGetSize(src),8,3);
    IplImage *hsvImg	  = cvCreateImage(cvGetSize(src),8,3);
	// Work on a copy so the source is never modified.
	cvCopy(src,colorImg);
	IplImage *Plane_1	  = cvCreateImage( cvGetSize(colorImg), 8, 1);//H plane
    IplImage *Plane_2	  = cvCreateImage( cvGetSize(colorImg), 8, 1);//S plane
	IplImage *Plane_3	  = cvCreateImage( cvGetSize(colorImg), 8, 1);//V plane
	IplImage *dst	      = cvCreateImage( cvGetSize(src),8,1);
	cvCvtColor(colorImg,hsvImg,CV_BGR2HSV);
	cvCvtPixToPlane( hsvImg, Plane_1, Plane_2, Plane_3, 0 );

	// Equalize S and V to reduce lighting variation, then recombine
	// (hue is left untouched).
	cvEqualizeHist(Plane_2,Plane_2);//s_plane
	cvEqualizeHist(Plane_3,Plane_3);//v_plane
	cvMerge(Plane_1,Plane_2,Plane_3,0,hsvImg);
	// The upper bound of each channel is offset by 5x its scale parameter.
	cvInRangeS(hsvImg, cvScalar(H,S, V), cvScalar(5*Scale+H,5*Scale_1+S,5*Scale_2+V), dst);//cvScalar(0,40, 40), cvScalar(60, 170, 255)
	cvErode(dst,dst,0,2);

	/*cvNamedWindow("HSV_ROI",0);
	cvShowImage ("HSV_ROI",dst);*/

	cvReleaseImage(&colorImg);
	cvReleaseImage(&hsvImg);
	cvReleaseImage(&Plane_1);
	cvReleaseImage(&Plane_2);
	cvReleaseImage(&Plane_3);

	return dst;
}
예제 #11
0
// Back-projects the near-range hue/saturation histogram onto imgIn (BGR) and
// returns the smoothed result as a single-channel mask owned by the caller.
IplImage* BouyBaseObject::HistogramMask(const IplImage * imgIn) const
{
    IplImage * imgOut = cvCreateImage(cvGetSize(imgIn),IPL_DEPTH_8U, 1);
    // FIX: `temp` was allocated and released on every call but never used;
    // it is only referenced by the disabled far-histogram path below, so the
    // dead per-call allocation has been removed with it.
    //IplImage * temp = cvCreateImage(cvGetSize(imgIn),IPL_DEPTH_8U, 1);
    //IplImage * norm = cvCreateImage(cvGetSize(imgIn),IPL_DEPTH_32F, 1);
    IplImage* hsv = cvCreateImage( cvGetSize(imgIn), 8, 3 );
    cvCvtColor( imgIn, hsv, CV_BGR2HSV );
    IplImage* h_plane = cvCreateImage( cvGetSize(imgIn), 8, 1 );
    IplImage* s_plane = cvCreateImage( cvGetSize(imgIn), 8, 1 );
    IplImage* v_plane = cvCreateImage( cvGetSize(imgIn), 8, 1 );
    IplImage* planes[] = { h_plane, s_plane };
    cvCvtPixToPlane( hsv, h_plane, s_plane, v_plane, 0 );

    cvCalcBackProject(planes,imgOut,mNearHist);
    //cvCalcBackProject(planes,temp,mFarHist);
    //VisionUtils::CombineMasks(imgOut,temp,imgOut);
    //cvNormalize(norm,imgOut,255,0,CV_MINMAX);
    cvSmooth(imgOut,imgOut,2);   // 2 == CV_GAUSSIAN
    //cvShowImage("histo", imgOut);
    cvReleaseImage(&h_plane);
    cvReleaseImage(&s_plane);
    cvReleaseImage(&v_plane);
    cvReleaseImage(&hsv);

    return imgOut;
}
예제 #12
0
// Computes weights and drives the new location of object in the next frame
//frame: input image
//histogram: candidate histogram of the target
//Computes the per-bin weights and updates the target's coordinates.
// Mean-shift weight computation: for every pixel of the active object's box,
// weight = initHistogram[bin] / histogram[bin], then the object center (X, Y)
// is moved to the weight-weighted centroid of the box.  `histogram` is the
// candidate histogram previously computed by FindHistogram for this box.
void CObjectTracker::FindWightsAndCOM(IplImage *frame, FLOAT32 (*histogram))
{
SINT16 i = 0;
SINT16 x = 0;
SINT16 y = 0;
UBYTE8 E = 0;
FLOAT32 sumOfWeights = 0;
SINT16 ptr = 0;
UBYTE8 qR = 0,qG = 0,qB = 0;
FLOAT32   newX = 0.0;
FLOAT32   newY = 0.0;
// ULONG_32 pixelValues = 0;
IplImage* r, * g, * b;


FLOAT32 *weights = new FLOAT32[HISTOGRAM_LENGTH];

// Per-bin weight qu/pu(y0); zero where the candidate histogram is empty.
for (i=0;i<HISTOGRAM_LENGTH;i++)
{
   if (histogram[i] >0.0 )
    weights[i] = m_sTrackingObjectTable[m_cActiveObject].initHistogram[i]/histogram[i]; //qu/pu(y0)
   else
    weights[i] = 0.0;
}

r = cvCreateImage( cvGetSize(frame), frame->depth, 1 );
g = cvCreateImage( cvGetSize(frame), frame->depth, 1 );
b = cvCreateImage( cvGetSize(frame), frame->depth, 1 );
cvCvtPixToPlane( frame, b, g, r, NULL ); //divide color image into separate planes r, g, b. The exact sequence doesn't matter.

// Accumulate the weighted centroid over the object's box, clipped to the frame.
for (y=max(m_sTrackingObjectTable[m_cActiveObject].Y-m_sTrackingObjectTable[m_cActiveObject].H/2,0);y<=min(m_sTrackingObjectTable[m_cActiveObject].Y+m_sTrackingObjectTable[m_cActiveObject].H/2,m_nImageHeight-1);y++)
   for (x=max(m_sTrackingObjectTable[m_cActiveObject].X-m_sTrackingObjectTable[m_cActiveObject].W/2,0);x<=min(m_sTrackingObjectTable[m_cActiveObject].X+m_sTrackingObjectTable[m_cActiveObject].W/2,m_nImageWidth-1);x++)
   {
    E = CheckEdgeExistance(r, g, b,x,y);

    // Quantize each channel to 16 levels; same binning as FindHistogram.
    qR = (UBYTE8)pixval8c( r, y, x )/16;
    qG = (UBYTE8)pixval8c( g, y, x )/16;
    qB = (UBYTE8)pixval8c( b, y, x )/16;

    ptr = 4096*E+256*qR+16*qG+qB; //some recalculation here. The bin number of (x, y) can be stroed somewhere in fact.

    newX += (weights[ptr]*x);
    newY += (weights[ptr]*y);

    sumOfWeights += weights[ptr];
   }

   if (sumOfWeights>0)
   {
    m_sTrackingObjectTable[m_cActiveObject].X = SINT16((newX/sumOfWeights) + 0.5); //update location
    m_sTrackingObjectTable[m_cActiveObject].Y = SINT16((newY/sumOfWeights) + 0.5);
   }

cvReleaseImage(&r);
cvReleaseImage(&g);
cvReleaseImage(&b);
   delete[] weights, weights = 0;
}
예제 #13
0
File: laplace.c    Project: runaway/OpenCV1.1
// Laplace transform demo (restored from mojibake GBK comment "拉普拉斯变换")
/* Live Laplacian demo: captures frames from a camera (or an AVI given as
 * argv[1]), applies cvLaplace to each color plane independently, recombines
 * the planes and displays the result until a key is pressed. */
int main( int argc, char** argv )
{
    IplImage* laplace = 0;
    IplImage* colorlaplace = 0;
    IplImage* planes[3] = { 0, 0, 0 };
    CvCapture* capture = 0;
    
    /* Single-digit argument selects a camera index; otherwise treat argv[1]
     * as a video file path. */
    if( argc == 1 || (argc == 2 && strlen(argv[1]) == 1 && isdigit(argv[1][0])))
        capture = cvCaptureFromCAM( argc == 2 ? argv[1][0] - '0' : 0 );
    else if( argc == 2 )
        capture = cvCaptureFromAVI( argv[1] ); 

    if( !capture )
    {
        fprintf(stderr,"Could not initialize capturing...\n");
        return -1;
    }
        
    cvNamedWindow( "Laplacian", 0 );

    for(;;)
    {
        IplImage* frame = 0;
        int i;

        frame = cvQueryFrame( capture );
        if( !frame )
            break;

        /* Lazily allocate the working images on the first frame; the 16-bit
         * signed image holds the raw Laplacian before the abs-scale back to 8u. */
        if( !laplace )
        {
            for( i = 0; i < 3; i++ )
                planes[i] = cvCreateImage( cvSize(frame->width,frame->height), 8, 1 );
            laplace = cvCreateImage( cvSize(frame->width,frame->height), IPL_DEPTH_16S, 1 );
            colorlaplace = cvCreateImage( cvSize(frame->width,frame->height), 8, 3 );
        }

        /* Split, filter each plane, and merge back into a color image. */
        cvCvtPixToPlane( frame, planes[0], planes[1], planes[2], 0 );
        for( i = 0; i < 3; i++ )
        {
            cvLaplace( planes[i], laplace, 3 );
            cvConvertScaleAbs( laplace, planes[i], 1, 0 );
        }
        cvCvtPlaneToPix( planes[0], planes[1], planes[2], 0, colorlaplace );
        /* Preserve the capture's row order (top-down vs bottom-up). */
        colorlaplace->origin = frame->origin;

        cvShowImage("Laplacian", colorlaplace );

        if( cvWaitKey(10) >= 0 )
            break;
    }

    cvReleaseCapture( &capture );
    cvDestroyWindow("Laplacian");

    return 0;
}
예제 #14
0
/* Thresholds the frame's HSV channels to a binary skin mask and, when
 * `display` is set, writes the selected channel (or the combined mask) back
 * into the frame buffer as gray-scale RGB.
 * NOTE(review): skin_under_seed is always 0 here; confirm whether the caller
 * expects a computed value. */
gint gstskin_find_skin_center_of_mass(struct _GstSkin *skin, gint display)
{
  int skin_under_seed = 0;

  /* Header-only image: it borrows the pixel buffer of skin->cvRGB, so it must
   * be released with cvReleaseImageHeader() (see cleanup below). */
  IplImage* imageRGB = cvCreateImageHeader( cvSize(skin->width, skin->height), IPL_DEPTH_8U, 3);
  imageRGB->imageData = skin->cvRGB->imageData;

  IplImage* imageHSV = cvCreateImage( cvSize(skin->width, skin->height), IPL_DEPTH_8U, 3);
  cvCvtColor(imageRGB, imageHSV, CV_RGB2HSV);

  IplImage* planeH = cvCreateImage( cvGetSize(imageHSV), 8, 1);	// Hue component.
  IplImage* planeH2= cvCreateImage( cvGetSize(imageHSV), 8, 1);	// Hue component, 2nd threshold
  IplImage* planeS = cvCreateImage( cvGetSize(imageHSV), 8, 1);	// Saturation component.
  IplImage* planeV = cvCreateImage( cvGetSize(imageHSV), 8, 1);	// Brightness component.
  cvCvtPixToPlane(imageHSV, planeH, planeS, planeV, 0);	// Extract the 3 color components.

  // Detect which pixels in each of the H, S and V channels are probably skin pixels.
  // Assume that skin has a Hue between 0 to 18 (out of 180), and Saturation above 50, and Brightness above 80.
  cvThreshold(planeH , planeH2, 10, UCHAR_MAX, CV_THRESH_BINARY);         //(hue > 10)
  cvThreshold(planeH , planeH , 20, UCHAR_MAX, CV_THRESH_BINARY_INV);     //(hue < 20)
  cvThreshold(planeS , planeS , 48, UCHAR_MAX, CV_THRESH_BINARY);         //(sat > 48)
  cvThreshold(planeV , planeV , 80, UCHAR_MAX, CV_THRESH_BINARY);         //(val > 80)

  // erode the HUE to get rid of noise.
  cvErode(planeH, planeH, NULL, 1);

  // Combine all 3 thresholded color components, so that an output pixel will only
  // be white (255) if the H, S and V pixels were also white.
  IplImage* imageSkinPixels = cvCreateImage( cvGetSize(imageHSV), 8, 1);        // Greyscale output image.
  // imageSkin = (hue > 10) ^ (hue < 20) ^ (sat > 48) ^ (val > 80), where   ^ mean pixels-wise AND
  cvAnd(planeH         , planeS , imageSkinPixels);	
  cvAnd(imageSkinPixels, planeH2, imageSkinPixels);	
  cvAnd(imageSkinPixels, planeV , imageSkinPixels);	

  if(display){
    if( skin->showH )
      cvCvtColor(planeH, imageRGB, CV_GRAY2RGB);
    else if( skin->showS )
      cvCvtColor(planeS, imageRGB, CV_GRAY2RGB);
    else if( skin->showV )
      cvCvtColor(planeV, imageRGB, CV_GRAY2RGB);
    else
      cvCvtColor(imageSkinPixels, imageRGB, CV_GRAY2RGB);
  }


  cvReleaseImage( &imageSkinPixels );
  cvReleaseImage( &planeH );
  cvReleaseImage( &planeH2);
  cvReleaseImage( &planeS );
  cvReleaseImage( &planeV );
  cvReleaseImage( &imageHSV );
  /* FIX: imageRGB owns only its header - the pixel data belongs to
   * skin->cvRGB.  cvReleaseImage() would also attempt to release the data,
   * so release just the header. */
  cvReleaseImageHeader( &imageRGB );

  return(skin_under_seed);
}
예제 #15
0
//--------------------------------------------------------------------------------
// Splits this color image into three grayscale plane images.  All three
// destinations must already be allocated with exactly this image's size.
void ofxCvColorImage::convertToGrayscalePlanarImages(ofxCvGrayscaleImage& red, ofxCvGrayscaleImage& green, ofxCvGrayscaleImage& blue){
	bool sizesMatch = red.width   == width && red.height   == height &&
	                  green.width == width && green.height == height &&
	                  blue.width  == width && blue.height  == height;
	if( !sizesMatch ){
        ofLog(OF_LOG_ERROR, "in convertToGrayscalePlanarImages, images are different sizes");
        return;
	}
	// One call extracts all three channels.
	cvCvtPixToPlane(cvImage, red.getCvImage(), green.getCvImage(), blue.getCvImage(), NULL);
}
예제 #16
0
// Computes a gradient magnitude image from a 3-channel input, combining the
// gradients of channel 1 ("S") with the gradients of channel 0 ("H") masked
// by a thresholded channel 1.  Uses |dx|+|dy| instead of sqrt(dx^2+dy^2) for speed.
// NOTE(review): despite the HSV naming, the eRGB24 input is split directly
// without any BGR->HSV conversion; channels 0/1 are treated as H/S - confirm
// that callers pass an already-HSV-packed image.
void ImageProcessorCV::CalculateGradientImageHSV(CByteImage *pInputImage, CByteImage *pOutputImage)
{
	// Require matching sizes and RGB24 -> grayscale types; silently no-op otherwise.
	if (pInputImage->width != pOutputImage->width || pInputImage->height != pOutputImage->height ||
		pInputImage->type != CByteImage::eRGB24 || pOutputImage->type != CByteImage::eGrayScale)
		return;

	// Wrap the CByteImages as IplImage headers (no pixel copy).
	IplImage *pIplInputImage = IplImageAdaptor::Adapt(pInputImage);
	IplImage *pIplOutputImage = IplImageAdaptor::Adapt(pOutputImage);

	// Determine Gradient Image by Irina Wchter
	// instead of normal norm sqrt(x*x +y*y) use |x|+|y| because it is much faster
	IplImage *singleChannel0 = cvCreateImage(cvSize(pInputImage->width,pInputImage->height), IPL_DEPTH_8U, 1);
	IplImage *singleChannel1 = cvCreateImage(cvSize(pInputImage->width,pInputImage->height), IPL_DEPTH_8U, 1);
	IplImage *singleChannel2 = cvCreateImage(cvSize(pInputImage->width,pInputImage->height), IPL_DEPTH_8U, 1);
	// 16-bit signed buffer for raw Sobel responses before abs-scaling to 8u.
	IplImage *diff = cvCreateImage(cvSize(pInputImage->width, pInputImage->height), IPL_DEPTH_16S, 1);
	IplImage *abs = cvCreateImage(cvSize(pInputImage->width, pInputImage->height), IPL_DEPTH_8U, 1);
		
	cvCvtPixToPlane(pIplInputImage, singleChannel0, singleChannel1, singleChannel2, NULL);
	
	// calculate gradients on S-channel
	//cvSmooth(singleChannel1, singleChannel1, CV_GAUSSIAN, 3, 3);
	cvSobel(singleChannel1, diff, 1, 0, 3);
	cvConvertScaleAbs(diff, abs);
	cvSobel(singleChannel1, diff, 0, 1, 3);
	cvConvertScaleAbs(diff, pIplOutputImage);
	cvAdd(abs, pIplOutputImage, pIplOutputImage, 0);
	
	// threshold S-channel for creating a maskfor gradients of H-channel
	cvThreshold(singleChannel1, singleChannel1, 60, 255, CV_THRESH_BINARY);
	cvDilate(singleChannel1, singleChannel1);
	
	// calculate gradients on H-channel
	//cvSmooth(singleChannel0, singleChannel0, CV_GAUSSIAN, 3, 3);
	cvSobel(singleChannel0, diff, 1, 0, 3);
	cvConvertScaleAbs(diff, abs);
	cvSobel(singleChannel0, diff, 0, 1, 3);
	cvConvertScaleAbs(diff, singleChannel0);
	cvAdd(abs, singleChannel0, singleChannel0, 0);
	
	// filter gradients of H-channel with mask
	cvAnd(singleChannel0, singleChannel1, singleChannel0);
	
	// combine to gradient images
	cvMax(pIplOutputImage, singleChannel0, pIplOutputImage);
	
	// free memory
	cvReleaseImage(&singleChannel0);
	cvReleaseImage(&singleChannel1);
	cvReleaseImage(&singleChannel2);
	cvReleaseImage(&diff);
	cvReleaseImage(&abs);
	
	// Adapt() created only headers; release headers, not the CByteImage data.
	cvReleaseImageHeader(&pIplInputImage);
	cvReleaseImageHeader(&pIplOutputImage);
}
예제 #17
0
File: obsdet.c    Project: jlefley/igvrt-uiuc
/* Grabs frames from camera 0, converts the lower half of each frame to HSV,
 * builds hue/value histograms and hands them to process_frame() until it
 * requests termination (returns -1).
 * Fixes over the original: all image pointers start NULL (the original
 * released uninitialized pointers when a frame was not captured), s_plane is
 * now released (it leaked every iteration), the per-frame images are released
 * even on the terminating iteration, and the "result" window that is actually
 * created is now destroyed. */
int detect_obstacles()
{
	CvCapture* capture = cvCaptureFromCAM(0);
	IplImage *frame = 0;
	IplImage *imHSV = 0;
	IplImage *h_plane = 0;
	IplImage *s_plane = 0;
	IplImage *v_plane = 0;
	int Hthresh = 0;
	int Vthresh = 0;
	int done = 0;

	cvNamedWindow("result", 0);
	cvCreateTrackbar("hue thresh", "result", &Hthresh, 10000, NULL);
	cvCreateTrackbar("value thresh", "result", &Vthresh, 100, NULL);

	while (!done)
	{
		frame = cvQueryFrame( capture );
		if (frame) {
			cvSmooth(frame, frame, CV_GAUSSIAN, 25, 25, 0, 0);
			/* only the lower half of the frame is analysed */
			cvSetImageROI(frame, cvRect(0, (frame->height/2), frame->width, (frame->height/2)));

			h_plane = cvCreateImage( cvGetSize(frame), 8, 1 );
			s_plane = cvCreateImage( cvGetSize(frame), 8, 1 );
			v_plane = cvCreateImage( cvGetSize(frame), 8, 1 );
			imHSV = cvCreateImage( cvGetSize(frame), IPL_DEPTH_8U, 3 );
			cvCvtColor(frame, imHSV, CV_BGR2HSV);
			cvCvtPixToPlane( imHSV, h_plane, s_plane, v_plane, 0 );

			/* NOTE(review): hist_h/hist_v are recreated every frame and never
			 * released here - confirm whether create_histogram() caches them
			 * or whether a cvReleaseHist() is needed. */
			CvHistogram* hist_h, *hist_v;
			int h_bins = H_BINS, v_bins = V_BINS;
			hist_h = create_histogram(h_plane, H_MAX, &h_bins);
			hist_v = create_histogram(v_plane, V_MAX, &v_bins);

			if (process_frame(frame, h_plane, v_plane, hist_h, hist_v, Hthresh, Vthresh) == -1)
				done = 1; /* defer the loop exit so the releases below still run */
		}
		/* cvReleaseImage() ignores NULL pointers and resets them afterwards,
		 * so these are safe even on iterations where no frame was captured. */
		cvReleaseImage(&imHSV);
		cvReleaseImage(&h_plane);
		cvReleaseImage(&s_plane);
		cvReleaseImage(&v_plane);
	}

	cvReleaseCapture( &capture );
	cvDestroyWindow("result");
	cvDestroyWindow("Display");

	//  cvDestroyWindow("FinalDisplay");
	cvDestroyWindow("VDisplay");
	cvDestroyWindow("Result");
	cvDestroyWindow("Display1");
	return 0;
}
예제 #18
0
	// Create a binary: 0,255 mask where 255 means forground pixel
	// I		Input image, 3 channel, 8u
	// Imask	mask image to be created, 1 channel 8u
	// num		camera number.
	//
void ofxBackground::backgroundDiff(IplImage *I,IplImage *Imask)  //Mask should be grayscale
{
	// Uses the class's preallocated scratch images (Iscratch, Igray1..3,
	// Imaskt) and per-channel low/high bounds (Ilow*/Ihi*) - assumed to be
	// sized to match I; TODO confirm they are set up before the first call.
	cvCvtScale(I,Iscratch,1,0); //To float;
								//Channel 1
	cvCvtPixToPlane( Iscratch, Igray1,Igray2,Igray3, 0 );
	cvInRange(Igray1,Ilow1,Ihi1,Imask);
		//Channel 2
	cvInRange(Igray2,Ilow2,Ihi2,Imaskt);
	cvOr(Imask,Imaskt,Imask);
		//Channel 3
	cvInRange(Igray3,Ilow3,Ihi3,Imaskt);
	cvOr(Imask,Imaskt,Imask);
		//Finally, invert the results
		// (in-range pixels are "background", so foreground becomes 255)
	cvSubRS( Imask, cvScalar(255), Imask);
}
예제 #19
0
//Draw box around object
//Draw box around object: splits the frame into planes, paints a 2-pixel-wide
//rectangle around the active object's box via SetPixelValues, and merges the
//planes back into the frame in place.
void CObjectTracker::DrawObjectBox(IplImage *frame)
{
SINT16 x_diff = 0;
SINT16 x_sum = 0;
SINT16 y_diff = 0;
SINT16 y_sum = 0;
SINT16 x = 0;
SINT16 y = 0;
ULONG_32 pixelValues = 0;
IplImage* r, * g, * b;

r = cvCreateImage( cvGetSize(frame), frame->depth, 1 );
g = cvCreateImage( cvGetSize(frame), frame->depth, 1 );
b = cvCreateImage( cvGetSize(frame), frame->depth, 1 );
cvCvtPixToPlane( frame, b, g, r, NULL );

pixelValues = GetBoxColor();

//the x left and right bounds
x_sum = min(m_sTrackingObjectTable[m_cActiveObject].X+m_sTrackingObjectTable[m_cActiveObject].W/2+1,m_nImageWidth-1);//right bound
x_diff = max(m_sTrackingObjectTable[m_cActiveObject].X-m_sTrackingObjectTable[m_cActiveObject].W/2,0);//left bound
//the y upper and lower bounds
y_sum = min(m_sTrackingObjectTable[m_cActiveObject].Y+m_sTrackingObjectTable[m_cActiveObject].H/2+1,m_nImageHeight-1);//lower bound
y_diff = max(m_sTrackingObjectTable[m_cActiveObject].Y-m_sTrackingObjectTable[m_cActiveObject].H/2,0);//upper bound

// Vertical edges: two columns at each side of the box.
for (y=y_diff;y<=y_sum;y++)
{
   SetPixelValues(r, g, b,pixelValues,x_diff,y);
   SetPixelValues(r, g, b,pixelValues,x_diff+1,y);

      SetPixelValues(r, g, b,pixelValues,x_sum-1,y);
      SetPixelValues(r, g, b,pixelValues,x_sum,y);
}
// Horizontal edges: two rows at the top and bottom of the box.
for (x=x_diff;x<=x_sum;x++)
{
   SetPixelValues(r, g, b,pixelValues,x,y_diff);
      SetPixelValues(r, g, b,pixelValues,x,y_diff+1);

      SetPixelValues(r, g, b,pixelValues,x,y_sum-1);
      SetPixelValues(r, g, b,pixelValues,x,y_sum);
}
// Recombine the modified planes into the original frame (same b,g,r order).
cvCvtPlaneToPix(b, g, r, NULL, frame);

cvReleaseImage(&r);
cvReleaseImage(&g);
cvReleaseImage(&b);
}
예제 #20
0
//Extracts the histogram of box
//frame: input image
//histogram: output histogram
//Computes the histogram of the current target within `frame`.
//The histogram has two halves, each of size 4096: the 256*256*256 RGB
//combinations are quantized down to 16*16*16 = 4096 bins.
//Points in the target region that are edge points are accumulated into the
//second half of the histogram; all other points go into the first half.
// Builds the normalized 8192-bin histogram of the active object's box inside
// `frame`: R, G and B are each quantized to 16 levels (4096 color bins) and
// edge pixels are accumulated into the upper 4096 bins, non-edge pixels into
// the lower 4096 (see the comment block above for the layout).
void CObjectTracker::FindHistogram(IplImage *frame, FLOAT32 (*histogram))
{
SINT16 i = 0;
SINT16 x = 0;
SINT16 y = 0;
UBYTE8 E = 0;
UBYTE8 qR = 0,qG = 0,qB = 0;
// ULONG_32 pixelValues = 0;
UINT32 numberOfPixel = 0;
IplImage* r, * g, * b;

r = cvCreateImage( cvGetSize(frame), frame->depth, 1 );
g = cvCreateImage( cvGetSize(frame), frame->depth, 1 );
b = cvCreateImage( cvGetSize(frame), frame->depth, 1 );
cvCvtPixToPlane( frame, b, g, r, NULL ); //divide color image into separate planes r, g, b. The exact sequence doesn't matter.


for (i=0;i<HISTOGRAM_LENGTH;i++) //reset all histogram
   histogram[i] = 0.0;

//for all the pixels in the region (the box is clipped to the frame bounds)
for (y=max(m_sTrackingObjectTable[m_cActiveObject].Y-m_sTrackingObjectTable[m_cActiveObject].H/2,0);y<=min(m_sTrackingObjectTable[m_cActiveObject].Y+m_sTrackingObjectTable[m_cActiveObject].H/2,m_nImageHeight-1);y++)
   for (x=max(m_sTrackingObjectTable[m_cActiveObject].X-m_sTrackingObjectTable[m_cActiveObject].W/2,0);x<=min(m_sTrackingObjectTable[m_cActiveObject].X+m_sTrackingObjectTable[m_cActiveObject].W/2,m_nImageWidth-1);x++)
   {
    //edge information: does this pixel differ strongly from its neighbors?
    E = CheckEdgeExistance(r, g, b,x,y);

    qR = (UBYTE8)pixval8c( r, y, x )/16;//quantize R component
    qG = (UBYTE8)pixval8c( g, y, x )/16;//quantize G component
    qB = (UBYTE8)pixval8c( b, y, x )/16;//quantize B component

    histogram[4096*E+256*qR+16*qG+qB] += 1; //accumulate, offset by the edge flag (HISTOGRAM_LENGTH=8192)

    numberOfPixel++;

   }

// FIX: guard the normalization - when the object's box lies entirely outside
// the image the region is empty and the original divided by zero, filling the
// histogram with NaN/inf.
if (numberOfPixel > 0)
   for (i=0;i<HISTOGRAM_LENGTH;i++) //normalize
      histogram[i] = histogram[i]/numberOfPixel;
//for (i=0;i<HISTOGRAM_LENGTH;i++)
//   printf("histogram[%d]=%d/n",i,histogram[i]);
     // printf("numberOfPixel=%d/n",numberOfPixel);
cvReleaseImage(&r);
cvReleaseImage(&g);
cvReleaseImage(&b);

}
예제 #21
0
//--------------------------------------------------------------------------------
// Splits this color image into three grayscale planes.  Every destination's
// ROI must match this image's ROI exactly, otherwise an error is logged and
// nothing is written.
void ofxCvColorImage::convertToGrayscalePlanarImages(ofxCvGrayscaleImage& red, ofxCvGrayscaleImage& green, ofxCvGrayscaleImage& blue){
    ofRectangle srcRoi   = getROI();
    ofRectangle redRoi   = red.getROI();
    ofRectangle greenRoi = green.getROI();
    ofRectangle blueRoi  = blue.getROI();

    bool roisMatch = redRoi.width    == srcRoi.width && redRoi.height   == srcRoi.height &&
                     greenRoi.width  == srcRoi.width && greenRoi.height == srcRoi.height &&
                     blueRoi.width   == srcRoi.width && blueRoi.height  == srcRoi.height;

    if( roisMatch ){
        // Extract all three channels in one call and mark each destination dirty.
        cvCvtPixToPlane(cvImage, red.getCvImage(), green.getCvImage(), blue.getCvImage(), NULL);
        red.flagImageChanged();
        green.flagImageChanged();
        blue.flagImageChanged();
    } else {
        ofLog(OF_LOG_ERROR, "in convertToGrayscalePlanarImages, ROI/size mismatch");
    }
}
예제 #22
0
    /********************************************************************
    Utils::CalculateColorHistogram
        CalculateColorHistogram
    Exceptions:
        None
    *********************************************************************/
    /********************************************************************
    Utils::CalculateColorHistogram
        Fills pHistogram from the first two channels of pIplImage (optionally
        cropped to pRectangle) and normalizes it to sum 1.
        NOTE(review): the planes are named h/s but the image is split without
        any color conversion here - confirm callers pass an HSV image whose
        layout matches pHistogram's two dimensions.
    Exceptions:
        None
    *********************************************************************/
    void Utils::CalculateColorHistogram( IplImage*     pIplImage,
                                         CvHistogram*  pHistogram,
                                         CvRect*       pRectangle )
    {
        try
        {
            IplImage* pObjectImage;

            // Work on a cropped copy when a rectangle is given; otherwise use
            // the input directly (and skip the release at the end).
            if ( pRectangle != NULL )
            {
                pObjectImage = OpenCvWrapper::Utils::CropImage( pIplImage, 
                                                            pRectangle );
            }
            else
            {
                pObjectImage = pIplImage;
            }


            IplImage* h_plane = cvCreateImage( cvGetSize( pObjectImage ), 8, 1 );
            IplImage* s_plane = cvCreateImage( cvGetSize( pObjectImage ), 8, 1 );
            IplImage* v_plane = cvCreateImage( cvGetSize( pObjectImage ), 8, 1 );
            IplImage* planes[] = { h_plane, s_plane };

            //convert pixel to plane
            cvCvtPixToPlane( pObjectImage, h_plane, s_plane, v_plane, 0 );

            //calculate the histogram (third channel is split but unused)
            cvCalcHist( planes, pHistogram, 0, 0 );

            //normalize the histogram
            cvNormalizeHist( pHistogram, 1.0 );

            //release the locally created images
            cvReleaseImage( &h_plane );
            cvReleaseImage( &s_plane );
            cvReleaseImage( &v_plane );

            if ( pRectangle != NULL )
            {
                cvReleaseImage( &pObjectImage );
            }
        }
        EXCEPTION_CATCH_AND_ABORT( "Failed to Calculate Color Histogram" );
    }
예제 #23
0
// Converts a BGR image to HSV and returns its hue and saturation planes in
// planes[0] and planes[1].  The caller owns (and must release) both planes;
// the value plane is discarded here.
void calcHSplanes(IplImage* src, IplImage *planes[])
{
    // BGR -> HSV working copy.
    IplImage* hsvImg = cvCreateImage( cvGetSize(src), 8, 3 );
    cvCvtColor( src, hsvImg, CV_BGR2HSV );

    IplImage* huePlane = cvCreateImage( cvGetSize(src), 8, 1 );
    IplImage* satPlane = cvCreateImage( cvGetSize(src), 8, 1 );
    IplImage* valPlane = cvCreateImage( cvGetSize(src), 8, 1 );
    cvCvtPixToPlane( hsvImg, huePlane, satPlane, valPlane, 0 );

    planes[0] = huePlane;
    planes[1] = satPlane;

    // Only the intermediates are freed; planes[0]/planes[1] escape to the caller.
    cvReleaseImage(&valPlane);
    cvReleaseImage(&hsvImg);
}
예제 #24
0
// Menu handler: applies a 3x3 Laplacian to workImg in place.  Single-channel
// images are filtered directly; color images are split, filtered per plane,
// and merged back.  Triggers a repaint when done.
void COpenCVMFCView::OnLaplace()
{
	// TODO: Add your command handler code here

	IplImage* pImage;
	IplImage* pImgLaplace = NULL;
	IplImage* pImgPlanes[3] = {0,0,0};
	int i;

	pImage = workImg;

	// 16-bit signed buffer holds the raw Laplacian before abs-scaling to 8u.
	pImgLaplace = cvCreateImage(cvGetSize(pImage),
		IPL_DEPTH_16S,1);

	if (workImg->nChannels == 1) {
		cvLaplace(pImage,pImgLaplace,3);
		cvConvertScaleAbs(pImgLaplace,pImage, 1, 0 );
	}
	else {
		for (i = 0; i < 3; i++) {
			pImgPlanes[i] = cvCreateImage(cvGetSize(pImage),
				IPL_DEPTH_8U,1);
		}

		// Split, filter each plane independently, then merge back in place.
		cvCvtPixToPlane(pImage,pImgPlanes[0],
			pImgPlanes[1],pImgPlanes[2],0);

		for (i = 0; i < 3; i++) {
			cvLaplace(pImgPlanes[i],pImgLaplace,3);
			cvConvertScaleAbs(pImgLaplace,pImgPlanes[i], 1, 0 );
		}

		cvCvtPlaneToPix(pImgPlanes[0],pImgPlanes[1],
			pImgPlanes[2],0,pImage);

		for (i = 0; i < 3; i++) {
			cvReleaseImage(&pImgPlanes[i]);
		}
	}

	cvReleaseImage(&pImgLaplace);

	// Request a redraw so the filtered image is shown.
	Invalidate();
}
예제 #25
0
void COpenCVMFCView::OnSobel()
{
	// TODO: Add your command handler code here

	IplImage* pImage;
	IplImage* pImgSobel = NULL;
	IplImage* pImgPlanes[3] = {0,0,0};
	int i;

	pImage = workImg;

	pImgSobel = cvCreateImage(cvGetSize(pImage),
		IPL_DEPTH_16S,1);   //  Create Working Image

	if (workImg->nChannels == 1) {            //  Handle Single Channel
		cvSobel(pImage,pImgSobel,1,1,3);
		cvConvertScaleAbs(pImgSobel,pImage, 1, 0 );
	}
	else {                                  //  Handle Triad Ones
		for (i = 0; i < 3; i++) {
			pImgPlanes[i] = cvCreateImage(cvGetSize(pImage),
				IPL_DEPTH_8U,1);    //  Create Sub Image
		}

		cvCvtPixToPlane(pImage,pImgPlanes[0],
			pImgPlanes[1],pImgPlanes[2],0);  //  Get Sub

		for (i = 0; i < 3; i++) {                 //  Handle Sub Independently
			cvSobel(pImgPlanes[i],pImgSobel,1,1,3);
			cvConvertScaleAbs(pImgSobel,pImgPlanes[i], 1, 0 );
		}

		cvCvtPlaneToPix(pImgPlanes[0],pImgPlanes[1],
			pImgPlanes[2],0,pImage);    //  Form Color Image From Sub Images

		for (i = 0; i < 3; i++) {
			cvReleaseImage(&pImgPlanes[i]);  //  Release Sub Image
		}
	}

	cvReleaseImage(&pImgSobel);             //  Release Working Image

	Invalidate();
}
// Create a binary 0/255 mask where 255 means foreground pixel.
//
// Each channel of I is tested against a learned background range
// [Ilow*, Ihi*]; the per-channel "in range" masks are OR-ed together and
// the result is inverted, so a pixel ends up foreground (255) only when it
// falls outside the background range in EVERY channel.
//
// Parameters:
//   I:     input image, 3 channel, 8u
//   Imask: mask image to be created, 1 channel 8u
//   num:   camera number (selects which camera's threshold images to use)
//
// NOTE(review): relies on module-level scratch images (Iscratch, Igray1-3,
// Imaskt) and the threshold images (Ilow1-3[num], Ihi1-3[num]) having been
// allocated at the same size as I -- not verifiable from this function.
//
void backgroundDiff(IplImage *I, IplImage *Imask, int num)  // Mask should be grayscale
{
	cvCvtScale(I, Iscratch, 1, 0);  // To float.

	// Channel 1: split into per-channel planes, then mark pixels inside the
	// channel-1 background range (cvInRange writes 255 for in-range, else 0).
	cvCvtPixToPlane( Iscratch, Igray1, Igray2, Igray3, 0 ); // TODO: book uses cvSplit: check!
	cvInRange( Igray1, Ilow1[num], Ihi1[num], Imask);

	// Channel 2: accumulate -- after the OR, Imask is 255 where the pixel is
	// in range in at least one channel tested so far.
	cvInRange( Igray2, Ilow2[num], Ihi2[num], Imaskt );
	cvOr( Imask, Imaskt, Imask );

	// Channel 3
	cvInRange( Igray3, Ilow3[num], Ihi3[num], Imaskt );
	cvOr( Imask, Imaskt, Imask );

	// Finally, invert the results: 255 - mask turns "in background range in
	// some channel" into 0, leaving 255 only for true foreground pixels.
	cvSubRS( Imask, cvScalar(255), Imask);
}
예제 #27
0
//--------------------------------------------------------------------------------
// Draw a horizontal projection of src into proj_image: for each column x,
// count the pixels whose first channel equals 0 and draw a black bar of that
// height from the top of proj_image.
//
// Parameters:
//   src        : input image (read with cvGet2D; only channel 0 is tested)
//   proj_image : output image; must be at least as tall as src so the
//                tallest possible bar fits
//
// NOTE(review): despite the name, the zero test reads src directly, not the
// hue plane of an HSV conversion.  The original built (and leaked) a full
// HSV split whose planes were never used; that dead work has been removed,
// but confirm whether h_plane was actually intended as the loop's source.
void hue_horiz_project(IplImage* src, IplImage* proj_image)
{
	// Per-column count of zero-valued pixels, zero-initialized.
	// (Original used a hard-coded `*4` instead of sizeof(int) and never
	// freed this buffer.)
	int* colCount = new int[src->width];
	memset(colCount, 0, src->width * sizeof(int));

	int x, y;
	for (x = 0; x < src->width; x++)
	{
		for (y = 0; y < src->height; y++)
		{
			CvScalar s = cvGet2D(src, y, x);
			if (s.val[0] == 0)
				colCount[x]++;
		}
	}

	// Draw each column's count as a black bar from row 0 downward.
	// Fully zero-initialized scalar: the original left val[1..3]
	// uninitialized, which would write garbage to multi-channel outputs.
	CvScalar black = cvScalar(0, 0, 0, 0);
	for (x = 0; x < src->width; x++)
	{
		for (y = 0; y < colCount[x]; y++)
		{
			cvSet2D(proj_image, y, x, black);
		}
	}

	delete[] colCount;  // fixed: buffer was leaked in the original
}
예제 #28
0
//--------------------------------------------------------------------------------
// Build a 2-D Hue/Saturation histogram of srcImage and report the image's
// brightness (V) range.
//
// Parameters:
//   srcImage : input BGR image, 8-bit, 3 channels
//   hist     : receives a newly created 18x25 H-S CV_HIST_ARRAY histogram;
//              the caller owns it and must release it with cvReleaseHist()
//   v_min    : receives the minimum V value (forced to 1 -- see note below)
//   v_max    : receives the maximum V value found in the image
void calcHistgram(IplImage* srcImage, CvHistogram** hist, double* v_min, double* v_max) {

	CvSize size = cvGetSize(srcImage);
	IplImage* hsv = cvCreateImage(size, IPL_DEPTH_8U, 3);
	cvCvtColor( srcImage, hsv, CV_BGR2HSV );

	IplImage* h_plane  = cvCreateImage(size, IPL_DEPTH_8U, 1);
	IplImage* s_plane  = cvCreateImage(size, IPL_DEPTH_8U, 1);
	IplImage* v_plane  = cvCreateImage(size, IPL_DEPTH_8U, 1);
	IplImage* planes[] = { h_plane, s_plane };
	cvCvtPixToPlane( hsv, h_plane, s_plane, v_plane, 0 );
	cvReleaseImage(&hsv);  // fixed: the HSV image was leaked in the original

	// OpenCV 8-bit HSV: hue spans [0,180), saturation spans [0,255].
	const int H_BINS = 18;
	const int S_BINS = 25;
	const int H_MIN  = 0;
	const int H_MAX  = 180;
	const int S_MIN  = 0;
	const int S_MAX  = 255;
	int    hist_size[] = { H_BINS, S_BINS };
	float  h_ranges[]  = { H_MIN, H_MAX };
	float  s_ranges[]  = { S_MIN, S_MAX };
	float* ranges[]    = { h_ranges, s_ranges };
	*hist = cvCreateHist(2,
						hist_size,
						CV_HIST_ARRAY,
						ranges,
						1);

	cvCalcHist(planes, *hist, 0, 0);
	cvReleaseImage(&h_plane);
	cvReleaseImage(&s_plane);

	cvMinMaxLoc(v_plane, v_min, v_max);
	// NOTE(review): the measured minimum is immediately overridden with 1,
	// presumably to guarantee a non-zero lower bound -- confirm intent.
	*v_min = 1;
	cvReleaseImage(&v_plane);
}
예제 #29
0
//--------------------------------------------------------------------------------
// Detect garbage-like objects in src by color segmentation and contour
// filtering, returning the refreshed member list `garbages`.
//
// Pipeline: BGR->HSV, equalize and threshold the saturation plane, clean it
// up with dilate/erode plus a Gaussian blur, then keep only the contours
// that pass the perimeter / rectangular-aspect / box-area / histogram-match
// filters against `model`.  Every accepted contour contributes one
// utils::Garbage built from its bounding rectangle.
//
// Parameters:
//   src   : input BGR frame
//   model : reference object image used for the histogram-matching filter
// Returns: the member list `garbages` (previous contents are deleted).
std::list<utils::Garbage*> GarbageRecognition::garbageList(IplImage * src, IplImage * model){
	// Drop results from the previous call; this method owns the pointers.
	std::list<utils::Garbage*>::iterator it;
	for ( it=garbages.begin() ; it != garbages.end() ; it++ )
		delete *it;
	garbages.clear();

	// Histogram of the model object, consumed by histogramMatchingFilter.
	utils::Histogram * h = new Histogram(HIST_H_BINS,HIST_S_BINS);
	CvHistogram * testImageHistogram = h->getHShistogramFromRGB(model);

	CvSize srcSize = cvGetSize(src);

	// Working images for the HSV split and each pipeline stage.
	IplImage* hsv = cvCreateImage( srcSize, 8, 3 );
	IplImage* h_plane = cvCreateImage( srcSize, 8, 1 );
	IplImage* s_plane = cvCreateImage( srcSize, 8, 1 );
	IplImage* v_plane = cvCreateImage( srcSize, 8, 1 );
	IplImage * threshImage=cvCreateImage(srcSize,8,1);     // thresholding
	IplImage * equalizedImage=cvCreateImage(srcSize,8,1);  // equalization
	IplImage * morphImage=cvCreateImage(srcSize,8,1);      // dilate/erode
	IplImage * smoothImage=cvCreateImage(srcSize,8,1);     // gaussian smoothing
	IplImage * contourImage=cvCreateImage(srcSize,8,3);    // drawing output

	int cont_index=0;
	IplConvKernel* element;   // morphology kernel
	CvRect boundingRect;
	CvSeq * contours;

	// Segment by saturation: convert, split, equalize, threshold.
	cvCvtColor( src, hsv, CV_BGR2HSV );
	cvCvtPixToPlane( hsv, h_plane, s_plane, v_plane, 0 );

	cvEqualizeHist(s_plane,equalizedImage);
	cvThreshold(equalizedImage,threshImage,THRESHOLD_VALUE,255,
	CV_THRESH_BINARY);

	// Morphologic cleanup followed by a small Gaussian blur.
	element = cvCreateStructuringElementEx( MORPH_KERNEL_SIZE*2+1,
		MORPH_KERNEL_SIZE*2+1, MORPH_KERNEL_SIZE, MORPH_KERNEL_SIZE,
		CV_SHAPE_RECT, NULL);

	cvDilate(threshImage,morphImage,element,MORPH_DILATE_ITER);
	cvErode(morphImage,morphImage,element,MORPH_ERODE_ITER);

	cvSmooth(morphImage,smoothImage,CV_GAUSSIAN,3,0,0,0);

	// Collect all contours and draw results over a copy of the input.
	contours = myFindContours(smoothImage);
	cvCopy(src,contourImage,0);

	while(contours!=NULL){
		CvSeq * aContour=getPolygon(contours);
		utils::Contours * ct = new Contours(aContour);

		// Candidate filters: perimeter, rectangular aspect ratio, box
		// area, and histogram match against the model object.
		int pf  = ct->perimeterFilter(MINCONTOUR_PERIMETER,MAXCONTOUR_PERIMETER);
		int raf = ct->rectangularAspectFilter(CONTOUR_RECTANGULAR_MIN_RATIO, CONTOUR_RECTANGULAR_MAX_RATIO);
		int baf = ct->boxAreaFilter(BOXFILTER_TOLERANCE);
		int hmf = ct->histogramMatchingFilter(src,testImageHistogram, HIST_H_BINS,HIST_S_BINS,HIST_MIN);

		if( pf && raf && baf && hmf ){
			// Passed every filter: draw the contour and its bounding box,
			// then record it as a garbage candidate.
			ct->printContour(3,cvScalar(127,127,0,0),
				contourImage);

			boundingRect=cvBoundingRect(ct->getContour(),0);
			cvRectangle(contourImage,cvPoint(boundingRect.x,boundingRect.y),
					cvPoint(boundingRect.x+boundingRect.width,
					boundingRect.y+boundingRect.height),
					_GREEN,1,8,0);

			utils::MinimalBoundingRectangle * r = new utils::MinimalBoundingRectangle(boundingRect.x,
				boundingRect.y,boundingRect.width,boundingRect.height);

			utils::Garbage * aGarbage = new utils::Garbage(r);
			garbages.push_back(aGarbage);
		}

		delete ct;
		cvReleaseMemStorage( &aContour->storage );
		contours=contours->h_next;
		cont_index++;
	}

	delete h;
	cvReleaseHist(&testImageHistogram);

	// Release all working resources.
	cvReleaseStructuringElement(&element);  // fixed: kernel was leaked before
	cvReleaseImage(&threshImage);
	cvReleaseImage(&equalizedImage);
	cvReleaseImage(&morphImage);
	cvReleaseImage(&smoothImage);
	cvReleaseImage(&contourImage);

	cvReleaseImage(&hsv);
	cvReleaseImage(&h_plane);
	cvReleaseImage(&s_plane);
	cvReleaseImage(&v_plane);

	return garbages;
}
예제 #30
0
//--------------------------------------------------------------------------------
// Fill gray_out with a binary skin-color mask of rgbin using normalized-RGB
// rules.  With R' = R/(R+G+B) and G' = G/(R+G+B), a pixel is marked skin when
//   R > 60  AND  0.4 < R' < 0.6  AND  0.28 < G' < 0.4.
// Always returns 0 (the HSV-based variant that computed skin_under_seed is
// kept commented out below for reference).
//
// NOTE(review): every work image is a function-local static, so all buffers
// are allocated once at the size of the FIRST rgbin ever passed in and are
// never resized or released.  Calling this with frames of a different size
// would operate on mismatched buffers -- confirm callers use one fixed size.
gint compose_skin_matrix(IplImage* rgbin, IplImage* gray_out)
{
/*
  int skin_under_seed = 0;

  static IplImage* imageHSV = cvCreateImage( cvSize(rgbin->width, rgbin->height), IPL_DEPTH_8U, 3);
  cvCvtColor(rgbin, imageHSV, CV_RGB2HSV);

  static IplImage* planeH = cvCreateImage( cvGetSize(imageHSV), 8, 1);	// Hue component.
  ///IplImage* planeH2= cvCreateImage( cvGetSize(imageHSV), 8, 1);	// Hue component, 2nd threshold
  static IplImage* planeS = cvCreateImage( cvGetSize(imageHSV), 8, 1);	// Saturation component.
  static IplImage* planeV = cvCreateImage( cvGetSize(imageHSV), 8, 1);	// Brightness component.
  cvCvtPixToPlane(imageHSV, planeH, planeS, planeV, 0);	// Extract the 3 color components.

  // Detect which pixels in each of the H, S and V channels are probably skin pixels.
  // Assume that skin has a Hue between 0 to 18 (out of 180), and Saturation above 50, and Brightness above 80.
  ///cvThreshold(planeH , planeH2, 10, UCHAR_MAX, CV_THRESH_BINARY);         //(hue > 10)
  cvThreshold(planeH , planeH , 20, UCHAR_MAX, CV_THRESH_BINARY_INV);     //(hue < 20)
  cvThreshold(planeS , planeS , 48, UCHAR_MAX, CV_THRESH_BINARY);         //(sat > 48)
  cvThreshold(planeV , planeV , 80, UCHAR_MAX, CV_THRESH_BINARY);         //(val > 80)

  // erode the HUE to get rid of noise.
  cvErode(planeH, planeH, NULL, 1);

  // Combine all 3 thresholded color components, so that an output pixel will only
  // be white (255) if the H, S and V pixels were also white.

  // gray_out = (hue > 10) ^ (hue < 20) ^ (sat > 48) ^ (val > 80), where   ^ mean pixels-wise AND
  cvAnd(planeH  , planeS , gray_out);	
  //cvAnd(gray_out, planeH2, gray_out);	
  cvAnd(gray_out, planeV , gray_out);	

  return(skin_under_seed);
*/

  // One-time buffer allocation (see the size caveat in the header note).
  static IplImage* planeR  = cvCreateImage( cvGetSize(rgbin), 8, 1);	// R component.
  static IplImage* planeG  = cvCreateImage( cvGetSize(rgbin), 8, 1);	// G component.
  static IplImage* planeB  = cvCreateImage( cvGetSize(rgbin), 8, 1);	// B component.

  static IplImage* planeAll = cvCreateImage( cvGetSize(rgbin), IPL_DEPTH_32F, 1);	// (R+G+B) component.
  static IplImage* planeR2  = cvCreateImage( cvGetSize(rgbin), IPL_DEPTH_32F, 1);	// R component, 32bits
  static IplImage* planeRp  = cvCreateImage( cvGetSize(rgbin), IPL_DEPTH_32F, 1);	// R' and >0.4
  static IplImage* planeGp  = cvCreateImage( cvGetSize(rgbin), IPL_DEPTH_32F, 1);	// G' and > 0.28

  static IplImage* planeRp2 = cvCreateImage( cvGetSize(rgbin), IPL_DEPTH_32F, 1);	// R' <0.6
  static IplImage* planeGp2 = cvCreateImage( cvGetSize(rgbin), IPL_DEPTH_32F, 1);	// G' <0.4

  cvCvtPixToPlane(rgbin, planeR, planeG, planeB, 0);	// Extract the 3 color components.
  // Build the normalized chromaticity planes R' and G'.
  cvAdd( planeR, planeG,   planeAll, NULL);
  cvAdd( planeB, planeAll, planeAll, NULL);  // All = R + G + B
  cvDiv( planeR, planeAll, planeRp, 1.0);    // R' = R / ( R + G + B)
  cvDiv( planeG, planeAll, planeGp, 1.0);    // G' = G / ( R + G + B)

  // Duplicate the planes that get thresholded against two bounds, so the
  // lower-bound and upper-bound masks can coexist.
  cvConvertScale( planeR, planeR2, 1.0, 0.0);
  cvCopy(planeGp, planeGp2, NULL);
  cvCopy(planeRp, planeRp2, NULL);

  // Each threshold writes UCHAR_MAX (i.e. 255.0f in these 32F planes) where
  // its condition holds and 0.0f elsewhere.
  cvThreshold(planeR2 , planeR2,   60, UCHAR_MAX, CV_THRESH_BINARY);     //(R > 60)
  cvThreshold(planeRp , planeRp, 0.40, UCHAR_MAX, CV_THRESH_BINARY);     //(R'> 0.4)
  cvThreshold(planeRp2, planeRp2, 0.6, UCHAR_MAX, CV_THRESH_BINARY_INV); //(R'< 0.6)
  cvThreshold(planeGp , planeGp, 0.28, UCHAR_MAX, CV_THRESH_BINARY);     //(G'> 0.28)
  cvThreshold(planeGp2, planeGp2, 0.4, UCHAR_MAX, CV_THRESH_BINARY_INV); //(G'< 0.4)

  // R’ = R / (R+G+B), G’ = G / (R + G + B)
  //  Skin pixel if:
  // R > 60 AND R’ > 0.4 AND R’ < 0.6 AND G’ > 0.28 and G’ < 0.4  
  //
  // cvAnd on 32F data is a bitwise AND of the float bit patterns; that is
  // safe here because every pixel is exactly 0.0f or 255.0f, so identical
  // patterns AND to themselves and anything AND 0 is 0.
  // NOTE(review): the second cvAnd with planeRp is redundant -- planeRp is
  // already folded in by the first line.  Harmless, but worth confirming.
  static IplImage* imageSkinPixels = cvCreateImage( cvGetSize(rgbin), IPL_DEPTH_32F, 1); 
  cvAnd( planeR2 ,         planeRp , imageSkinPixels);	
  cvAnd( planeRp , imageSkinPixels , imageSkinPixels);	
  cvAnd( planeRp2, imageSkinPixels , imageSkinPixels);	
  cvAnd( planeGp , imageSkinPixels , imageSkinPixels);	
  cvAnd( planeGp2, imageSkinPixels , imageSkinPixels);	
  
  // Convert the 32F mask (0.0f / 255.0f) into the caller's output image.
  cvConvertScale( imageSkinPixels, gray_out, 1.0, 0.0);
  return(0);
}