Code Example #1
File: HandDetect.cpp  Project: gunbaek/Hand2Mouse
void HandDetect::handDetecting()
{
	skinDetect();
	IplImage *tmp = cvCreateImage(cvGetSize(backproject), 8, 1);
	cvZero(tmp);

	if(track_comp.rect.height>0&&track_comp.rect.width>0)
	{
		cvCircle(tmp, handCen, track_box.size.width, CV_RGB(255, 255, 255), -1);
		cvDrawRect(tmp, cvPoint(track_window.x-(int)(track_box.size.width*0.2), track_window.y-(int)(track_box.size.height*0.2)), 
			cvPoint(track_window.x+(int)(track_box.size.width*1.2), track_window.y+track_box.size.height), CV_RGB(255, 255, 255), -1);
		
	}
	cvAnd(backproject, tmp, backproject, 0);
	cvDilate(backproject, backproject, 0, 1);
	cvErode(backproject, backproject, 0, 1);
	
	UsingYCbCr();
	cvAnd(gray, tmp, gray, 0);
	cvErode(gray, gray, 0, 1);
	cvDilate(gray, gray, 0, 1);
//	cvShowImage("52", gray);
	cvReleaseImage(&tmp);

	cvOr(gray, backproject, backproject, 0);
	
	handCen=cvPoint(track_box.center.x, track_box.center.y);
	
	setRad();
//	cvDrawRect(image, cvPoint(track_window.x, track_window.y), cvPoint(track_window.x+track_window.width, track_window.y+track_window.height), CV_RGB(255, 0, 0));
	cvCircle(image, handCen, 2, CV_RGB(255, 0, 0), 2);

}
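The pattern at the heart of this example — zero a single-channel mask, draw white shapes into it, then cvAnd it against the back-projection — is worth isolating. A minimal standalone sketch (the image name backproject and the circle geometry are stand-ins for the member state above):

// Minimal sketch of the draw-mask-then-AND pattern used above.
// `backproject` is assumed to be an 8-bit single-channel image.
IplImage *roiMask = cvCreateImage(cvGetSize(backproject), IPL_DEPTH_8U, 1);
cvZero(roiMask);                                                // all black: keep nothing
cvCircle(roiMask, cvPoint(160, 120), 40, cvScalarAll(255), -1); // white disc: keep this area
cvAnd(backproject, roiMask, backproject, 0);                    // zero everything outside the disc
cvReleaseImage(&roiMask);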
Code Example #2
File: Morphology1.cpp  Project: CareShaw/OCR
// Morphological constrained hit-or-miss transform, for binary and grayscale images
void lhMorpHMTC(const IplImage* src, IplImage* dst, IplConvKernel* sefg, IplConvKernel* sebg =NULL)
{
	assert(src != NULL && dst != NULL && src != dst && sefg!= NULL && sefg!=sebg);

	if (sebg == NULL)
	{
		sebg = lhStructuringElementNot(sefg);

	}
	
	IplImage*  temp1 = cvCreateImage(cvGetSize(src), 8, 1);
	IplImage*  temp2 = cvCreateImage(cvGetSize(src), 8, 1);
	IplImage*  temp3 = cvCreateImage(cvGetSize(src), 8, 1);
	IplImage*  temp4 = cvCreateImage(cvGetSize(src), 8, 1);

	IplImage*  mask1 = cvCreateImage(cvGetSize(src), 8, 1);
	IplImage*  mask2 = cvCreateImage(cvGetSize(src), 8, 1);
	IplImage*  mask3 = cvCreateImage(cvGetSize(src), 8, 1);
	IplImage*  mask4 = cvCreateImage(cvGetSize(src), 8, 1);

	cvZero(mask1);
	cvZero(mask2);
	cvZero(mask3);
	cvZero(mask4);

	cvZero(dst);

	// p.107, Eq. (5.5)
	cvErode( src, temp1, sebg);
	cvDilate( src, temp2, sebg);
	cvErode( src, temp3, sefg);
	cvDilate( src, temp4, sefg);

	cvCmp(src, temp3, mask1, CV_CMP_EQ);
	cvCmp(temp2, src,  mask2, CV_CMP_LT);
	cvAnd(mask1, mask2, mask2);

	cvCmp(src, temp4, mask3 , CV_CMP_EQ);
	cvCmp(temp1, src, mask4 , CV_CMP_GT);
	cvAnd(mask3, mask4, mask4);

	cvSub(src, temp2, dst, mask2);
	cvSub(temp1, src, dst, mask4);

	cvReleaseImage(&mask1);
	cvReleaseImage(&mask2);
	cvReleaseImage(&mask3);
	cvReleaseImage(&mask4);

	cvReleaseImage(&temp1);
	cvReleaseImage(&temp2);
	cvReleaseImage(&temp3);
	cvReleaseImage(&temp4);

	cvReleaseStructuringElement(&sebg);

}
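A hypothetical call site for lhMorpHMTC, assuming an 8-bit grayscale input and a 3x3 cross as the foreground structuring element (lhStructuringElementNot supplies the background element when sebg is omitted, and the function releases that default itself):

IplImage *src = cvLoadImage("input.png", CV_LOAD_IMAGE_GRAYSCALE);
IplImage *dst = cvCreateImage(cvGetSize(src), 8, 1);
IplConvKernel *sefg = cvCreateStructuringElementEx(3, 3, 1, 1, CV_SHAPE_CROSS, NULL);
lhMorpHMTC(src, dst, sefg);   // sebg defaults to the complement of sefg
cvReleaseStructuringElement(&sefg);
cvReleaseImage(&src);
cvReleaseImage(&dst);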
Code Example #3
File: textlocation.cpp  Project: browny/invoiceq-core
void TextLocation::thresholdImg(const IplImage* grayImg, int dilateDeg) {

    cvSmooth(grayImg, m_smoothImg, CV_GAUSSIAN, ksmoothSize, ksmoothSize);

    getEdgeImg(m_smoothImg, m_simpleEdgeImg, dilateDeg);

    double globalTh = getThForGlobalThImg(m_smoothImg, khistoRatio);

    if (globalTh > 50) {

        getGlobalThImg(m_smoothImg, globalTh, m_globalThImg, dilateDeg);

        cvAnd(m_globalThImg, m_simpleEdgeImg, m_advancedEdgeImg);
        cvAnd(m_advancedEdgeImg, blackPixelImg, m_edgeAndBlackImg);

        thrImgMode = ADVANCED_EDGE; // globalAndEdge
#ifdef DEBUG
        cvShowImage("advancedEdgeImg", m_advancedEdgeImg);
#endif

    } else {

        cvAnd(m_simpleEdgeImg, blackPixelImg, m_edgeAndBlackImg);

        thrImgMode = SIMPLE_EDGE; // edgeImg

    }
#ifdef DEBUG
    cvShowImage("edgeAndBlackImg", m_edgeAndBlackImg);
#endif

}
Code Example #4
IplImage* crop(IplImage* src, CvPoint c, int r){
	IplImage* res, * roi;
 
    //src = cvLoadImage("x.jpg", 1);
    res = cvCreateImage(cvGetSize(src), 8, 3);
    roi = cvCreateImage(cvGetSize(src), 8, 1);
 
    /* prepare the 'ROI' image */
    cvZero(roi);
 
    /* Note that you can use any shape for the ROI */
    cvCircle(
        roi,
        c,
        r,
        CV_RGB(255, 255, 255),
        -1, 8, 0
    );
 
    /* extract subimage */
    cvAnd(src, src, res, roi);
 
 
    /* 'restore' subimage */
    IplImage* roi_C3 = cvCreateImage(cvGetSize(src), 8, 3);
    cvMerge(roi, roi, roi, NULL, roi_C3);
    cvAnd(res, roi_C3, res, NULL);

    /* free the temporary masks before returning */
    cvReleaseImage(&roi);
    cvReleaseImage(&roi_C3);

    return res;
}
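A hypothetical usage of crop(), keeping a disc of radius 50 around the point (100, 100); everything outside the circle comes back black:

IplImage *img = cvLoadImage("x.jpg", 1);               // 3-channel input
IplImage *cropped = crop(img, cvPoint(100, 100), 50);  // circular ROI
cvSaveImage("cropped.jpg", cropped);
cvReleaseImage(&img);
cvReleaseImage(&cropped);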
Code Example #5
gint gstskin_find_skin_center_of_mass(struct _GstSkin *skin, gint display)
{
  int skin_under_seed = 0;

  IplImage* imageRGB = cvCreateImageHeader( cvSize(skin->width, skin->height), IPL_DEPTH_8U, 3);
  imageRGB->imageData = skin->cvRGB->imageData;

  IplImage* imageHSV = cvCreateImage( cvSize(skin->width, skin->height), IPL_DEPTH_8U, 3);
  cvCvtColor(imageRGB, imageHSV, CV_RGB2HSV);

  IplImage* planeH = cvCreateImage( cvGetSize(imageHSV), 8, 1);	// Hue component.
  IplImage* planeH2= cvCreateImage( cvGetSize(imageHSV), 8, 1);	// Hue component, 2nd threshold
  IplImage* planeS = cvCreateImage( cvGetSize(imageHSV), 8, 1);	// Saturation component.
  IplImage* planeV = cvCreateImage( cvGetSize(imageHSV), 8, 1);	// Brightness component.
  cvCvtPixToPlane(imageHSV, planeH, planeS, planeV, 0);	// Extract the 3 color components.

  // Detect which pixels in each of the H, S and V channels are probably skin pixels.
  // Assume that skin has a Hue between 10 and 20 (out of 180), Saturation above 48, and Brightness above 80.
  cvThreshold(planeH , planeH2, 10, UCHAR_MAX, CV_THRESH_BINARY);         //(hue > 10)
  cvThreshold(planeH , planeH , 20, UCHAR_MAX, CV_THRESH_BINARY_INV);     //(hue < 20)
  cvThreshold(planeS , planeS , 48, UCHAR_MAX, CV_THRESH_BINARY);         //(sat > 48)
  cvThreshold(planeV , planeV , 80, UCHAR_MAX, CV_THRESH_BINARY);         //(val > 80)

  // erode the HUE to get rid of noise.
  cvErode(planeH, planeH, NULL, 1);

  // Combine all 3 thresholded color components, so that an output pixel will only
  // be white (255) if the H, S and V pixels were also white.
  IplImage* imageSkinPixels = cvCreateImage( cvGetSize(imageHSV), 8, 1);        // Greyscale output image.
  // imageSkin = (hue > 10) ^ (hue < 20) ^ (sat > 48) ^ (val > 80), where ^ means pixel-wise AND
  cvAnd(planeH         , planeS , imageSkinPixels);	
  cvAnd(imageSkinPixels, planeH2, imageSkinPixels);	
  cvAnd(imageSkinPixels, planeV , imageSkinPixels);	

  if(display){
    if( skin->showH )
      cvCvtColor(planeH, imageRGB, CV_GRAY2RGB);
    else if( skin->showS )
      cvCvtColor(planeS, imageRGB, CV_GRAY2RGB);
    else if( skin->showV )
      cvCvtColor(planeV, imageRGB, CV_GRAY2RGB);
    else
      cvCvtColor(imageSkinPixels, imageRGB, CV_GRAY2RGB);
  }


  cvReleaseImage( &imageSkinPixels );
  cvReleaseImage( &planeH );
  cvReleaseImage( &planeH2);
  cvReleaseImage( &planeS );
  cvReleaseImage( &planeV );
  cvReleaseImage( &imageHSV );
  cvReleaseImage( &imageRGB );

  return(skin_under_seed);
}
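As a side note, the three thresholds plus three cvAnd calls above can be collapsed into one range test on the packed HSV image. An approximately equivalent sketch (cvInRangeS treats the lower bound as inclusive and the upper as exclusive, and the erode on the hue plane is skipped here):

// Approximately equivalent single-pass skin mask: 10 <= H < 20, S >= 48, V >= 80.
IplImage *skinMask = cvCreateImage(cvGetSize(imageHSV), 8, 1);
cvInRangeS(imageHSV, cvScalar(10, 48, 80, 0), cvScalar(20, 256, 256, 0), skinMask);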
Code Example #6
int filterByHSV(IplImage *src, CvScalar minHSV, CvScalar maxHSV, IplImage *dst) {
	IplImage *tmp3d = cvCloneImage(src);
	cvSmooth(tmp3d, tmp3d, CV_GAUSSIAN, 13, 0, 0, 0);

	cvCvtColor(tmp3d, tmp3d, CV_BGR2HSV);
	IplImage *tmp1dH_mask = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
	IplImage *tmp1dS_mask = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
	IplImage *tmp1dV_mask = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
	cvSplit(tmp3d, tmp1dH_mask, tmp1dS_mask, tmp1dV_mask, NULL);

	//printf("\rmin: %03d,%03d,%03d", (int)minHSV.val[0], (int)minHSV.val[1], (int)minHSV.val[2]);
	//printf("\tmax: %03d,%03d,%03d", (int)maxHSV.val[0], (int)maxHSV.val[1], (int)maxHSV.val[2]);

	if (minHSV.val[0] < maxHSV.val[0]) {
		cvInRangeS(tmp1dH_mask, cvScalar(minHSV.val[0], 0, 0), cvScalar(maxHSV.val[0], 0, 0), tmp1dH_mask);
	} else {
		IplImage *tmp1d = cvCloneImage(tmp1dH_mask);
		cvInRangeS(tmp1dH_mask, cvScalar(0, 0, 0), cvScalar(maxHSV.val[0], 0, 0), tmp1d);
		cvInRangeS(tmp1dH_mask, cvScalar(minHSV.val[0], 0, 0), cvScalar(255, 0, 0), tmp1dH_mask);
		cvOr(tmp1d, tmp1dH_mask, tmp1dH_mask, NULL);
		cvReleaseImage(&tmp1d);
	}

	cvInRangeS(tmp1dS_mask, cvScalar(minHSV.val[1], 0, 0), cvScalar(maxHSV.val[1], 0, 0), tmp1dS_mask);
	cvInRangeS(tmp1dV_mask, cvScalar(minHSV.val[2], 0, 0), cvScalar(maxHSV.val[2], 0, 0), tmp1dV_mask);

	IplImage *tmp1d_mask = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
	cvSet(tmp1d_mask, cvScalarAll(255), NULL);
	cvAnd(tmp1d_mask, tmp1dH_mask, tmp1d_mask, NULL);
	cvAnd(tmp1d_mask, tmp1dS_mask, tmp1d_mask, NULL);
	cvAnd(tmp1d_mask, tmp1dV_mask, tmp1d_mask, NULL);

	cvReleaseImage(&tmp1dH_mask);
	cvReleaseImage(&tmp1dS_mask);
	cvReleaseImage(&tmp1dV_mask);

	cvClose(tmp1d_mask, tmp1d_mask, NULL, 2);

#define CONTROLS_WIDTHA  640/2
#define CONTROLS_HEIGHTA 480/2
#if 1
	cvNamedWindow(CONTROL_WINDOW  "4", 0);
	cvResizeWindow(CONTROL_WINDOW "4", CONTROLS_WIDTHA, CONTROLS_HEIGHTA);
	cvShowImage(CONTROL_WINDOW    "4", tmp1d_mask);
#endif

	cvCopy2(src, dst, tmp1d_mask);

	cvReleaseImage(&tmp1d_mask);

	return 0;
}
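Two calls here are not part of OpenCV's C API: cvCopy2 (a project-local masked copy, shown below as Code Example #20) and cvClose, presumably a project-local morphological close. A plausible sketch of the latter, inferred only from the call cvClose(tmp1d_mask, tmp1d_mask, NULL, 2) — an assumption, not the project's actual code:

// Plausible project-local helper: morphological closing (dilate, then erode).
void cvClose(const CvArr *src, CvArr *dst, IplConvKernel *element, int iterations) {
	cvDilate(src, dst, element, iterations);
	cvErode(dst, dst, element, iterations);
}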
Code Example #7
File: iplimage.cpp  Project: thenoseman/ruby-opencv
IplImage*
create_frequency_filtered_image(const IplImage *pImage, int low, int high)
{

  CvPoint2D32f  center;
  center.x = pImage->width / 2;
  center.y = pImage->height / 2;
  CvBox2D box;
  box.center = center;

  box.size.width = high;
  box.size.height = high;

  IplImage *pFilterMask = rb_cvCreateImage( cvGetSize(pImage), IPL_DEPTH_64F, 1 );
  IplImage *pFiltered = rb_cvCreateImage( cvGetSize(pImage), IPL_DEPTH_64F, 1 );

  cvZero(pFilterMask);
  cvZero(pFiltered);

  if(high > 0)
    cvEllipseBox(pFilterMask, box, cvScalar(255, 255, 255, 255), CV_FILLED, 8, 0);

  box.size.width = low;
  box.size.height = low;
  if(low > 0)
    cvEllipseBox(pFilterMask, box, cvScalar(0, 0, 0, 0), CV_FILLED, 8, 0);

  cvAnd(pImage, pFilterMask, pFiltered, NULL);

  cvReleaseImage(&pFilterMask);

  return pFiltered;
}
Code Example #8
File: testApp.cpp  Project: investic/mano_oscg
//--------------------------------------------------------------
void testApp::update()
{
	ofBackground(100, 100, 100);
	kinect.update();

	grayImage.setFromPixels(kinect.getDepthPixels(), kinect.width, kinect.height);
			
	if( bThreshWithOpenCV ){
		
		grayThreshFar = grayImage;
		grayThresh = grayImage;
		grayThreshFar.threshold(farThreshold, true);
		grayThresh.threshold(nearThreshold);
		cvAnd(grayThresh.getCvImage(), grayThreshFar.getCvImage(), grayImage.getCvImage(), NULL);
		 
	}

	// if one blob found, find nearest point in blob area		
	//update the cv image
	grayImage.flagImageChanged();
    // find contours between 10 px and half of w*h px in area, considering up to
    // 200 blobs; find-holes is false, so interior contours are not returned.
    contourFinder.findContours(grayImage, 10, (kinect.width*kinect.height)/2, 200, false);
	ofSoundUpdate();
	
	
}
Code Example #9
CvBox2D CamShiftPatch::getTrackBox(CvScalar maskRange, CvHistogram *hist)
{
	IplImage* backproject = cvCreateImage(cvGetSize(originImage), 8, 1); // back-projection buffer, single channel
	IplImage* hue = 0;
	hue = cvCreateImage(cvGetSize(originImage), 8, 1);
	IplImage *mask = getInRangeMask(maskRange, hue);

	cvCalcBackProject(&hue, backproject, hist); // compute the back projection of the hue plane
	cvAnd(backproject, mask, backproject, 0);   // AND backproject with mask, storing the result in backproject

	CvConnectedComp track_comp;
	CvBox2D track_box; // box returned by tracking, including rotation angle

	CvRect zero;
	zero.x = 0; zero.y = 0; zero.width = 320; zero.height = 240;
	track_window = zero;

	for (int i = 0; i < 10; i++)
	{
		cvCamShift(
			backproject,    // color probability distribution image
			track_window,   // initial value of the search window
			cvTermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1), // criteria for stopping the search
			&track_comp,    // receives the result, including the new search window position and area
			&track_box      // minimal rectangle enclosing the tracked object
			);
		track_window = track_comp.rect;
	}

	cvReleaseImage(&backproject);
	cvReleaseImage(&hue);
	cvReleaseImage(&mask);

	return track_box;
}
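getInRangeMask is project-local and not shown. A plausible sketch consistent with the call above — build an HSV validity mask from maskRange and fill the caller-supplied hue plane — would look like this (an assumption, not the project's actual code; originImage is the member the method already uses):

// Plausible sketch: mask out pixels with too little saturation/value,
// and hand the hue plane back through the out-parameter.
IplImage* CamShiftPatch::getInRangeMask(CvScalar maskRange, IplImage *hue)
{
	IplImage *hsv  = cvCreateImage(cvGetSize(originImage), 8, 3);
	IplImage *mask = cvCreateImage(cvGetSize(originImage), 8, 1);
	cvCvtColor(originImage, hsv, CV_BGR2HSV);
	cvInRangeS(hsv, cvScalar(0, maskRange.val[1], maskRange.val[2], 0),
	           cvScalar(181, 256, 256, 0), mask);
	cvSplit(hsv, hue, 0, 0, 0);
	cvReleaseImage(&hsv);
	return mask;
}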
Code Example #10
void ofApp::update() {
    kinect.update();
    
    if(kinect.isNewFrame()) {
        depthPixels = kinect.getDepthRawPixels();

        grayImage.setFromPixels(kinect.getDepthRawPixels());
        grayThreshNear = grayImage;
        grayThreshFar = grayImage;
        grayThreshNear.threshold(nearThreshold, true);
        grayThreshFar.threshold(farThreshold);
        cvAnd(grayThreshNear.getCvImage(), grayThreshFar.getCvImage(), grayImage.getCvImage(), NULL);
        grayImage.flagImageChanged();
        
        // set contour tracker parameters
        contourFinder.setMinArea(minArea);
        contourFinder.setMaxArea(maxArea);
        contourFinder.setThreshold(threshold);
        contourFinder.getTracker().setPersistence(persistence);
        contourFinder.getTracker().setMaximumDistance(maxDistance);
        
        // determine found contours
        contourFinder.findContours(grayImage);
    }
}
Code Example #11
File: webcam.cpp  Project: shaheeqa/plexydesk
void WebCamData::trackFace() {
  CvConnectedComp comps;
  updateHugeImage(d->data);

  cvCalcBackProject(&d->hueImage, d->prob, d->histogram);
  cvAnd(d->prob, d->mask, d->prob, 0);
  CvBox2D box;
  cvCamShift(d->prob, d->faceRect,
             cvTermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1), &comps,
             &box);

  d->faceRect = comps.rect;

  int radius = cvRound((d->faceRect.width + d->faceRect.height) * 0.25);
  CvPoint center;
  center.x = cvRound(d->faceRect.x + d->faceRect.width * 0.5);
  center.y = cvRound(d->faceRect.y + d->faceRect.height * 0.5);
  /*
      qDebug() << Q_FUNC_INFO
      << comps.rect.x
      << comps.rect.y
      << comps.rect.width
      << comps.rect.height
      << box.angle
      << center.x
      << center.y
      << radius;
   */
  d->dataMap.clear();
  d->dataMap["z"] = QVariant(radius);
  d->dataMap["x"] = QVariant(center.x);
  d->dataMap["y"] = QVariant(center.y);
  d->dataMap["angle"] = QVariant(box.angle);
  Q_EMIT dataReady();
}
Code Example #12
bool AdaptiveHistogramCamshift::ComputeCamshift(const IplImage* hue, const IplImage* mask)
{
  // Compute backproject
  cvCalcBackProject(&hue, m_imgBackproject, m_hist);
  cvAnd(m_imgBackproject, mask, m_imgBackproject, 0);

  // Init velocity
  m_trackPosTwoFramesBack = cvPoint(static_cast<int>(m_trackBox.center.x),
                                    static_cast<int>(m_trackBox.center.y));
  m_trackAreaTwoFramesBack = m_trackBox.size.width * m_trackBox.size.height;

  // DEBUG track window area
  //printf("track wnd area: %f\n", m_trackBox.size.width * m_trackBox.size.height);

  // Compute camshift this frame
  CvConnectedComp trackComp;
  assert((m_trackWindow.height > 0) && (m_trackWindow.width > 0));
  CvBox2D trackBox;
  const int camShiftRes = cvCamShift(m_imgBackproject,
                                     m_trackWindow,
                                     cvTermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1),
                                     &trackComp,
                                     &trackBox);
  if (camShiftRes >= 0)
  {
    m_trackBox = trackBox;
    m_trackCompRect = trackComp.rect;
    return true;
  }
  else
  {
    return false;
  }
}
Code Example #13
File: CMP_CvAnd.c  Project: gmaclair/blendocv
static void node_composit_exec_cvAnd(void *data, bNode *node, bNodeStack **in, bNodeStack **out)
{
	CvArr* dst;
	CvArr* src1;
	CvArr* src2;
	CvArr* mask = NULL;
	CompBuf* dst_buf;

	if (out[0]->hasoutput == 0) return;
	if (in[0]->data) {
		// Inputs
		src1 = BOCV_IplImage_attach(in[0]->data);
		mask = BOCV_Mask_attach(in[2]->data);

		// Output
		dst_buf = dupalloc_compbuf(in[0]->data);
		dst = BOCV_IplImage_attach(dst_buf);

		// Check image/mask sizes
		if (mask) {
			if (!BOCV_checkMask(src1, mask)) {
				node->error = 1;
				return;
			}
		}

		if (in[1]->data) {
			src2 = BOCV_IplImage_attach(in[1]->data);
			// Check that both images have the same type
			if (!BOCV_checkAreSameType(src1, src2)) {
				node->error = 1;
				return;
			}
			// Check that both images have the same number of channels
			if (!BOCV_checkSameNChannels(src1, src2)) {
				node->error = 1;
				return;
			}
			cvAnd(src1, src2, dst, mask);
			BOCV_IplImage_detach(src2);
		} else {
			CvScalar s;
			s.val[0] = (in[1]->vec[0]);
			s.val[1] = (in[1]->vec[1]);
			s.val[2] = (in[1]->vec[2]);
			s.val[3] = 0;
			cvAndS(src1, s, dst, mask);
		}

		out[0]->data = dst_buf;

		BOCV_IplImage_detach(src1);
		BOCV_IplImage_detach(mask);
		BOCV_IplImage_detach(dst);
	}
}
Code Example #14
File: ofxCvImage.cpp  Project: 4ker/openFrameworks
//--------------------------------------------------------------------------------
void ofxCvImage::operator &= ( ofxCvImage& mom ) {
	if( !mom.bAllocated ){
		ofLogError("ofxCvImage") << "operator&=: source image not allocated";	
		return;	
	}
	if( !bAllocated ){
		ofLogNotice("ofxCvImage") << "operator&=: allocating to match dimensions: "
			<< mom.getWidth() << " " << mom.getHeight();
		allocate(mom.getWidth(), mom.getHeight());
	}

	if( mom.getCvImage()->nChannels == cvImage->nChannels &&
        mom.getCvImage()->depth == cvImage->depth )
    {
        if( matchingROI(getROI(), mom.getROI()) ) {
            cvAnd( cvImage, mom.getCvImage(), cvImageTemp );
            swapTemp();
            flagImageChanged();
        } else {
            ofLogError("ofxCvImage") << "operator&=: region of interest mismatch";
        }
	} else {
        ofLogError("ofxCvImage") << "operator&=: images need to have matching type";
	}
}
Code Example #15
File: Morphology1.cpp  Project: CareShaw/OCR
// Morphological binary hit-or-miss transform
void lhMorpHMTB(const IplImage* src, IplImage* dst, IplConvKernel* sefg, IplConvKernel* sebg =NULL)
{
	assert(src != NULL && dst != NULL && src != dst && sefg!= NULL && sefg!=sebg);

	if (sebg == NULL)
	{
		sebg = lhStructuringElementNot(sefg);

	}
	IplImage*  temp1 = cvCreateImage(cvGetSize(src), 8, 1);
	IplImage*  temp2 = cvCreateImage(cvGetSize(src), 8, 1);

	// p.104, Eq. (5.2)
	cvErode( src, temp1, sefg);
	cvNot(src, temp2);
	cvErode( temp2, temp2, sebg);
	cvAnd(temp1, temp2, dst);

	cvReleaseImage(&temp1);
	cvReleaseImage(&temp2);

	cvReleaseStructuringElement(&sebg);

}
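The computation is the textbook binary hit-or-miss transform: HMT(src) = erode(src, sefg) AND erode(NOT src, sebg). A hypothetical call site, assuming a 0/255 binary input image:

IplImage *bin = cvLoadImage("binary.png", CV_LOAD_IMAGE_GRAYSCALE);
IplImage *hmt = cvCreateImage(cvGetSize(bin), 8, 1);
IplConvKernel *fg = cvCreateStructuringElementEx(3, 3, 1, 1, CV_SHAPE_CROSS, NULL);
lhMorpHMTB(bin, hmt, fg);   // background element defaults to the complement of fg
cvReleaseStructuringElement(&fg);
cvReleaseImage(&bin);
cvReleaseImage(&hmt);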
Code Example #16
File: testApp.cpp  Project: MrMDeluxe/generative
bool testApp::readKinect()
{
	kinect.update();
	
	// there is a new frame and we are connected
	if(kinect.isFrameNew()) {
        updateColors();

        grayImagePrev = grayImage;
        
		// load grayscale depth image from the kinect source
		grayImage.setFromPixels(kinect.getDepthPixels(), inputWidth, inputHeight);
        if (mirrorInput)
            grayImage.mirror(false, true);
        
        grayThreshNear = grayImage;
        grayThreshFar = grayImage;
        grayThreshNear.threshold(nearThreshold, true);
        grayThreshFar.threshold(farThreshold);
        cvAnd(grayThreshNear.getCvImage(), grayThreshFar.getCvImage(), grayImage.getCvImage(), NULL);
        
		grayImage.flagImageChanged();
        
		return true;
	}
    return false;
}
Code Example #17
CamShift::Box CamShift::Track(const ImgBgr& img)
{
  cvCopy(ImgIplImage(img), image, 0);
  cvCvtColor(image, hsv, CV_BGR2HSV);
  cvFlip(hsv, hsv, 0);
  int _vmin = vmin, _vmax = vmax;

  cvInRangeS(hsv, cvScalar(0, smin, MIN(_vmin, _vmax), 0),
             cvScalar(180, 256, MAX(_vmin, _vmax), 0), mask);
  cvSplit(hsv, hue, 0, 0, 0);
  cvCalcBackProject(&hue, backproject, hist);
  //cvSaveImage("backproject.bmp", backproject);
  cvAnd(backproject, mask, backproject, 0);
  //cvSaveImage("backproject.bmp", backproject);
  cvCamShift(backproject, track_window,
             cvTermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1),
             &track_comp, &track_box);
  track_window = track_comp.rect;

  Box result;
  result.angle = track_box.angle;
  result.center.x = static_cast<LONG>(track_box.center.x);
  result.center.y = static_cast<LONG>(img.Height() - track_box.center.y - 1); // undo the vertical flip
  result.size.cy = static_cast<LONG>(track_box.size.width);
  result.size.cx = static_cast<LONG>(track_box.size.height);
  return result;
}
Code Example #18
void THISCLASS::OnStep() {
	if (! mCore->mDataStructureImageGray.mImage) {
		AddError(wxT("No input image."));
		return;
	}

	// Mask the image
	if (mMaskImage) {
		if ((mCore->mDataStructureImageGray.mImage->width != mMaskImage->width) || (mCore->mDataStructureImageGray.mImage->height != mMaskImage->height)) {
			AddError(wxT("Wrong mask size."));
			return;
		}

		if ((mMode == cMode_WhiteWhite) || (mMode == cMode_BlackWhite)) {
			cvOr(mCore->mDataStructureImageGray.mImage, mMaskImage, mCore->mDataStructureImageGray.mImage);
		} else {
			cvAnd(mCore->mDataStructureImageGray.mImage, mMaskImage, mCore->mDataStructureImageGray.mImage);
		}
	}

	// Set the display
	DisplayEditor de(&mDisplayOutput);
	if (de.IsActive()) {
		de.SetMainImage(mCore->mDataStructureImageGray.mImage);
	}
}
Code Example #19
File: tracker.cpp  Project: amnosuperman/Sign2Text
CvRect combi_track(IplImage * pImg,KalmanFilter &kfilter)
{
	CvRect predrect=kfilter.predictionReport(prevHandRect);	
	//if((predrect.x<0)||(predrect.y<0)||((predrect.x+predrect.width)>pImg->width)||((predrect.y+predrect.height)>pImg->height))
	//	return NULL;	
	CvConnectedComp components;

	// Create a new hue image
	updateHueImage(pImg);

	// Create a probability image based on the hand histogram
	cvCalcBackProject( &pHueImg, pProbImg, pHist );
    cvAnd( pProbImg, pMask, pProbImg, 0 );
	//cvSetImageROI(pProbImg,predrect);
	
	// Use CamShift to find the center of the new hand probability,
	// but only if the predicted rectangle lies fully inside the image
	if (!((predrect.x < 0) || (predrect.y < 0) ||
	      ((predrect.x + predrect.width) > pImg->width) ||
	      ((predrect.y + predrect.height) > pImg->height))) {
		cvCamShift(pProbImg, predrect,
		           cvTermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1),
		           &components, handBox);
		// Update hand location and angle
		prevHandRect = components.rect;
	}
	else {
		//cvCamShift( pProbImg, prevHandRect, cvTermCriteria( CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1 ),&components, handBox );
		prevHandRect.x = -1;	// mark tracking as lost
	}
	
    //if(!pImg->origin)	
	//	handBox->angle = -handBox->angle;
	//cvResetImageROI(pProbImg);
	
	return prevHandRect;

}
Code Example #20
void cvCopy2(CvArr *src, CvArr *dst, CvArr *mask) {
	IplImage *tmp1dB = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
	IplImage *tmp1dG = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
	IplImage *tmp1dR = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);

	cvSplit(src, tmp1dB, tmp1dG, tmp1dR, NULL);

	cvAnd(tmp1dB, mask, tmp1dB);
	cvAnd(tmp1dG, mask, tmp1dG);
	cvAnd(tmp1dR, mask, tmp1dR);

	cvMerge(tmp1dB, tmp1dG, tmp1dR, NULL, dst);

	cvReleaseImage(&tmp1dB);
	cvReleaseImage(&tmp1dG);
	cvReleaseImage(&tmp1dR);
}
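cvCopy2 emulates a masked copy that also blanks the unmasked pixels. For reference, the stock C API can do the same in two calls, provided the destination is cleared first — a sketch under that assumption:

// Equivalent sketch using the stock API: cvCopy honors a mask but leaves
// unmasked destination pixels untouched, so clear dst first.
cvZero(dst);
cvCopy(src, dst, mask);   // copy src -> dst only where mask != 0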
Code Example #21
//--------------------------------------------------------------
void ofApp::update() {
    
    ofBackground(100, 100, 100);
    
    kinect.update();
    
    // there is a new frame and we are connected
    if(kinect.isFrameNew()) {
        
        // load grayscale depth image from the kinect source
        grayImage.setFromPixels(kinect.getDepthPixels(), kinect.width, kinect.height);
        
        // we do two thresholds - one for the far plane and one for the near plane
        // we then do a cvAnd to get the pixels which are a union of the two thresholds
        if(bThreshWithOpenCV) {
            grayThreshNear = grayImage;
            grayThreshFar = grayImage;
            grayThreshNear.threshold(nearThreshold, true);
            grayThreshFar.threshold(farThreshold);
            cvAnd(grayThreshNear.getCvImage(), grayThreshFar.getCvImage(), grayImage.getCvImage(), NULL);
        } else {
            
            // or we do it ourselves - show people how they can work with the pixels
            unsigned char * pix = grayImage.getPixels();
            
            int numPixels = grayImage.getWidth() * grayImage.getHeight();
            for(int i = 0; i < numPixels; i++) {
                if(pix[i] < nearThreshold && pix[i] > farThreshold) {
                    pix[i] = 255;
                } else {
                    pix[i] = 0;
                }
            }
        }
        
        // update the cv images
        grayImage.flagImageChanged();
        
        // find contours between 10 px and half of w*h px in area, considering up to
        // 20 blobs; find-holes is false, so interior contours are not returned.
        contourFinder.findContours(grayImage, 10, (kinect.width*kinect.height)/2, 20, false);
        
    }
    
    //ribbon brush
    if(marking != NULL)
    {
        marking->update();
    }
    
    for(int i = 0; i < markings.size(); i++)
    {
        markings[i]->update();
    }
    
    
}
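The dual-threshold-plus-cvAnd trick that recurs in these Kinect examples reduces, in the plain C API, to banding the depth image. A sketch, assuming `depth` is an 8-bit single-channel image where closer surfaces are brighter; it keeps pixels p with farThreshold < p <= nearThreshold:

// The ofxOpenCv dual-threshold trick, sketched with the plain C API.
IplImage *nearMask = cvCreateImage(cvGetSize(depth), IPL_DEPTH_8U, 1);
IplImage *farMask  = cvCreateImage(cvGetSize(depth), IPL_DEPTH_8U, 1);
cvThreshold(depth, nearMask, nearThreshold, 255, CV_THRESH_BINARY_INV); // p <= near
cvThreshold(depth, farMask,  farThreshold,  255, CV_THRESH_BINARY);     // p > far
cvAnd(nearMask, farMask, depth, NULL);   // white only inside the depth band
cvReleaseImage(&nearMask);
cvReleaseImage(&farMask);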
Code Example #22
File: ImageProcessorCV.cpp  Project: junaidnaseer/ivt
void ImageProcessorCV::CalculateGradientImageHSV(CByteImage *pInputImage, CByteImage *pOutputImage)
{
	if (pInputImage->width != pOutputImage->width || pInputImage->height != pOutputImage->height ||
		pInputImage->type != CByteImage::eRGB24 || pOutputImage->type != CByteImage::eGrayScale)
		return;

	IplImage *pIplInputImage = IplImageAdaptor::Adapt(pInputImage);
	IplImage *pIplOutputImage = IplImageAdaptor::Adapt(pOutputImage);

	// Gradient image computation by Irina Wächter
	// instead of the usual norm sqrt(x*x + y*y), use |x|+|y| because it is much faster
	IplImage *singleChannel0 = cvCreateImage(cvSize(pInputImage->width,pInputImage->height), IPL_DEPTH_8U, 1);
	IplImage *singleChannel1 = cvCreateImage(cvSize(pInputImage->width,pInputImage->height), IPL_DEPTH_8U, 1);
	IplImage *singleChannel2 = cvCreateImage(cvSize(pInputImage->width,pInputImage->height), IPL_DEPTH_8U, 1);
	IplImage *diff = cvCreateImage(cvSize(pInputImage->width, pInputImage->height), IPL_DEPTH_16S, 1);
	IplImage *abs = cvCreateImage(cvSize(pInputImage->width, pInputImage->height), IPL_DEPTH_8U, 1);
		
	cvCvtPixToPlane(pIplInputImage, singleChannel0, singleChannel1, singleChannel2, NULL);
	
	// calculate gradients on S-channel
	//cvSmooth(singleChannel1, singleChannel1, CV_GAUSSIAN, 3, 3);
	cvSobel(singleChannel1, diff, 1, 0, 3);
	cvConvertScaleAbs(diff, abs);
	cvSobel(singleChannel1, diff, 0, 1, 3);
	cvConvertScaleAbs(diff, pIplOutputImage);
	cvAdd(abs, pIplOutputImage, pIplOutputImage, 0);
	
	// threshold S-channel to create a mask for gradients of the H-channel
	cvThreshold(singleChannel1, singleChannel1, 60, 255, CV_THRESH_BINARY);
	cvDilate(singleChannel1, singleChannel1);
	
	// calculate gradients on H-channel
	//cvSmooth(singleChannel0, singleChannel0, CV_GAUSSIAN, 3, 3);
	cvSobel(singleChannel0, diff, 1, 0, 3);
	cvConvertScaleAbs(diff, abs);
	cvSobel(singleChannel0, diff, 0, 1, 3);
	cvConvertScaleAbs(diff, singleChannel0);
	cvAdd(abs, singleChannel0, singleChannel0, 0);
	
	// filter gradients of H-channel with mask
	cvAnd(singleChannel0, singleChannel1, singleChannel0);
	
	// combine the two gradient images
	cvMax(pIplOutputImage, singleChannel0, pIplOutputImage);
	
	// free memory
	cvReleaseImage(&singleChannel0);
	cvReleaseImage(&singleChannel1);
	cvReleaseImage(&singleChannel2);
	cvReleaseImage(&diff);
	cvReleaseImage(&abs);
	
	cvReleaseImageHeader(&pIplInputImage);
	cvReleaseImageHeader(&pIplOutputImage);
}
Code Example #23
//--------------------------------------------------------------
void ofApp::update(){
    //--------------- kinect ------------
    ofBackground(255, 255, 255);
    
    kinect.update();
    
    // there is a new frame and we are connected
    if(kinect.isFrameNew()) {
        
        // load grayscale depth image from the kinect source
        grayImage.setFromPixels(kinect.getDepthPixels());
        
        // we do two thresholds - one for the far plane and one for the near plane
        // we then do a cvAnd to get the pixels which are a union of the two thresholds
        if(bThreshWithOpenCV) {
            grayThreshNear = grayImage;
            grayThreshFar = grayImage;
            grayThreshNear.threshold(nearThreshold, true);
            grayThreshFar.threshold(farThreshold);
            cvAnd(grayThreshNear.getCvImage(), grayThreshFar.getCvImage(), grayImage.getCvImage(), NULL);
        }
        
        // update the cv images
        grayImage.flagImageChanged();
        
        // find contours between 1000 px and 1/3 of w*h px in area, considering up to
        // 20 blobs; find-holes is false, so interior contours are not returned.
        //contourFinder.findContours(grayImage, 5000, (kinect.width*kinect.height)/3, 20, false);
        contourFinder.findContours(grayImage, 1000, (kinect.width*kinect.height)/3, 20, false);

    }
    //---------------------------
    
    //check when new blobs appear, check when blobs disappear
    //create strokes for every new blob

    ////////////////////////// check if the blob is the same blob /////////////////////////////
//    for(int j=0; contourFinder.blobs.size(); j++){
//        for(int i=0; i< strokes.size(); i++){
//            int lastPoint = strokes[i].lines.size();
//                ofDist(contourFinder.blobs[j].centroid.x, contourFinder.blobs[j].centroid.y, strokes[i].lines[lastPoint].posEnd.x, strokes[i].lines[lastPoint].posEnd.y );//check dist tween each blob and the points on each line of the stroke
//                //take the blob with the
//            
//        }
//    }

}
Code Example #24
File: intersect.c  Project: freeman94/vision
/**
 * \brief	Takes frame and applies image processing techniques to filter out non-laser line points. Updates images used for runtime display.
 */
int filterFrame() {
	args[0] = frame;
	cvCvtColor(frame, frameHSV, CV_BGR2HSV);	//convert RGB values of frame to HSV and place in frameHSV
	cvSplit(frameHSV, hue, saturation, value, NULL);	//split frameHSV into constituent components and place appropriately; we are done with frameHSV
	args[1] = hue;
	args[2] = value;
	cvCopy(saturation, saturation2);	//make an additional copy of saturation for display
	//args[8] = saturation2;
	//cvShowImage("saturation", saturation2);
	cvSmooth(frame, frameHSV, CV_BLUR, 20, 20 );   //smooth frame and store in frameHSV
	//cvShowImage("Smoothed frame", frameHSV);
	cvSplit(frame, blue, green, red, NULL);	//split frame into its RGB components
	cvSplit(frameHSV, blue2, green2, red2, NULL);	//split the smoothed version into its RGB components
	cvMin(blue, green, min_bg);	//take the min of blue and green and store in min_bg
	args[3] = min_bg;
	//cvShowImage("minimum of blue and green", min_bg);
	cvSub(red, min_bg, red_last);	//take red less the min of the blue and green
	//cvShowImage("red_last = red - min_bg", red_last);
	cvThreshold(red_last, red_last, thresholdValue, 255, CV_THRESH_BINARY_INV);	//threshold the red_last
	//cvShowImage("threshold of red_last", red_last);
	args[4] = red_last;
	cvSub(red, red2, deltaRed);
	//cvShowImage("deltaRed = Original red - smooth red", deltaRed);
	cvThreshold(deltaRed, deltaRed, thresholdValue, 255, CV_THRESH_BINARY);
	//cvShowImage("threshold(deltaRed)", deltaRed);
	cvCopy(deltaRed, alpha);
	cvInRangeS(saturation, cvScalar(0), cvScalar(25), saturation);
	//cvShowImage("Low saturation in original frame", saturation);
	cvInRangeS(hue, cvScalar(49), cvScalar(125), beta);
	//cvShowImage("Mixed hue in original frame", beta);
	cvOr(beta, saturation, beta);
	//cvShowImage("beta = Low saturation OR mixed hue", beta);
	cvOr(beta, red_last, beta);
	//cvShowImage("beta = beta OR red_last", beta);
	//args[5] = alpha;
	args[5] = beta;

	IplConvKernel *mask = cvCreateStructuringElementEx(5, 5, 2, 2, CV_SHAPE_ELLIPSE, NULL);

	cvDilate(saturation2,dialated, mask, 20);
	//cvShowImage("dilate original saturation", dialated);
	args[6] = dialated;
	cvThreshold(dialated, dialated, 100, 255, CV_THRESH_BINARY);
	cvErode(dialated,eroded, mask, 30);

	args[7] = eroded;
	cvSub(alpha, beta, orig_filter);
	args[8] = orig_filter;
	cvAnd(orig_filter, eroded, zeta);
	args[9] = zeta;
	return 0;
}
Code Example #25
//--------------------------------------------------------------
void KinectView::update(){
    
    
    kinect.update();
    
    // there is a new frame and we are connected
    if(kinect.isFrameNew()) {
        
        contrastKinect.setFromPixels(kinect.getPixels());

//        contrastKinect.brightnessContrast(ofMap(mouseX, 0, ofGetWidth(), 0, 1), ofMap(mouseY, 0, ofGetHeight(), 0, 1));
        
        // load grayscale depth image from the kinect source
        grayImage.setFromPixels(kinect.getDepthPixels());
        
        // we do two thresholds - one for the far plane and one for the near plane
        // we then do a cvAnd to get the pixels which are a union of the two thresholds
        if(bThreshWithOpenCV) {
            grayThreshNear = grayImage;
            grayThreshFar = grayImage;
            grayThreshNear.threshold(nearThreshold, true);
            grayThreshFar.threshold(farThreshold);
            cvAnd(grayThreshNear.getCvImage(), grayThreshFar.getCvImage(), grayImage.getCvImage(), NULL);
        } else {
            
            // or we do it ourselves - show people how they can work with the pixels
            ofPixels & pix = grayImage.getPixels();
            int numPixels = pix.size();
            for(int i = 0; i < numPixels; i++) {
                if(pix[i] < nearThreshold && pix[i] > farThreshold) {
                    pix[i] = 255;
                } else {
                    pix[i] = 0;
                }
            }
        }
        
        // update the cv images
        grayImage.flagImageChanged();
        
        // find contours between 10 px and half of w*h px in area, considering up to
        // 20 blobs; find-holes is false, so interior contours are not returned.
        contourFinder.findContours(grayImage, 10, (kinect.width*kinect.height)/2, 20, false);
    }
    
#ifdef USE_TWO_KINECTS
    kinect2.update();
#endif

}
Code Example #26
File: ofApp.cpp  Project: chewyishere/DanceFlows
//--------------------------------------------------------------
void testApp::update(){
    
    ofBackground(100, 100, 100);
    
    // ************* Kinect Stuff *************
    
    kinect.update();
	
	if(kinect.isFrameNew()) {
		
		grayImage.setFromPixels(kinect.getDepthPixels(), kinect.width, kinect.height);
		
		if(bThreshWithOpenCV) {
			grayThreshNear = grayImage;
			grayThreshFar = grayImage;
			grayThreshNear.threshold(nearThreshold, true);
			grayThreshFar.threshold(farThreshold);
			cvAnd(grayThreshNear.getCvImage(), grayThreshFar.getCvImage(), grayImage.getCvImage(), NULL);
		} else {
			
            unsigned char * pix = grayImage.getPixels();
			
			int numPixels = grayImage.getWidth() * grayImage.getHeight();
			for(int i = 0; i < numPixels; i++) {
				if(pix[i] < nearThreshold && pix[i] > farThreshold) {
					pix[i] = 255;
				} else {
					pix[i] = 0;
				}
			}
		}
		
		// update the cv images
		grayImage.flagImageChanged();
		
		// find contours between 10 px and 1/3 of w*h px in area, considering up to
		// 20 blobs; find-holes is false, so interior contours are not returned.
		contourFinder.findContours(grayImage, 10, (kinect.width*kinect.height)/3, 20, false);
	}
    
	deltaTime = ofGetElapsedTimef() - lastTime;
	lastTime = ofGetElapsedTimef();
    
    
    if( numberBlend == 1)blendMode = OF_BLENDMODE_ADD;
    else if( numberBlend == 2)blendMode = OF_BLENDMODE_SCREEN;
    else if( numberBlend == 3)blendMode = OF_BLENDMODE_SUBTRACT;
    else if( numberBlend == 4)blendMode = OF_BLENDMODE_MULTIPLY;
    else if( numberBlend == 5)blendMode = OF_BLENDMODE_DISABLED;
    
}
Code Example #27
File: HandDetect.cpp  Project: gunbaek/Hand2Mouse
void HandDetect::skinDetect()
{
	setImage();
	cvFlip(image, image, 1);

	hsv = cvCreateImage(cvGetSize(image), 8, 3);
	msk = cvCreateImage(cvGetSize(image), 8, 1);
	hue = cvCreateImage(cvGetSize(image), 8, 1);
	
	backproject1 = cvCreateImage(cvGetSize(image), 8, 1);
	backproject2 = cvCreateImage(cvGetSize(image), 8, 1);

	cvCvtColor(image, hsv, CV_RGB2HSV);
	cvInRangeS(hsv, cvScalar(0, smin, MIN(vmin, vmax), 0), cvScalar(180, 256, MAX(vmin, vmax), 0), msk);
	cvSplit(hsv, hue, 0, 0, 0);
		
	cvCalcBackProject(&hue, backproject1, hist1);
	cvCalcBackProject(&hue, backproject2, hist2);
		
	cvThreshold(backproject1, backproject1, 50, 255, CV_THRESH_BINARY | CV_THRESH_OTSU);
	cvThreshold(backproject2, backproject2, 50, 255, CV_THRESH_BINARY | CV_THRESH_OTSU);

	cvOr(backproject1, backproject2, backproject, 0);

	cvErode(backproject, backproject, 0, 1);
	cvDilate(backproject, backproject, 0, 1);
	cvAnd(backproject, msk, backproject, 0);

	if(track_box.center.x!=-1&&track_box.center.y!=-1)
		preCen=cvPoint(handCen.x, handCen.y);
	else
		preCen=cvPoint(0,0);

	cvCamShift(backproject, track_window, cvTermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1), &track_comp, &track_box);
	if(track_comp.rect.height>0&&track_comp.rect.width>0)
		track_window = track_comp.rect;
	else
	{
		track_box.center.x=-1;
		track_box.center.y=-1;
	}
	

	cvReleaseImage(&hsv);
	cvReleaseImage(&msk);
	cvReleaseImage(&hue);
	cvReleaseImage(&backproject1);
	cvReleaseImage(&backproject2);
}
Code Example #28
//--------------------------------------------------------------
void testApp::update() {
	
	ofBackground(100, 100, 100);
	
	ofSetWindowTitle("Kinect Workshop Start - FPS: " + ofToString(ofGetFrameRate())); // report the actual frame rate
	kinect.update();
	
	// there is a new frame and we are connected
	if(kinect.isFrameNew()) {
		
		// load grayscale depth image from the kinect source
		grayImage.setFromPixels(kinect.getDepthPixels(), kinect.width, kinect.height);
		
		// we do two thresholds - one for the far plane and one for the near plane
		// we then do a cvAnd to get the pixels which are a union of the two thresholds
		if(bThreshWithOpenCV) {
			grayThreshNear = grayImage;
			grayThreshFar = grayImage;
			grayThreshNear.threshold(nearThreshold, true);
			grayThreshFar.threshold(farThreshold);
			cvAnd(grayThreshNear.getCvImage(), grayThreshFar.getCvImage(), grayImage.getCvImage(), NULL);
		} else {
			
			// or we do it ourselves - show people how they can work with the pixels
			unsigned char * pix = grayImage.getPixels();
			
			int numPixels = grayImage.getWidth() * grayImage.getHeight();
			for(int i = 0; i < numPixels; i++) {
				if(pix[i] < nearThreshold && pix[i] > farThreshold) {
					pix[i] = 255;
				} else {
					pix[i] = 0;
				}
			}
		}
		
		// update the cv images
		grayImage.flagImageChanged();
		
		// find contours between minBlobSize and maxBlobSize in area, considering up to
		// 20 blobs; find-holes is false, so interior contours are not returned.
		// findContours(ofxCvGrayscaleImage& input, int minArea, int maxArea, int nConsidered, bool bFindHoles, bool bUseApproximation);
		contourFinder.findContours(grayImage, minBlobSize, maxBlobSize, 20, false);
	}
	
#ifdef USE_TWO_KINECTS
	kinect2.update();
#endif
}
Code Example #29
//--------------------------------------------------------------
void testApp::update(){
    // capture image
    kinect.update();

    if (kinect.isFrameNew()){
        
        // load and store the depth image
        grayscaleImgInput.setFromPixels(kinect.getDepthPixels(), w, h);
        
        grayscaleImgFar = grayscaleImgInput;
        grayscaleImgNear = grayscaleImgInput;
        grayscaleImgOutput = grayscaleImgInput;
        
        // apply the near/far thresholds when isThreshold is set
        if (isThreshold) {
            grayscaleImgNear.threshold(thresholdNear);
            grayscaleImgFar.threshold(thresholdFar, true);
            cvAnd(grayscaleImgFar.getCvImage(), grayscaleImgNear.getCvImage(), grayscaleImgOutput.getCvImage(), NULL);
            grayscaleImgOutput.flagImageChanged();

        }
        
        // blur (soften) the output image
        if (isBlur) {
            grayscaleImgOutput.dilate_3x3();
            grayscaleImgOutput.dilate_3x3();
            grayscaleImgOutput.blur(5);
            grayscaleImgOutput.flagImageChanged();
        }
        
        if (isContour) {
            contourFinder.findContours(grayscaleImgOutput, 5, (w * h)/2, 3, false, true);
        }
    }
    
    if (!isDebug) {
        if (PartikelSystem.size() < 10) {
            //ofPixels pixels = kinect.getPixelsRef();
            
            PartikelSystem.push_back(new Partikel(ofRandom(0, w), ofRandom(0, h)));
           
            //PartikelSystem.push_back(new Partikel(pixels.getWidth(), pixels.getHeight()));           // create a new particle

            //PartikelSystem.push_back(new Partikel(w/2, h/2));           // create a new particle

//            PartikelSystem.push_back(new Partikel(w/2+200, h/2+2));           // create a new particle
//            PartikelSystem.push_back(new Partikel(w/2+40, h/2+4));           // create a new particle
        }
    }
}
Code Example #30
File: improc.cpp  Project: awg21/sikle_lin
int convRGB(IplImage* srcRGB, IplImage* dstRGB, CvSize sizIm)
{
	// create the single-channel images
	srcR = cvCreateImage( sizIm, IPL_DEPTH_8U, 1 );
	srcG = cvCreateImage( sizIm, IPL_DEPTH_8U, 1 );
	srcB = cvCreateImage( sizIm, IPL_DEPTH_8U, 1 );

	srcRR = cvCreateImage( sizIm, IPL_DEPTH_8U, 1 );
	srcGR = cvCreateImage( sizIm, IPL_DEPTH_8U, 1 );
	srcBR = cvCreateImage( sizIm, IPL_DEPTH_8U, 1 );

	// split into channels
	cvSplit(srcRGB, srcB, srcG, srcR, 0);
		
	// threshold each channel to its [min, max] range
	cvInRangeS(srcR, cvScalar(Rmin), cvScalar(Rmax), srcRR);
	cvInRangeS(srcG, cvScalar(Gmin), cvScalar(Gmax), srcGR);
	cvInRangeS(srcB, cvScalar(Bmin), cvScalar(Bmax), srcBR);

	// "ñêëåèâàåì" êàíàëû
	cvAnd( srcRR, srcGR, dstRGB );
	cvAnd( dstRGB, srcBR, dstRGB );

	// display the image in a window
	cvShowImage("RGBVideo", dstRGB);

	// free resources
	cvReleaseImage( &srcR );
	cvReleaseImage( &srcG );
	cvReleaseImage( &srcB );
	cvReleaseImage( &srcRR );
	cvReleaseImage( &srcGR );
	cvReleaseImage( &srcBR );
	
	return 0;
}
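Since each per-channel test here is itself a cvInRangeS, the three splits and two cvAnd calls can be collapsed: cvInRangeS on the packed 3-channel image ANDs the per-channel tests internally. An equivalent sketch (channel order B, G, R, matching the cvSplit above; dstRGB stays a single-channel 8-bit mask):

// Equivalent single-call sketch: per-channel range tests are ANDed by
// cvInRangeS itself, so no splitting or explicit cvAnd is needed.
cvInRangeS(srcRGB,
           cvScalar(Bmin, Gmin, Rmin, 0),
           cvScalar(Bmax, Gmax, Rmax, 0),
           dstRGB);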