Esempio n. 1
0
// Composites one CG frame over the background with a hand-drawn look:
// posterize, colored outline around the CG silhouette, scanline pattern,
// then alpha blend into `res`.
//   back  - background image
//   frame - current CG frame (source of the effect)
//   alpha - alpha mask, updated in place (outline is OR-ed in at half weight)
//   mask  - CG coverage mask; NOTE: clobbered and reused as scratch
//   res   - output image
// imageA..imageE, imageC (color pattern), scanningLine and drawMode are
// members; imageA/imageB/imageD/imageE serve as scratch buffers here.
void CHandDrawEffect::EffectImage(IplImage* back, IplImage* frame, IplImage* alpha, IplImage* mask, IplImage* res)
{
	if(drawMode & 0x01) {
		// Basic effect: posterize the CG frame into imageA
		Posterize(0xD0, frame, imageA);
	//	DrawHatching(frame, imageA);
		cvAnd(imageA, mask, imageB); // cut out the CG region of the effected frame

		// Outline: dilate mask and its inverse by different amounts and XOR
		// them, leaving a band along the silhouette edge in `mask`
		cvNot(mask, imageA);
		cvDilate(imageA, imageD, 0, 1);
		cvDilate(mask, imageE, 0, 3);
		cvXor(imageE, imageD, mask);

		// Update the alpha mask: add the outline band at half intensity
		cvNot(mask, imageA);
		cvConvertScale(imageA, imageA, 0.5);
		cvOr(alpha, imageA, alpha);

		// Colored outline: tint the band with the color pattern imageC
		// (presumably a color/texture member image — TODO confirm)
		cvNot(mask, imageA);
		cvAnd(imageA, imageC, imageA);
		cvOr(imageA, imageB, imageB);

		// Scanlines: darken with the scanline pattern image
		cvAnd(imageB, scanningLine, imageB);

		// Alpha blend the composed CG over the background into res
		AlphaBlend(back, imageB, alpha, res);

		if(0) { //drawMode & 0x02) {  -- disabled edge-drawing variant
		//	DrawEdge(frame, imageB, res, 2);

			cvNot(mask, frame);
			cvDilate(frame, imageA, 0, 1);
			cvDilate(mask, imageB, 0, 3);
			cvXor(imageA, imageB, mask);
			cvAnd(mask, res, res);

			// Colored lines
			cvNot(mask, imageA);
			cvAnd(imageA, scanningLine, imageA);
			cvAnd(imageA, imageC, imageA);
			cvOr(res, imageA, res);
		}
	} else if(drawMode & 0x02) {
	//	DrawEdge(frame, imageB, res, 2);
	}
}
Esempio n. 2
0
static void node_composit_exec_cvXOr(void *data, bNode *node, bNodeStack **in, bNodeStack **out)
{
    //TODO: Use atach buffers
	CvArr* dst;
	CvArr* src1;
	CvArr* src2;
	CvArr* mask;
	CV_FUNCNAME( "cvXOr" ); 
	if(out[0]->hasoutput==0) return;
	cvSetErrMode(1); //Parent mode error
	__CV_BEGIN__;
	if((in[0]->data)&&(in[1]->data)){
		CV_CALL(src1 = in[0]->data);
		CV_CALL(src2 = in[1]->data);
		if(!BOCV_checkAreSameType(src1, src2))
		        CV_ERROR( CV_StsBadArg,"The source inputs are differents" );
			
		CV_CALL(mask = in[2]->data);
		CV_CALL(dst=BOCV_CreateArrFrom(src1));
		if(dst)		
		{
			CV_CALL(cvXor(src1, src2, dst, mask));
		 	CV_CALL(out[0]->data= dst);
		}
	}
	__CV_END__;
}
// TODO: revise later
// Detects the key-button corners of the paper keyboard in `srcImage`,
// perspective-warps them into a canonical 640x480 frame for verification
// (detectKeyButtonCorner), then maps the verified corners back into the
// source-image coordinate system via the inverted warp matrix.
//   srcImage - BGR camera frame (expected 640x480 — TODO confirm)
// Results are written into the keyButtonCorner member array.
void FkPaperKeyboard_TypeA::cornerVerification(IplImage* srcImage){
	CvSize size = cvGetSize(srcImage);
	IplImage* eigImage = cvCreateImage(size, IPL_DEPTH_8U,1);
	IplImage* tempImage = cvCreateImage(size, IPL_DEPTH_8U, 1);
	IplImage* grayImage = cvCreateImage(size, IPL_DEPTH_8U, 1);
	IplImage* veriImage = cvCreateImage(size, IPL_DEPTH_8U, 1);
	IplImage* dstImage = cvCreateImage(size, IPL_DEPTH_8U, 1);
	IplImage* mask = cvCreateImage(size, IPL_DEPTH_8U, 1);
	IplImage* mask2 = cvCreateImage(size, IPL_DEPTH_8U, 1);
	// Ignore a 10px border so flood fill can start from a known background pixel.
	CvRect rect = cvRect(10, 10, 640 - 20, 480 - 20);

	CvPoint2D32f srcQuad[4], dstQuad[4];
	CvMat* warp_matrix = cvCreateMat(3,3, CV_32FC1);
	CvMat* warp_matrix_invert = cvCreateMat(3,3, CV_32FC1);
	CvMat* result = cvCreateMat(3, 1, CV_32FC1);
	CvMat* dst = cvCreateMat(3, 1,CV_32FC1);

	// Capacity of the keyButtonCorner member array (4 corners x 79 keys).
	const int MAX_KEY_BUTTON_CORNERS = 316;
	// In/out: cvGoodFeaturesToTrack overwrites this with the number of
	// corners it actually found.
	int keyButtonCornerCount = MAX_KEY_BUTTON_CORNERS;

	cvCvtColor(srcImage, grayImage, CV_BGR2GRAY);
	cvSetImageROI(grayImage, rect);
	cvSetImageROI(mask, rect);
	cvSetImageROI(dstImage, rect);
	cvSetImageROI(mask2, rect);

	// Extract pixels in the 100..255 band into the mask.
	cvInRangeS(grayImage, cvScalar(100, 100, 100), cvScalar(255, 255, 255), mask);
	cvCopy(mask, mask2);

	//cvShowImage("mask", mask);
	//cvShowImage("mask2", mask2);

	// Flood-fill the background to black from (10,10), then XOR with the
	// untouched copy: only the filled (background-connected) regions remain.
	cvFloodFill(mask, cvPoint(10, 10), cvScalar(0, 0, 0));
	cvXor(mask2, mask, dstImage);

	//cvShowImage("mask3", mask);
	//cvShowImage("mask4", mask2);
	//cvShowImage("dstImage", dstImage);

	// Corner extraction on the final image (corners of each keypad button),
	// refined to sub-pixel accuracy.
	cvGoodFeaturesToTrack(dstImage, eigImage, tempImage, keyButtonCorner, &keyButtonCornerCount, 0.01, 7, NULL, 7, 0);
	cvFindCornerSubPix(dstImage, keyButtonCorner, keyButtonCornerCount, cvSize(3, 3), cvSize(-1, -1), cvTermCriteria(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03));

	cvResetImageROI(dstImage);
	// Shift ROI-relative coordinates back to full-image coordinates.
	// Only the corners actually found are valid; the original iterated the
	// full 316 and touched stale entries when fewer were detected.
	for(int i = 0; i < keyButtonCornerCount; i++){
		keyButtonCorner[i].x += rect.x;
		keyButtonCorner[i].y += rect.y;
	}

	initKeyButtonCorner();

	// Build the source quad from the four outermost detected corners,
	// padded outward by 10px, and map it to the full 640x480 frame.
	srcQuad[CLOCKWISE_1].x = keyButtonCorner[315].x+10;
	srcQuad[CLOCKWISE_1].y = keyButtonCorner[315].y-10;
	srcQuad[CLOCKWISE_5].x = keyButtonCorner[31].x + 10;
	srcQuad[CLOCKWISE_5].y = keyButtonCorner[31].y + 10;
	srcQuad[CLOCKWISE_7].x = keyButtonCorner[0].x - 10;
	srcQuad[CLOCKWISE_7].y = keyButtonCorner[0].y + 10;
	srcQuad[CLOCKWISE_11].x = keyButtonCorner[290].x - 10;
	srcQuad[CLOCKWISE_11].y = keyButtonCorner[290].y - 10;
	dstQuad[CLOCKWISE_1].x = 640;
	dstQuad[CLOCKWISE_1].y = 0;
	dstQuad[CLOCKWISE_5].x = 640;
	dstQuad[CLOCKWISE_5].y = 480;
	dstQuad[CLOCKWISE_7].x = 0;
	dstQuad[CLOCKWISE_7].y = 480;
	dstQuad[CLOCKWISE_11].x = 0;
	dstQuad[CLOCKWISE_11].y = 0;
	cvGetPerspectiveTransform(srcQuad, dstQuad, warp_matrix);

	cvWarpPerspective(dstImage, veriImage, warp_matrix);
	detectKeyButtonCorner(veriImage);
	cvInvert(warp_matrix, warp_matrix_invert);
	// Map each verified corner back through the inverse homography
	// (divide by the homogeneous coordinate t).
	// NOTE(review): assumes detectKeyButtonCorner repopulates all 316
	// entries — TODO confirm against its implementation.
	for(int i = 0; i < MAX_KEY_BUTTON_CORNERS; i++){
		cvmSet(dst, 0, 0, keyButtonCorner[i].x);
		cvmSet(dst, 1, 0, keyButtonCorner[i].y);
		cvmSet(dst, 2, 0, 1);

		cvMatMul(warp_matrix_invert, dst, result);
		float t = cvmGet(result, 2,0);
		keyButtonCorner[i].x = cvmGet(result, 0,0)/t ;
		keyButtonCorner[i].y = cvmGet(result, 1,0)/t ;
	}
	cvResetImageROI(srcImage);
	cvResetImageROI(mask);
	cvReleaseImage(&eigImage);
	cvReleaseImage(&tempImage);
	cvReleaseImage(&grayImage);
	cvReleaseImage(&veriImage);
	cvReleaseImage(&dstImage);
	cvReleaseImage(&mask);
	cvReleaseImage(&mask2);
	cvReleaseMat(&warp_matrix);
	cvReleaseMat(&warp_matrix_invert);
	cvReleaseMat(&result);
	cvReleaseMat(&dst);
}
Esempio n. 4
0
// Loads two images as grayscale, binarizes them via a trackbar-driven
// threshold (on_trackbar), then combines them with XOR / AND / OR
// depending on the key pressed ('x' / 'a' / 'o', with 0x100000 modifier
// bits from cvWaitKey), and optionally saves the result when 's' is
// pressed afterwards.
// argv[1], argv[2]: paths of the two input images. Returns 0 on success,
// 1 when an input image cannot be loaded.
int main(int argc, char **argv)
{
	if (argc != 3)
	{
		help();
		return 0;
	}

	char *image1name = argv[1];
	char *image2name = argv[2];

	//create two windows
	cvNamedWindow(WINDOW1NAME, CV_WINDOW_NORMAL);
	cvNamedWindow(WINDOW2NAME, CV_WINDOW_NORMAL);

	//read the two images as grayscale
	pGrayImage1 = cvLoadImage(image1name, CV_LOAD_IMAGE_GRAYSCALE);
	pGrayImage2 = cvLoadImage(image2name, CV_LOAD_IMAGE_GRAYSCALE);
	if (pGrayImage1 == NULL || pGrayImage2 == NULL) {
		// Bail out before anything dereferences the images (cvShowImage,
		// cvGetSize below would crash on a NULL IplImage*).
		fprintf(stderr, "failed to load '%s' or '%s'\n", image1name, image2name);
		cvReleaseImage(&pGrayImage1);
		cvReleaseImage(&pGrayImage2);
		cvDestroyWindow(WINDOW1NAME);
		cvDestroyWindow(WINDOW2NAME);
		return 1;
	}

	//show the two gray images
	cvShowImage(WINDOW1NAME, pGrayImage1);
	cvShowImage(WINDOW2NAME, pGrayImage2);

	cvWaitKey(0);

	//show the binarized images with a threshold track slider
	cvNamedWindow(WINDOW1BINARYNAME, CV_WINDOW_NORMAL);
	cvNamedWindow(WINDOW2BINARYNAME, CV_WINDOW_NORMAL);
	pBinaryImage1 = cvCreateImage(cvGetSize(pGrayImage1), IPL_DEPTH_8U, 1);
	pBinaryImage2 = cvCreateImage(cvGetSize(pGrayImage2), IPL_DEPTH_8U, 1);
	int threshold = 0;
	cvCreateTrackbar(TRACKNAME, WINDOW1BINARYNAME, &threshold, 254, on_trackbar);
	on_trackbar(1);

	int c = cvWaitKey(0);
	printf("%c, %d, %x, %x, %x, %x\n", c, c, c, 'a', 'o', 'x');
	IplImage * pImageXor = cvCreateImage(cvGetSize(pGrayImage1), IPL_DEPTH_8U, 1);
	// Declared BEFORE the goto below: in C++ a goto may not jump over the
	// initialization of a local variable (the original declared these
	// after `goto err_input;`, which is ill-formed).
	int s = 0;
	char* filename = NULL;
	if (c == 0x100078) {        // 'x': XOR the two binary images
		cvXor(pBinaryImage1, pBinaryImage2, pImageXor, NULL);
		cvNamedWindow(WINDOWXORNAME, CV_WINDOW_NORMAL);
		cvShowImage(WINDOWXORNAME, pImageXor);
	} else if (c == 0x100061) { // 'a': AND
		cvAnd(pBinaryImage1, pBinaryImage2, pImageXor, NULL);
		cvNamedWindow(WINDOWANDNAME, CV_WINDOW_NORMAL);
		cvShowImage(WINDOWANDNAME, pImageXor);
	} else if (c == 0x10006f) { // 'o': OR
		cvOr(pBinaryImage1, pBinaryImage2, pImageXor, NULL);
		cvNamedWindow(WINDOWORNAME, CV_WINDOW_NORMAL);
		cvShowImage(WINDOWORNAME, pImageXor);
	} else {
		goto err_input;
	}

	// 's': save the combined image under a name matching the operation.
	s = cvWaitKey(0);
	if (s == 0x100073) {
		if (c == 0x100078) {
			filename = "xor.jpg";
		} else if (c == 0x10006f) {
			filename = "or.jpg";
		} else if (c == 0x100061) {
			filename = "and.jpg";
		}
		cvSaveImage(filename, pImageXor, 0);
	}

	//xor two images
	/*cvOr(pGrayImage1, pGrayImage2, pImageXor, NULL);
	cvNamedWindow("xor", CV_WINDOW_NORMAL);
	cvShowImage("xor", pImageXor);
	cvWaitKey(0);
	cvReleaseImage(&pImageXor);
	cvDestroyWindow("xor");*/

err_input:
	cvReleaseImage(&pImageXor);
	cvReleaseImage(&pGrayImage1);
	cvReleaseImage(&pGrayImage2);
	cvReleaseImage(&pBinaryImage1);
	cvReleaseImage(&pBinaryImage2);
	cvDestroyWindow(WINDOW1NAME);
	cvDestroyWindow(WINDOW2NAME);
	cvDestroyWindow(WINDOW1BINARYNAME);
	cvDestroyWindow(WINDOW2BINARYNAME);
	return 0;
}
//--------------------------------------------------------------
//--------------------------------------------------------------
// Per-frame update: pulls depth frames from all Kinects, stitches them
// into one grayscale image, cleans it up (erode/dilate/blur), thresholds
// near+far planes, and runs contour detection on the frame-to-frame XOR
// (changed pixels) to feed the blob manager. Also maintains rolling
// histories of active/delta pixel counts.
void kinectTracker::update()
{
    bool newFrame = false;
    // Pull a new depth frame from each device into its FBO.
    for (int i = 0; i < NUM_KINECTS; i++)
    {
        kinectDevice[i].update();
        if (kinectDevice[i].isFrameNew())
        {
            newFrame = true;
            kinectGrayFBO[i].begin();
            ofClear(0, 0, 0);
            kinectDevice[i].drawDepth(kinectOffset[i].x, kinectOffset[i].y, kinectGrayFBO[i].getWidth(), kinectGrayFBO[i].getHeight());
            kinectGrayFBO[i].end();
        }
    }
    
    if(newFrame)
    {
        // Tile each device's depth image into grayImage:
        // 2 columns (i%2) x rows (i/2).
        for (int i = 0; i < NUM_KINECTS; i++)
        {
            kinectGrayFBO[i].readToPixels(kinectPixels[i]);
            cvi[i].setFromPixels(kinectPixels[i]);
            
            kinectGrayImage[i] = cvi[i];
            
            int rx = kinectDevice[i].getWidth() * (i%2);
            int ry = kinectDevice[i].getHeight() * (i/2);
            
            grayImage.setROI(rx, ry, kinectDevice[i].getWidth(), kinectDevice[i].getHeight());
            grayImage.setRoiFromPixels(kinectGrayImage[i].getPixels(), kinectGrayImage[i].width, kinectGrayImage[i].height);
        }

        // Restore the ROI to the full image before processing.
        grayImage.setROI(0, 0, grayImage.width, grayImage.height);
        
        // we do two thresholds - one for the far plane and one for the near plane
        // we then do a cvAnd to get the pixels which are a union of the two thresholds
        
        if (erodeIterations > 0)
        {
            grayImage.erode(erodeIterations);
        }
        
        if (dilateIterations > 0)
        {
            grayImage.dilate(dilateIterations);
        }
        
        if (enableBlur)
        {
            grayImage.blurHeavily();            
        }
        
        grayThreshNear = grayImage;
        grayThreshFar = grayImage;
        grayThreshNear.threshold(nearThreshold, true); // inverted threshold for the near plane
        grayThreshFar.threshold(farThreshold);
        cvAnd(grayThreshNear.getCvImage(), grayThreshFar.getCvImage(), grayImage.getCvImage(), NULL);
        grayImage.flagImageChanged();
        
        // Count of pixels inside the near/far band.
        activePixels = cvCountNonZero(grayImage.getCvImage());
        
        // XOR against the previous frame: non-zero where pixels changed.
        cvXor(grayImage.getCvImage(), prevGrayImage.getCvImage(), grayImageXorred.getCvImage(), 0);
        deltaPixels = cvCountNonZero(grayImageXorred.getCvImage());
        grayImageXorred.flagImageChanged();
        
        // Rolling histories, trimmed to numAverageSamples entries.
        activePixelsHistory.push_front(activePixels);
        deltaPixelsHistory.push_front(deltaPixels);
        
        while (activePixelsHistory.size() > numAverageSamples)
        {
            activePixelsHistory.pop_back();
        }
        
        while (deltaPixelsHistory.size() > numAverageSamples)
        {
            deltaPixelsHistory.pop_back();
        }
        
        prevGrayImage = grayImage;
        
//        contourFinder.findContours(grayImage, 1000, (grayImage.width*grayImage.height)/2, 20, false);
//        blobsManager.update(contourFinder.blobs);
        
        // Contours are found on the XOR (motion) image, not the raw mask.
        contourFinder.findContours(grayImageXorred, 1000, (grayImage.width*grayImage.height)/2, 20, false);
        blobsManager.update(contourFinder.blobs);
    }
}
// In-place bitwise XOR: combine this frame's pixels with those of *B,
// writing the result back into this frame's image.
void Frame::xOr(Frame *B)
{
	IplImage* const other = B->image;
	cvXor(image, other, image);
}