Example #1
0
void CHandDrawEffect::EffectImage(IplImage* back, IplImage* frame, IplImage* alpha, IplImage* mask, IplImage* res)
{
	// Composites a hand-drawn-style effect: posterizes the frame, builds a
	// dilated outline around the mask, updates the alpha mask, applies a
	// scan-line pattern, and alpha-blends the result over `back` into `res`.
	// imageA..imageE are presumably member scratch buffers — TODO confirm.
	// Note: `mask` is overwritten (it becomes the outline band) and `alpha`
	// is updated in place.
	if(drawMode & 0x01) {
		// Basic effect
		Posterize(0xD0, frame, imageA);
	//	DrawHatching(frame, imageA);
		cvAnd(imageA, mask, imageB); // cut out the CG part after effect processing

		// Outline: XOR of a 1-step dilation of the inverted mask and a
		// 3-step dilation of the mask yields a band around the silhouette.
		cvNot(mask, imageA);
		cvDilate(imageA, imageD, 0, 1);
		cvDilate(mask, imageE, 0, 3);
		cvXor(imageE, imageD, mask);

		// Update the alpha mask (half-intensity outline OR'ed in)
		cvNot(mask, imageA);
		cvConvertScale(imageA, imageA, 0.5);
		cvOr(alpha, imageA, alpha);

		// Colored outline (imageC presumably holds the outline color — TODO confirm)
		cvNot(mask, imageA);
		cvAnd(imageA, imageC, imageA);
		cvOr(imageA, imageB, imageB);

		// Scan lines
		cvAnd(imageB, scanningLine, imageB);

		// Alpha blend over the background
		AlphaBlend(back, imageB, alpha, res);

		if(0) { // disabled branch; was: drawMode & 0x02
		//	DrawEdge(frame, imageB, res, 2);

			cvNot(mask, frame);
			cvDilate(frame, imageA, 0, 1);
			cvDilate(mask, imageB, 0, 3);
			cvXor(imageA, imageB, mask);
			cvAnd(mask, res, res);

			// Colored lines
			cvNot(mask, imageA);
			cvAnd(imageA, scanningLine, imageA);
			cvAnd(imageA, imageC, imageA);
			cvOr(res, imageA, res);
		}
	} else if(drawMode & 0x02) {
	//	DrawEdge(frame, imageB, res, 2);
	}
}
void THISCLASS::OnReloadConfiguration() {
	// Mask mode: map the "Mode" configuration string onto the mode enum.
	// The two mixed modes (white-black, black-white) store the mask image
	// inverted in place.
	wxString mode = GetConfigurationString(wxT("Mode"), wxT("black-black"));
	bool invertMask = false;
	if (mode == wxT("white-white")) {
		mMode = cMode_WhiteWhite;
	} else if (mode == wxT("white-black")) {
		mMode = cMode_WhiteBlack;
		invertMask = true;
	} else if (mode == wxT("black-white")) {
		mMode = cMode_BlackWhite;
		invertMask = true;
	} else {
		// Any unrecognized value falls back to the default black-black mode.
		mMode = cMode_BlackBlack;
	}
	if (invertMask) {
		cvNot(mMaskImage, mMaskImage);
	}
}
Example #3
0
// Shows the default camera's feed alongside its color-inverted version
// until any key is pressed.
int main()
{
	IplImage* image = 0;
	IplImage* reverse = 0;

	CvCapture* video = cvCaptureFromCAM(-1);

	cvNamedWindow("Original Video",0);
	cvNamedWindow("Reverse Video",0);

	while(1)
	{
		cvGrabFrame(video);
		image = cvRetrieveFrame(video); // frame is owned by the capture — do not release
		cvShowImage("Original Video",image);

		// BUG FIX: allocate the output buffer once. The original created a
		// new image on every iteration without releasing it, leaking one
		// frame-sized buffer per loop pass.
		if(!reverse)
			reverse = cvCreateImage(cvGetSize(image),image->depth,image->nChannels);

		cvNot(image,reverse);

		if(cvWaitKey(10) >=0)
			break;

		reverse->origin=image->origin; // match the source's row order for display
		cvShowImage("Reverse Video",reverse);
	}

	cvReleaseImage(&reverse);
	cvReleaseCapture(&video);
	cvDestroyWindow("Reverse Video");
	cvDestroyWindow("Original Video");
	return 0;
}
// Inverts each image named on the command line and saves it next to the
// original as "modified-<name>". Unloadable images are skipped.
int main(int argc,char *argv[])
{
  IplImage* loadedImg; // where we'll store the current frame

  if(argc < 2) {
    printf("usage: %s image1 [image2] [image3] ...\n", argv[0]);
    return 0;
  }

  for(int i = 1; i < argc; i++) {
    loadedImg = cvLoadImage(argv[i]); // load image

    // check for errors:
    if(!loadedImg)
      continue;

    cvNot(loadedImg, loadedImg); //some funcions can do in-place modification

    // BUG FIX: use snprintf — the original's sprintf could overflow the
    // fixed 100-byte buffer for long file names. Truncation is the safe
    // failure mode here.
    char buffer[100];
    snprintf(buffer, sizeof(buffer), "modified-%s", argv[i]); //new filename

    cvSaveImage(buffer, loadedImg); //save the modified image

    cvReleaseImage(&loadedImg); //release it now that we're done
  }

  return 0;
}
Example #5
0
/* chain function
 * this function does the actual processing: runs Canny edge detection on
 * the incoming video buffer and pushes the (in-place modified) buffer
 * downstream.
 */
static GstFlowReturn
gst_edgedetect_chain (GstPad * pad, GstBuffer * buf)
{
  Gstedgedetect *filter;

  filter = GST_EDGEDETECT (GST_OBJECT_PARENT (pad));

  /* Wrap the incoming buffer's pixels in the pre-allocated IplImage header
   * (no copy; cvImage does not own this memory). */
  filter->cvImage->imageData = (char *) GST_BUFFER_DATA (buf);

  cvCvtColor (filter->cvImage, filter->cvGray, CV_RGB2GRAY);
  /* NOTE(review): the cvSmooth/cvNot results written into cvEdge are fully
   * overwritten by cvCanny below — they appear to be dead leftovers from
   * the OpenCV edge-detection demo. Confirm before removing. */
  cvSmooth (filter->cvGray, filter->cvEdge, CV_BLUR, 3, 3, 0, 0);
  cvNot (filter->cvGray, filter->cvEdge);
  cvCanny (filter->cvGray, filter->cvEdge, filter->threshold1,
      filter->threshold2, filter->aperture);

  cvZero (filter->cvCEdge);
  if (filter->mask) {
    /* Masked mode: copy the original colors through at edge locations. */
    cvCopy (filter->cvImage, filter->cvCEdge, filter->cvEdge);
  } else {
    /* Plain mode: show the edge map itself expanded to RGB. */
    cvCvtColor (filter->cvEdge, filter->cvCEdge, CV_GRAY2RGB);
  }
  /* Point the buffer at cvCEdge's pixel data and push downstream. */
  gst_buffer_set_data (buf, filter->cvCEdge->imageData,
      filter->cvCEdge->imageSize);

  return gst_pad_push (filter->srcpad, buf);
}
void thresholdCalculator::calculateAverages(ofxCvGrayscaleAdvanced & smallCurrentImg, ofxCvGrayscaleAdvanced & maskImg, ofRectangle & targetRect) {
	// Measures brightness statistics inside the target ROI: the average
	// pupil value (computed twice to suppress glint) and the average /
	// min / max of the eye-white region (the inverse of the pupil mask).
	// Results are stored in the members pupilAvg, whiteAvg, whiteMin/Max
	// and whiteLocMin/Max.

	// Map the target rectangle into the downscaled image's coordinates;
	// `divisor` is presumably the downscale factor — TODO confirm.
	roi.x = targetRect.x / divisor;
	roi.y = targetRect.y / divisor;
	
	maskImg.setROI(roi);
	smallCurrentImg.setROI(roi);
	
	// First-pass pupil average over the mask area.
	CvScalar tempPupilAvg = cvAvg(smallCurrentImg.getCvImage(), maskImg.getCvImage());
	cvNot(maskImg.getCvImage(), notDiffImg.getCvImage()); // inverse mask = non-pupil (white) area
	pupilAvg = tempPupilAvg.val[0]; // provisional value, refined below
	
	// get average of pupil black iteratively(get average twice) to remove the influence of glint
	cvThreshold(smallCurrentImg.getCvImage(), farFromAvg, pupilAvg + 30, 255, CV_THRESH_BINARY);		// 30 is the distance from average.
	cvSub(maskImg.getCvImage(), farFromAvg, newMask);								// make a mask to get rid of those far points.
	CvScalar newPupilAvg = cvAvg(smallCurrentImg.getCvImage(), newMask);			// get new average value.
	
	// get average, min and max value of white area of an eye.
	CvScalar tempWhiteAvg = cvAvg(smallCurrentImg.getCvImage(), notDiffImg.getCvImage());
	for (int i = 0; i < 6; i++) notDiffImg.erode();				// this might be very useful to reduce the influence of small noise & glint
	cvMinMaxLoc(smallCurrentImg.getCvImage(), &whiteMin, &whiteMax, &whiteLocMin, &whiteLocMax, notDiffImg.getCvImage());

	maskImg.resetROI();
	smallCurrentImg.resetROI();
	
	pupilAvg = newPupilAvg.val[0];					// value is in the first element of CvScalar
	whiteAvg = tempWhiteAvg.val[0];
	
}
Example #7
0
int main(int args,char *argv[]){
	// 视频结构
	CvCapture *capture = NULL;
	// 获得摄像头数据
	// CV_CAP_ANY 自动
	capture = cvCreateCameraCapture(CV_CAP_ANY);
	if (!capture) {
		exit(0);
	}
	cvNamedWindow("camera",CV_WINDOW_AUTOSIZE);
	// 图片结构
	IplImage *image = NULL;
	while(image = cvQueryFrame(capture)){
		// 反色处理
		cvNot(image,image);
		// 现实图像
		cvShowImage("camera",image);
		// 监听键盘输入,如果有输入则跳出while
		if( cvWaitKey(2) >= 0 )
			break;
	}
	// 释放视频
	cvReleaseCapture(&capture);
	return 0;
}
Example #8
0
/* chain function
 * this function does the actual processing: runs Canny edge detection on
 * the input buffer and pushes the result downstream in a freshly
 * allocated buffer.
 */
static GstFlowReturn
gst_edge_detect_chain (GstPad * pad, GstBuffer * buf)
{
  GstEdgeDetect *filter;
  GstBuffer *outbuf;

  filter = GST_EDGE_DETECT (GST_OBJECT_PARENT (pad));

  /* Wrap the incoming buffer's pixels in the pre-allocated IplImage header
   * (no copy; cvImage does not own this memory). */
  filter->cvImage->imageData = (char *) GST_BUFFER_DATA (buf);

  cvCvtColor (filter->cvImage, filter->cvGray, CV_RGB2GRAY);
  /* NOTE(review): the cvSmooth/cvNot results written into cvEdge are fully
   * overwritten by cvCanny below — apparently dead leftovers from the
   * OpenCV edge demo. Confirm before removing. */
  cvSmooth (filter->cvGray, filter->cvEdge, CV_BLUR, 3, 3, 0, 0);
  cvNot (filter->cvGray, filter->cvEdge);
  cvCanny (filter->cvGray, filter->cvEdge, filter->threshold1,
      filter->threshold2, filter->aperture);

  cvZero (filter->cvCEdge);
  if (filter->mask) {
    /* Masked mode: copy original colors through at edge locations. */
    cvCopy (filter->cvImage, filter->cvCEdge, filter->cvEdge);
  } else {
    /* Plain mode: show the edge map itself expanded to RGB. */
    cvCvtColor (filter->cvEdge, filter->cvCEdge, CV_GRAY2RGB);
  }

  /* Copy the result into a new output buffer, carry over the input's
   * metadata (timestamps, caps), release the input and push downstream. */
  outbuf = gst_buffer_new_and_alloc (filter->cvCEdge->imageSize);
  gst_buffer_copy_metadata (outbuf, buf, GST_BUFFER_COPY_ALL);
  memcpy (GST_BUFFER_DATA (outbuf), filter->cvCEdge->imageData,
      GST_BUFFER_SIZE (outbuf));

  gst_buffer_unref (buf);
  return gst_pad_push (filter->srcpad, outbuf);
}
/**
 * Update the displayed image: grab a frame, apply the configured
 * grayscale / edge / stroke / inverse effects, and show the result.
 */
void EdgeDetector::update() {
	if (camera == NULL) return;

	cvWaitKey(33); // ~30 fps pacing

	cameraFrame = cvQueryFrame(camera); // frame owned by the capture
	// Drop the previous result (assumes resultFrame starts out NULL — TODO confirm).
	cvReleaseImage(&resultFrame);
	
	if (isGrayScaleEffect) {
		IplImage* tempFrame = cvCloneImage(cameraFrame);
		// BUG FIX: the original passed CV_LOAD_IMAGE_GRAYSCALE (an imread
		// flag whose value is 0) as the channel count; a grayscale IplImage
		// needs exactly 1 channel.
		resultFrame = cvCreateImage(imageSize, cameraFrame->depth, 1);
		cvCvtColor(tempFrame, resultFrame, CV_BGR2GRAY);
		cvReleaseImage(&tempFrame);
	} else resultFrame = cvCloneImage(cameraFrame);

	if (!isOriginalEffect) {
		if (isStrokeEffect) {
			// NOTE(review): if applyOperator returns a different image, the
			// clone assigned to tempFrame above it leaks — verify its contract.
			IplImage* tempFrame = cvCloneImage(resultFrame);
			tempFrame = edgeDetectOperator->applyOperator(tempFrame);
			// Stroke effect = original minus its edge map.
			cvSub(resultFrame, tempFrame, resultFrame);
			cvReleaseImage(&tempFrame);
		} else resultFrame = edgeDetectOperator->applyOperator(resultFrame);
	}

	if (isInverseEffect) {
		IplImage* tempFrame = cvCloneImage(resultFrame);
		cvNot(tempFrame, resultFrame);
		cvReleaseImage(&tempFrame);
	}

	cvShowImage(getWindowName(), resultFrame);
}
Example #10
0
File: main.c Project: ntavish/tri
void draw(int dummy)
{
	// One render pass: rebuild the Delaunay subdivision, blur/threshold the
	// value plane, extract contours, recombine the HSV planes and draw the
	// triangulation on top. Operates entirely on file-scope globals
	// (storage, trianglestore, subdiv, rect, in/out, origH/S/V, temp,
	// contours, k) — presumably set up elsewhere; TODO confirm.

	//delaunay
	cvClearMemStorage(storage);
	subdiv=cvCreateSubdivDelaunay2D(rect,trianglestore);

	// Blur then threshold the value channel, swapping in/out between stages.
	blur(origV, out);
	SWAP(in,out);
	thresh(in, out);
	findContours(out, storage, &contours);

	// Merge hue/saturation with the thresholded plane and convert for display.
	cvMerge(origH, origS, out, NULL, temp);
	cvCvtColor( temp, temp, CV_HSV2RGB );

	every_contour(contours, temp);
	drawContour(temp, contours);
	SWAP(in,out);

	draw_subdiv(temp,subdiv, cvScalar(255,255,255,255));
		if(k==0)
		{
		// NOTE(review): this inverted copy of `in` is never displayed below
		// (only `temp` is shown); together with the commented-out k=1 this
		// looks like leftover experimentation — confirm before removing.
		cvNot(in,out);
		//k=1;
		}
		else{}
	cvClearMemStorage(trianglestore);
	//findcorners(origH,out);   //needs 32bit float image

	cvShowImage(OUT, temp);
}
Example #11
0
// Generates and returns the histogram of a GRAYSCALE image.
// Returns a newly allocated 257x100 single-channel image (caller must
// release it). Bars are scaled so the most frequent bin spans the full
// height, then the image is inverted (black bars on white).
IplImage* DrawHistogram(IplImage* img)
{
	CvSize imgSize = cvGetSize(img);

	// Holds the actual histogram image
	IplImage* ret = cvCreateImage(cvSize(257, 100), 8, 1);
	cvZero(ret);

	int freq[256] = {0};
	int max=0;

	// Count the frequency of every intensity value, tracking the maximum.
	for(int x=0;x<imgSize.width;x++)
	{
		for(int y=0;y<imgSize.height;y++)
		{
			int curr = (int)cvGetReal2D(img, y, x);
			freq[curr]++;
			if(freq[curr]>max)
				max = freq[curr];
		}
	}

	// BUG FIX: guard against division by zero for a zero-area input image.
	if(max == 0)
		max = 1;

	// Finally, draw the actual histogram (tallest bin = full height).
	for(int k=0;k<256;k++)
	{
		// BUG FIX: compute in float first — the original's 100*freq[k]
		// could overflow int for images larger than ~21 Mpixels.
		int value = (int)((100.0f*(float)freq[k])/(float)max);
		cvLine(ret, cvPoint(k, 100), cvPoint(k, 100-value), cvScalar(255,255,255));
	}
	
	cvNot(ret, ret);
	return ret;
}
Example #12
0
// Morphological unconstrained hit-or-miss transform (binary and grayscale
// images). If sebg is NULL, the complement of sefg is used as the
// background structuring element.
void lhMorpHMTU(const IplImage* src, IplImage* dst, IplConvKernel* sefg, IplConvKernel* sebg =NULL)
{
	assert(src != NULL && dst != NULL && src != dst && sefg!= NULL && sefg!=sebg);

	// BUG FIX: track whether we allocated sebg ourselves. The original
	// released sebg unconditionally at the end, destroying a structuring
	// element supplied (and still owned) by the caller.
	bool ownSebg = false;
	if (sebg == NULL)
	{
		sebg = lhStructuringElementNot(sefg);
		ownSebg = true;
	}
	
	IplImage*  temp = cvCreateImage(cvGetSize(src), 8, 1);
	IplImage*  mask = cvCreateImage(cvGetSize(src), 8, 1);
	cvZero(mask);

	// P106 (5.4): erosion by the foreground SE minus dilation by the
	// background SE, kept only where erosion > dilation; zero elsewhere.
	cvErode( src, temp, sefg);
	cvDilate(src, dst, sebg);
	cvCmp(temp, dst, mask, CV_CMP_GT);

	cvSub(temp, dst, dst, mask);
	cvNot(mask, mask);
	cvSet(dst, cvScalar(0), mask);

	cvReleaseImage(&mask);
	cvReleaseImage(&temp);

	// Only release the background SE if it was created here.
	if (ownSebg)
		cvReleaseStructuringElement(&sebg);
}
Example #13
0
// Morphological binary hit-or-miss transform. If sebg is NULL, the
// complement of sefg is used as the background structuring element.
void lhMorpHMTB(const IplImage* src, IplImage* dst, IplConvKernel* sefg, IplConvKernel* sebg =NULL)
{
	assert(src != NULL && dst != NULL && src != dst && sefg!= NULL && sefg!=sebg);

	// BUG FIX: track whether we allocated sebg ourselves. The original
	// released sebg unconditionally, destroying a caller-supplied
	// structuring element.
	bool ownSebg = false;
	if (sebg == NULL)
	{
		sebg = lhStructuringElementNot(sefg);
		ownSebg = true;
	}
	IplImage*  temp1 = cvCreateImage(cvGetSize(src), 8, 1);
	IplImage*  temp2 = cvCreateImage(cvGetSize(src), 8, 1);

	// P104 (5.2): erosion of the image by the foreground SE, ANDed with the
	// erosion of its complement by the background SE.
	cvErode( src, temp1, sefg);
	cvNot(src, temp2);
	cvErode( temp2, temp2, sebg);
	cvAnd(temp1, temp2, dst);

	cvReleaseImage(&temp1);
	cvReleaseImage(&temp2);

	// Only release the background SE if it was created here.
	if (ownSebg)
		cvReleaseStructuringElement(&sebg);
}
Example #14
0
void COpenCVMFCView::OnImageInvert()
{
	// Negate every pixel of the working image in place, then schedule a
	// repaint of the view.
	cvNot( workImg, workImg );

	Invalidate();
}
//--------------------------------------------------------------------------------
void ofxCvImage::invert(){
	// In-place bitwise negation of the pixel data; logs an error and does
	// nothing when the image buffer has not been allocated yet.
	if( bAllocated ){
		cvNot( cvImage, cvImage );
		flagImageChanged();
	} else {
		ofLog(OF_LOG_ERROR, "in invert, need to allocate image first");
	}
}
Example #16
0
//--------------------------------------------------------------------------------
void ofxCvImage::invert(){
	// Bitwise in-place negation of the pixel data. Requires an allocated
	// image; otherwise an error is logged and nothing happens.
	if( bAllocated ){
		cvNot( cvImage, cvImage );
		flagImageChanged();
		return;
	}
	ofLogError("ofxCvImage") << "invert(): image not allocated";
}
Example #17
0
static COMMAND_FUNC( do_ocv_not )
{
	OpenCV_Image *dst, *src;

	/* Prompt order matters: destination is picked first, then source. */
	dst = PICK_OCVI("destination image");
	src = PICK_OCVI("source image");

	/* Abort quietly when either pick failed. */
	if( src == NO_OPENCV_IMAGE || dst == NO_OPENCV_IMAGE ) return;

	/* Bitwise inversion of the source into the destination. */
	cvNot( src->ocv_image, dst->ocv_image );
}
Example #18
0
bool optimizeDepthMap()
{	
	// Fills the non-user area of the depth map (dImage) with the user's
	// average depth, Gaussian-smooths the map at 8-bit precision, then
	// zeroes the background again. uImage is the user-silhouette mask;
	// both are presumably module-level globals — TODO confirm.
	// Always returns true.
	cvErode(uImage,uImage,0,2);		//Smoothen the User Map as well
	cvDilate(uImage,uImage,0,2);
	CvScalar depthMean=cvAvg(dImage,uImage);							//Get the Average Depth Value of the User Pixels
	cvNot(uImage,uImage);												//Invert the user pixels to paint the rest of the image with average user depth									 
	//viewImage(dImage);
	cvSet(dImage,depthMean,uImage);										 
	IplImage* tempImage=cvCreateImage(dSize,IPL_DEPTH_8U,1);
	cvConvertScale(dImage,tempImage,1.0/256);	// compress to 8 bits for smoothing
	cvSmooth(tempImage,tempImage,CV_GAUSSIAN,7);//Perform Gaussian Smoothing, depth map is optimized.
	cvConvert(tempImage,dImage);
	cvScale(dImage,dImage,256);					// expand back to the original range
	cvSet(dImage,cvScalar(0),uImage);	
	//viewImage(dImage);
	//cvSmooth(dImage,dImage,CV_GAUSSIAN,gaussian_m,gaussian_n,gaussian_e);//Perform Gaussian Smoothing, depth map is optimized.
	cvNot(uImage,uImage);						// restore the mask to its original polarity
	cvReleaseImage(&tempImage);
	return true;
}
Example #19
0
// The smooth filter really needs the blur size as a param
void InvertFilter::kernel()
{
    // derived class responsible for allocating storage for filtered image
    if( !destination )
    {
        destination = cvCreateImage(cvSize(source->width,source->height), source->depth, source->nChannels);
        destination->origin = source->origin;  // same vertical flip as source
    }


    cvNot(source, destination);
}
// Captures numImag stereo calibration image pairs from the PointGrey
// camera. Press 'c' to capture a pair, 'q' to quit early. Returns an array
// of 2*numImag Mats (left/right interleaved); the caller owns it.
unique_ptr<Mat[]> CaliHelper::GetCalibrationImages(int numImag)
{
	char key = 'c';
	int capturedImages = 0;

	//LightCrafter lcr;
	//Mat whiteImage(684,608, CV_8UC1, Scalar(255));
	//lcr.ProjectImage(whiteImage);

	namedWindow( "Camera", CV_WINDOW_NORMAL  );

	PGCam pgcam;
	pgcam.Init(CAMERA_WIDTH,CAMERA_HEIGHT,CAMERA_OFFSET_X,CAMERA_OFFSET_Y);

	unique_ptr<Mat[]> caliImages(new Mat[numImag*2]);
	while(key!='q')
	{
		vector<IplImage*> image = pgcam.grabFrame();

		// BUG FIX: the original declared a fresh local `char key` here,
		// shadowing the loop-control variable, so pressing 'q' could never
		// terminate the loop. Assign to the outer variable instead.
		key = cvWaitKey(1);
		if(key == 'c')
		{
			if(capturedImages/2 == numImag)
				break;

			// copyTo deep-copies the frame data into the result array.
			Mat(image[0]).copyTo(caliImages[capturedImages]);
			capturedImages+=1;
			Mat(image[1]).copyTo(caliImages[capturedImages]);
			capturedImages+=1;

			// Show a color-inverted preview of the captured left frame.
			IplImage* view2=cvCloneImage(&(IplImage)Mat(image[0]));
			cvNot(view2,view2);
			imshow( "Camera", Mat(view2) );
			cvReleaseImage(&view2);

			// BUG FIX: the grabbed frames were never released in this
			// branch (the else branch below does release them), leaking two
			// images per capture.
			cvReleaseImage(&image[0]);
			cvReleaseImage(&image[1]);
		}
		else
		{
			cv::Mat im1, im2;
			cv::resize(Mat(image[0]), im1, cv::Size(800,400));
			cv::resize(Mat(image[1]), im2, cv::Size(800,400));
			//imshow( "Camera", image[0] );
			imshow("left", im1);
			imshow("right", im2);
			cvReleaseImage(&image[0]);
			cvReleaseImage(&image[1]);
		}
	}
	cvDestroyWindow("Camera");
  
	return caliImages;
}
Example #21
0
// define a trackbar callback: re-runs Canny edge detection with the current
// trackbar threshold (h) and shows the original image's pixels at edge
// locations. gray/edge/cedge/image/edge_thresh/wndname are file-scope
// globals set up elsewhere.
void on_trackbar(int h)
{
    // NOTE(review): the cvSmooth/cvNot results in `edge` are fully
    // overwritten by cvCanny below — they look like dead leftovers from the
    // OpenCV edge demo. Confirm before removing.
    cvSmooth( gray, edge, CV_BLUR, 3, 3, 0, 0 );
    cvNot( gray, edge );

    // Run the edge detector on grayscale (high threshold = 3x low, per the
    // common Canny heuristic)
    cvCanny(gray, edge, (float)edge_thresh, (float)edge_thresh*3, 3);
  
    cvZero( cedge );
    // copy edge points
    cvCopy( image, cedge, edge );

    cvShowImage(wndname, cedge);
}
Example #22
0
/* We take ownership of mask here.
 * Loads a grayscale mask image from the given filename and swaps it (plus
 * its precomputed inverse) into the filter under the object lock; the
 * previously installed mask filename and image are freed afterwards. On
 * load failure the mask is cleared (set to NULL). */
static void
gst_motiondetect_load_mask (StbtMotionDetect * filter, char* mask)
{
  char *oldMaskFilename = NULL;
  IplImage *oldMaskImage = NULL, *newMaskImage = NULL;

  if (mask) {
    newMaskImage = cvLoadImage (mask, CV_LOAD_IMAGE_GRAYSCALE);
    if (!newMaskImage) {
      /* Unfortunately OpenCV doesn't seem to provide any way of finding out
         why the image load failed, so we can't be more specific than FAILED: */
      GST_ELEMENT_WARNING (filter, RESOURCE, FAILED,
          ("OpenCV failed to load mask image"),
          ("While attempting to load mask '%s'", mask));
      GST_WARNING ("Couldn't load mask image: %s. error: %s",
          mask, g_strerror (errno));
      g_free (mask);
      mask = NULL;
    }
    /* NOTE(review): this runs even when the load above failed — confirm
       that is intended. */
    gst_motiondetect_check_mask_compability(filter);
  }

  /* Swap the new mask in under the lock; free the old one outside it. */
  GST_OBJECT_LOCK(filter);
  oldMaskFilename = filter->mask;
  filter->mask = mask;
  oldMaskImage = filter->cvMaskImage;
  filter->cvMaskImage = newMaskImage;

  /* Rebuild the cached inverted mask. */
  if (filter->cvInvertedMaskImage) {
    cvReleaseImage (&filter->cvInvertedMaskImage);
    filter->cvInvertedMaskImage = NULL;
  }
  if (filter->cvMaskImage) {
    filter->cvInvertedMaskImage = cvCloneImage (filter->cvMaskImage);
    cvNot(filter->cvMaskImage, filter->cvInvertedMaskImage);
  }
  GST_OBJECT_UNLOCK(filter);

  /* Release the replaced resources outside the lock. */
  cvReleaseImage (&oldMaskImage);
  g_free(oldMaskFilename);
}
Example #23
0
// Shows the source image and its negative side by side, blocking until a
// key is pressed. Always returns 0.
// NOTE(review): despite the original author's note about passing the image
// as const, this function RELEASES the caller's `image` before returning —
// callers must not use the pointer afterwards. Confirm this ownership
// transfer is intended.
int negative(IplImage* image)
{ 
	IplImage *dest = cvCloneImage(image);
	 
	cvNamedWindow("Original:", 1);
	cvShowImage("Original:", image);
	 
	cvNot(image, dest); //Create a negative image from source image
	cvNamedWindow("Negative:", 1);
	cvShowImage("Negative:", dest);
	 
	cvWaitKey(0); // block until any key press
	 
	cvDestroyWindow("Original:");
	cvReleaseImage(&image);

	cvDestroyWindow("Negative:");
	cvReleaseImage(&dest);

	return 0;
}
int main(){
    
    //initialize
    IplImage *image, *reverse;
    
    //load camera image
    CvCapture *video = cvCaptureFromCAM(-1);
    
    //create window
    cvNamedWindow("Original Video",0);
    cvNamedWindow("Reverse Video",0);
    
    //show camera image
    while(1){
        cvGrabFrame(video);
        image = cvRetrieveFrame(video);
        cvShowImage("Original Video", image);
        
        reverse = cvCreateImage(cvGetSize(image), image->depth, image->nChannels);
        
        //reverse
        cvNot(image, reverse);
        
        //wait key input
        if(cvWaitKey(10)>=0)
            break;
        
        reverse->origin = image->origin;
        cvShowImage("Reverse Video", reverse);
        
    }
    
    //release memory
    cvReleaseImage(&reverse);
    cvReleaseCapture(&video);
    cvDestroyWindow("Original Video");
    cvDestroyWindow("Reverse Video");
    
    return 0;
}
Example #25
0
// Builds a 3-channel binary mask from the effect image: grayscale ->
// threshold -> invert -> morphological cleanup (double closing, opening,
// extra erosion). Returns a newly allocated image the caller must release.
IplImage* crearMascaraBinarizada(IplImage* img_effect){
	IplImage *img_gray = cvCreateImage(cvSize(img_effect->width,img_effect->height),img_effect->depth,1);
	IplImage *img_binaria = cvCreateImage(cvSize(img_effect->width,img_effect->height),img_effect->depth,img_effect->nChannels);
	
	//Convert img_effect to grayscale
	cvCvtColor(img_effect,img_gray,CV_BGR2GRAY);

	//Binarize: pixels above 230 become 255, the rest 0
	cvThreshold(img_gray,img_gray,230,255,CV_THRESH_BINARY);

	//Negative
	cvNot(img_gray,img_gray);

	//Closing twice (two dilations + two erosions)
	cvDilate(img_gray,img_gray,NULL,2);
	cvErode(img_gray,img_gray,NULL,2);

	//Opening once (erosion + dilation)
	cvErode(img_gray,img_gray,NULL,1);
	cvDilate(img_gray,img_gray,NULL,1);

	//Extra erosion to shrink the mask slightly
	cvErode(img_gray,img_gray,NULL,2);
	
	//Expand the single-channel mask to 3 channels for the caller
	cvCvtColor(img_gray,img_binaria,CV_GRAY2BGR);

	// BUG FIX: the original never released img_gray, leaking one grayscale
	// buffer per call.
	cvReleaseImage(&img_gray);
	return img_binaria;
}
/* chain function
 * this function does the actual processing: runs Canny edge detection on
 * the input buffer and pushes the result downstream in a freshly
 * allocated buffer (GStreamer 1.0 map/unmap API).
 */
static GstFlowReturn
gst_edge_detect_chain (GstPad * pad, GstObject * parent, GstBuffer * buf)
{
  GstEdgeDetect *filter;
  GstBuffer *outbuf;
  GstMapInfo in_info;
  GstMapInfo out_info;

  filter = GST_EDGE_DETECT (parent);

  /* Map the input writable and wrap its pixels in the pre-allocated
   * IplImage header (no copy). */
  buf = gst_buffer_make_writable (buf);
  gst_buffer_map (buf, &in_info, GST_MAP_WRITE);
  filter->cvImage->imageData = (char *) in_info.data;

  cvCvtColor (filter->cvImage, filter->cvGray, CV_RGB2GRAY);
  /* NOTE(review): the cvSmooth/cvNot results in cvEdge are fully
   * overwritten by cvCanny below — apparently dead leftovers from the
   * OpenCV edge demo. Confirm before removing. */
  cvSmooth (filter->cvGray, filter->cvEdge, CV_BLUR, 3, 3, 0, 0);
  cvNot (filter->cvGray, filter->cvEdge);
  cvCanny (filter->cvGray, filter->cvEdge, filter->threshold1,
      filter->threshold2, filter->aperture);

  cvZero (filter->cvCEdge);
  if (filter->mask) {
    /* Masked mode: copy original colors through at edge locations. */
    cvCopy (filter->cvImage, filter->cvCEdge, filter->cvEdge);
  } else {
    /* Plain mode: show the edge map itself expanded to RGB. */
    cvCvtColor (filter->cvEdge, filter->cvCEdge, CV_GRAY2RGB);
  }

  /* Copy the result into a new buffer, carry over the input's metadata,
   * unmap/unref the input and push downstream. */
  outbuf = gst_buffer_new_and_alloc (filter->cvCEdge->imageSize);
  gst_buffer_copy_into (outbuf, buf, GST_BUFFER_COPY_METADATA, 0, -1);

  gst_buffer_map (outbuf, &out_info, GST_MAP_WRITE);
  memcpy (out_info.data, filter->cvCEdge->imageData,
      gst_buffer_get_size (outbuf));

  gst_buffer_unmap (buf, &in_info);
  gst_buffer_unmap (outbuf, &out_info);
  gst_buffer_unref (buf);
  return gst_pad_push (filter->srcpad, outbuf);
}
Example #27
0
// Renders the robot self-occlusion mask for the given timestamp and, when
// enabled, publishes it as a mono image on mask_publisher. Uses the
// module-level robmod, ipl_maskBGRA/ipl_maskBW buffers, and the `inverted`
// and `publish_mask` flags.
void calc_and_publish_BWMask(const ros::Time time_stamp, const std::string frame_id)
{
    robmod->updateRobotLinks(time_stamp);
#ifdef USE_GLUT_RENDERING
    glutPostRedisplay();
    glutMainLoopEvent();
#else
    displayFunc();
#endif // USE_GLUT_RENDERING
    // Collapse the rendered BGRA buffer to grayscale and flip it vertically
    // (presumably because the GL framebuffer is bottom-up — TODO confirm).
    cvCvtColor(ipl_maskBGRA, ipl_maskBW, CV_BGRA2GRAY);
    cvFlip(ipl_maskBW);
    // Optionally invert the mask polarity.
    if (inverted)
        cvNot(ipl_maskBW,ipl_maskBW);


    if(publish_mask){
        sensor_msgs::ImagePtr img_msg = sensor_msgs::CvBridge::cvToImgMsg(ipl_maskBW);
        img_msg->header.frame_id = frame_id;
        img_msg->header.stamp = time_stamp;
        mask_publisher.publish(img_msg);
    }

}
Example #28
0
static IplImage*
get_convolution (const IplImage *image,
                 const IplImage *filter)
{
  /* Convolves `image` with `filter` via the frequency domain: both inputs
   * are inverted (cvNot) and scaled to [0,1], zero-padded to an optimal
   * DFT size, transformed, multiplied pointwise, and transformed back.
   * Returns a newly allocated 32-bit float image the size of `image`; the
   * caller owns it. */
  CvSize dft_size;
  IplImage *reversed_image, *reversed_filter;
  IplImage *dft_image, *dft_filter, *dft_res;
  IplImage *res;

  /* Pad to sizes the DFT handles efficiently (>= sum of extents - 1 to
   * avoid circular-convolution wrap-around). */
  dft_size.height = cvGetOptimalDFTSize(image->height + filter->height - 1);
  dft_size.width = cvGetOptimalDFTSize(image->width + filter->width - 1);

  res = cvCreateImage(cvSize(image->width,
                             image->height),
                      IPL_DEPTH_32F,
                      N_CHANNELS_GRAY);
  reversed_image = cvCreateImage(cvGetSize(image),
                                 IPL_DEPTH_8U,
                                 N_CHANNELS_GRAY);
  reversed_filter = cvCreateImage(cvGetSize(filter),
                                  IPL_DEPTH_8U,
                                  N_CHANNELS_GRAY);

  /* Invert intensities before transforming — presumably so dark strokes
   * carry the strong signal; TODO confirm against the caller's convention. */
  cvNot(image, reversed_image);
  cvNot(filter, reversed_filter);

  dft_image = cvCreateImage(dft_size,
                            IPL_DEPTH_32F,
                            N_CHANNELS_GRAY);
  cvSet(dft_image, cvScalar(0, 0, 0, 0), NULL);
  dft_filter = cvCreateImage(dft_size,
                             IPL_DEPTH_32F,
                             N_CHANNELS_GRAY);
  cvSet(dft_filter, cvScalar(0, 0, 0, 0), NULL);

  /* Copy the inverted inputs into the top-left corner of the zeroed padded
   * buffers, rescaled from [0,255] to [0,1]. */
  cvSetImageROI(dft_image, cvRect(0, 0,
                                  reversed_image->width,
                                  reversed_image->height));
  cvSetImageROI(dft_filter, cvRect(0, 0,
                                   reversed_filter->width,
                                   reversed_filter->height));
  double scaling_factor = 1.0/255;
  cvConvertScale(reversed_image, dft_image, scaling_factor, 0);
  cvConvertScale(reversed_filter, dft_filter, scaling_factor, 0);
  cvResetImageROI(dft_image);
  cvResetImageROI(dft_filter);


  cvDFT(dft_image, dft_image, CV_DXT_FORWARD, image->height);
  cvDFT(dft_filter, dft_filter, CV_DXT_FORWARD, filter->height);

  dft_res = cvCreateImage(dft_size,
                          IPL_DEPTH_32F,
                          N_CHANNELS_GRAY);

  /* Pointwise spectrum product == spatial-domain convolution. */
  cvMulSpectrums(dft_image, dft_filter, dft_res, 0);

  cvDFT(dft_res, dft_res, CV_DXT_INVERSE, res->height);
  /* Crop the valid top-left region back out of the padded result. */
  cvSetImageROI(dft_res, cvRect(0, 0, res->width, res->height));
  cvCopy(dft_res, res, NULL);
  cvResetImageROI(dft_res);

  cvReleaseImage(&reversed_filter);
  cvReleaseImage(&reversed_image);
  cvReleaseImage(&dft_image);
  cvReleaseImage(&dft_filter);
  cvReleaseImage(&dft_res);

  return res;
}
Example #29
0
// Blends an effect image onto the video frame.
// r: face rectangle from the detector; escalamiento: scale factor so the
// effect is sized proportionally to r; despX/despY: offsets of the effect
// relative to r's origin. The result is written back into `frame`.
void fusionImagenes(IplImage* frame, IplImage* img_effect,CvRect* r, double escalamiento, double despX,double despY){
	IplImage *cloneFrame,*imgObj,*imgObj2,*mascara,*mascara2,*resize_imgEffect,*resize_mascara;
	
	//Binarized mask of the effect image (newly allocated; released below)
	IplImage* mascara_imgEffect = crearMascaraBinarizada(img_effect);
	
	//Clone of the captured frame
	cloneFrame = cvCloneImage(frame);
	
	//Frame-sized canvases: one for the effect image, one for its translated copy
	imgObj = cvCreateImage(cvSize(frame->width,frame->height),frame->depth,frame->nChannels);
	imgObj2 = cvCreateImage(cvSize(frame->width,frame->height),frame->depth,frame->nChannels);
	
	//White background for the effect canvases
	cvSet(imgObj, cvScalar(255,255,255,0),NULL);
	cvSet(imgObj2, cvScalar(255,255,255,0),NULL);

	//Frame-sized canvases for the mask and its translated copy
	mascara = cvCreateImage(cvSize(frame->width,frame->height),frame->depth,frame->nChannels);
	mascara2 = cvCreateImage(cvSize(frame->width,frame->height),frame->depth,frame->nChannels);
	
	//Black background for the mask canvases
	cvSet(mascara, cvScalar(0,0,0,0),NULL);
	cvSet(mascara2, cvScalar(0,0,0,0),NULL);

	//New size for the effect, proportional to the detected rectangle
	int nuevoWidth = (int)(escalamiento*img_effect->width*r->width);
	int nuevoHeight = (int)(escalamiento*img_effect->height*r->height);

	if(nuevoWidth > frame->width) nuevoWidth = frame->width;	//clamp to the frame width
	if(nuevoHeight > frame->height) nuevoHeight = frame->height;	//clamp to the frame height

	//Resize the effect image and its mask to the new dimensions
	resize_imgEffect = cvCreateImage(cvSize(nuevoWidth,nuevoHeight),img_effect->depth,img_effect->nChannels);
	cvResize(img_effect,resize_imgEffect,1);

	resize_mascara = cvCreateImage(cvSize(nuevoWidth,nuevoHeight),mascara_imgEffect->depth,mascara_imgEffect->nChannels);
	cvResize(mascara_imgEffect,resize_mascara,1);

	cvShowImage("imgObj-antes",resize_imgEffect);cvShowImage("mascara-antes",resize_mascara);
	//Paste the resized effect at the canvas origin via ROI
	cvSetImageROI(imgObj, cvRect(0,0,resize_imgEffect->width,resize_imgEffect->height));
	cvCopy(resize_imgEffect,imgObj);
	cvResetImageROI(imgObj);
	cvShowImage("copy",imgObj);

	//Same for the mask
	cvSetImageROI(mascara, cvRect(0,0,resize_mascara->width,resize_mascara->height));
	cvCopy(resize_mascara,mascara);
	cvResetImageROI(mascara);
	cvShowImage("MASK-ROI",mascara);
	
	//Translation: face-rectangle origin plus the requested offsets
	int dx = r->x+despX;
	int dy = r->y+despY;

	//Affine matrix for a pure translation by (dx, dy)
	CvMat *M = cvCreateMat( 2, 3, CV_32FC1);
	cvmSet(M,0,0,1);
	cvmSet(M,0,1,0);
	cvmSet(M,1,0,0);
	cvmSet(M,1,1,1);
	cvmSet(M,0,2,dx); //pixels shifted along the x axis
	cvmSet(M,1,2,dy); //pixels shifted along the y axis
	
	cvWarpAffine (imgObj, imgObj2, M, CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS,cvScalarAll(255)); //translated effect (white fill)
	cvWarpAffine (mascara,mascara2, M, CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS,cvScalarAll(0)); //translated mask (black fill)

	cvShowImage("imgObj-despues",imgObj2);cvShowImage("mascara-despues",mascara2);
	cvAnd(imgObj2,mascara2,imgObj2,0);	//cut the effect out using the mask
	cvShowImage("AND imgObj2-mascara2",imgObj2);
	cvNot(mascara2,mascara2);		//build the inverse mask
	cvShowImage("NOT mascara2",mascara2);
	cvShowImage("CLoNE FRAME",cloneFrame);
	cvAnd(cloneFrame,mascara2,cloneFrame,0);	//clear the area where the effect will go
	cvShowImage("AND cloneFrame-mascara2",cloneFrame);
	cvOr(cloneFrame,imgObj2,frame,0);		//combine both images into the output frame

	// BUG FIX: the original left this cleanup commented out, leaking eight
	// images and the affine matrix on every call.
	cvReleaseImage(&cloneFrame);
	cvReleaseImage(&imgObj);
	cvReleaseImage(&imgObj2);
	cvReleaseImage(&mascara_imgEffect);
	cvReleaseImage(&mascara);
	cvReleaseImage(&mascara2);
	cvReleaseImage(&resize_imgEffect);
	cvReleaseImage(&resize_mascara);
	cvReleaseMat(&M);
}
Example #30
0
//static method for handling mouse events 
void Image_OP::my_Mouse_Handler(int events, int x, int y, int flags, void* param)
{       
	    
    	IplImage *img_orig;	
		// Operations are done on a cloned image
		// so original settings can be restored 
		// if operations need to be repeated
        IplImage *img_clone;
		img_orig = (IplImage*) param;
		int x_ROI =0, y_ROI =0 , wi_ROI =0, he_ROI =0; 
		
   switch(events)
   {//begin switch
	     // event, when user presses left button 
         case CV_EVENT_LBUTTONDOWN:
           { 
             // saves mouse coordinates (x,y) in static (CvPoint) variable 
			 // (by typecasting with cvPoint)
             my_point = cvPoint(x, y);
		   }
		 break;
    
         // user moves the mouse
         case CV_EVENT_MOUSEMOVE:
	       {
		   // user moves mouse and presses left button
	       if (flags == CV_EVENT_FLAG_LBUTTON )
              {
		       // makes a copy of original image 
			   // over which the mouse moves 
               img_clone = cvCloneImage(img_orig);
       
			   // draws green (see CV_RGB(0-255,0-255,0-255) rectangle 
			   // using point coordinates from 
			   // CV_EVENT_LBUTTONDOWN as origin (= upper left corner)
			   // and coordinates of this event as bottom right corner
               cvRectangle(img_clone,my_point,cvPoint(x , y ),
				    CV_RGB(0,255, 0),1, 8, 0);
			   // shows image with rectangle drawn on it
               cvShowImage("choose area", img_clone);
              }
	       }
         break;

         // user releases left button 
         case CV_EVENT_LBUTTONUP: 
	       {
	  
            img_clone = cvCloneImage(img_orig);

			// checks position of starting point
			// stored in CV_EVENT_LBUTTONWDOWN)
			// in relation to end point, in order
			// to avoid negative values

			if(my_point.x > x)
			{
			  x_ROI = x;
			  wi_ROI = my_point.x -x;
			}
			else 
			{
			  x_ROI = my_point.x;
			  wi_ROI = x - my_point.x ;
			}

			if(my_point.y > y)
			{
			  y_ROI = y;
			  he_ROI = my_point.y -y;
			}
			else 
			{
			  y_ROI = my_point.y;
			  he_ROI = y - my_point.y ;
			}

			//stores coordinates of Region of Interest
			// in static variable
			my_ROI.x = x_ROI;
			my_ROI.y = y_ROI;
			my_ROI.width = wi_ROI;
			my_ROI.height = he_ROI;

            // set region of interest; for cvNot operation below
            cvSetImageROI(img_clone,cvRect(x_ROI,
                y_ROI,wi_ROI, he_ROI));
       
		    // inverts color information of image (bitwise inversion of array)
		    // makes selected area clearly visible
            cvNot(img_clone, img_clone);    
            
			// resets region of interest
			cvResetImageROI(img_clone);

			cvShowImage("choose area", img_clone);
			
            
	        }
	     break;
     }//end switch
	
}//end mousehandler