コード例 #1
0
// GStreamer in-place transform callback: apply Retinex color enhancement
// to each RGBA/BGRA buffer flowing through the element.
// Returns GST_FLOW_OK unconditionally.
static GstFlowReturn gst_retinex_transform_ip(GstBaseTransform * btrans, GstBuffer * gstbuf) 
{
  GstRetinex *retinex = GST_RETINEX (btrans);

  // serialize processing against concurrent property/state changes
  GST_RETINEX_LOCK (retinex);

  //////////////////////////////////////////////////////////////////////////////
  // get image data from the input, which is RGBA or BGRA
  // (pFrame's header wraps the GStreamer buffer memory directly — no copy,
  // so the merge at the end writes straight back into the stream buffer)
  retinex->pFrame->imageData = (char*)GST_BUFFER_DATA(gstbuf);
  // drop the alpha channel: split the 4-ch input, re-merge 3 channels
  cvSplit(retinex->pFrame, 
          retinex->ch1, retinex->ch2, retinex->ch3, NULL        );
  cvMerge(retinex->ch1, retinex->ch2, retinex->ch3, NULL, 
          retinex->pFrame2);

  // fixed parameters passed to the Retinex() helper
  double sigma = 14.0;
  int gain = 128;
  int offset = 128;
  Retinex( retinex->pFrame2, sigma, gain, offset );
  cvSplit(retinex->pFrame2,  retinex->ch1, retinex->ch2, retinex->ch3, NULL);

  //////////////////////////////////////////////////////////////////////////////
  // restore alpha channel from input
  // (NOTE(review): pFrameA is not written in this function — presumably it
  // is filled elsewhere in the element; confirm before relying on it)
  cvMerge(retinex->ch1, retinex->ch2, retinex->ch3, retinex->pFrameA, 
          retinex->pFrame);
  

  GST_RETINEX_UNLOCK (retinex);  
  
  return GST_FLOW_OK;
}
コード例 #2
0
ファイル: cvUtilProCam.cpp プロジェクト: Pacmanfan/MultiScan
// Shade a grayscale image using the "winter" colormap (similar to Matlab's). 
// src:  single-channel intensity image
// dst:  3-channel output (written in place through cvMerge)
// mask: optional mask limiting where the blue ramp is computed
void colorizeWinter(IplImage* src, IplImage*& dst, IplImage* mask){

	// Create an increasing linear-ramp in the green channel.
	// (cvMerge with NULL slots writes only the given channel and leaves
	// the other channels of dst untouched.)
	cvMerge(NULL, src, NULL, NULL, dst);

	// Create a decreasing linear-ramp in the blue channel: blue = 255 - src,
	// computed only where mask is non-zero.
	IplImage* blue = cvCloneImage(src);
	cvSubRS(src, cvScalar(255.0), blue, mask);
	cvMerge(blue, NULL, NULL, NULL, dst);
	
	// NOTE(review): the red channel of dst is never written here —
	// presumably dst arrives zero-initialized; confirm against callers.

	// Release allocated resources.
	cvReleaseImage(&blue);
}
コード例 #3
0
ファイル: bmodel.cpp プロジェクト: grafin/Diplom
// Frequency-domain Wiener filtering of _tempImageSrc, per color channel.
// r, sigma parameterize the kernel built by create_kernel(); S is the
// constant passed to wiener_filter_chanel (presumably the noise-to-signal
// term of the Wiener formula — confirm in wiener_filter_chanel).
// Result lands in _tempImageDst; temp images are refreshed at the end.
void BModel::wiener_filter(const int r, const double sigma, const double S)
{
    // real-valued planes (1 channel each)
    IplImage *reKernel = cvCreateImage(cvGetSize(_tempImageSrc), IPL_DEPTH_64F, 1);
    // NOTE(review): `image` is allocated with 4 channels but only 3 planes
    // are ever split out of / merged into it below — verify whether
    // _tempImageSrc is RGBA or this should be a 3-channel image.
    IplImage *image = cvCreateImage(cvGetSize(_tempImageSrc), IPL_DEPTH_64F, 4);
    IplImage *reRImage = cvCreateImage(cvGetSize(_tempImageSrc), IPL_DEPTH_64F, 1);
    IplImage *reGImage = cvCreateImage(cvGetSize(_tempImageSrc), IPL_DEPTH_64F, 1);
    IplImage *reBImage = cvCreateImage(cvGetSize(_tempImageSrc), IPL_DEPTH_64F, 1);

    // 2-channel images hold interleaved complex data (re, im)
    IplImage *kernel = cvCreateImage(cvGetSize(_tempImageSrc), IPL_DEPTH_64F, 2);
    IplImage *rImage = cvCreateImage(cvGetSize(_tempImageSrc), IPL_DEPTH_64F, 2);
    IplImage *gImage = cvCreateImage(cvGetSize(_tempImageSrc), IPL_DEPTH_64F, 2);
    IplImage *bImage = cvCreateImage(cvGetSize(_tempImageSrc), IPL_DEPTH_64F, 2);

    IplImage *imaginary = cvCreateImage(cvGetSize(_tempImageSrc), IPL_DEPTH_64F, 1);

    cvZero(imaginary);
    cvZero(reKernel);
    create_kernel(r, sigma, reKernel);
    // normalize 8-bit source into [0,1] doubles
    cvConvertScale(_tempImageSrc, image, 1/255.);
    cvSplit(image, reRImage, reGImage, reBImage, 0);

    // build complex images with zero imaginary parts
    cvMerge(reKernel, imaginary, 0, 0, kernel);
    cvMerge(reRImage, imaginary, 0, 0, rImage);
    cvMerge(reGImage, imaginary, 0, 0, gImage);
    cvMerge(reBImage, imaginary, 0, 0, bImage);

    // filter each channel, then extract the real part of the result
    wiener_filter_chanel(rImage, kernel, S);
    cvSplit(rImage, reRImage, imaginary, 0, 0);
    wiener_filter_chanel(gImage, kernel, S);
    cvSplit(gImage, reGImage, imaginary, 0, 0);
    wiener_filter_chanel(bImage, kernel, S);
    cvSplit(bImage, reBImage, imaginary, 0, 0);

    // recombine channels and rescale back to [0,255]
    cvMerge(reRImage, reGImage, reBImage, 0, image);
    cvConvertScale(image, _tempImageDst, 255);
    // undo the spatial shift introduced by the (presumably uncentered) kernel
    remap_image(_tempImageDst, -r);
    change_filt_image();
    create_temp_image(_srcImage);

    cvReleaseImage(&reKernel);
    cvReleaseImage(&image);
    cvReleaseImage(&reRImage);
    cvReleaseImage(&reGImage);
    cvReleaseImage(&reBImage);
    cvReleaseImage(&kernel);
    cvReleaseImage(&rImage);
    cvReleaseImage(&gImage);
    cvReleaseImage(&bImage);
    cvReleaseImage(&imaginary);
}
コード例 #4
0
// Adjust hue, saturation, and value of src (BGR, 8-bit) in place.
// Two passes: hue and value are adjusted on a float HSV conversion,
// saturation on a second 8-bit pass via convertImageRGBtoHSV/HSVtoRGB.
// src is released and replaced with a newly allocated image.
void adjustHSV(IplImage *&src, int HuePosition, int SaturationPosition, int ValuePosition)
{
	int Hue = HuePosition;
	double Saturation = SaturationPosition * 2.55;
	double Value = ValuePosition / 100.;

	//create float image
	IplImage *temp = cvCreateImage(cvGetSize(src), IPL_DEPTH_32F, src->nChannels);
	cvConvertScale(src, temp, 1.0/255.0, 0);
	//split 
	IplImage* floatingH = cvCreateImage( cvGetSize(src), IPL_DEPTH_32F, 1 );
	IplImage* floatingS = cvCreateImage( cvGetSize(src), IPL_DEPTH_32F, 1 );
	IplImage* floatingV = cvCreateImage( cvGetSize(src), IPL_DEPTH_32F, 1 );
	cvCvtColor(temp, temp, CV_BGR2HSV);//color convert (in place)
	cvSplit( temp, floatingH, floatingS, floatingV, NULL);

	//adjust
	// NOTE(review): on float images cvCvtColor produces H in degrees while
	// S and V stay in [0,1]; Hue is added as a raw slider value and Value
	// as a fraction — confirm the intended units against the UI sliders.
	cvAddS(floatingH, cvScalarAll(Hue), floatingH);
	cvAddS(floatingV, cvScalarAll(Value), floatingV);

	//merge
	cvZero(temp);
	cvMerge(floatingH, floatingS, floatingV, NULL, temp);
	cvCvtColor(temp, temp, CV_HSV2BGR);

	//save: scale back to 8-bit into src
	cvConvertScale( temp, src, 255, 0 );

	// second pass: saturation adjustment in 8-bit HSV space
	IplImage *HSV = convertImageRGBtoHSV(src);
	IplImage *H = cvCreateImage(cvGetSize(src), src->depth, 1);
	IplImage *S = cvCreateImage(cvGetSize(src), src->depth, 1);
	IplImage *V = cvCreateImage(cvGetSize(src), src->depth, 1);

	cvSplit(HSV, H, S, V, 0);
	cvAddS(S, cvScalarAll(Saturation), S);

	cvMerge(H, S, V, 0, HSV);

	// replace src with the converted result (caller keeps ownership)
	cvReleaseImage(&src);
	src = convertImageHSVtoRGB(HSV);

	cvReleaseImage(&HSV);
	cvReleaseImage(&H);
	cvReleaseImage(&S);
	cvReleaseImage(&V);
	cvReleaseImage(&temp);
	cvReleaseImage(&floatingH);
	cvReleaseImage(&floatingS);
	cvReleaseImage(&floatingV);
}//end HSV
コード例 #5
0
/* Return a new 3-channel image containing only the circular region of
 * src centred at c with radius r; everything outside the circle is
 * black.  Caller owns the returned image and must cvReleaseImage it. */
IplImage* crop(IplImage* src, CvPoint c, int r){
    IplImage* res = cvCreateImage(cvGetSize(src), 8, 3);
    IplImage* roi = cvCreateImage(cvGetSize(src), 8, 1);

    /* prepare the 'ROI' mask: a filled white circle on black
     * (any shape could be drawn here) */
    cvZero(roi);
    cvCircle(
        roi,
        c,
        r,
        CV_RGB(255, 255, 255),
        -1, 8, 0
    );

    /* extract subimage: copy src into res only where the mask is set */
    cvAnd(src, src, res, roi);

    /* zero everything outside the circle — res was never initialized
     * there, since the masked cvAnd above wrote only inside the circle */
    IplImage* roi_C3 = cvCreateImage(cvGetSize(src), 8, 3);
    cvMerge(roi, roi, roi, NULL, roi_C3);
    cvAnd(res, roi_C3, res, NULL);

    /* fix: the original leaked both mask images */
    cvReleaseImage(&roi);
    cvReleaseImage(&roi_C3);

    return res;
}
コード例 #6
0
/* Histogram-equalize img in place.  A single-channel image is equalized
 * directly; a 3-channel image is split, each plane equalized on its
 * own, and the planes merged back.  Other channel counts are ignored. */
void equalize_image(IplImage *img)
{
	if (img->nChannels == 1)
	{
		cvEqualizeHist(img, img);
		return;
	}

	if (img->nChannels != 3)
		return;

	IplImage *plane[3];
	int i;

	for (i = 0; i < 3; i++)
		plane[i] = cvCreateImage(cvGetSize(img), 8, 1);

	cvSplit(img, plane[0], plane[1], plane[2], NULL);

	for (i = 0; i < 3; i++)
		cvEqualizeHist(plane[i], plane[i]);

	cvMerge(plane[0], plane[1], plane[2], NULL, img);

	for (i = 0; i < 3; i++)
		cvReleaseImage(&plane[i]);
}
コード例 #7
0
/* Read one planar YUV 4:2:0 frame from cap->fin: full-resolution luma
 * plus two quarter-size chroma planes.  The chroma planes are upsampled
 * (bicubic) and the result merged into cap->ycrcb.
 * Returns YUV_OK on success, YUV_EOF when no data remains, or
 * YUV_IO_ERROR on a short read. */
enum YUV_ReturnValue
	YUV_read(struct YUV_Capture *cap)
{
		const size_t luma = cap->width * cap->height;
		const size_t chroma = luma / 4;
		size_t got;

		got = fread(cap->y->imageData, sizeof(uint8_t), luma, cap->fin);
		if (got == 0)
			return YUV_EOF;
		if (got != luma)
			return YUV_IO_ERROR;

		got = fread(cap->cb_half->imageData, sizeof(uint8_t), chroma, cap->fin);
		if (got != chroma)
			return YUV_IO_ERROR;

		got = fread(cap->cr_half->imageData, sizeof(uint8_t), chroma, cap->fin);
		if (got != chroma)
			return YUV_IO_ERROR;

		/* upsample the subsampled chroma planes to full resolution */
		cvResize(cap->cb_half, cap->cb, CV_INTER_CUBIC);
		cvResize(cap->cr_half, cap->cr, CV_INTER_CUBIC);
		cvMerge(cap->y, cap->cr, cap->cb, NULL, cap->ycrcb);

		return YUV_OK;
	}
コード例 #8
0
ファイル: main.c プロジェクト: ntavish/tri
// Per-frame rendering callback: rebuild the Delaunay subdivision, run
// the blur -> threshold -> contour pipeline on the global work images,
// composite the result over the HSV planes, and display it.
// Operates entirely on module-level globals (storage, subdiv, in/out,
// origH/origS/origV, temp, contours, k); `dummy` is unused.
void draw(int dummy)
{
	//delaunay: reset the triangle storage and start a fresh subdivision
	cvClearMemStorage(storage);
	subdiv=cvCreateSubdivDelaunay2D(rect,trianglestore);

	// blur the V plane, then threshold and extract contours
	// (SWAP exchanges the global in/out image pointers)
	blur(origV, out);
	SWAP(in,out);
	thresh(in, out);
	findContours(out, storage, &contours);

	// recombine H and S with the processed plane as V, back to RGB
	cvMerge(origH, origS, out, NULL, temp);
	cvCvtColor( temp, temp, CV_HSV2RGB );

	every_contour(contours, temp);
	drawContour(temp, contours);
	SWAP(in,out);

	draw_subdiv(temp,subdiv, cvScalar(255,255,255,255));
		// NOTE(review): `k` is never set to 1 (assignment commented out),
		// so this inversion runs every frame — confirm that is intended
		if(k==0)
		{
		cvNot(in,out);
		//k=1;
		}
		else{}
	cvClearMemStorage(trianglestore);
	//findcorners(origH,out);   //needs 32bit float image

	cvShowImage(OUT, temp);
}
コード例 #9
0
// Load the image at csInputPath, optionally desaturate it, apply the
// RGB / HSV / brightness-contrast adjustments from `values`
// (R, G, B, H, S, V, brightness, contrast in values[0..7]), and save
// the result to csOutputPath.
// Returns false if the input image cannot be loaded, true on success.
bool _stdcall customizeImage(LPWSTR csInputPath, LPWSTR csOutputPath, int *values, bool isGray)
{
	char inputPath[SIZE] = "";
	WideCharToMultiByte(950, 0, csInputPath, -1, inputPath, SIZE, NULL, NULL);//wchar_t * to char
	char outputPath[SIZE] = "";
	WideCharToMultiByte(950, 0, csOutputPath, -1, outputPath, SIZE, NULL, NULL);//wchar_t * to char *

	IplImage *img = cvLoadImage(inputPath, 1);

	if(!img)
		return false;

	if(isGray)
	{
		// desaturate: gray plane replicated into all three channels
		IplImage *gray = cvCreateImage(cvGetSize(img), img->depth, 1);
		cvCvtColor(img, gray, CV_BGR2GRAY);
		cvMerge(gray, gray, gray, 0, img);
		cvReleaseImage(&gray);
	}//end if

	adjustRGB(img, values[0], values[1], values[2]);
	adjustHSV(img, values[3], values[4], values[5]);
	adjustBrightnessContrast(img, values[6], values[7]);

	cvSaveImage(outputPath, img);
	cvReleaseImage(&img);

	return true;
	// fix: removed the unreachable trailing `return false;` the original
	// carried after the if/else (both branches already returned)
}//end combineImage
コード例 #10
0
/* Apply ocv_histogramac1 to each channel of a 1- or 3-channel 8-bit
 * image and return a newly allocated result.  Returns NULL (after
 * logging via present) when image is NULL, and NULL for unsupported
 * channel counts.  Caller owns the returned image. */
IplImage *ocv_histogramac(IplImage *image) {
	if (!image) { present(1, "!image"); return NULL; }

	if (image->nChannels == 1)
		return ocv_histogramac1(image);

	if (image->nChannels != 3)
		return NULL;

	IplImage *in[3], *proc[3];
	int i;

	for (i = 0; i < 3; i++)
		in[i] = cvCreateImage(cvGetSize(image), IPL_DEPTH_8U, 1);
	cvSplit(image, in[0], in[1], in[2], NULL);

	/* process each plane independently, freeing inputs as we go */
	for (i = 0; i < 3; i++) {
		proc[i] = ocv_histogramac1(in[i]);
		cvReleaseImage(&in[i]);
	}

	IplImage *out = cvCreateImage(cvGetSize(image), IPL_DEPTH_8U, 3);
	cvMerge(proc[0], proc[1], proc[2], NULL, out);
	for (i = 0; i < 3; i++)
		cvReleaseImage(&proc[i]);

	return out;
}
コード例 #11
0
ファイル: LightSet.cpp プロジェクト: cJeek/homeServiceRobot
/// <summary>
/// Illumination normalization:
/// 1. convert the color space to HSV;
/// 2. shift the V channel so its mean equals `threshold`;
/// 3. convert back from HSV to RGB.
/// </summary>
void CLightSet::LightNormalization(IplImage* src, IplImage* dst, int threshold){
	ASSERT(src->nChannels==3);
	// convert color space
	cvCvtColor(src,dst,CV_RGB2HSV);
	// split into single-channel planes
	IplImage* imgChannel[3] = { 0, 0, 0 };  

	for (int i=0;i<dst->nChannels;i++)
	{
		imgChannel[i] = cvCreateImage(cvGetSize( dst ), IPL_DEPTH_8U, 1);
	}

	cvSplit(dst, imgChannel[0], imgChannel[1], imgChannel[2],0);

	// shift V so its average becomes `threshold`
	CvScalar avg=cvAvg(imgChannel[2]);
	cvCvtScale(imgChannel[2],imgChannel[2],1.0,threshold-avg.val[0]);
	// BUG FIX: the original merged into `src`, so the adjusted V channel
	// never reached `dst` — the HSV2RGB conversion below ran on the
	// unmodified image and `src` was clobbered with raw HSV data.
	cvMerge( imgChannel[0], imgChannel[1], imgChannel[2], 0, dst );

	cvCvtColor(dst,dst,CV_HSV2RGB);

	for (int i=0;i<dst->nChannels;i++)
	{
		cvReleaseImage(&imgChannel[i] ); 
	}
}
コード例 #12
0
// Equalize the histogram of every channel of sourceImage and return the
// result as a new 3-channel Mat (empty Mat if the source is invalid).
// Goes through the legacy IplImage API, as the original did.
Mat initModify::histogramEqualization(Mat & sourceImage) {
	// Bind an IplImage header to the Mat's data (no copy).  The original
	// took the address of a temporary (`&IplImage(sourceImage)`), which
	// is ill-formed C++; use a named header instead.
	IplImage header = IplImage(sourceImage);
	IplImage * src = &header;
	IplImage * imgChannel[4] = { 0, 0, 0, 0 };
	IplImage * dist = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 3);

	for (int i = 0; i < src->nChannels; i++) {
		imgChannel[i] = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
	}

	// split all the channels (R, G, B, A); unused slots stay NULL
	cvSplit(src, imgChannel[0], imgChannel[1], imgChannel[2], imgChannel[3]);
	for (int i = 0; i < dist->nChannels; i++) {
		cvEqualizeHist(imgChannel[i], imgChannel[i]);
	}
	// merge all the channels
	cvMerge(imgChannel[0], imgChannel[1], imgChannel[2], imgChannel[3], dist);
	Mat resultImage = cvarrToMat(dist, true);   // deep copy before release

	// fix: the original leaked every channel plane it allocated
	for (int i = 0; i < src->nChannels; i++) {
		cvReleaseImage(&imgChannel[i]);
	}
	cvReleaseImage(&dist);
	return resultImage;
}
コード例 #13
0
ファイル: achesscorners.cpp プロジェクト: JackJone/opencv
/* Visualize chessboard detection results: draw the detected corners (v,
 * red) and the reference corners (u, green) on a color copy of `gray`,
 * overlay cvDrawChessboardCorners, and show the result in a window
 * until a key is pressed. */
void show_points( IplImage* gray, CvPoint2D32f* u, int u_cnt, CvPoint2D32f* v, int v_cnt,
                  CvSize etalon_size, int was_found )
{
    CvSize size;
    int i;

    cvGetImageRawData( gray, 0, 0, &size );
    
    /* replicate the gray plane into an RGB canvas we can draw on */
    IplImage* rgb = cvCreateImage( size, 8, 3 );
    cvMerge( gray, gray, gray, 0, rgb );

    if( v )
    {
        for( i = 0; i < v_cnt; i++ )
        {
            cvCircle( rgb, cvPoint(cvRound(v[i].x), cvRound(v[i].y)), 3, CV_RGB(255,0,0), CV_FILLED);
        }
    }

    if( u )
    {
        for( i = 0; i < u_cnt; i++ )
        {
            cvCircle( rgb, cvPoint(cvRound(u[i].x), cvRound(u[i].y)), 3, CV_RGB(0,255,0), CV_FILLED);
        }
    }

    cvDrawChessboardCorners( rgb, etalon_size, v, v_cnt, was_found );

    cvvNamedWindow( "test", 0 );
    cvvShowImage( "test", rgb );

    cvvWaitKey(0);

    cvReleaseImage( &rgb );   /* fix: the original leaked rgb */
}
コード例 #14
0
// Build a binary mask of pixels whose HSV values fall inside the box
// [(H,S,V), (H+5*Scale, S+5*Scale_1, V+5*Scale_2)], after equalizing
// the S and V planes.  The mask is eroded twice to drop speckle.
// Returns a newly allocated single-channel image; caller must release.
IplImage* Panoramic::GetHsvFeature(IplImage* src,int H,int S,int V,int Scale ,int Scale_1 ,int Scale_2)
{
	IplImage *colorImg	  = cvCreateImage(cvGetSize(src),8,3);
    IplImage *hsvImg	  = cvCreateImage(cvGetSize(src),8,3);
	cvCopy(src,colorImg);
	IplImage *Plane_1	  = cvCreateImage( cvGetSize(colorImg), 8, 1);//H plane
    IplImage *Plane_2	  = cvCreateImage( cvGetSize(colorImg), 8, 1);//S plane
	IplImage *Plane_3	  = cvCreateImage( cvGetSize(colorImg), 8, 1);//V plane
	IplImage *dst	      = cvCreateImage( cvGetSize(src),8,1);
	cvCvtColor(colorImg,hsvImg,CV_BGR2HSV);
	// cvCvtPixToPlane is the legacy alias of cvSplit
	cvCvtPixToPlane( hsvImg, Plane_1, Plane_2, Plane_3, 0 );

	// equalize S and V; H is deliberately left untouched
	cvEqualizeHist(Plane_2,Plane_2);//s_plane
	cvEqualizeHist(Plane_3,Plane_3);//v_plane
	cvMerge(Plane_1,Plane_2,Plane_3,0,hsvImg);
	cvInRangeS(hsvImg, cvScalar(H,S, V), cvScalar(5*Scale+H,5*Scale_1+S,5*Scale_2+V), dst);//cvScalar(0,40, 40), cvScalar(60, 170, 255)
	cvErode(dst,dst,0,2);

	/*cvNamedWindow("HSV_ROI",0);
	cvShowImage ("HSV_ROI",dst);*/

	cvReleaseImage(&colorImg);
	cvReleaseImage(&hsvImg);
	cvReleaseImage(&Plane_1);
	cvReleaseImage(&Plane_2);
	cvReleaseImage(&Plane_3);

	return dst;
}
コード例 #15
0
// Convert a BGR image to HSV, then sharpen, median-filter, and
// histogram-equalize the V channel, returning the modified image in
// imageout.  (Note: as in the original, the H/S/V planes are merged
// without converting back to BGR, so imageout is in HSV space.)
void imageturn(Mat imagein,Mat &imageout)
{
	IplImage imgtemp = imagein;
	IplImage* src = cvCloneImage(&imgtemp);
	CvSize size = cvGetSize(src);
	IplImage* imgout = cvCreateImage(size, 8, 3);
	IplImage* floathsv = cvCreateImage(size, 8, 3);
	IplImage* floatimgH = cvCreateImage(size, 8, 1);
	IplImage* floatimgS = cvCreateImage(size, 8, 1);
	IplImage* floatimgV = cvCreateImage(size, 8, 1);
	IplImage* floatimgZ = cvCreateImage(size, 8, 1);

	cvCvtColor(src, floathsv, CV_BGR2HSV);
	cvSplit(floathsv, floatimgH, floatimgS, floatimgV, NULL);

	// sharpen the V channel (deep-copied into a Mat, then cloned back)
	Mat V(floatimgV, true);
	Mat Vout;
	sharpen(V, Vout);
	imgtemp = Vout;
	cvReleaseImage(&floatimgV);          // fix: original leaked this image on reassignment
	floatimgV = cvCloneImage(&imgtemp);

	cvSmooth(floatimgV, floatimgZ, CV_MEDIAN);   // median filter
	cvEqualizeHist(floatimgZ, floatimgV);        // histogram equalization
	cvMerge(floatimgH, floatimgS, floatimgV, 0, imgout);

	imageout = Mat(imgout, true);        // deep copy before releasing imgout

	// fix: the original leaked every image it allocated
	cvReleaseImage(&src);
	cvReleaseImage(&imgout);
	cvReleaseImage(&floathsv);
	cvReleaseImage(&floatimgH);
	cvReleaseImage(&floatimgS);
	cvReleaseImage(&floatimgV);
	cvReleaseImage(&floatimgZ);
}
コード例 #16
0
ファイル: toanaglyph.cpp プロジェクト: veter-team/cockpit
/*
 * Build an anaglyph from a stereo pair: the first channel is taken from
 * the right image, the remaining two from the left image (exactly as
 * cvSplit delivers them — no channel-order conversion is done here).
 * Returns a newly allocated image; caller must cvReleaseImage it.
 */
IplImage* 
toAnaglyph(IplImage *imgLeft, IplImage *imgRight)
{
  IplImage *result = cvCreateImage(cvGetSize(imgLeft),
                                   imgLeft->depth,
                                   imgLeft->nChannels);
  IplImage *left[3];
  IplImage *right[3];
  int i;

  for (i = 0; i < 3; i++) {
    left[i] = cvCreateImage(cvGetSize(imgLeft), imgLeft->depth, 1);
    right[i] = cvCreateImage(cvGetSize(imgLeft), imgLeft->depth, 1);
  }

  cvSplit(imgLeft, left[0], left[1], left[2], NULL);
  cvSplit(imgRight, right[0], right[1], right[2], NULL);

  /* first plane from the right eye, the other two from the left eye */
  cvMerge(right[0], left[1], left[2], NULL, result);

  for (i = 0; i < 3; i++) {
    cvReleaseImage(&left[i]);
    cvReleaseImage(&right[i]);
  }

  return result;
}
コード例 #17
0
ファイル: ocv_histogram.c プロジェクト: changeyourdestiny/DIP
/* Render (and immediately discard) a drawn histogram of `image`, then
 * return a new image whose channels were each processed by
 * ocv_histogram1.  NULL input logs via present() and returns NULL;
 * unsupported channel counts return NULL.  Caller owns the result. */
IplImage *ocv_histogram(IplImage *image) {
	if (!image) { present(1, "!image"); return NULL; }
#if 1
	/* side effects kept from the original: compute the bins and draw
	 * them into a scratch image that is thrown away */
	size_t binsCount = 0;
	size_t *bins = NULL;
	calcularHistograma(image, &binsCount, &bins);
	IplImage *scratch = cvCreateImage(cvGetSize(image), IPL_DEPTH_8U, 3);
	graficarHistograma(scratch, binsCount, bins);
	cvReleaseImage(&scratch); //return scratch;
#endif
	if (image->nChannels == 1)
		return ocv_histogram1(image);

	if (image->nChannels != 3)
		return NULL;

	IplImage *in[3], *proc[3];
	int i;

	for (i = 0; i < 3; i++)
		in[i] = cvCreateImage(cvGetSize(image), IPL_DEPTH_8U, 1);
	cvSplit(image, in[0], in[1], in[2], NULL);

	for (i = 0; i < 3; i++) {
		proc[i] = ocv_histogram1(in[i]);
		cvReleaseImage(&in[i]);
	}

	IplImage *out = cvCreateImage(cvGetSize(image), IPL_DEPTH_8U, 3);
	cvMerge(proc[0], proc[1], proc[2], NULL, out);
	for (i = 0; i < 3; i++)
		cvReleaseImage(&proc[i]);

	return out;
}
コード例 #18
0
ファイル: csiopencv.c プロジェクト: JMarple/RPi_OpenCV
/* MMAL video-port callback: convert the incoming YUV420 camera buffer
 * into the global RGB IplImage (CSI_CAM_DSTIMAGE) and return it.
 * Also recycles `buffer` back to the port's pool.  The returned image
 * is a module-level global — do not release it. */
IplImage* cvQueryRPiFrame(MMAL_PORT_T *port, MMAL_BUFFER_HEADER_T *buffer)
{
	MMAL_BUFFER_HEADER_T *new_buffer;
	PORT_USERDATA *pData = (PORT_USERDATA *)port->userdata;

	if (pData)
	{	 
		if (buffer->length)
		{
			/*Convert buffer to RGB IplImage*/
						
			mmal_buffer_header_mem_lock(buffer);

			int w=pData->pstate->width;	// get image size
			int h=pData->pstate->height;
			int h4=h/4;
			
			/* planar YUV420 layout: full-res Y, then quarter-size U and V */
			memcpy(yCSI_CAM->imageData,buffer->data,w*h);
			memcpy(uCSI_CAM->imageData,buffer->data+w*h,w*h4);
			memcpy(vCSI_CAM->imageData,buffer->data+w*h+w*h4,w*h4);
			
			/* nearest-neighbor chroma upsampling for speed */
			cvResize(uCSI_CAM, uCSI_CAM_BIG, CV_INTER_NN);
			cvResize(vCSI_CAM, vCSI_CAM_BIG, CV_INTER_NN);  //CV_INTER_LINEAR looks better but it's slower
			cvMerge(yCSI_CAM, uCSI_CAM_BIG, vCSI_CAM_BIG, NULL, CSI_CAM_IMAGE);
	
			cvCvtColor(CSI_CAM_IMAGE,CSI_CAM_DSTIMAGE,CV_YCrCb2RGB);	// convert in RGB color space (slow)
			
			mmal_buffer_header_mem_unlock(buffer);		  
		 }
		 else 
		 {
			 vcos_log_error("buffer null");
		 }
      
   }
   else
   {
      vcos_log_error("Received a encoder buffer callback with no state");
   }
   
   // release buffer back to the pool
   mmal_buffer_header_release(buffer);

   // and send one back to the port (if still open)
   if (port->is_enabled)
   {
      MMAL_STATUS_T status;

      new_buffer = mmal_queue_get(pData->pstate->video_pool->queue);

      if (new_buffer)
         status = mmal_port_send_buffer(port, new_buffer);

      /* short-circuit keeps `status` from being read when new_buffer is NULL */
      if (!new_buffer || status != MMAL_SUCCESS)
         vcos_log_error("Unable to return a buffer to the encoder port");
   }
   
   return CSI_CAM_DSTIMAGE;
}
コード例 #19
0
/* Merge component planes back into image_componentes->image.
 * NOTE(review): all three channel slots are fed from img[0], so the
 * output is a 3-channel replication of the first plane.  Given the
 * function name, img[0], img[1], img[2] may have been intended —
 * confirm against callers before changing. */
void ImageComponentsRGB2Image(ImageComponents *image_componentes)
{
	cvMerge(image_componentes->img[0],
			image_componentes->img[0],
			image_componentes->img[0],
			NULL,
			image_componentes->image);
}
コード例 #20
0
//
//	Detect chessboard corners in a frame.
//
//	Args:
//      frameImage : captured frame (BGR); corners are drawn onto it
//      grayImage  : scratch grayscale image (written here)
//      corners    : output array receiving the corner positions
//
//	Returns:
//		0        : not all corners were detected
//		non-zero : all corners were detected
//
int findCorners( IplImage *frameImage, IplImage *grayImage, CvPoint2D32f *corners ) {
	int cornerCount;				//	number of detected corners
	int findChessboardCornersFlag;	//	flags for cvFindChessboardCorners
	int findFlag;					//	whether every corner was found
	
	IplImage* m_image_binary;
	IplImage* m_set_image;
	
	m_image_binary     = cvCreateImage(cvSize(frameImage->width, frameImage->height), IPL_DEPTH_8U, 1);
	m_set_image     = cvCreateImage(cvSize(frameImage->width, frameImage->height), IPL_DEPTH_8U, 3);

	//	build the flag set for cvFindChessboardCorners
	findChessboardCornersFlag = createFindChessboardCornersFlag();
	
	//	convert the frame to grayscale
	cvCvtColor( frameImage, grayImage, CV_BGR2GRAY );

    //	threshold the grayscale image to binary
    cvThreshold( grayImage, m_image_binary, 128, 255, CV_THRESH_BINARY );

    // replicate the binary plane into a 3-channel image for the detector
    cvMerge(m_image_binary, m_image_binary, m_image_binary, NULL, m_set_image);

	findFlag=cvFindChessboardCorners(
		m_set_image,
		board_sz,
		corners,
		&cornerCount,
		findChessboardCornersFlag
	);
	
	if( findFlag != 0 ) {
		//	all corners found: refine their positions to sub-pixel accuracy
		//	(fix: removed an unused local CvTermCriteria the original
		//	declared here but never passed anywhere)
		cvFindCornerSubPix(
			grayImage,
			corners,
			cornerCount,
			cvSize( SEARCH_WINDOW_HALF_WIDTH, SEARCH_WINDOW_HALF_HEIGHT ),
			cvSize( DEAD_REGION_HALF_WIDTH, DEAD_REGION_HALF_HEIGHT ), 
			cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, MAX_ITERATIONS, EPSILON )
		);
	}
		
	//	draw the detected corners onto the original frame
	cvDrawChessboardCorners( frameImage, board_sz, corners, cornerCount, findFlag );
	
	cvReleaseImage(&m_set_image);
	cvReleaseImage(&m_image_binary);
	
	return findFlag;
}
コード例 #21
0
ファイル: laplace.c プロジェクト: glo/ee384b
/* Grab frames from a camera (single-digit argument = camera index) or
 * an AVI file, apply a per-channel Laplacian, and display the merged
 * result until a key is pressed. */
int main( int argc, char** argv )
{
    IplImage* laplace = 0;
    IplImage* colorlaplace = 0;
    IplImage* planes[3] = { 0, 0, 0 };
    CvCapture* capture = 0;
    int i;

    if( argc == 1 || (argc == 2 && strlen(argv[1]) == 1 && isdigit(argv[1][0])))
        capture = cvCaptureFromCAM( argc == 2 ? argv[1][0] - '0' : 0 );
    else if( argc == 2 )
        capture = cvCaptureFromAVI( argv[1] );

    if( !capture )
    {
        fprintf(stderr,"Could not initialize capturing...\n");
        return -1;
    }

    cvNamedWindow( "Laplacian", 0 );

    for(;;)
    {
        IplImage* frame = cvQueryFrame( capture );
        if( !frame )
            break;

        /* lazily allocate work images once the frame size is known */
        if( !laplace )
        {
            for( i = 0; i < 3; i++ )
                planes[i] = cvCreateImage( cvSize(frame->width,frame->height), 8, 1 );
            laplace = cvCreateImage( cvSize(frame->width,frame->height), IPL_DEPTH_16S, 1 );
            colorlaplace = cvCreateImage( cvSize(frame->width,frame->height), 8, 3 );
        }

        cvSplit( frame, planes[0], planes[1], planes[2], 0 );
        for( i = 0; i < 3; i++ )
        {
            /* 16-bit Laplacian, folded back to 8-bit absolute values */
            cvLaplace( planes[i], laplace, 3 );
            cvConvertScaleAbs( laplace, planes[i], 1, 0 );
        }
        cvMerge( planes[0], planes[1], planes[2], 0, colorlaplace );
        colorlaplace->origin = frame->origin;

        cvShowImage("Laplacian", colorlaplace );

        if( cvWaitKey(10) >= 0 )
            break;
    }

    /* fix: release the work images (the original leaked them);
     * cvReleaseImage tolerates never-allocated NULL pointers */
    for( i = 0; i < 3; i++ )
        cvReleaseImage( &planes[i] );
    cvReleaseImage( &laplace );
    cvReleaseImage( &colorlaplace );

    cvReleaseCapture( &capture );
    cvDestroyWindow("Laplacian");

    return 0;
}
コード例 #22
0
// Load the image at csInputPath, read up to 9 filter values from the
// settings file at csSettings (first line is the filter name, then
// space-separated integers: R, G, B, H, S, V, brightness, contrast,
// grayscale flag), apply the adjustments, and save to csOutputPath.
// Returns false if the image or the settings file cannot be opened.
bool _stdcall customizeFilter(LPWSTR csInputPath, LPWSTR csOutputPath, LPWSTR csSettings)
{
	char inputPath[SIZE] = "";
	WideCharToMultiByte(950, 0, csInputPath, -1, inputPath, SIZE, NULL, NULL);//wchar_t * to char
	char outputPath[SIZE] = "";
	WideCharToMultiByte(950, 0, csOutputPath, -1, outputPath, SIZE, NULL, NULL);//wchar_t * to char *
	char settingsPath[SIZE] = "";
	WideCharToMultiByte(950, 0, csSettings, -1, settingsPath, SIZE, NULL, NULL);//wchar_t * to char *

	IplImage *img = cvLoadImage(inputPath, 1);

	if(!img)
		return false;

	FILE *in = fopen(settingsPath, "r+");
	if(!in)
	{
		cvReleaseImage(&img);
		return false;
	}
	int values[9];
	// fix: the original memset only 9 BYTES of the 9-int array, leaving
	// values[2..8] uninitialized (UB when read below on a short file)
	memset(values, 0, sizeof(values));
	char data[SIZE];
	//read data
	int i;
	//consider c# newline is \n\r
	for(i = 0 ; !feof(in) && i < SIZE - 1 ; i++)
		data[i] = fgetc(in);
	data[i] = 0;
	fclose(in);
	//skip filterName (the first line)
	char *token = strtok(data, "\r\n");
	for(int j = 0 ; j < (int)(sizeof(values)/sizeof(*values)) && token ; j++)
	{
		token = strtok(NULL, " ");
		if(!token)	// fix: the original passed a NULL token to atoi
			break;
		values[j] = atoi(token);
	}//end for

	if(values[8] == 1)
	{
		// desaturate: gray plane replicated into all three channels
		IplImage *gray = cvCreateImage(cvGetSize(img), img->depth, 1);
		cvCvtColor(img, gray, CV_BGR2GRAY);
		cvMerge(gray, gray, gray, 0, img);
		cvReleaseImage(&gray);
	}//end if isGray

	adjustRGB(img, values[0], values[1], values[2]);
	adjustHSV(img, values[3], values[4], values[5]);
	adjustBrightnessContrast(img, values[6], values[7]);

	cvSaveImage(outputPath, img);
	cvReleaseImage(&img);

	return true;
}//end customizeFilter
コード例 #23
0
// Return m_rgb with the grayscale depth map blended 50/50 over the
// colored-area overlay.  genColoredAreas() is run lazily on first call.
Mat ImageAnalysis::getColoredAreas(){
	if( !m_areaCol_ok ) genColoredAreas();


	// Wrap the Mats in IplImage headers (no copy) so the legacy cvMerge
	// can replicate the 1-channel depth image into m_rgb's 3 channels
	// in place.
	IplImage gray = m_depthf;
	IplImage rgb = m_rgb;
	cvMerge(&gray, &gray, &gray, NULL, &rgb);
	// blend the grayscale base with the colored-area overlay
	addWeighted(m_rgb,0.5f,m_areaCol,0.5f,0,m_rgb);
	return m_rgb;
}
コード例 #24
0
// Update the motion-history image (MHI) from the current frame and
// render the recent-motion silhouette into dst's first (blue) channel.
// parameters:
//  img  - input video frame (BGR)
//  dst  - resultant motion picture (same size, 3 channels)
//  diff_threshold - binarization threshold for frame differencing
//  frameCount     - unused; kept for interface compatibility
// Operates on module-level state: buf/last (frame ring buffer), mhi,
// orient, segmask, mask, silh.
static void  update_mhi( IplImage* img, IplImage* dst, int diff_threshold, int frameCount){
	if(DEBUG){
		std::cout << "- UPDATING_MHI" << std::endl;
	}
	double timestamp = (double)clock()/CLOCKS_PER_SEC; // get current time in seconds
	CvSize size = cvSize(img->width,img->height); // get current frame size
	int i, idx1 = last, idx2;
	// fix: removed eight unused locals (seq, comp_rect, roi, count,
	// angle, center, magnitude, color) left over from the motempl sample

	// Allocate images at the beginning or reallocate them if the frame size is changed
	if( !mhi || mhi->width != size.width || mhi->height != size.height ) {
		if( buf == 0 ) {
			buf = (IplImage**)malloc(N*sizeof(buf[0]));
			memset( buf, 0, N*sizeof(buf[0]));
		}

		for( i = 0; i < N; i++ ) {
			cvReleaseImage( &buf[i] );
			buf[i] = cvCreateImage( size, IPL_DEPTH_8U, 1 );
			cvZero( buf[i] );
		}
		cvReleaseImage( &mhi );
		cvReleaseImage( &orient );
		cvReleaseImage( &segmask );
		cvReleaseImage( &mask );

		mhi = cvCreateImage( size, IPL_DEPTH_32F, 1 );
		cvZero( mhi ); // clear MHI at the beginning
		orient = cvCreateImage( size, IPL_DEPTH_32F, 1 );
		segmask = cvCreateImage( size, IPL_DEPTH_32F, 1 );
		mask = cvCreateImage( size, IPL_DEPTH_8U, 1 );
	}

	cvCvtColor( img, buf[last], CV_BGR2GRAY ); // convert frame to grayscale
	idx2 = (last + 1) % N; // index of (last - (N-1))th frame
	last = idx2;

	silh = buf[idx2];
	cvAbsDiff( buf[idx1], buf[idx2], silh ); // get difference between frames

	cvThreshold( silh, silh, diff_threshold, 255, CV_THRESH_BINARY); // and threshold it
	cvUpdateMotionHistory( silh, mhi, timestamp, MHI_DURATION ); // update MHI

	// convert MHI to blue 8u image (more recent motion maps brighter)
	cvCvtScale( mhi, mask, 255./MHI_DURATION, (MHI_DURATION - timestamp)*255./MHI_DURATION );
	cvZero( dst );
	cvMerge( mask, 0, 0, 0, dst );
}
コード例 #25
0
ファイル: RaspiCamCV.c プロジェクト: nettercm/raspicam-opencv
// Block until the camera pipeline delivers the next frame, then return
// it in the requested format:
//   mode 0 : raw luma plane (state->py)
//   mode 1 : RGB conversion (state->dstImage)
//   mode 2 : merged YCrCb image without color conversion (state->yuvImage)
// In graymode, or for unknown modes, the luma plane is returned.
// The returned image is owned by the capture state — do not release it.
IplImage * raspiCamCvQueryFrame_New(RaspiCamCvCapture * capture, int mode)
{
	//fprintf(stderr,"-");fflush(stderr);

	RASPIVID_STATE * state = capture->pState;

	// handshake with the capture callback: request a frame, then wait
	// until it has been written into the state buffers
	//fprintf(stderr,"A");fflush(stderr);
	vcos_semaphore_post(&state->capture_sem);
	//fprintf(stderr,"B");fflush(stderr);
	vcos_semaphore_wait(&state->capture_done_sem);
	//fprintf(stderr,"C");fflush(stderr);

	counter2++;
	//printf("video_buffer_callback: %d\tFrames: %d\n", counter, counter2);

	if (state->graymode==0)
	{
		if (mode == 0)
		{
			return state->py;			
		}
		else if (mode == 1)
		{
			// upsample the quarter-size chroma planes, merge, convert to RGB
			cvResize(state->pu, state->pu_big, CV_INTER_NN);
			cvResize(state->pv, state->pv_big, CV_INTER_NN);  //CV_INTER_LINEAR looks better but it's slower
			cvMerge(state->py, state->pu_big, state->pv_big, NULL, state->yuvImage);

			cvCvtColor(state->yuvImage,state->dstImage,CV_YCrCb2RGB);	// convert in RGB color space (slow)
			return state->dstImage;
		}
		else if (mode == 2)
		{
			// same as mode 1 but skip the costly color conversion
			cvResize(state->pu, state->pu_big, CV_INTER_NN);
			cvResize(state->pv, state->pv_big, CV_INTER_NN);  //CV_INTER_LINEAR looks better but it's slower
			cvMerge(state->py, state->pu_big, state->pv_big, NULL, state->yuvImage);

			//cvCvtColor(state->yuvImage,state->dstImage,CV_YCrCb2RGB);	// convert in RGB color space (slow)
			return state->yuvImage;
		}
	}
	return state->py;
}
コード例 #26
0
ファイル: tracker_helpers.c プロジェクト: Hazer/moveonpc
/* Histogram-equalize each channel of img in place, using the
 * module-level scratch planes ch0/ch1/ch2 ((re)allocated to match img
 * by th_create_image). */
void th_equalize_image(IplImage* img) {
	CvSize size = cvGetSize(img);

	th_create_image(&ch0, size, img->depth, 1);
	th_create_image(&ch1, size, img->depth, 1);
	th_create_image(&ch2, size, img->depth, 1);

	cvSplit(img, ch0, ch1, ch2, NULL);
	cvEqualizeHist(ch0, ch0);
	cvEqualizeHist(ch1, ch1);
	cvEqualizeHist(ch2, ch2);
	cvMerge(ch0, ch1, ch2, NULL, img);
}
コード例 #27
0
// Retrieve the most recently captured frame, waiting up to 1s for the
// in-flight capture to complete.  In non-monochrome mode the grayscale
// frame is replicated into the 3-channel member `frame`; otherwise the
// grayscale frame is returned directly.  Returns NULL on timeout/error.
// The returned image is owned by the capture object — do not release.
IplImage* CvCaptureCAM_PvAPI::retrieveFrame(int)
{

    if (PvCaptureWaitForFrameDone(Camera.Handle, &(Camera.Frame), 1000) == ePvErrSuccess) {
		if (!monocrome) {
			cvMerge(grayframe,grayframe,grayframe,NULL,frame); 
			return frame;	
		}	
		return grayframe;
    }		
    else return NULL;		
}
コード例 #28
0
// Adjust brightness (-... to ...) and contrast (-100..100) of src in
// place via a per-channel lookup table.  src is released and replaced
// with a newly allocated image of the same size/depth/channels.
void adjustBrightnessContrast(IplImage *&src, int Brightness, int Contrast)
{
	unsigned char LookupTableData[256];
	CvMat *LookupTableMatrix;
	double Delta;
	double a, b;
	int y;

	IplImage *filterB = cvCreateImage(cvGetSize(src), (src)->depth, 1);
	IplImage *filterG = cvCreateImage(cvGetSize(src), (src)->depth, 1);
	IplImage *filterR = cvCreateImage(cvGetSize(src), (src)->depth, 1);
	cvSplit(src, filterB, filterG, filterR, 0);

	//Brightness/Contrast Formula
	// fix: Delta was computed with integer division (127 * Contrast / 100
	// truncates), silently quantizing the contrast curve
	if(Contrast > 0)
	{
		Delta = 127.0 * Contrast / 100;
		a = 255 / (255 - Delta * 2);
		b = a * (Brightness - Delta);
	}
	else
	{
		Delta = -128.0 * Contrast / 100;
		a = (256 - Delta * 2) / 255;
		b = a * Brightness + Delta;
	}

	// build the 8-bit lookup table, clamped to [0, 255]
	for(int x = 0 ; x < 256 ; x++)
	{
		y = (int)(a * x + b);
		if(y < 0) y = 0; else if(y > 255) y = 255;

		LookupTableData[x] = (uchar)y;
	}

	LookupTableMatrix = cvCreateMatHeader(1, 256, CV_8UC1);
	cvSetData(LookupTableMatrix, LookupTableData, 0);

	cvLUT(filterB, filterB, LookupTableMatrix);
	cvLUT(filterG, filterG, LookupTableMatrix);
	cvLUT(filterR, filterR, LookupTableMatrix);

	IplImage *dst = cvCreateImage(cvGetSize(src), src->depth, src->nChannels);
	cvMerge(filterB, filterG, filterR, 0, dst);

	cvReleaseImage(&src);
	src = cvCloneImage(dst);
	cvReleaseImage(&dst);
	cvReleaseImage(&filterB);
	cvReleaseImage(&filterG);
	cvReleaseImage(&filterR);
	cvReleaseMat(&LookupTableMatrix);
}//end Brightness/Contrast
コード例 #29
0
ファイル: LightSet.cpp プロジェクト: cJeek/homeServiceRobot
////////////////////////perform fourier transform//////////////////////////////////////////////////
//fft2
// code comes from http://www.opencv.org.cn/
// Forward DFT of `src` (real-valued, any single-channel depth) into the
// complex matrix `dst` (CV_32FC2, at least src-sized).  No padding to
// an optimal DFT size is performed (the cvGetOptimalDFTSize calls were
// already disabled in the original).
void CLightSet::fft2(IplImage *src,CvMat *dst)
{
	IplImage * realInput;
	IplImage * imaginaryInput;
	IplImage * complexInput;
	int dft_M, dft_N;
	CvMat* dft_A, tmp;

	realInput = cvCreateImage( cvGetSize(src), IPL_DEPTH_32F, 1);
	imaginaryInput = cvCreateImage( cvGetSize(src), IPL_DEPTH_32F, 1);
	complexInput = cvCreateImage( cvGetSize(src), IPL_DEPTH_32F, 2);

	// pack src into the real plane, zero the imaginary plane, interleave
	cvScale(src, realInput, 1.0, 0.0);
	cvZero(imaginaryInput);
	cvMerge(realInput, imaginaryInput, NULL, NULL, complexInput);

	dft_M =src->height;
	dft_N =src->width ;

	dft_A = cvCreateMat( dft_M, dft_N, CV_32FC2 );

	// copy A to dft_A and pad dft_A with zeros
	cvGetSubRect( dft_A, &tmp, cvRect(0,0, src->width, src->height));
	cvCopy( complexInput, &tmp, NULL );
	if( dft_A->cols > src->width )
	{
		cvGetSubRect( dft_A, &tmp, cvRect(src->width,0, dft_A->cols - src->width, src->height));
		cvZero( &tmp );
	}

	// no need to pad bottom part of dft_A with zeros because of
	// use nonzero_rows parameter in cvDFT() call below

	cvDFT( dft_A, dft_A, CV_DXT_FORWARD, complexInput->height );

	cvCopy(dft_A,dst);

	cvReleaseImage(&realInput);
	cvReleaseImage(&imaginaryInput);
	cvReleaseImage(&complexInput);
	cvReleaseMat(&dft_A);   // fix: the original leaked dft_A on every call
	// fix: removed image_Re/image_Im, which were allocated and released
	// but never used

}
コード例 #30
0
// GStreamer in-place transform callback: detect skin-colored blobs in
// each BGRA/RGBA buffer and write the (cleaned-up) skin mask into the
// buffer's alpha channel.  Returns GST_FLOW_OK unconditionally.
static GstFlowReturn gst_skin_transform_ip(GstBaseTransform * btrans, GstBuffer * gstbuf) 
{
  GstSkin *skin = GST_SKIN (btrans);

  GST_SKIN_LOCK (skin);

  //////////////////////////////////////////////////////////////////////////////
  // Image preprocessing: color space conversion etc
  // get image data from the input, which is BGR/RGB
  // (cvRGBA wraps the GStreamer buffer memory directly — no copy)
  skin->cvRGBA->imageData = (char*)GST_BUFFER_DATA(gstbuf);
  cvCvtColor(skin->cvRGBA, skin->cvRGB, CV_BGRA2BGR);

  //////////////////////////////////////////////////////////////////////////////
  // here goes the bussiness logic
  //////////////////////////////////////////////////////////////////////////////
  ///////////// SKIN COLOUR BLOB FACE DETECTION/////////////////////////////////
  //////////////////////////////////////////////////////////////////////////////
  if( skin->enableskin ) 
  {                                                            
    int display = 1;                                           
    if( METHOD_HSV == skin->method ){ // HSV
      gstskin_find_skin_center_of_mass( skin, display); 
    }
    else if( METHOD_RGB == skin->method ){ // RGB
      gstskin_find_skin_center_of_mass2( skin, display); 
    }
  }                                         
  //////////////////////////////////////////////////////////////////////////////
  // After this we have a RGB Black and white image with the skin, in skin->cvRGB
  // Just copy one channel of the RGB skin, which anyway has just values 255 or 0
  // and save it for later
  cvSplit(skin->cvRGB, skin->chA, NULL, NULL, NULL);

  // morphological open/close to drop speckle and fill small holes.
  // fix: the original created a fresh structuring element for each of
  // the three calls and never released any of them, leaking memory on
  // every processed frame — create one kernel and release it after use.
  {
    IplConvKernel *kernel =
        cvCreateStructuringElementEx(3, 3, 1, 1, CV_SHAPE_RECT, NULL);
    cvErode( skin->chA, skin->chA, kernel, 1);
    cvDilate(skin->chA, skin->chA, kernel, 2);
    cvErode( skin->chA, skin->chA, kernel, 1);
    cvReleaseStructuringElement(&kernel);
  }

  // copy the skin output to the alpha channel in the output image
  cvSplit(skin->cvRGBA, skin->ch1, skin->ch2, skin->ch3, NULL);
  cvMerge(skin->ch1, skin->ch2, skin->ch3, skin->chA,    skin->cvRGBA);
 
  //////////////////////////////////////////////////////////////////////////////
  // if we want to display, just overwrite the output
  if( skin->display ){
    cvCvtColor(skin->chA, skin->cvRGBA, CV_GRAY2RGB);
  }

  GST_SKIN_UNLOCK (skin);  
  
  return GST_FLOW_OK;
}