Beispiel #1
0
void Sobel(IplImage *src, IplImage *dx, IplImage *dy)
{
    // Compute the first-order Sobel derivatives of src into dx (d/dx) and
    // dy (d/dy) with an aperture-1 kernel, then halve both results to
    // rescale the kernel response.  No-op if any image pointer is null.
    if (!src || !dx || !dy)
    {
        return;
    }

    cvSobel(src, dx, 1, 0, 1);
    cvSobel(src, dy, 0, 1, 1);

    // Scale every pixel by 0.5 in one whole-image pass instead of the
    // original per-pixel cvGet2D/cvSet2D loop: same rounding/saturation
    // semantics, without a pair of function calls per pixel.
    cvConvertScale(dx, dx, 0.5, 0);
    cvConvertScale(dy, dy, 0.5, 0);
}
Beispiel #2
0
int main( int argc, char** argv )
{

   IplImage * src = 0;
   IplImage * dst = 0;
   //IplImage * dst_ini = 0;

   char* filename = argc == 2 ? argv[1] : (char*)"lena.bmp";

   // Cargo la imagen
   if( (src = cvLoadImage (filename, CV_LOAD_IMAGE_GRAYSCALE)) == 0 )
	   return -1;

   // Creo una IplImage para cada salida esperada
   if( (dst = cvCreateImage (cvGetSize (src), IPL_DEPTH_8U, 1) ) == 0 )
	   return -1;

   // Creo una IplImage para cada salida esperada
   //if( (dst_ini = cvCreateImage (cvGetSize (src), IPL_DEPTH_8U, 1) ) == 0 )
   //   return -1;

   // Aplico el filtro (Sobel con derivada x en este caso) y salvo imagen 
   cvSobel(src, dst, 1,0,3); 	// Esta parte es la que tienen que programar los alumnos en ASM	y comparar
   cvSaveImage("derivada x.BMP", dst);

   // Aplico el filtro (Sobel con derivada y en esta caso) y salvo imagen 
   cvSobel(src, dst, 0,1,3);    // Esta parte es la que tienen que programar los alumnos en ASM	y comparar
   cvSaveImage("derivada y.BMP", dst);

   return 0;

}
Beispiel #3
0
// Builds a combined gradient-magnitude image from the first two planes of the
// input (used as the H and S channels), writing it into the grayscale output.
// Uses |dx| + |dy| instead of sqrt(dx^2 + dy^2) for speed; H-channel gradients
// are masked by a thresholded+dilated S channel so hue noise in desaturated
// regions is suppressed.  No-op when sizes/types do not match RGB24 -> gray.
// NOTE(review): the input is split with cvCvtPixToPlane without any color
// conversion, so the caller presumably passes HSV-packed pixel data here —
// confirm against the call sites.
void ImageProcessorCV::CalculateGradientImageHSV(CByteImage *pInputImage, CByteImage *pOutputImage)
{
	if (pInputImage->width != pOutputImage->width || pInputImage->height != pOutputImage->height ||
		pInputImage->type != CByteImage::eRGB24 || pOutputImage->type != CByteImage::eGrayScale)
		return;

	// Wrap the CByteImages in IplImage headers (released at the end).
	IplImage *pIplInputImage = IplImageAdaptor::Adapt(pInputImage);
	IplImage *pIplOutputImage = IplImageAdaptor::Adapt(pOutputImage);

	// Determine Gradient Image by Irina Wchter
	// instead of normal norm sqrt(x*x +y*y) use |x|+|y| because it is much faster
	IplImage *singleChannel0 = cvCreateImage(cvSize(pInputImage->width,pInputImage->height), IPL_DEPTH_8U, 1);
	IplImage *singleChannel1 = cvCreateImage(cvSize(pInputImage->width,pInputImage->height), IPL_DEPTH_8U, 1);
	IplImage *singleChannel2 = cvCreateImage(cvSize(pInputImage->width,pInputImage->height), IPL_DEPTH_8U, 1);
	// 16S buffer: cvSobel on an 8U source needs a signed, deeper destination
	IplImage *diff = cvCreateImage(cvSize(pInputImage->width, pInputImage->height), IPL_DEPTH_16S, 1);
	IplImage *abs = cvCreateImage(cvSize(pInputImage->width, pInputImage->height), IPL_DEPTH_8U, 1);
		
	cvCvtPixToPlane(pIplInputImage, singleChannel0, singleChannel1, singleChannel2, NULL);
	
	// calculate gradients on S-channel: |dS/dx| + |dS/dy| -> output image
	//cvSmooth(singleChannel1, singleChannel1, CV_GAUSSIAN, 3, 3);
	cvSobel(singleChannel1, diff, 1, 0, 3);
	cvConvertScaleAbs(diff, abs);
	cvSobel(singleChannel1, diff, 0, 1, 3);
	cvConvertScaleAbs(diff, pIplOutputImage);
	cvAdd(abs, pIplOutputImage, pIplOutputImage, 0);
	
	// threshold S-channel for creating a mask for gradients of H-channel
	cvThreshold(singleChannel1, singleChannel1, 60, 255, CV_THRESH_BINARY);
	cvDilate(singleChannel1, singleChannel1);
	
	// calculate gradients on H-channel: |dH/dx| + |dH/dy| (reuses channel 0)
	//cvSmooth(singleChannel0, singleChannel0, CV_GAUSSIAN, 3, 3);
	cvSobel(singleChannel0, diff, 1, 0, 3);
	cvConvertScaleAbs(diff, abs);
	cvSobel(singleChannel0, diff, 0, 1, 3);
	cvConvertScaleAbs(diff, singleChannel0);
	cvAdd(abs, singleChannel0, singleChannel0, 0);
	
	// filter gradients of H-channel with the saturation mask
	cvAnd(singleChannel0, singleChannel1, singleChannel0);
	
	// combine the two gradient images (pixelwise max)
	cvMax(pIplOutputImage, singleChannel0, pIplOutputImage);
	
	// free memory
	cvReleaseImage(&singleChannel0);
	cvReleaseImage(&singleChannel1);
	cvReleaseImage(&singleChannel2);
	cvReleaseImage(&diff);
	cvReleaseImage(&abs);
	
	// only the headers were allocated by Adapt; pixel data belongs to CByteImage
	cvReleaseImageHeader(&pIplInputImage);
	cvReleaseImageHeader(&pIplOutputImage);
}
Beispiel #4
0
// Computes dst = sqrt((0.25*dx)^2 + (0.25*dy)^2) + SMALLNUM for a CV_32FC1
// source, where dx/dy are aperture-1 Sobel derivatives.  The 0.25 factor
// rescales the kernel response; SMALLNUM (defined elsewhere) keeps the result
// strictly positive so later divisions by it are safe.  src and dst must be
// 32-bit float, single channel and equally sized.
CV_IMPL void cvCalS(const CvArr* srcarr,
                    CvArr* dstarr)
{
    CV_FUNCNAME("cvCalS");
    
    __BEGIN__;
    CvMat sstub, *src;
    CvMat dstub, *dst;
    CvMat* src_dx=0, *src_dy=0;
    CvSize size;
    int i, j;
    int iStep;
    float* fPtr;
    
    CV_CALL( src = cvGetMat(srcarr, &sstub ));
    CV_CALL( dst = cvGetMat(dstarr, &dstub ));
    
    if( CV_MAT_TYPE(src->type) != CV_32FC1)
        CV_ERROR( CV_StsUnsupportedFormat, "Only-32bit, 1-channel input images are supported" );
    
    if( CV_MAT_TYPE(dst->type) != CV_32FC1)
        CV_ERROR( CV_StsUnsupportedFormat, "Only-32bit, 1-channel input images are supported" );
    
    if( !CV_ARE_SIZES_EQ( src, dst ))
        CV_ERROR( CV_StsUnmatchedSizes, "The input images must have the same size" );
    
    size = cvGetMatSize( src );
    
    src_dx  = cvCreateMat(size.height, size.width, CV_32FC1 );
    src_dy  = cvCreateMat(size.height, size.width, CV_32FC1 );
    cvSetZero(src_dx);
    cvSetZero(src_dy);
    
    // dst row stride in float elements (step is in bytes)
    iStep = dst->step / sizeof(fPtr[0]);
    fPtr = dst->data.fl;
    
    cvSobel(src, src_dx, 1, 0, 1);
    cvSobel(src, src_dy, 0, 1, 1);
    // cvMul(a, a, a, s) squares in place with scaling: result is (0.25*d)^2
    cvMul(src_dx, src_dx, src_dx, 0.25f*0.25f); //rescale gradient
    cvMul(src_dy, src_dy, src_dy, 0.25f*0.25f); //rescale gradient
    cvAdd(src_dx, src_dy, dst);
    
    // in-place sqrt over dst, offset by SMALLNUM to avoid exact zeros
    for(j=0; j<size.height; j++){
        for (i=0; i<size.width; i++)
            fPtr[i+iStep*j] = sqrt(fPtr[i+iStep*j])+SMALLNUM;
    }
    cvReleaseMat(&src_dx);
    cvReleaseMat(&src_dy);
    
    __END__;
}
Beispiel #5
0
void onTrackbar(int)
{
	// Trackbar callback: recompute the Sobel edge image of the global `img`
	// with the current aperture `aSize` / direction `isY`, and display it.
	// Fix: the previously allocated global `dst` is now released before a new
	// one is created — the original leaked one image per trackbar event.
	if(!(aSize & 1)) aSize++;   // cvSobel requires an odd aperture size

	if(dst) cvReleaseImage(&dst);

	if(isColor)
	{
		dst = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 3);
		// split the image into its B, G, R planes
		IplImage *B = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1);
		IplImage *G = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1);
		IplImage *R = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1);
		// 16S buffers: Sobel of an 8U plane needs a signed destination
		IplImage *B16s = cvCreateImage(cvGetSize(img), IPL_DEPTH_16S, 1);
		IplImage *G16s = cvCreateImage(cvGetSize(img), IPL_DEPTH_16S, 1);
		IplImage *R16s = cvCreateImage(cvGetSize(img), IPL_DEPTH_16S, 1);
		cvSplit(img, B, G, R, 0);

		// per-plane Sobel, then map back to 8 bits via |.|
		cvSobel(B, B16s, isY, 1 - isY, aSize);
		cvSobel(G, G16s, isY, 1 - isY, aSize);
		cvSobel(R, R16s, isY, 1 - isY, aSize);
		cvConvertScaleAbs(B16s, B, 1, 0);
		cvConvertScaleAbs(G16s, G, 1, 0);
		cvConvertScaleAbs(R16s, R, 1, 0);

		cvMerge(B, G, R, 0, dst);

		cvReleaseImage(&B);
		cvReleaseImage(&G);
		cvReleaseImage(&R);
		cvReleaseImage(&B16s);
		cvReleaseImage(&G16s);
		cvReleaseImage(&R16s);
	}//end if
	else
	{
		dst = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1);
		IplImage *gray = cvCreateImage(cvGetSize(img), img->depth, 1);
		IplImage *img16s = cvCreateImage(cvGetSize(img), IPL_DEPTH_16S, 1);
		cvCvtColor(img, gray, CV_BGR2GRAY);

		// Sobel on the grayscale image, then back to 8 bits
		cvSobel(gray, img16s, isY, 1 - isY, aSize);
		cvConvertScaleAbs(img16s, dst, 1, 0);

		cvReleaseImage(&gray);
		cvReleaseImage(&img16s);
	}//end else

	cvShowImage(windowName, dst);
}
Beispiel #6
0
// Approximates the divergence of a gradient field: given the x component
// (srcarr_x) and y component (srcarr_y), computes 0.25 * (d(Nx)/dx) +
// 0.25 * (d(Ny)/dy) with aperture-1 Sobel kernels and writes the sum into
// dstarr.  All arrays must be CV_32FC1; the two sources must be equally sized.
CV_IMPL void cvCurvature(const CvArr* srcarr_x, 
                         const CvArr* srcarr_y,
                         CvArr* dstarr)
{
    CV_FUNCNAME("cvCurvature");
    
    __BEGIN__;
    
    CvMat sstub_x, sstub_y, *src_x, *src_y;
    CvMat dstub, *dst;
    CvSize size;
    CvMat *Nxx=0, *Nyy=0, *ones=0;
    
    CV_CALL( src_x = cvGetMat(srcarr_x, &sstub_x ));
    CV_CALL( src_y = cvGetMat(srcarr_y, &sstub_y ));
    CV_CALL( dst = cvGetMat(dstarr, &dstub ));
    
    if( CV_MAT_TYPE(src_x->type) != CV_32FC1)
        CV_ERROR( CV_StsUnsupportedFormat, "Only-32bit, 1-channel input images are supported" );
    
    if( CV_MAT_TYPE(src_y->type) != CV_32FC1)
        CV_ERROR( CV_StsUnsupportedFormat, "Only-32bit, 1-channel input images are supported" );
    
    if( CV_MAT_TYPE(dst->type) != CV_32FC1)
        CV_ERROR( CV_StsUnsupportedFormat, "Only-32bit, 1-channel input images are supported" );
    
    if( !CV_ARE_SIZES_EQ( src_x, src_y ))
        CV_ERROR( CV_StsUnmatchedSizes, "The input images must have the same size" );
    
    size = cvGetMatSize( src_x );
    Nxx = cvCreateMat(size.height, size.width, CV_32FC1 );
    Nyy = cvCreateMat(size.height, size.width, CV_32FC1 );
    ones= cvCreateMat(size.height, size.width, CV_32FC1 );
    cvSetZero(Nxx);
    cvSetZero(Nyy);
    cvSet(ones, cvScalar(1.0f));
    
    cvSobel(src_x, Nxx, 1, 0, 1);
    cvSobel(src_y, Nyy, 0, 1, 1);
    // multiplying by an all-ones matrix with scale 0.25 is just a scale by
    // 0.25 (aperture-1 kernel normalization); kept as written in the original
    cvMul(Nxx, ones, Nxx, 0.25f);
    cvMul(Nyy, ones, Nyy, 0.25f);
    cvAdd(Nxx, Nyy, dst);
    cvReleaseMat(&Nxx);
    cvReleaseMat(&Nyy);
    cvReleaseMat(&ones);
    
    __END__;
    
}
Beispiel #7
0
void radial_sample(int width, int height, char* data, IplImage *unwrapped, int slice)
{
	// Wraps the raw slice buffer in an IplImage header, casts RADIAL_SAMPLES
	// rays from the global center (cx, cy), Sobel-filters each sampled line,
	// and for up to MAX_LAYERS edge crossings per ray records the source
	// intensity at the strongest edge into row `slice` of `unwrapped`.
	// Fix: the `cvcast` header was never released (one leak per call); it is
	// now released at the end.
	IplImage *cvcast = cvCreateImageHeader(cvSize(width, height),
			IPL_DEPTH_8U, 1);
	cvcast->imageData = data;   // header only — pixel data stays caller-owned

	// cvSaveImage("slice.png",cvcast);
	
	CvPoint center = cvPoint(cx,cy);

	unsigned char* linebuf;
	for(int sample = 0; sample < RADIAL_SAMPLES; sample++) {
		float theta = ((float)sample)*((2.0*PI)/(float)RADIAL_SAMPLES);
		CvPoint outer = calc_ray_outer(theta, center);

		// clip the ray to the image and sample its pixels (4-connected)
		cvClipLine(cvSize(width, height), &outer, &center);
		int linesize = abs(center.x-outer.x)+abs(center.y-outer.y)+1;
		linebuf = (unsigned char*)malloc(linesize);
		cvSampleLine(cvcast,outer,center,linebuf,4);
		
		IplImage *castline = cvCreateImageHeader(cvSize(linesize,1), IPL_DEPTH_8U, 1);
		castline->imageData = (char*)linebuf;

		// NOTE(review): 8U destination for cvSobel of an 8U line — negative
		// responses saturate to 0; presumably intentional (only rising edges
		// are detected below), but confirm against the legacy build in use.
		IplImage *sobel = cvCreateImage(cvSize(linesize,1), IPL_DEPTH_8U, 1);

		cvSobel(castline, sobel, 1, 0, 3);

		// walk the 1-D edge response: each run above SOBEL_THRESH is one
		// "layer"; take the source pixel at the peak of that run
		int layer = 0;
		for(int i = 0; (i < linesize) && (layer < MAX_LAYERS); i++) {
			if((int)cvGetReal1D(sobel,i) > SOBEL_THRESH) {
				int max = 0, max_i = 0;
				for(; i < linesize; i++) {
					int curval = (int)cvGetReal1D(sobel,i);
					if(curval == 0) break;
					if(curval > max) {
						max = curval;
						max_i = i;
					}
				}
				cvSetReal2D(unwrapped,slice,(layer*RADIAL_SAMPLES)+sample,cvGetReal1D(castline,max_i));
				layer++;
			}
		}

		cvReleaseImageHeader(&castline);
		cvReleaseImage(&sobel);

		free(linebuf);
	}

	// previously leaked: header allocated at the top of the function
	cvReleaseImageHeader(&cvcast);
}
Beispiel #8
0
int main(){
    
    // Load a grayscale image, compute a mixed (dx=1, dy=1) 3x3 Sobel
    // response, rescale it to 8 bits and display both images.
    IplImage* src_image = cvLoadImage("/Users/ihong-gyu/MyProject/OpenCVTest/Lena.jpeg",0);
    // Fix: guard against a failed load — the original passed a null pointer
    // straight into cvGetSize and crashed.
    if (!src_image)
        return -1;

    // 16S edge buffer (Sobel of 8U needs a signed destination), 8U display copy
    IplImage* edge_image = cvCreateImage(cvGetSize(src_image), IPL_DEPTH_16S, 1);
    IplImage* dst_image = cvCreateImage(cvGetSize(src_image), IPL_DEPTH_8U, 1);
    
    //create windows
    cvNamedWindow("Original Image", CV_WINDOW_AUTOSIZE);
    cvNamedWindow("Edge Image", CV_WINDOW_AUTOSIZE);
    
    cvShowImage("Original Image", src_image);
    
    //catch edges with Sobel, then convert back to 8 bits for display
    cvSobel(src_image,edge_image,1,1,3);
    cvConvertScale(edge_image, dst_image,1,0);
    
    cvShowImage("Edge Image", dst_image);
    
    //wait for a key press
    cvWaitKey(0);
    
    //release memory
    cvReleaseImage(&src_image);
    cvReleaseImage(&edge_image);
    cvReleaseImage(&dst_image);
    
    
    return 0;
}
// Demonstration pipeline that ping-pongs between src and dst:
// Sobel x-derivative -> subtract a constant offset of 50 per channel ->
// scale by 2 -> erode -> dilate (a morphological opening).  Both images are
// overwritten; the final result ends up in dst.
// NOTE(review): cvSobel into dst assumes dst has a depth compatible with
// src (e.g. 16S for an 8U source) — allocation happens at the caller;
// confirm before reuse.
void opencv_image_filter(IplImage* src, IplImage* dst) {
    cvSobel(src, dst, 1, 0);
    cvSubS(dst, cvScalar(50,50,50), src);
    cvScale(src, dst, 2, 0);
    cvErode(dst, src);
    cvDilate(src, dst);
}
// Applies the configured Sobel filter (theXOrder / theYOrder /
// theApertureSize members) to every band of the input tile, writing the
// rescaled 8-bit result into the corresponding band of the member output
// tile `theTile`, then validates it.
// The IplImage headers wrap the tile buffers directly (no pixel copy).
void rspfOpenCVSobelFilter::runUcharTransformation(rspfImageData* tile) {
   
	IplImage *input;
	IplImage *output;

	char* bSrc;
	char* bDst;
	
	int nChannels = tile->getNumberOfBands();

	for(int k=0; k<nChannels; k++) {
		printf("Channel %d\n",k);
		// wrap the k-th source/destination band (depth 8 == IPL_DEPTH_8U)
		input=cvCreateImageHeader(cvSize(tile->getWidth(),tile->getHeight()),8,1);
		output=cvCreateImageHeader(cvSize(tile->getWidth(),tile->getHeight()),8,1);
		bSrc = static_cast<char*>(tile->getBuf(k));
		input->imageData=bSrc;
		bDst = static_cast<char*>(theTile->getBuf(k));
		output->imageData=bDst;
		// 16S scratch image: Sobel of an 8U band needs a signed destination.
		// (Created/released once per band; could be hoisted out of the loop.)
		IplImage * tmp = cvCreateImage(cvSize(tile->getWidth(),tile->getHeight()),IPL_DEPTH_16S,1);
		cvSobel(input,tmp,theXOrder,theYOrder,theApertureSize); 
		cvConvertScale(tmp,output);
		cvReleaseImageHeader(&input);
		cvReleaseImageHeader(&output);
		cvReleaseImage(&tmp);
	}

	theTile->validate(); 
}
Beispiel #11
0
int main (int argc, char **argv){
  IplImage *src_img, *dst_img, *tmp_img;

  // 画像の読み込み(グレースケールで読み込み)
  if (argc != 2 ||
	  (src_img = cvLoadImage (argv[1], CV_LOAD_IMAGE_GRAYSCALE)) == 0){
	fprintf(stderr,"Usage: $ %s img_file\n",argv[0]);
    return -1;
  }
  
  tmp_img = cvCreateImage(cvGetSize(src_img), IPL_DEPTH_16S, 1);
  dst_img = cvCreateImage(cvGetSize(src_img), IPL_DEPTH_8U,  1);

  // Sobelフィルタによる微分画像の作成
  cvSobel(src_img, tmp_img, 1, 0, 3);
  cvConvertScaleAbs(tmp_img, dst_img,1,0);

  // 画像の表示
  cvNamedWindow(SRC, CV_WINDOW_AUTOSIZE);
  cvShowImage(SRC, src_img);
  cvNamedWindow(DST, CV_WINDOW_AUTOSIZE);
  cvShowImage(DST, dst_img);

  // ユーザ入力待ち
  cvWaitKey(0);

  // 後始末
  cvDestroyWindow(SRC);
  cvDestroyWindow(DST);
  cvReleaseImage(&src_img);
  cvReleaseImage(&dst_img);
  cvReleaseImage(&tmp_img);

  return 0;
}
void FillHoleShiftMap::ComputeShiftMap2(IplImage* input, IplImage* saliency, CvSize output, CvSize shiftSize, MaskShift* maskShift)
{
	// Sets up the data and smoothness costs for the graph-cut hole-filling
	// problem and runs two expansion iterations on _gcGeneral.
	// Fix: the two cloned gradient images were leaked; they are now released
	// once the optimization has finished using them.
	try{
		_input = input;
		_shiftSize = shiftSize;

		IplImage* inputGradient = cvCloneImage(input);
		cvSobel(input, inputGradient, 1, 1);

		// first processing the mask (including neighborhood)	
		// ProcessMask();
		ProcessMask(maskShift);

		// setup data cost & smooth cost for masked data
		ForDataFH2 dataCost;
		dataCost.mask = maskShift;
		 
		// NOTE(review): maskDataGradient is computed but never consumed below —
		// confirm whether dataCost was meant to reference it.
		IplImage* maskDataGradient = cvCloneImage(_maskData);
		cvSobel(_maskData, maskDataGradient, 1, 1);
		 
		dataCost.pointMapping = _pointMapping;
		dataCost.shiftSize = shiftSize;
		dataCost.saliency = saliency;
		dataCost.inputSize = cvSize(input->width, input->height);
		dataCost.maskNeighbor = _maskNeighbor;
		dataCost.inputGradient = inputGradient;
		dataCost.input = input;
		_gcGeneral->setDataCost(&dataFunctionFH2, &dataCost);

		// setup smooth cost
		ForSmoothFH smoothCost;
		smoothCost.input = input;
		smoothCost.inputGradient = inputGradient;
		smoothCost.inputSize = cvSize(input->width, input->height);
		smoothCost.pointMapping = _pointMapping;
		smoothCost.shiftSize = shiftSize;
		_gcGeneral->setSmoothCost(&smoothFunctionFH, &smoothCost);

		printf("\nBefore optimization energy is %d \n", _gcGeneral->compute_energy());
		//gc->swap(20);
		_gcGeneral->expansion(2);// run expansion for 2 iterations. For swap use gc->swap(num_iterations);
		printf("\nAfter optimization energy is %d \n", _gcGeneral->compute_energy()); 

		// free the gradient scratch images (previously leaked); safe here
		// because the expansion above has completed
		cvReleaseImage(&maskDataGradient);
		cvReleaseImage(&inputGradient);
	}
	catch (GCException e){
		e.Report();
	}
}
Beispiel #13
0
void COpenCVMFCView::OnSobel()
{
	// TODO: Add your command handler code here

	IplImage* pImage;
	IplImage* pImgSobel = NULL;
	IplImage* pImgPlanes[3] = {0,0,0};
	int i;

	pImage = workImg;

	pImgSobel = cvCreateImage(cvGetSize(pImage),
		IPL_DEPTH_16S,1);   //  Create Working Image

	if (workImg->nChannels == 1) {            //  Handle Single Channel
		cvSobel(pImage,pImgSobel,1,1,3);
		cvConvertScaleAbs(pImgSobel,pImage, 1, 0 );
	}
	else {                                  //  Handle Triad Ones
		for (i = 0; i < 3; i++) {
			pImgPlanes[i] = cvCreateImage(cvGetSize(pImage),
				IPL_DEPTH_8U,1);    //  Create Sub Image
		}

		cvCvtPixToPlane(pImage,pImgPlanes[0],
			pImgPlanes[1],pImgPlanes[2],0);  //  Get Sub

		for (i = 0; i < 3; i++) {                 //  Handle Sub Independently
			cvSobel(pImgPlanes[i],pImgSobel,1,1,3);
			cvConvertScaleAbs(pImgSobel,pImgPlanes[i], 1, 0 );
		}

		cvCvtPlaneToPix(pImgPlanes[0],pImgPlanes[1],
			pImgPlanes[2],0,pImage);    //  Form Color Image From Sub Images

		for (i = 0; i < 3; i++) {
			cvReleaseImage(&pImgPlanes[i]);  //  Release Sub Image
		}
	}

	cvReleaseImage(&pImgSobel);             //  Release Working Image

	Invalidate();
}
void detectBox(IplImage *InputImage,CvRect iRect)
{
	// Computes the x and y 3x3 Sobel gradients of the grayscale version of
	// the input (restricted to iRect via ROI) and displays them.
	// Fixes: ROI, GrayImage, Ang and Magnitude were all leaked; every
	// temporary image is now released.
	// NOTE(review): cvSetImageROI is never reset, so InputImage keeps the ROI
	// after this call — confirm callers expect that.
	
	cvSetImageROI(InputImage,iRect);
	IplImage *ROI=cvCreateImage(cvGetSize(InputImage),InputImage->depth,InputImage->nChannels);
	cvCopy(InputImage,ROI,NULL);
	IplImage *GrayImage=cvCreateImage(cvGetSize(InputImage),InputImage->depth,1);
	
	cvCvtColor(InputImage,GrayImage,CV_RGB2GRAY);
	IplImage *X_gradient=cvCreateImage(cvGetSize(InputImage),InputImage->depth,1);
	IplImage *Y_gradient=cvCreateImage(cvGetSize(InputImage),InputImage->depth,1);
	IplImage *Ang=cvCreateImage(cvGetSize(InputImage),InputImage->depth,1);
	IplImage *Magnitude=cvCreateImage(cvGetSize(InputImage),InputImage->depth,1);

	cvSobel(GrayImage,X_gradient,1,0,3);
	cvSobel(GrayImage,Y_gradient,0,1,3);
	//cvCartToPolar(X_gradient,Y_gradient,Magnitude,Ang);

	cvShowImage("X_gradient",X_gradient);
	cvShowImage("Y_gradient",Y_gradient);

	cvReleaseImage(&X_gradient);
	cvReleaseImage(&Y_gradient);
	cvReleaseImage(&Ang);         // previously leaked
	cvReleaseImage(&Magnitude);   // previously leaked
	cvReleaseImage(&GrayImage);   // previously leaked
	cvReleaseImage(&ROI);         // previously leaked (copy is never used)
}
/* Per-frame transform for the GStreamer Sobel element: runs cvSobel on the
 * incoming frame with the element's configured derivative orders and
 * aperture size, writing straight into the output frame.
 * NOTE(review): img and outimg are assumed to have depths compatible with
 * cvSobel (e.g. 8U source -> 16S destination), set up elsewhere in the
 * element — confirm against the caps negotiation code. */
static GstFlowReturn
gst_cv_sobel_transform (GstOpencvVideoFilter * base, GstBuffer * buf,
    IplImage * img, GstBuffer * outbuf, IplImage * outimg)
{
  GstCvSobel *filter = GST_CV_SOBEL (base);

  cvSobel (img, outimg, filter->x_order, filter->y_order,
      filter->aperture_size);

  return GST_FLOW_OK;
}
// Applies cvSobel with the given derivative orders and aperture to image `a`
// and wraps the result in a new image object.
// NOTE(review): this releases both `src` (obtained from a.ipl()) and `dst`
// (already handed to the create_new wrapper).  That is only safe if a.ipl()
// returns a fresh header/copy and create_new deep-copies its argument —
// verify against the image<> implementation, otherwise this double-frees.
typename image<T, D>::create_new sobel(const image<T, D>& a,
	int y_order, int x_order, int aperture_size)
{
	IplImage* src = a.ipl();
	IplImage* dst = cvCreateImage(cvGetSize(src),
		image_details::ipl_depth<T>(), (int)a.channels());
	cvSobel(src, dst, x_order, y_order, aperture_size);
	typename image<T, D>::create_new r(dst);
	cvReleaseImage(&src);
	cvReleaseImage(&dst);
	return r;
}
/*
*The Sobel filter approximates a derivative; it can apply first- or second-order derivatives along either coordinate of an image.
*The operator first applies a Gaussian-like smoothing to reduce noise,
*then computes the derivative of the smoothed image.
*@param input, the input image; may be grayscale or RGB
*@param output, the output image; must have at least a 16-bit-per-pixel representation to avoid overflow
*@param xOrder, the derivative order for the X axis
*@param yOrder, the derivative order for the Y axis
*@param apertureSize, size of the filter window; if CV_SCHARR is passed, the Scharr kernel (more accurate for small apertures) is used instead
*/
ImageImPro* OpenImProLib_OpenCvImpl::filterSobel(ImageImPro* ptrInput, int xOrder, int yOrder, int apertureSize){  
    // Runs cvSobel on the (grayscale version of the) input and returns the
    // absolute, 8-bit rescaled response wrapped in a new ImageImPro.
    IplImage* ptrCvInput = ptrInput->getOpenCvImage();
    // 32F buffer for the raw Sobel response; rescaled back to 8 bits below to
    // avoid overflow of the intermediate derivative values.
    IplImage* ptrCvTemp = cvCreateImage(cvGetSize(ptrCvInput),IPL_DEPTH_32F,1);
    IplImage* ptrCvOutput = cvCreateImage(cvGetSize(ptrCvInput), IPL_DEPTH_8U, 1);
    if(ptrInput->getChannels() != 1){
        // multi-channel input: convert to gray first (assumes RGB channel order)
        IplImage* ptrCvInputGray = cvCreateImage(cvSize(ptrCvInput->width,ptrCvInput->height),IPL_DEPTH_8U,1);
        cvCvtColor(ptrCvInput,ptrCvInputGray, CV_RGB2GRAY);
        cvSobel(ptrCvInputGray,ptrCvTemp, xOrder, yOrder, apertureSize);
        cvReleaseImage(&ptrCvInputGray);
    }
    else{
        cvSobel(ptrCvInput,ptrCvTemp, xOrder, yOrder, apertureSize);
    }
    cvConvertScaleAbs(ptrCvTemp, ptrCvOutput, 1, 0);
    // NOTE(review): the releases below assume ImageImPro_OpenCvImpl deep-copies
    // ptrCvOutput and that getOpenCvImage() hands over ownership of
    // ptrCvInput — otherwise memory still in use is freed.  Verify.
    ImageImPro* ptrOutput = new ImageImPro_OpenCvImpl(ptrCvOutput);
    cvReleaseImage(&ptrCvOutput);
    cvReleaseImage(&ptrCvInput);
    cvReleaseImage(&ptrCvTemp);
    return ptrOutput;
}
Beispiel #18
0
void Filters::Sobel(VRFrame* frame, int direction)
{
    // Applies a 3x3 Sobel derivative to the frame and installs the result:
    //   direction 0 -> d/dx, 1 -> d/dy, 2 -> both orders at once.
    // Unknown direction codes fall through with an unfiltered buffer,
    // exactly as the original switch did.
    IplImage* img_dst;

    Log::writeLog("%s :: param: frame[%x] direction[%d]", __FUNCTION__, frame, direction);

    img_dst = VRFrame::imgAlloc(cvGetSize(frame->data),frame->data->depth,frame->data->nChannels);

    if (direction >= 0 && direction <= 2)
    {
        // Map the direction code onto cvSobel's (dx, dy) order pair.
        const int dx = (direction == 1) ? 0 : 1;
        const int dy = (direction == 0) ? 0 : 1;

        Log::writeLog("%s :: cvSobel : frame_src[%x] frame_dst[%d] dx[%d] dy[%d] apertureSize[%d]",
            __FUNCTION__, frame->data, img_dst, dx, dy, 3);

        cvSobel(frame->data,img_dst,dx,dy,3);
    }

    frame->setImage(img_dst);

}
Beispiel #19
0
void compute_vertical_edge_image(IplImage* input_image, IplImage* output_image)
{
	// Computes the x partial-derivative (vertical-edge) image of input_image,
	// applies per-row non-maxima suppression, binarizes the surviving edges
	// and writes the result back to output_image as BGR.
	// Fix: all three temporary images were leaked; they are now released.

	IplImage * tmp = cvCreateImage( 
		cvGetSize(input_image)
		, IPL_DEPTH_8U
		, 1);
	IplImage * first_derivative_img = cvCreateImage(
		cvGetSize(input_image)
		, IPL_DEPTH_16S
		, 1
		);
	IplImage * non_maxima_suppressed_img = cvCloneImage(tmp);
	cvZero(non_maxima_suppressed_img);
	cvCvtColor(input_image, tmp, CV_BGR2GRAY);
	cvSmooth(tmp, tmp,CV_GAUSSIAN, 3, 3);          // denoise before differentiating
	cvSobel(tmp, first_derivative_img, 1, 0, 3);   // d/dx -> vertical edges
	cvConvertScaleAbs(first_derivative_img, tmp);
	cvThreshold(tmp, tmp, 220, 0, CV_THRESH_TOZERO);

	// non-maximum suppression along each row: keep a pixel only if it is a
	// local maximum with respect to its left/right neighbours (rows are
	// independent because only vertical edges are of interest)
	int width_step = tmp->widthStep;
	int pixel_step = tmp->widthStep/tmp->width;
	
	for(int row = 0; row<tmp->height;++row)
	{
		for(int col = 1; col < tmp->width-1;++col)
		{
			unsigned char* curr_point = GETPIXELPTRMACRO(tmp, col, row, width_step, pixel_step);
			unsigned char* pre_point = GETPIXELPTRMACRO(tmp, col-1, row, width_step, pixel_step);
			unsigned char* post_point = GETPIXELPTRMACRO(tmp, col+1, row, width_step, pixel_step);
			unsigned char* non_maxima_suppressed_img_point = GETPIXELPTRMACRO(non_maxima_suppressed_img, col, row, width_step, pixel_step);
			if( (*curr_point) <= (*pre_point)
				|| (*curr_point) < (*post_point) )
			{
				(*non_maxima_suppressed_img_point) = 0;
			}
			else
			{
				(*non_maxima_suppressed_img_point) = *(curr_point);
			}
		}
	}
	cvThreshold(non_maxima_suppressed_img, non_maxima_suppressed_img, 200, 255, CV_THRESH_BINARY);
	cvCvtColor(non_maxima_suppressed_img, output_image, CV_GRAY2BGR);

	// release the temporaries (previously leaked on every call)
	cvReleaseImage(&non_maxima_suppressed_img);
	cvReleaseImage(&first_derivative_img);
	cvReleaseImage(&tmp);
}
Beispiel #20
0
// Per-frame processing entry point: bails out until the module reports
// ready, reads the frame parameters, and applies the high-pass (Sobel)
// stage when it is enabled.
// NOTE(review): cvSobel/cvReadFrameParams here take (CamFrame, CvResult) —
// these are this module's own wrappers, not OpenCV's cvSobel.
void cvProcessFrame(CamFrame frame, CvResult info) {    

    if(!is_ready) { return; } // Module readiness quick fail       

    cvReadFrameParams(frame, info);     
    //cvRotateFrame(frame, -attGetYawBAMS());
    
    if(high_pass_on) {
        cvSobel(frame, info);
        //cvBinary(frame, info);
    }
        
    
}
IplImage* Zooming::SobelEnergy::GetEnergyImage(IplImage* input)
{ 
	// Builds a 16U single-channel "energy" map: for every pixel, the sum of
	// the channel-averaged absolute x and y Sobel responses (orders taken
	// from the _xOrder/_yOrder members).  Caller owns the returned image.
	// Fix: dstX and dstY were leaked; they are now released before returning.
	IplImage* dstX = cvCloneImage(input);	
	IplImage* dstY = cvCloneImage(input);
	cvSobel(input, dstY, 0, _yOrder);
	cvSobel(input, dstX, 1, _xOrder);

	int width = input->width;
	int height = input->height;

	// average the 3 channels down to 1
	IplImage* energy = cvCreateImage(cvSize(width, height), IPL_DEPTH_16U, 1);

	for(int i = 0; i < height; i++)
	{
		for(int j = 0; j < width; j++)
		{
			CvScalar inputPixelX = cvGet2D(dstX, i, j); 
			CvScalar inputPixelY = cvGet2D(dstY, i, j);
 
			// NOTE(review): abs() on a double sum may resolve to the integer
			// overload depending on includes — kept exactly as the original.
			CvScalar outputX = cvScalar( abs(inputPixelX.val[0] + inputPixelX.val[1] + inputPixelX.val[2]) / 3);
			CvScalar outputY = cvScalar( abs(inputPixelY.val[0] + inputPixelY.val[1] + inputPixelY.val[2]) / 3);
			
			CvScalar output;
			output.val[0] = outputX.val[0] + outputY.val[0];

			cvSet2D(energy, i, j, output);
		}
	}

	// previously leaked scratch images
	cvReleaseImage(&dstX);
	cvReleaseImage(&dstY);
	return energy;
}
void ShiftMapHierarchy::ComputeShiftMapGuess(IplImage* input, IplImage* saliency, CvMat* initialGuess, CvSize output, CvSize shiftSize)
{	
	// Configures the grid graph-cut (data + smoothness costs seeded by an
	// initial guess) and runs two expansion iterations on _gc.
	// Fix: the gradient image cloned for the smoothness term was leaked; it
	// is now released after the optimization has finished with it.
	try{
		_inputSize.width = input->width;
		_inputSize.height = input->height;
		_outputSize = output;
		_shiftSize = shiftSize;
		_input = input; 
		_initialGuess = initialGuess;
			
		_gc = new GCoptimizationGridGraph(_outputSize.width, _outputSize.height, _shiftSize.width * _shiftSize.height);

		// set up the needed data to pass to function for the data costs
		ForDataFunctionH dataFn;
		dataFn.inputSize = _inputSize;
		dataFn.outputSize = _outputSize;
		dataFn.shiftSize = _shiftSize;
		dataFn.saliency = saliency;
		dataFn.initialGuess = initialGuess;

		_gc->setDataCost(&dataFunctionShiftmapH,&dataFn);
		
		// smoothness comes from function pointer over image + its gradient
		ForSmoothFunctionH smoothFn;
		smoothFn.inputSize = _inputSize;
		smoothFn.outputSize = _outputSize;
		smoothFn.shiftSize = _shiftSize;
		smoothFn.image = input;
		smoothFn.gradient = cvCloneImage(input);
		smoothFn.initialGuess = initialGuess;
		cvSobel(input, smoothFn.gradient, 1, 1);	 
		_gc->setSmoothCost(&smoothFunctionShiftmapH, &smoothFn);
		
		printf("\nBefore optimization energy is %d \n", _gc->compute_energy());
		//gc->swap(20);
		_gc->expansion(2);// run expansion for 2 iterations. For swap use gc->swap(num_iterations);
		printf("\nAfter optimization energy is %d \n", _gc->compute_energy()); 	

		// free the gradient scratch image (previously leaked); safe here
		// because the expansion above has completed
		cvReleaseImage(&smoothFn.gradient);
	}
	catch (GCException e){
		e.Report();
	}

}
Beispiel #23
0
/* Per-frame transform for the GStreamer Sobel element: converts the RGB
 * input to gray, runs cvSobel with the configured orders/aperture into the
 * element's scratch image, then either (mask mode) shows the original
 * pixels only where edges were detected, or converts the edge image back
 * to RGB for output. */
static GstFlowReturn
gst_cv_sobel_transform (GstOpencvVideoFilter * base, GstBuffer * buf,
    IplImage * img, GstBuffer * outbuf, IplImage * outimg)
{
  GstCvSobel *filter = GST_CV_SOBEL (base);

  cvCvtColor (img, filter->cvGray, CV_RGB2GRAY);
  cvSobel (filter->cvGray, filter->cvSobel, filter->x_order, filter->y_order,
      filter->aperture_size);

  cvZero (outimg);
  if (filter->mask) {
    /* pass the original image through, masked by the edge response */
    cvCopy (img, outimg, filter->cvSobel);
  } else {
    cvCvtColor (filter->cvSobel, outimg, CV_GRAY2RGB);
  }

  return GST_FLOW_OK;
}
/* Per-frame transform (GStreamer 1.x variant): gray conversion + cvSobel
 * into the element's scratch images, then the RGB result is memcpy'd into
 * the mapped output buffer.
 * Fix: the original called gst_buffer_map without a matching
 * gst_buffer_unmap, leaking the buffer mapping on every frame; the mapping
 * is now unmapped (and a failed map skips the copy). */
static GstFlowReturn
gst_cv_sobel_transform (GstOpencvVideoFilter * base, GstBuffer * buf,
    IplImage * img, GstBuffer * outbuf, IplImage * outimg)
{
  GstCvSobel *filter = GST_CV_SOBEL (base);
  GstMapInfo out_info;

  cvCvtColor (img, filter->cvGray, CV_RGB2GRAY);
  cvSobel (filter->cvGray, filter->cvSobel, filter->x_order, filter->y_order,
      filter->aperture_size);

  cvZero (filter->cvCSobel);
  if (filter->mask) {
    /* pass the original image through, masked by the edge response */
    cvCopy (img, filter->cvCSobel, filter->cvSobel);
  } else {
    cvCvtColor (filter->cvSobel, filter->cvCSobel, CV_GRAY2RGB);
  }

  if (gst_buffer_map (outbuf, &out_info, GST_MAP_WRITE)) {
    memcpy (out_info.data, filter->cvCSobel->imageData,
        gst_buffer_get_size (outbuf));
    gst_buffer_unmap (outbuf, &out_info);
  }

  return GST_FLOW_OK;
}
Beispiel #25
0
// Computes the gradient orientation of a motion-history image (MHI).
// For every pixel, `orientation` receives the gradient angle in degrees and
// `maskimg` is set to 1 where the orientation is valid: the gradient must be
// non-negligible and the local min/max MHI difference (over an
// aperture-sized neighborhood) must lie within [delta1, delta2].
// mhi and orientation must be CV_32FC1; mask must be an 8-bit mask array.
CV_IMPL void
cvCalcMotionGradient( const CvArr* mhiimg, CvArr* maskimg,
                      CvArr* orientation,
                      double delta1, double delta2,
                      int aperture_size )
{
    // dX_min/dY_max start as the Sobel derivative buffers and are later
    // reused as the erode/dilate (local min/max) scratch buffers.
    cv::Ptr<CvMat> dX_min, dY_max;

    CvMat  mhistub, *mhi = cvGetMat(mhiimg, &mhistub);
    CvMat  maskstub, *mask = cvGetMat(maskimg, &maskstub);
    CvMat  orientstub, *orient = cvGetMat(orientation, &orientstub);
    CvMat  dX_min_row, dY_max_row, orient_row, mask_row;
    CvSize size;
    int x, y;

    // threshold below which a gradient is treated as zero; scales with the
    // aperture because larger kernels produce larger responses
    float  gradient_epsilon = 1e-4f * aperture_size * aperture_size;
    float  min_delta, max_delta;

    if( !CV_IS_MASK_ARR( mask ))
        CV_Error( CV_StsBadMask, "" );

    if( aperture_size < 3 || aperture_size > 7 || (aperture_size & 1) == 0 )
        CV_Error( CV_StsOutOfRange, "aperture_size must be 3, 5 or 7" );

    if( delta1 <= 0 || delta2 <= 0 )
        CV_Error( CV_StsOutOfRange, "both delta's must be positive" );

    if( CV_MAT_TYPE( mhi->type ) != CV_32FC1 || CV_MAT_TYPE( orient->type ) != CV_32FC1 )
        CV_Error( CV_StsUnsupportedFormat,
        "MHI and orientation must be single-channel floating-point images" );

    if( !CV_ARE_SIZES_EQ( mhi, mask ) || !CV_ARE_SIZES_EQ( orient, mhi ))
        CV_Error( CV_StsUnmatchedSizes, "" );

    if( orient->data.ptr == mhi->data.ptr )
        CV_Error( CV_StsInplaceNotSupported, "orientation image must be different from MHI" );

    // normalize the delta interval so delta1 <= delta2
    if( delta1 > delta2 )
    {
        double t;
        CV_SWAP( delta1, delta2, t );
    }

    size = cvGetMatSize( mhi );
    min_delta = (float)delta1;
    max_delta = (float)delta2;
    dX_min = cvCreateMat( mhi->rows, mhi->cols, CV_32F );
    dY_max = cvCreateMat( mhi->rows, mhi->cols, CV_32F );

    // calc Dx and Dy
    cvSobel( mhi, dX_min, 1, 0, aperture_size );
    cvSobel( mhi, dY_max, 0, 1, aperture_size );
    // row headers re-pointed at each image row inside the loops below
    cvGetRow( dX_min, &dX_min_row, 0 );
    cvGetRow( dY_max, &dY_max_row, 0 );
    cvGetRow( orient, &orient_row, 0 );
    cvGetRow( mask, &mask_row, 0 );

    // calc gradient
    for( y = 0; y < size.height; y++ )
    {
        dX_min_row.data.ptr = dX_min->data.ptr + y*dX_min->step;
        dY_max_row.data.ptr = dY_max->data.ptr + y*dY_max->step;
        orient_row.data.ptr = orient->data.ptr + y*orient->step;
        mask_row.data.ptr = mask->data.ptr + y*mask->step;
        // angle in degrees (last arg = 1) of each (dx, dy) pair
        cvCartToPolar( &dX_min_row, &dY_max_row, 0, &orient_row, 1 );

        // make orientation zero where the gradient is very small
        for( x = 0; x < size.width; x++ )
        {
            float dY = dY_max_row.data.fl[x];
            float dX = dX_min_row.data.fl[x];

            if( fabs(dX) < gradient_epsilon && fabs(dY) < gradient_epsilon )
            {
                mask_row.data.ptr[x] = 0;
                orient_row.data.i[x] = 0;
            }
            else
                mask_row.data.ptr[x] = 1;
        }
    }

    // reuse the derivative buffers: local minimum / maximum of the MHI over
    // the aperture-sized neighborhood
    cvErode( mhi, dX_min, 0, (aperture_size-1)/2);
    cvDilate( mhi, dY_max, 0, (aperture_size-1)/2);

    // mask off pixels which have little motion difference in their neighborhood
    for( y = 0; y < size.height; y++ )
    {
        dX_min_row.data.ptr = dX_min->data.ptr + y*dX_min->step;
        dY_max_row.data.ptr = dY_max->data.ptr + y*dY_max->step;
        mask_row.data.ptr = mask->data.ptr + y*mask->step;
        orient_row.data.ptr = orient->data.ptr + y*orient->step;
        
        for( x = 0; x < size.width; x++ )
        {
            // d0 = local max - local min of the MHI around this pixel
            float d0 = dY_max_row.data.fl[x] - dX_min_row.data.fl[x];

            if( mask_row.data.ptr[x] == 0 || d0 < min_delta || max_delta < d0 )
            {
                mask_row.data.ptr[x] = 0;
                orient_row.data.i[x] = 0;
            }
        }
    }
}
Beispiel #26
0
// Builds a geometric match model (edge point coordinates, X/Y derivatives and
// inverse gradient magnitudes) from a template image.
// NOTE(review): the entire model-building implementation below is compiled
// out with "#if 0", so at present this function only validates the template
// type and ALWAYS returns 0 (on both the valid and invalid path).
int opencv::CreateGeoMatchModel(const Mat& templateArr, double maxContrast, double minContrast)
{
	// Only 8-bit single-channel templates are supported.
	if (CV_8UC1 != templateArr.type())
	{
		return 0;
	}
#if 0
	// NOTE(review): this disabled block mixes cv::Mat objects (src, gx, gy,
	// nmsEdges) with legacy C-API style access (gx->data.ptr, cvSobel on a
	// Mat, cvReleaseMat) and would not compile if re-enabled without porting
	// it consistently to one API.

	Mat src = templateArr.clone();
	CvSize Ssize;

	// Convert IplImage to Matrix for integer operations
	//CvMat srcstub, *src = (CvMat*)templateArr;
  


	// set width and height
	Ssize.width = src.cols;
	Ssize.height = src.rows;
	modelHeight = src.rows;		//Save Template height
	modelWidth = src.cols;			//Save Template width

	noOfCordinates = 0;											//initialize
	// NOTE(review): the four buffers below are allocated with new[] but
	// freed with plain delete - undefined behavior; must be delete[] if this
	// code is ever re-enabled.
	if (cordinates) delete cordinates;
	cordinates = new Point[modelWidth *modelHeight];		//Allocate memory for coorinates of selected points in template image

	if (edgeMagnitude) delete edgeMagnitude;
	edgeMagnitude = new double[modelWidth *modelHeight];		//Allocate memory for edge magnitude for selected points

	if (edgeDerivativeX) delete edgeDerivativeX;
	edgeDerivativeX = new double[modelWidth *modelHeight];			//Allocate memory for edge X derivative for selected points

	if (edgeDerivativeY) delete edgeDerivativeY;
	edgeDerivativeY = new double[modelWidth *modelHeight];			////Allocate memory for edge Y derivative for selected points


	// Calculate gradient of Template
	Mat gx(Ssize.height, Ssize.width, CV_16SC1);		//create Matrix to store X derivative
	Mat gy(Ssize.height, Ssize.width, CV_16SC1);		//create Matrix to store Y derivative
	cvSobel(src, gx, 1, 0, 3);		//gradient in X direction			
	cvSobel(src, gy, 0, 1, 3);	//gradient in Y direction

	Mat nmsEdges(Ssize.height, Ssize.width, CV_32F);		//create Matrix to store Final nmsEdges
	const short* _sdx;
	const short* _sdy;
	double fdx, fdy;
	double MagG, DirG;
	double MaxGradient = -99999.99;
	double direction;

	int *orients = new int[Ssize.height *Ssize.width];
	int count = 0, i, j; // count variable;

	double **magMat;
	CreateDoubleMatrix(magMat, Ssize);

	// Pass 1: per-pixel gradient magnitude and direction quantized to
	// {0, 45, 90, 135} degrees; also tracks the maximum magnitude for
	// normalization.
	for (i = 1; i < Ssize.height - 1; i++)
	{
		for (j = 1; j < Ssize.width - 1; j++)
		{
			_sdx = (short*)(gx->data.ptr + gx->step*i);
			_sdy = (short*)(gy->data.ptr + gy->step*i);
			fdx = _sdx[j]; fdy = _sdy[j];        // read x, y derivatives

			MagG = sqrt((float)(fdx*fdx) + (float)(fdy*fdy)); //Magnitude = Sqrt(gx^2 +gy^2)
			direction = cvFastArctan((float)fdy, (float)fdx);	 //Direction = invtan (Gy / Gx)
			magMat[i][j] = MagG;

			if (MagG > MaxGradient)
				MaxGradient = MagG; // get maximum gradient value for normalizing.


			// get closest angle from 0, 45, 90, 135 set
			if ((direction>0 && direction < 22.5) || (direction >157.5 && direction < 202.5) || (direction>337.5 && direction < 360))
				direction = 0;
			else if ((direction > 22.5 && direction < 67.5) || (direction >202.5 && direction <247.5))
				direction = 45;
			else if ((direction >67.5 && direction < 112.5) || (direction>247.5 && direction<292.5))
				direction = 90;
			else if ((direction >112.5 && direction < 157.5) || (direction>292.5 && direction < 337.5))
				direction = 135;
			else
				direction = 0;

			orients[count] = (int)direction;
			count++;
		}
	}

	count = 0; // init count
	// non maximum suppression
	double leftPixel, rightPixel;

	// Pass 2: non-maximum suppression - keep a pixel only if its magnitude
	// is not smaller than both neighbors along its quantized direction;
	// surviving pixels are normalized to 0..255 by MaxGradient.
	for (i = 1; i < Ssize.height - 1; i++)
	{
		for (j = 1; j < Ssize.width - 1; j++)
		{
			switch (orients[count])
			{
			case 0:
				leftPixel = magMat[i][j - 1];
				rightPixel = magMat[i][j + 1];
				break;
			case 45:
				leftPixel = magMat[i - 1][j + 1];
				rightPixel = magMat[i + 1][j - 1];
				break;
			case 90:
				leftPixel = magMat[i - 1][j];
				rightPixel = magMat[i + 1][j];
				break;
			case 135:
				leftPixel = magMat[i - 1][j - 1];
				rightPixel = magMat[i + 1][j + 1];
				break;
			}
			// compare current pixels value with adjacent pixels
			if ((magMat[i][j] < leftPixel) || (magMat[i][j] < rightPixel))
				(nmsEdges->data.ptr + nmsEdges->step*i)[j] = 0;
			else
				(nmsEdges->data.ptr + nmsEdges->step*i)[j] = (uchar)(magMat[i][j] / MaxGradient * 255);

			count++;
		}
	}


	int RSum = 0, CSum = 0;
	int curX, curY;
	int flag = 1;

	//Hysterisis threshold
	// Pass 3: hysteresis thresholding (keep pixels >= maxContrast, keep
	// pixels >= minContrast only if an 8-neighbor is >= maxContrast) and
	// collect the surviving edge points.
	// NOTE(review): unlike the other passes, this inner loop runs j up to
	// Ssize.width - 1 inclusive and then reads column j + 1 in the neighbor
	// checks - out-of-bounds on the last column.
	for (i = 1; i < Ssize.height - 1; i++)
	{
		for (j = 1; j < Ssize.width; j++)
		{
			_sdx = (short*)(gx->data.ptr + gx->step*i);
			_sdy = (short*)(gy->data.ptr + gy->step*i);
			fdx = _sdx[j]; fdy = _sdy[j];

			MagG = sqrt(fdx*fdx + fdy*fdy); //Magnitude = Sqrt(gx^2 +gy^2)
			DirG = cvFastArctan((float)fdy, (float)fdx);	 //Direction = tan(y/x)

			////((uchar*)(imgGDir->imageData + imgGDir->widthStep*i))[j]= MagG;
			flag = 1;
			if (((double)((nmsEdges->data.ptr + nmsEdges->step*i))[j]) < maxContrast)
			{
				if (((double)((nmsEdges->data.ptr + nmsEdges->step*i))[j]) < minContrast)
				{

					(nmsEdges->data.ptr + nmsEdges->step*i)[j] = 0;
					flag = 0; // remove from edge
					////((uchar*)(imgGDir->imageData + imgGDir->widthStep*i))[j]=0;
				}
				else
				{   // if any of 8 neighboring pixel is not greater than max contraxt remove from edge
					if ((((double)((nmsEdges->data.ptr + nmsEdges->step*(i - 1)))[j - 1]) < maxContrast) &&
						(((double)((nmsEdges->data.ptr + nmsEdges->step*(i - 1)))[j]) < maxContrast) &&
						(((double)((nmsEdges->data.ptr + nmsEdges->step*(i - 1)))[j + 1]) < maxContrast) &&
						(((double)((nmsEdges->data.ptr + nmsEdges->step*i))[j - 1]) < maxContrast) &&
						(((double)((nmsEdges->data.ptr + nmsEdges->step*i))[j + 1]) < maxContrast) &&
						(((double)((nmsEdges->data.ptr + nmsEdges->step*(i + 1)))[j - 1]) < maxContrast) &&
						(((double)((nmsEdges->data.ptr + nmsEdges->step*(i + 1)))[j]) < maxContrast) &&
						(((double)((nmsEdges->data.ptr + nmsEdges->step*(i + 1)))[j + 1]) < maxContrast))
					{
						(nmsEdges->data.ptr + nmsEdges->step*i)[j] = 0;
						flag = 0;
						////((uchar*)(imgGDir->imageData + imgGDir->widthStep*i))[j]=0;
					}
				}

			}

			// save selected edge information
			curX = i;	curY = j;
			if (flag != 0)
			{
				if (fdx != 0 || fdy != 0)
				{
					RSum = RSum + curX;	CSum = CSum + curY; // Row sum and column sum for center of gravity

					cordinates[noOfCordinates].x = curX;
					cordinates[noOfCordinates].y = curY;
					edgeDerivativeX[noOfCordinates] = fdx;
					edgeDerivativeY[noOfCordinates] = fdy;

					//handle divide by zero
					if (MagG != 0)
						edgeMagnitude[noOfCordinates] = 1 / MagG;  // gradient magnitude 
					else
						edgeMagnitude[noOfCordinates] = 0;

					noOfCordinates++;
				}
			}
		}
	}

	// NOTE(review): division by zero if no edge point survived hysteresis
	// (noOfCordinates == 0).
	centerOfGravity.x = RSum / noOfCordinates; // center of gravity
	centerOfGravity.y = CSum / noOfCordinates;	// center of gravity

	// change coordinates to reflect center of gravity
	for (int m = 0; m < noOfCordinates; m++)
	{
		int temp;

		temp = cordinates[m].x;
		cordinates[m].x = temp - centerOfGravity.x;
		temp = cordinates[m].y;
		cordinates[m].y = temp - centerOfGravity.y;
	}

	////cvSaveImage("Edges.bmp",imgGDir);

	// free alocated memories
	delete[] orients;
	////cvReleaseImage(&imgGDir);
	cvReleaseMat(&gx);
	cvReleaseMat(&gy);
	cvReleaseMat(&nmsEdges);

	ReleaseDoubleMatrix(magMat, Ssize.height);

	modelDefined = true;

	
#endif
	return 0;

}
Beispiel #27
0
int main( int argc, char** argv ) {
	if(argc != 2) {
		printf("Usage: ./image <image name>\n");
		return 0;
	}

	// Show original image
	IplImage* img = cvLoadImage( argv[1], 0 );
	cvNamedWindow( "Input", CV_WINDOW_AUTOSIZE );
	cvShowImage( "Input", img );
	printf("Input depth: %x\n", img->depth); // 8U

	// Run Sobel edge detection
	IplImage *res = cvCreateImage( cvGetSize(img), IPL_DEPTH_8U, 1 );
#ifdef USE_OPENCV
	// OpenCV implementation
	// Convert images to 16SC1
	IplImage *out = cvCreateImage( cvGetSize(img), IPL_DEPTH_16S, 1 );
	cvSobel( img, out, 1, 0 );
	cvConvertScaleAbs(out, res); // need 8U again
	cvReleaseImage( &out );
#else
	// Own implementation of Sobel kernel
	int H = img->height, W = img->width, WS = img->widthStep;
	uint8_t *srcdata = (uint8_t *) img->imageData;
	uint8_t *dstdata = (uint8_t *) res->imageData;

#define ind(i,j) ((i)*WS+(j))

	// work on bulk of image
	for(int i = 1; i+1 < H; i++) {
		for(int j = 1; j+1 < W; j++) {
			// dx kernel
			// -1 0 1
			// -2 0 2
			// -1 0 1
			int16_t value = - srcdata[ind(i-1,j-1)]
							+ srcdata[ind(i-1,j+1)]
							- srcdata[ind(i,j-1)] * 2
							+ srcdata[ind(i,j+1)] * 2
							- srcdata[ind(i+1,j-1)]
							+ srcdata[ind(i+1,j+1)];
			if(value < 0)
				value = 0;
			if(value > 255)
				value = 255;
			dstdata[ind(i,j)] = value;
		}
	}

	// work on left and right edges (not accurate)
	for(int i = 0; i < H; i++) {
		dstdata[ind(i,0)] = 0;
		dstdata[ind(i,W-1)] = 0;
	}

	// work on top and bottom edges (not accurate)
	for(int j = 0; j < W; j++) {
		dstdata[ind(0,j)] = 0;
		dstdata[ind(H-1,j)] = 0;
	}

	// debugging
	for(int j = 0; j < 10; j++)
		printf("%d ", dstdata[ind(1, j)]);
	printf("\n");

#undef ind

#endif

	// Display
	cvNamedWindow( "Output", CV_WINDOW_AUTOSIZE );
	cvShowImage( "Output", res );

	// Cleanup
	cvWaitKey(0);
	cvReleaseImage( &img );
	cvReleaseImage( &res );
	cvDestroyWindow( "Input" );
	cvDestroyWindow( "Output" );

	return 0;
}
/**
 * Locates the iris inside the given eye region of interest using an
 * ellipse Hough transform over Canny edge pixels and their Sobel
 * gradient directions.  On success, draws the winning ellipse onto
 * `image`, stores its centre in m_irisCentre (in full-image coordinates)
 * and returns true; returns false otherwise.
 */
bool IrisFinderHough::Find(IplImage* image, CvRect eyeROI)
{
	if (!ParametersValid())
		return false;
	if (m_sizeData.SizeChanged(eyeROI))
		PrepareImage(eyeROI);

	// Scratch images for the horizontal/vertical Sobel responses; 16S
	// because a 3x3 Sobel on 8-bit input can leave the [0,255] range.
	IplImage* imgSobelH = cvCreateImage(cvSize(eyeROI.width, eyeROI.height), IPL_DEPTH_16S, 1);
	IplImage* imgSobelV = cvCreateImage(cvSize(eyeROI.width, eyeROI.height), IPL_DEPTH_16S, 1);

	// copy roi to internal image
	ImgLib::CopyRect(image, m_eyeImg, eyeROI, cvPoint(0, 0));
	cvSobel(m_eyeImg, imgSobelH, 1, 0, 3);
	cvSobel(m_eyeImg, imgSobelV, 0, 1, 3);

	double angle;
	double dx, dy;
	double thetaRad;
	double xPrim, yPrim;
	double xsi;
	double max_e = 2.2;	// maximum accepted semi-axis ratio (eccentricity cap)

	// 5-D accumulator over centre (x0, y0), rotation theta and semi-axes a, b.
	HoughAccumulator acc(m_accPrecision);

	acc.AddParam(0, m_eyeImg->width);	// x0
	acc.AddParam(0, m_eyeImg->height);	// x1
	acc.AddParam(m_thetaMin, m_thetaMax);		// theta
	acc.AddParam(m_aMin, m_aMax);				// a
	acc.AddParam(m_bMin, m_bMax);				// b
	acc.Init();

	DOUBLEVECT indices;
	indices.resize(5);

	// Build the edge map: smooth to suppress noise, then Canny.
	cvSmooth(m_eyeImg, m_eyeImg);
	cvCanny(m_eyeImg, m_eyeImg, 250, 100);

	for (int y = 0; y < m_eyeImg->height; y++)
	{
		short* sh_row = (short*)(imgSobelH->imageData + y * imgSobelH->widthStep);
		short* sv_row = (short*)(imgSobelV->imageData + y * imgSobelV->widthStep);
		uchar* canny_row = (uchar *)(m_eyeImg->imageData + y * m_eyeImg->widthStep);
		double x0, y0;
		double a, b, theta = 0;

		for (int x = 0; x < m_eyeImg->width; x++)
		{
			// Vote only for pixels Canny kept as edges.
			if (canny_row[x] == 0)
				continue;
			short dX = sh_row[x];
			short dY = sv_row[x];
			if ((abs(dX) + abs(dY)) < m_minGradStrength)
			{
				// Weak gradient: erase the edge pixel and skip it.
				cvLine(m_eyeImg, cvPoint(x, y), cvPoint(x, y), CV_RGB(0, 0, 0));
				continue;
			}

			for (a = m_aMin; a < m_aMax; a += (1 / m_accPrecision))
				for (b = m_bMin; b < m_bMax; b += (1 / m_accPrecision))
				{
					double e = a / b;
					if (e < 1)
						e = b / a;
					if (e > max_e)
						continue;
					for (theta = m_thetaMin; theta < m_thetaMax; theta += (1 / m_accPrecision))
					{
						// From the gradient direction, work out where the
						// centre of an (a, b, theta) ellipse through this
						// edge pixel would have to lie, and vote for it.
						angle = atan2((float)dY, (float)dX);
						thetaRad = 2 * CV_PI * theta / 360.0;
						angle -= (thetaRad + CV_PI / 2.0);
						xsi = tan(angle);
						//xsi = (float) dY / (float) dX;
						dx = -SignX(dX, dY) * a / sqrt(1 + (b * b) / (a * a * xsi * xsi));
						dy = -SignY(dX, dY) * b / sqrt(1 + (a * a * xsi * xsi) / (b * b));
						// rotate by theta
						xPrim = cos(thetaRad) * dx - sin(thetaRad) * dy;
						yPrim = sin(thetaRad) * dx + cos(thetaRad) * dy;
						dx = xPrim; dy = yPrim;
						x0 = x + dx;
						y0 = y + dy;
						indices[0] = x0;
						indices[1] = y0;
						indices[2] = theta;
						indices[3] = a;
						indices[4] = b;
						acc.Increment(indices);
					}
				}
		}
	}

	indices = acc.FindBest();

	bool found = false;
	if (indices.size() > 0)
	{
		// Draw the winning ellipse onto the full input image and record
		// its centre in full-image coordinates.
		cvEllipse(image,
			cvPoint(indices[0] + eyeROI.x, indices[1] + eyeROI.y),
			cvSize(indices[3], indices[4]),
			-indices[2],
			0,
			360,
			CV_RGB(255, 0, 0));
		m_irisCentre.x = indices[0] + eyeROI.x;
		m_irisCentre.y = indices[1] + eyeROI.y;
		found = true;
	}

	// Bug fix: the two Sobel scratch images were never released, leaking
	// memory on every call (both success and failure paths).
	cvReleaseImage(&imgSobelH);
	cvReleaseImage(&imgSobelV);
	return found;
}
// Computes an image gradient: allocates a single-channel 32-bit float
// image the same size as the input and fills it with the requested Sobel
// derivative (xorder/yorder, kernel size apertureSize).  The caller owns
// the returned image and must release it.
IplImage* HOGProcessor::doSobel(IplImage* in, int xorder, int yorder, int apertureSize)
{
	IplImage* grad = cvCreateImage(cvGetSize(in), IPL_DEPTH_32F, 1);
	cvSobel(in, grad, xorder, yorder, apertureSize);
	return grad;
}
Beispiel #30
-1
/**
 * Basic Canny edge detector: grayscale conversion -> Gaussian smoothing ->
 * Sobel gradients -> magnitude/quantized orientation -> non-maximum
 * suppression -> hysteresis thresholding (using `thresh` low/high).
 * Displays the raw gradient-magnitude image in a window and returns the
 * hysteresis-thresholded edge image.
 */
ImageRAII canny( IplImage * image, std::pair< int, int > thresh, double sigma )
{
	const char * WINDOW_NAME = "Basic Canny Edge Detector";

	// Working images: single-channel, same depth as the input.
	// (An unused "gradient" image allocation was removed.)
	ImageRAII grayscale( cvCreateImage( cvGetSize( image ), image->depth, 1 ) );
	ImageRAII destination( cvCreateImage( cvGetSize( image ), image->depth, grayscale.image->nChannels ) );
	ImageRAII gaussian( cvCreateImage( cvGetSize( image ), image->depth, grayscale.image->nChannels ) );
	ImageRAII gradient_x( cvCreateImage( cvGetSize( image ), image->depth, grayscale.image->nChannels ) );
	ImageRAII gradient_y( cvCreateImage( cvGetSize( image ), image->depth, grayscale.image->nChannels ) );
	ImageRAII orientation( cvCreateImage( cvGetSize( image ), image->depth, grayscale.image->nChannels ) );

	// convert image to grayscale
	cvCvtColor( image, grayscale.image, CV_BGR2GRAY );

	// gaussian smoothing
	cvSmooth( grayscale.image, gaussian.image, CV_GAUSSIAN, GAUSSIAN_X, GAUSSIAN_Y, sigma );
	// find edge strength
	cvSobel( gaussian.image, gradient_x.image, 1, 0, 3 );
	cvSobel( gaussian.image, gradient_y.image, 0, 1, 3 );
	// find edge orientation
	CvSize image_size = cvGetSize( gaussian.image );

	for( int i = 0; i < image_size.width; i++ )
	{
		for( int j = 0; j < image_size.height; j++ )
		{
			double x = cvGet2D( gradient_x.image, j, i ).val[0];
			double y = cvGet2D( gradient_y.image, j, i ).val[0];

			// Bug fix: the old special case forced angle = 90 whenever
			// x == 0, which is wrong for y < 0 (should be 270).
			// cvFastArctan already returns the correct 0..360-degree angle
			// for x == 0 and returns 0 for (0, 0), so no branching needed.
			float angle = cvFastArctan( (float)y, (float)x );

			CvScalar g;	// gradient magnitude
			CvScalar a;	// quantized gradient orientation
			g.val[0] = cvSqrt( pow( x, 2 ) + pow( y, 2 ) );
			a.val[0] = find_angle( angle );

			// NOTE(review): destination shares the input's depth (typically
			// 8U), so magnitudes above 255 saturate on store.
			cvSet2D( destination.image, j, i, g );
			cvSet2D( orientation.image, j, i, a );
		}
	}

	// Thin edges, then apply hysteresis thresholding.
	ImageRAII suppressed_image = nonMaxSup( destination.image, orientation.image );
	ImageRAII hysteresis_image = hysteresis( suppressed_image.image, orientation.image, thresh );

	// Show the raw gradient-magnitude image for inspection.
	cvNamedWindow( WINDOW_NAME );
	cvShowImage( WINDOW_NAME, destination.image );
	cvMoveWindow( WINDOW_NAME, image_size.width, 0 );

	return hysteresis_image;
}