Example No. 1
void ImageProcessorCV::CalculateGradientImageHSV(CByteImage *pInputImage, CByteImage *pOutputImage)
{
	if (pInputImage->width != pOutputImage->width || pInputImage->height != pOutputImage->height ||
		pInputImage->type != CByteImage::eRGB24 || pOutputImage->type != CByteImage::eGrayScale)
		return;

	IplImage *pIplInputImage = IplImageAdaptor::Adapt(pInputImage);
	IplImage *pIplOutputImage = IplImageAdaptor::Adapt(pOutputImage);

	// Determine Gradient Image, by Irina Wächter.
	// Instead of the true norm sqrt(x*x + y*y), use |x| + |y| because it is much faster.
	IplImage *singleChannel0 = cvCreateImage(cvSize(pInputImage->width,pInputImage->height), IPL_DEPTH_8U, 1);
	IplImage *singleChannel1 = cvCreateImage(cvSize(pInputImage->width,pInputImage->height), IPL_DEPTH_8U, 1);
	IplImage *singleChannel2 = cvCreateImage(cvSize(pInputImage->width,pInputImage->height), IPL_DEPTH_8U, 1);
	IplImage *diff = cvCreateImage(cvSize(pInputImage->width, pInputImage->height), IPL_DEPTH_16S, 1);
	IplImage *abs = cvCreateImage(cvSize(pInputImage->width, pInputImage->height), IPL_DEPTH_8U, 1);
		
	cvCvtPixToPlane(pIplInputImage, singleChannel0, singleChannel1, singleChannel2, NULL);
	
	// calculate gradients on S-channel
	//cvSmooth(singleChannel1, singleChannel1, CV_GAUSSIAN, 3, 3);
	cvSobel(singleChannel1, diff, 1, 0, 3);
	cvConvertScaleAbs(diff, abs);
	cvSobel(singleChannel1, diff, 0, 1, 3);
	cvConvertScaleAbs(diff, pIplOutputImage);
	cvAdd(abs, pIplOutputImage, pIplOutputImage, 0);
	
	// threshold the S-channel to create a mask for the H-channel gradients
	cvThreshold(singleChannel1, singleChannel1, 60, 255, CV_THRESH_BINARY);
	cvDilate(singleChannel1, singleChannel1);
	
	// calculate gradients on H-channel
	//cvSmooth(singleChannel0, singleChannel0, CV_GAUSSIAN, 3, 3);
	cvSobel(singleChannel0, diff, 1, 0, 3);
	cvConvertScaleAbs(diff, abs);
	cvSobel(singleChannel0, diff, 0, 1, 3);
	cvConvertScaleAbs(diff, singleChannel0);
	cvAdd(abs, singleChannel0, singleChannel0, 0);
	
	// filter gradients of H-channel with mask
	cvAnd(singleChannel0, singleChannel1, singleChannel0);
	
	// combine the two gradient images
	cvMax(pIplOutputImage, singleChannel0, pIplOutputImage);
	
	// free memory
	cvReleaseImage(&singleChannel0);
	cvReleaseImage(&singleChannel1);
	cvReleaseImage(&singleChannel2);
	cvReleaseImage(&diff);
	cvReleaseImage(&abs);
	
	cvReleaseImageHeader(&pIplInputImage);
	cvReleaseImageHeader(&pIplOutputImage);
}
Example No. 2
void onTrackbar(int)
{
	if(!(aSize & 1)) aSize++;	// the Sobel aperture size must be odd

	if(dst) cvReleaseImage(&dst);	// release the previous result so repeated callbacks do not leak it

	if(isColor)
	{
		dst = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 3);
		//create BGR planes and split
		IplImage *B = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1);
		IplImage *G = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1);
		IplImage *R = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1);
		IplImage *B16s = cvCreateImage(cvGetSize(img), IPL_DEPTH_16S, 1);
		IplImage *G16s = cvCreateImage(cvGetSize(img), IPL_DEPTH_16S, 1);
		IplImage *R16s = cvCreateImage(cvGetSize(img), IPL_DEPTH_16S, 1);
		cvSplit(img, B, G, R, 0);

		//sobel
		cvSobel(B, B16s, isY, 1 - isY, aSize);
		cvSobel(G, G16s, isY, 1 - isY, aSize);
		cvSobel(R, R16s, isY, 1 - isY, aSize);
		cvConvertScaleAbs(B16s, B, 1, 0);
		cvConvertScaleAbs(G16s, G, 1, 0);
		cvConvertScaleAbs(R16s, R, 1, 0);

		cvMerge(B, G, R, 0, dst);

		cvReleaseImage(&B);
		cvReleaseImage(&G);
		cvReleaseImage(&R);
		cvReleaseImage(&B16s);
		cvReleaseImage(&G16s);
		cvReleaseImage(&R16s);
	}//end if
	else
	{
		dst = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1);
		IplImage *gray = cvCreateImage(cvGetSize(img), img->depth, 1);
		IplImage *img16s = cvCreateImage(cvGetSize(img), IPL_DEPTH_16S, 1);
		cvCvtColor(img, gray, CV_BGR2GRAY);

		//sobel
		cvSobel(gray, img16s, isY, 1 - isY, aSize);
		cvConvertScaleAbs(img16s, dst, 1, 0);

		cvReleaseImage(&gray);
		cvReleaseImage(&img16s);
	}//end else

	cvShowImage(windowName, dst);
}
Example No. 3
void myShowSignedImage(char *window, IplImage *img){
    IplImage *temp=NULL;
    temp = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1 );
    cvConvertScaleAbs(img, temp, 1, 0);
    cvShowImage(window, temp);
    cvReleaseImage(&temp);
}
Example No. 4
int main (int argc, char **argv){
  IplImage *src_img, *dst_img, *tmp_img;

  // Load the image (read as grayscale)
  if (argc != 2 ||
	  (src_img = cvLoadImage (argv[1], CV_LOAD_IMAGE_GRAYSCALE)) == 0){
	fprintf(stderr,"Usage: $ %s img_file\n",argv[0]);
    return -1;
  }
  
  tmp_img = cvCreateImage(cvGetSize(src_img), IPL_DEPTH_16S, 1);
  dst_img = cvCreateImage(cvGetSize(src_img), IPL_DEPTH_8U,  1);

  // Create the derivative image with a Sobel filter
  cvSobel(src_img, tmp_img, 1, 0, 3);
  cvConvertScaleAbs(tmp_img, dst_img,1,0);

  // Display the images
  cvNamedWindow(SRC, CV_WINDOW_AUTOSIZE);
  cvShowImage(SRC, src_img);
  cvNamedWindow(DST, CV_WINDOW_AUTOSIZE);
  cvShowImage(DST, dst_img);

  // Wait for user input
  cvWaitKey(0);

  // Clean up
  cvDestroyWindow(SRC);
  cvDestroyWindow(DST);
  cvReleaseImage(&src_img);
  cvReleaseImage(&dst_img);
  cvReleaseImage(&tmp_img);

  return 0;
}
Example No. 5
int
main (int argc, char **argv)
{
    CvCapture *capture = 0;
    IplImage *frame = 0;
    double w = 320, h = 240;
    int c;

    // (1) Create a capture structure for the camera whose number is given as a command-line argument
    if (argc == 1 || (argc == 2 && strlen (argv[1]) == 1 && isdigit (argv[1][0])))
	capture = cvCreateCameraCapture (argc == 2 ? argv[1][0] - '0' : 0);

    /* these settings depend on the camera being used */
    // (2) Set the capture size.
    //cvSetCaptureProperty (capture, CV_CAP_PROP_FRAME_WIDTH, w);
    //cvSetCaptureProperty (capture, CV_CAP_PROP_FRAME_HEIGHT, h);

    cvNamedWindow ("Capture", CV_WINDOW_AUTOSIZE);

    // (3) Capture images from the camera
    {
	IplImage *src_img_gray, *tmp_img_gray, *dst_img_gray;
	
	// (a) Allocate image data for processing
	// (query a few frames first, presumably to let the camera settle)
	frame = cvQueryFrame (capture);
	frame = cvQueryFrame (capture);
	frame = cvQueryFrame (capture);
	tmp_img_gray = cvCreateImage(cvGetSize(frame), IPL_DEPTH_16S, 1);
	src_img_gray = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 1);
	dst_img_gray = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 1);

	while (1) {
	    frame = cvQueryFrame (capture);

	    // (b) Create the grayscale image
	    cvCvtColor(frame, src_img_gray, CV_BGR2GRAY);
	    
	    // (c) Edge processing (change 0 to 1 to use Canny instead of the Laplacian)
	    if ( 0 ) {
		cvCanny (src_img_gray, dst_img_gray, 50.0, 200.0, 3);
	    }else{
		cvLaplace (src_img_gray, tmp_img_gray, 5);
		cvConvertScaleAbs(tmp_img_gray, dst_img_gray, 1, 0);
	    }

	    // (d) Display the result
	    cvShowImage ("Capture", dst_img_gray);

	    c = cvWaitKey (10);
	    if (c == '\x1b')
		break;
	}
    }
	
    cvReleaseCapture (&capture);
    cvDestroyWindow ("Capture");

    return 0;
}
Example No. 6
// Laplacian transform
int main( int argc, char** argv )
{
    IplImage* laplace = 0;
    IplImage* colorlaplace = 0;
    IplImage* planes[3] = { 0, 0, 0 };
    CvCapture* capture = 0;
    
    if( argc == 1 || (argc == 2 && strlen(argv[1]) == 1 && isdigit(argv[1][0])))
        capture = cvCaptureFromCAM( argc == 2 ? argv[1][0] - '0' : 0 );
    else if( argc == 2 )
        capture = cvCaptureFromAVI( argv[1] ); 

    if( !capture )
    {
        fprintf(stderr,"Could not initialize capturing...\n");
        return -1;
    }
        
    cvNamedWindow( "Laplacian", 0 );

    for(;;)
    {
        IplImage* frame = 0;
        int i;

        frame = cvQueryFrame( capture );
        if( !frame )
            break;

        if( !laplace )
        {
            for( i = 0; i < 3; i++ )
                planes[i] = cvCreateImage( cvSize(frame->width,frame->height), 8, 1 );
            laplace = cvCreateImage( cvSize(frame->width,frame->height), IPL_DEPTH_16S, 1 );
            colorlaplace = cvCreateImage( cvSize(frame->width,frame->height), 8, 3 );
        }

        cvCvtPixToPlane( frame, planes[0], planes[1], planes[2], 0 );
        for( i = 0; i < 3; i++ )
        {
            cvLaplace( planes[i], laplace, 3 );
            cvConvertScaleAbs( laplace, planes[i], 1, 0 );
        }
        cvCvtPlaneToPix( planes[0], planes[1], planes[2], 0, colorlaplace );
        colorlaplace->origin = frame->origin;

        cvShowImage("Laplacian", colorlaplace );

        if( cvWaitKey(10) >= 0 )
            break;
    }

    cvReleaseCapture( &capture );
    cvDestroyWindow("Laplacian");

    return 0;
}
Example No. 7
void COpenCVMFCView::OnLaplace()
{
	// TODO: Add your command handler code here

	IplImage* pImage;
	IplImage* pImgLaplace = NULL;
	IplImage* pImgPlanes[3] = {0,0,0};
	int i;

	pImage = workImg;

	pImgLaplace = cvCreateImage(cvGetSize(pImage),
		IPL_DEPTH_16S,1);

	if (workImg->nChannels == 1) {
		cvLaplace(pImage,pImgLaplace,3);
		cvConvertScaleAbs(pImgLaplace,pImage, 1, 0 );
	}
	else {
		for (i = 0; i < 3; i++) {
			pImgPlanes[i] = cvCreateImage(cvGetSize(pImage),
				IPL_DEPTH_8U,1);
		}

		cvCvtPixToPlane(pImage,pImgPlanes[0],
			pImgPlanes[1],pImgPlanes[2],0);

		for (i = 0; i < 3; i++) {
			cvLaplace(pImgPlanes[i],pImgLaplace,3);
			cvConvertScaleAbs(pImgLaplace,pImgPlanes[i], 1, 0 );
		}

		cvCvtPlaneToPix(pImgPlanes[0],pImgPlanes[1],
			pImgPlanes[2],0,pImage);

		for (i = 0; i < 3; i++) {
			cvReleaseImage(&pImgPlanes[i]);
		}
	}

	cvReleaseImage(&pImgLaplace);

	Invalidate();
}
Example No. 8
void COpenCVMFCView::OnSobel()
{
	// TODO: Add your command handler code here

	IplImage* pImage;
	IplImage* pImgSobel = NULL;
	IplImage* pImgPlanes[3] = {0,0,0};
	int i;

	pImage = workImg;

	pImgSobel = cvCreateImage(cvGetSize(pImage),
		IPL_DEPTH_16S,1);   //  Create Working Image

	if (workImg->nChannels == 1) {            //  Handle Single Channel
		cvSobel(pImage,pImgSobel,1,1,3);
		cvConvertScaleAbs(pImgSobel,pImage, 1, 0 );
	}
	else {                                  //  Handle Three-Channel Images
		for (i = 0; i < 3; i++) {
			pImgPlanes[i] = cvCreateImage(cvGetSize(pImage),
				IPL_DEPTH_8U,1);    //  Create Sub Image
		}

		cvCvtPixToPlane(pImage,pImgPlanes[0],
			pImgPlanes[1],pImgPlanes[2],0);  //  Get Sub

		for (i = 0; i < 3; i++) {                 //  Handle Sub Independently
			cvSobel(pImgPlanes[i],pImgSobel,1,1,3);
			cvConvertScaleAbs(pImgSobel,pImgPlanes[i], 1, 0 );
		}

		cvCvtPlaneToPix(pImgPlanes[0],pImgPlanes[1],
			pImgPlanes[2],0,pImage);    //  Form Color Image From Sub Images

		for (i = 0; i < 3; i++) {
			cvReleaseImage(&pImgPlanes[i]);  //  Release Sub Image
		}
	}

	cvReleaseImage(&pImgSobel);             //  Release Working Image

	Invalidate();
}
Example No. 9
void detectPupils(IplImage *img)
{    

    //cvShowImage("Eyes before", img);
    //setting up the grayscale image
    IplImage* gray = cvCreateImage( cvGetSize(img), 8, 1 );
    IplImage* copy = cvCreateImage( cvGetSize(img), 8, 3 );
    //IplImage* src = cvCreateImage(cvGetSize(img), 8, 1);

    //set up circle storage
    CvMemStorage* storage = cvCreateMemStorage(0);

    //prepare the grayscale image
    cvCvtColor( img, gray, CV_RGB2GRAY );

    //cvCopy(img, copy);


    /*try close ellipse start*/

    ///cvThreshold(gray, gray, 30, 255, CV_THRESH_BINARY_INV);
    cvShowImage("Eyes before", gray);

    //IplConvKernel elipse = *cvCreateStructuringElementEx(3, 3, 0, 0, CV_SHAPE_ELLIPSE, 0);

    //cvMorphologyEx(gray, gray, copy, &elipse, CV_MOP_CLOSE, 2);

    /*try close ellipse end  */

    //cvThreshold(gray, gray, 42, 255, CV_THRESH_BINARY_INV);

    //cvAdaptiveThreshold(gray, gray, 255);


    // boost contrast before detection: gray = |4*gray - 3|, saturated to 8 bits
    cvConvertScaleAbs(gray, gray, 4, -3);
    cvSmooth( gray, gray, CV_GAUSSIAN, 9, 9); // smooth it, otherwise a lot of false circles may be detected
    //cvCanny(gray, gray, 0, canny2, canny3);
    //cvThreshold(gray, gray, 200, 255, CV_THRESH_BINARY);
    //apply Hough
    CvSeq* circles = cvHoughCircles( gray, storage, CV_HOUGH_GRADIENT, dp, 10, param1, param2, 0, gray->height/3);


    //loop through circles
    for(int i = 0; i < circles->total; i++)
    {
        float* circle = (float*)cvGetSeqElem( circles, i );
        cvCircle( img, cvPoint(cvRound(circle[0]),cvRound(circle[1])), cvRound(circle[2]), CV_RGB(0,255,0), 1, 8, 0 );
    }



    cvShowImage("Eyes", gray);

    //std::cerr << "Circles: " << circles->total << std::endl;
}
Example No. 10
bool motion_free_frame(IplImage* current_frame, IplImage* previous_frame)
{
	// TO-DO:  Determine the percentage of the frame's pixels which have changed (by more than VARIATION_ALLOWED_IN_PIXEL_VALUES)
	//         and return whether that percentage is less than ALLOWED_MOTION_FOR_MOTION_FREE_IMAGE.
	//return true;  // Just to allow the system to compile while the code is missing.

	if(previous_frame == NULL)
		return true;
	IplImage *current_frame_grayscale = cvCreateImage(
		cvGetSize(current_frame)
		, IPL_DEPTH_8U
		, 1);
	IplImage *previous_frame_grayscale = cvCreateImage(
		cvGetSize(previous_frame)
		, IPL_DEPTH_8U
		, 1);
	cvCvtColor(current_frame, current_frame_grayscale, CV_BGR2GRAY);
	cvCvtColor(previous_frame, previous_frame_grayscale, CV_BGR2GRAY);
	IplImage *image = cvCreateImage(
		cvGetSize(current_frame_grayscale)
		,IPL_DEPTH_8U
		, 1);
	IplImage *sub_image = cvCreateImage(
		cvGetSize(current_frame_grayscale)
		,IPL_DEPTH_16S
		, 1);
	cvSub(current_frame_grayscale, previous_frame_grayscale, sub_image);
	cvConvertScaleAbs(sub_image, image);
	int width_step = image->widthStep;
	int pixel_step = image->widthStep/image->width;

	int count = 0;
	for(int row = 0; row<image->height;++row)
	{
		for(int col = 0; col < image->width;++col)
		{
			unsigned char* curr_point = GETPIXELPTRMACRO( image, col, row, width_step, pixel_step );

			if( (*curr_point) > VARIATION_ALLOWED_IN_PIXEL_VALUES)
			{
				count ++ ;
			}
		}
	}
	
	float res = ((float)count * 100)/(image->height * image->width);

	// release the per-call images before returning
	cvReleaseImage(&current_frame_grayscale);
	cvReleaseImage(&previous_frame_grayscale);
	cvReleaseImage(&image);
	cvReleaseImage(&sub_image);

	return res < ALLOWED_MOTION_FOR_MOTION_FREE_IMAGE;
}
Example No. 11
inline
void convert_scale_abs_( const ipl_image_wrapper& src
                      , ipl_image_wrapper&        dst
                      , const double&             scale = 1.0
                      , const double&             shift = 0.0
                      )
{
   cvConvertScaleAbs( src.get()
                    , dst.get()
                    , scale
                    , shift
                    );
}
Example No. 12
void compute_vertical_edge_image(IplImage* input_image, IplImage* output_image)
{
	// TO-DO:  Compute the partial first derivative edge image in order to locate the vertical edges in the passed image,
	//   and then determine the non-maxima suppressed version of these edges (along each row as the rows can be treated
	//   independently as we are only considering vertical edges). Output the non-maxima suppressed edge image. 
	// Note:   You may need to smooth the image first.

	IplImage * tmp = cvCreateImage( 
		cvGetSize(input_image)
		, IPL_DEPTH_8U
		, 1);
	IplImage * first_derivative_img = cvCreateImage(
		cvGetSize(input_image)
		, IPL_DEPTH_16S
		, 1
		);
	IplImage * non_maxima_suppressed_img = cvCloneImage(tmp);
	cvZero(non_maxima_suppressed_img);
	cvCvtColor(input_image, tmp, CV_BGR2GRAY);
	cvSmooth(tmp, tmp,CV_GAUSSIAN, 3, 3);
	cvSobel(tmp, first_derivative_img, 1, 0, 3);
	cvConvertScaleAbs(first_derivative_img, tmp);
	cvThreshold(tmp, tmp, 220, 0, CV_THRESH_TOZERO);

	// non-maximum suppression
	int width_step = tmp->widthStep;
	int pixel_step = tmp->widthStep/tmp->width;
	
	for(int row = 0; row<tmp->height;++row)
	{
		for(int col = 1; col < tmp->width-1;++col)
		{
			unsigned char* curr_point = GETPIXELPTRMACRO(tmp, col, row, width_step, pixel_step);
			unsigned char* pre_point = GETPIXELPTRMACRO(tmp, col-1, row, width_step, pixel_step);
			unsigned char* post_point = GETPIXELPTRMACRO(tmp, col+1, row, width_step, pixel_step);
			unsigned char* non_maxima_suppressed_img_point = GETPIXELPTRMACRO(non_maxima_suppressed_img, col, row, width_step, pixel_step);
			if( (*curr_point) <= (*pre_point)
				|| (*curr_point) < (*post_point) )
			{
				(*non_maxima_suppressed_img_point) = 0;
			}
			else
			{
				(*non_maxima_suppressed_img_point) = *(curr_point);
			}
		}
	}
	cvThreshold(non_maxima_suppressed_img, non_maxima_suppressed_img, 200, 255, CV_THRESH_BINARY);
	cvCvtColor(non_maxima_suppressed_img, output_image, CV_GRAY2BGR);

	// free the temporary images
	cvReleaseImage(&tmp);
	cvReleaseImage(&first_derivative_img);
	cvReleaseImage(&non_maxima_suppressed_img);
}
Example No. 13
void ImageProcessorCV::Laplacian5x5(CByteImage *pInputImage, CByteImage *pOutputImage)
{
	if (pInputImage->width != pOutputImage->width || pInputImage->height != pOutputImage->height ||
		pInputImage->type != pOutputImage->type || pInputImage->type != CByteImage::eGrayScale)
		return;

	IplImage *pIplInputImage = IplImageAdaptor::Adapt(pInputImage);
	IplImage *pIplOutputImage = IplImageAdaptor::Adapt(pOutputImage);
	IplImage *pIplTempImage = cvCreateImage(cvSize(pInputImage->width, pInputImage->height), IPL_DEPTH_16S, 1);
	
	cvLaplace(pIplInputImage, pIplTempImage, 5);
	cvConvertScaleAbs(pIplTempImage, pIplOutputImage);
	
	cvReleaseImage(&pIplTempImage);
	cvReleaseImageHeader(&pIplInputImage);
	cvReleaseImageHeader(&pIplOutputImage);
}
Example No. 14
/*
*The Sobel filter is an approximation to a derivative; it can compute first- or second-order derivatives along either axis of an image.
*Because the Sobel kernel combines Gaussian smoothing with differentiation, the result is somewhat robust to noise.
*@param input, the input image, may be grayscale or RGB
*@param output, the output image must have at least 16 bits per pixel to avoid overflow
*@param xOrder, the derivative order for the X axis
*@param yOrder, the derivative order for the Y axis
*@param apertureSize, size of the filter window; passing CV_SCHARR (-1) selects the 3x3 Scharr kernel, which is more accurate and less sensitive to noise
*/
ImageImPro* OpenImProLib_OpenCvImpl::filterSobel(ImageImPro* ptrInput, int xOrder, int yOrder, int apertureSize){  
    IplImage* ptrCvInput = ptrInput->getOpenCvImage();
    //buffer for the Sobel result, which needs more bits per pixel; rescaling is then necessary to get back to 8 bits per pixel
    IplImage* ptrCvTemp = cvCreateImage(cvGetSize(ptrCvInput),IPL_DEPTH_32F,1);
    IplImage* ptrCvOutput = cvCreateImage(cvGetSize(ptrCvInput), IPL_DEPTH_8U, 1);
    if(ptrInput->getChannels() != 1){
        IplImage* ptrCvInputGray = cvCreateImage(cvSize(ptrCvInput->width,ptrCvInput->height),IPL_DEPTH_8U,1);
        cvCvtColor(ptrCvInput,ptrCvInputGray, CV_RGB2GRAY);
        cvSobel(ptrCvInputGray,ptrCvTemp, xOrder, yOrder, apertureSize);
        cvReleaseImage(&ptrCvInputGray);
    }
    else{
        cvSobel(ptrCvInput,ptrCvTemp, xOrder, yOrder, apertureSize);
    }
    cvConvertScaleAbs(ptrCvTemp, ptrCvOutput, 1, 0);
    ImageImPro* ptrOutput = new ImageImPro_OpenCvImpl(ptrCvOutput);
    cvReleaseImage(&ptrCvOutput);
    cvReleaseImage(&ptrCvInput);
    cvReleaseImage(&ptrCvTemp);
    return ptrOutput;
}
Example No. 15
int main( int argc, char** argv ) {
	if(argc != 2) {
		printf("Usage: ./image <image name>\n");
		return 0;
	}

	// Show original image
	IplImage* img = cvLoadImage( argv[1], 0 );
	cvNamedWindow( "Input", CV_WINDOW_AUTOSIZE );
	cvShowImage( "Input", img );
	printf("Input depth: %x\n", img->depth); // 8U

	// Run Sobel edge detection
	IplImage *res = cvCreateImage( cvGetSize(img), IPL_DEPTH_8U, 1 );
#ifdef USE_OPENCV
	// OpenCV implementation
	// Convert images to 16SC1
	IplImage *out = cvCreateImage( cvGetSize(img), IPL_DEPTH_16S, 1 );
	cvSobel( img, out, 1, 0 );
	cvConvertScaleAbs(out, res); // need 8U again
	cvReleaseImage( &out );
#else
	// Own implementation of Sobel kernel
	int H = img->height, W = img->width, WS = img->widthStep;
	uint8_t *srcdata = (uint8_t *) img->imageData;
	uint8_t *dstdata = (uint8_t *) res->imageData;

#define ind(i,j) ((i)*WS+(j))

	// work on bulk of image
	for(int i = 1; i+1 < H; i++) {
		for(int j = 1; j+1 < W; j++) {
			// dx kernel
			// -1 0 1
			// -2 0 2
			// -1 0 1
			int16_t value = - srcdata[ind(i-1,j-1)]
							+ srcdata[ind(i-1,j+1)]
							- srcdata[ind(i,j-1)] * 2
							+ srcdata[ind(i,j+1)] * 2
							- srcdata[ind(i+1,j-1)]
							+ srcdata[ind(i+1,j+1)];
			if(value < 0)
				value = -value;	// absolute value, to match cvConvertScaleAbs in the OpenCV path
			if(value > 255)
				value = 255;
			dstdata[ind(i,j)] = value;
		}
	}

	// work on left and right edges (not accurate)
	for(int i = 0; i < H; i++) {
		dstdata[ind(i,0)] = 0;
		dstdata[ind(i,W-1)] = 0;
	}

	// work on top and bottom edges (not accurate)
	for(int j = 0; j < W; j++) {
		dstdata[ind(0,j)] = 0;
		dstdata[ind(H-1,j)] = 0;
	}

	// debugging
	for(int j = 0; j < 10; j++)
		printf("%d ", dstdata[ind(1, j)]);
	printf("\n");

#undef ind

#endif

	// Display
	cvNamedWindow( "Output", CV_WINDOW_AUTOSIZE );
	cvShowImage( "Output", res );

	// Cleanup
	cvWaitKey(0);
	cvReleaseImage( &img );
	cvReleaseImage( &res );
	cvDestroyWindow( "Input" );
	cvDestroyWindow( "Output" );

	return 0;
}
Example No. 16
/**
 * @brief Return an image (an OpenCV IplImage) for a specific component of the kernel
 * @param Type The component of the Gabor kernel, e.g. REAL, IMAG, MAG, PHASE
 * @return Pointer to the image structure, or NULL on failure
 */
IplImage* CvGabor::get_image(int Type)
{
    if(IsKernelCreate() == false) { 
      perror("Error: the Gabor kernel has not been created in get_image()!\n");
      return NULL;
    }
    else
    {  
    IplImage* pImage;
    IplImage *newimage;
    newimage = cvCreateImage(cvSize(Width,Width), IPL_DEPTH_8U, 1 );
    //printf("Width is %d.\n",(int)Width);
    //printf("Sigma is %f.\n", Sigma);
    //printf("F is %f.\n", F);
    //printf("Phi is %f.\n", Phi);
    
    //pImage = gan_image_alloc_gl_d(Width, Width);
    pImage = cvCreateImage( cvSize(Width,Width), IPL_DEPTH_32F, 1 );
    
    CvMat* kernel = cvCreateMat(Width, Width, CV_32FC1);
    double ve;
//     CvScalar S;
    CvSize size = cvGetSize( kernel );
    int rows = size.height;
    int cols = size.width;
    switch(Type)
    {
        case 1:  //Real
           cvCopy( (CvMat*)Real, (CvMat*)kernel, NULL );
            //pImage = cvGetImage( (CvMat*)kernel, pImageGL );
          for (int i = 0; i < rows; i++)
          {
            for (int j = 0; j < cols; j++)
            {
              ve = cvGetReal2D((CvMat*)kernel, i, j);
              cvSetReal2D( (IplImage*)pImage, j, i, ve );
            }
          }
          break;
        case 2:  //Imag
           cvCopy( (CvMat*)Imag, (CvMat*)kernel, NULL );
           //pImage = cvGetImage( (CvMat*)kernel, pImageGL );
          for (int i = 0; i < rows; i++)
          {
            for (int j = 0; j < cols; j++)
            {
              ve = cvGetReal2D((CvMat*)kernel, i, j);
              cvSetReal2D( (IplImage*)pImage, j, i, ve );
            }
          }
          break; 
        case 3:  //Magnitude
           ///@todo  
           printf("[CvGabor::get_image] Error: No magnitude available.\n");
           break;
        case 4:  //Phase
          ///@todo
           printf("[CvGabor::get_image] Error: No phase available.\n");
           break;
    }
   
    cvNormalize((IplImage*)pImage, (IplImage*)pImage, 0, 255, CV_MINMAX, NULL );
    cvConvertScaleAbs( (IplImage*)pImage, (IplImage*)newimage, 1, 0 );

    cvReleaseMat(&kernel);
    cvReleaseImage(&pImage);
    return newimage;
  }
}
Example No. 17
int main(void)
{
  IplImage *src=NULL;
  if (0){
    src = cvCreateImageHeader(cvSize(4,4),IPL_DEPTH_8U,1);
    char rawdata[4][4] = { {0, 0, 1, 1},
			   {0, 0, 1, 1},
			   {0, 2, 2, 2},
			   {2, 2, 3, 3}};
    src->imageData = (char*)(&rawdata);
  }else{
    src = cvLoadImage("test.png",0);
  }
  CvGLCM* glcm;
//  glcm = cvCreateGLCM(src, 1, NULL, 4, CV_GLCM_OPTIMIZATION_LUT);
  glcm = cvCreateGLCM(src, 1, NULL, 4, CV_GLCM_OPTIMIZATION_NONE);
  cvCreateGLCMDescriptors(glcm, CV_GLCMDESC_OPTIMIZATION_ALLOWDOUBLENEST);
//#define CV_GLCMDESC_ENTROPY                         0
//#define CV_GLCMDESC_ENERGY                          1
//#define CV_GLCMDESC_HOMOGENITY                      2
//#define CV_GLCMDESC_CONTRAST                        3
//#define CV_GLCMDESC_CLUSTERTENDENCY                 4
//#define CV_GLCMDESC_CLUSTERSHADE                    5
//#define CV_GLCMDESC_CORRELATION                     6
//#define CV_GLCMDESC_CORRELATIONINFO1                7
//#define CV_GLCMDESC_CORRELATIONINFO2                8
//#define CV_GLCMDESC_MAXIMUMPROBABILITY              9

  for (int step=0; step<4; step++){ 
    for (int i=0; i<10; i++){
      printf("%.3f,", cvGetGLCMDescriptor(glcm, step, i));
    }
    printf("\n");
    
  }


  IplImage *d0org = cvCreateImage(cvSize(256,256),IPL_DEPTH_32F,1); 
  cvResize(cvCreateGLCMImage(glcm,0),d0org,CV_INTER_NN);
  IplImage *d0 = cvCreateImage(cvGetSize(d0org),IPL_DEPTH_8U,1);
  cvConvertScaleAbs(d0org,d0,255,0);
  cvNormalize(d0,d0,0,255,CV_MINMAX);
  cvSaveImage("d0.png",d0);

  IplImage *d1org = cvCreateImage(cvSize(256,256),IPL_DEPTH_32F,1); 
  cvResize(cvCreateGLCMImage(glcm,1),d1org,CV_INTER_NN);
  IplImage *d1 = cvCreateImage(cvGetSize(d1org),IPL_DEPTH_8U,1);
  cvConvertScaleAbs(d1org,d1,255,0);
  cvNormalize(d1,d1,0,255,CV_MINMAX);
  cvSaveImage("d1.png",d1);

  IplImage *d2org = cvCreateImage(cvSize(256,256),IPL_DEPTH_32F,1); 
  cvResize(cvCreateGLCMImage(glcm,2),d2org,CV_INTER_NN);
  IplImage *d2 = cvCreateImage(cvGetSize(d2org),IPL_DEPTH_8U,1);
  cvConvertScaleAbs(d2org,d2,255,0);
  cvNormalize(d2,d2,0,255,CV_MINMAX);
  cvSaveImage("d2.png",d2);

  IplImage *d3org = cvCreateImage(cvSize(256,256),IPL_DEPTH_32F,1); 
  cvResize(cvCreateGLCMImage(glcm,3),d3org,CV_INTER_NN);
  IplImage *d3 = cvCreateImage(cvGetSize(d3org),IPL_DEPTH_8U,1);
  cvConvertScaleAbs(d3org,d3,255,0);
  cvNormalize(d3,d3,0,255,CV_MINMAX);
  cvSaveImage("d3.png",d3);

  cvNamedWindow("D0",1);
  cvNamedWindow("D1",1);
  cvNamedWindow("D2",1);
  cvNamedWindow("D3",1);
  cvShowImage("D0",d0);
  cvShowImage("D1",d1);
  cvShowImage("D2",d2);
  cvShowImage("D3",d3);
  cvWaitKey(0);

  cvReleaseGLCM(glcm,CV_GLCM_ALL);
  return 0;
}
Example No. 18
int main( int argc, char** argv )
{
    IplImage *current_frame=NULL;
	IplImage *running_average_background=NULL;

	IplImage *static_background_image=NULL;
	IplImage *static_moving_mask_image=NULL;
	IplImage *running_average_background_image=NULL;
	IplImage *running_average_moving_mask_image=NULL;
	IplImage *running_gaussian_average_background_average=NULL;
	IplImage *running_gaussian_average_background_sd=NULL;
	IplImage *running_gaussian_average_sd_image=NULL;
	IplImage *running_gaussian_average_background_image=NULL;
	IplImage *running_gaussian_average_moving_mask_image=NULL;

	IplImage *change_and_remain_changed_background_image=NULL;
	IplImage *subtracted_image=NULL;
	IplImage *moving_mask_image=NULL;

    int user_clicked_key=0;
	int show_ch = 'm';
	bool paused = false;
    
    // Load the video (AVI) file
    CvCapture *capture = cvCaptureFromAVI( " " );   // Add the input video path here
    // Ensure AVI opened properly
    if( !capture )
		return 1;    
    
    // Get Frames Per Second in order to playback the video at the correct speed
    int fps = ( int )cvGetCaptureProperty( capture, CV_CAP_PROP_FPS );
    
	// Explain the User Interface
    printf( "Hot keys: \n"
		    "\tESC - quit the program\n"
            "\tSPACE - pause/resume the video\n");

	// Create display windows for images
	cvNamedWindow( "Input video", 0 );
    cvNamedWindow( "Static Background", 0 );
    cvNamedWindow( "Running Average Background", 0 );
    cvNamedWindow( "Running Gaussian Average Background", 0 );
    cvNamedWindow( "Running Gaussian Average Stan. Dev.", 0 );
    cvNamedWindow( "Moving Points - Static", 0 );
    cvNamedWindow( "Moving Points - Running Average", 0 );
    cvNamedWindow( "Moving Points - Running Gaussian Average", 0 );

	// Setup mouse callback on the original image so that the user can see image values as they move the
	// cursor over the image.
    cvSetMouseCallback( "Input video", on_mouse_show_values, 0 );
	window_name_for_on_mouse_show_values="Input video";

    while( user_clicked_key != ESC ) {
		// Get current video frame
        current_frame = cvQueryFrame( capture );
        if( !current_frame ) // No new frame available
			break;
		image_for_on_mouse_show_values = current_frame; // Assign image for mouse callback
		cvShowImage( "Input video", current_frame );

		if (static_background_image == NULL)
		{	// The first time around the loop create the images for processing
			// General purpose images
			subtracted_image = cvCloneImage( current_frame );
			// Static background images
			static_background_image = cvCloneImage( current_frame );
			static_moving_mask_image = cvCreateImage( cvGetSize(current_frame), 8, 3 );
			cvShowImage( "Static Background", static_background_image );
			// Running average images
			running_average_background = cvCreateImage( cvGetSize(current_frame), IPL_DEPTH_32F, 3 );
			//cvZero(running_average_background);
			cvConvert(current_frame, running_average_background);
			running_average_background_image = cvCloneImage( current_frame );
			running_average_moving_mask_image = cvCreateImage( cvGetSize(current_frame), 8, 3 );
			// Running Gaussian average images
			running_gaussian_average_background_image = cvCloneImage( current_frame );
			running_gaussian_average_sd_image = cvCloneImage( current_frame );
			running_gaussian_average_moving_mask_image = cvCreateImage( cvGetSize(current_frame), 8, 3 );
			running_gaussian_average_background_average = cvCreateImage( cvGetSize(current_frame), IPL_DEPTH_32F, 3 );
			cvConvert(current_frame, running_gaussian_average_background_average);
			running_gaussian_average_background_sd = cvCreateImage( cvGetSize(current_frame), IPL_DEPTH_32F, 3 );
			cvZero(running_gaussian_average_background_sd);
		}
		// Static Background Processing
		cvAbsDiff( current_frame, static_background_image, subtracted_image );
		cvThreshold( subtracted_image, static_moving_mask_image, 30, 255, CV_THRESH_BINARY );
        cvShowImage( "Moving Points - Static", static_moving_mask_image );

		// Running Average Background Processing
		cvRunningAvg( current_frame, running_average_background, 0.01 /*, moving_mask_image*/ );
		cvConvert( running_average_background, running_average_background_image );
		cvAbsDiff( current_frame, running_average_background_image, subtracted_image );
		cvThreshold( subtracted_image, running_average_moving_mask_image, 30, 255, CV_THRESH_BINARY );
		cvShowImage( "Running Average Background", running_average_background_image );
        cvShowImage( "Moving Points - Running Average", running_average_moving_mask_image );
		
		
		// Running Gaussian Average Background Processing
		
		update_running_gaussian_averages( current_frame, running_gaussian_average_background_average, running_gaussian_average_background_sd );
		cvConvertScaleAbs( running_gaussian_average_background_average, running_gaussian_average_background_image, 1.0, 0 );
		cvShowImage( "Running Gaussian Average Background", running_gaussian_average_background_image );
		cvConvertScaleAbs( running_gaussian_average_background_sd, running_gaussian_average_sd_image, 10.0, 0 );
		cvShowImage( "Running Gaussian Average Stan. Dev.", running_gaussian_average_sd_image );
		determine_moving_points_using_running_gaussian_averages( current_frame, running_gaussian_average_background_average, running_gaussian_average_background_sd, running_gaussian_average_moving_mask_image );
        cvShowImage( "Moving Points - Running Gaussian Average", running_gaussian_average_moving_mask_image );

        // Deal with user input, and wait for the delay between frames
		do {
			if( user_clicked_key == ' ' )
			{
				paused = !paused;
			}
			if (paused)
				user_clicked_key = cvWaitKey(0);
			else user_clicked_key = cvWaitKey( 1000 / fps );
		} while (( user_clicked_key != ESC ) && ( user_clicked_key != -1 ));
	}
    
    /* free memory */
    cvReleaseCapture( &capture );
 	cvDestroyWindow( "Input video" );
    cvDestroyWindow( "Static Background" );
    cvDestroyWindow( "Running Average Background" );
    cvDestroyWindow( "Running Gaussian Average Background" );
    cvDestroyWindow( "Running Gaussian Average Stan. Dev." );
    cvDestroyWindow( "Moving Points - Static" );
    cvDestroyWindow( "Moving Points - Running Average" );
    cvDestroyWindow( "Moving Points - Running Gaussian Average" );

    return 0;
}
Example No. 19
/**
 * main
 */
int main(int argc, const char **argv)
{
	// *** MODIFICATION: OpenCV modifications.
	// Load previous image.
	IplImage* prevImage = cvLoadImage("motion1.jpg", CV_LOAD_IMAGE_COLOR);
		
	// Create two arrays with the same number of channels as the original image.
	avg1 = cvCreateMat(prevImage->height,prevImage->width,CV_32FC3);
	avg2 = cvCreateMat(prevImage->height,prevImage->width,CV_32FC3);
		
	// Create image of 32 bits.
	IplImage* image32 = cvCreateImage(cvSize(prevImage->width,prevImage->height), 32,3);
												
	// Convert the image to 32-bit float; note 1.0/255, since plain 1/255 is integer division and yields zero.
	cvConvertScale(prevImage,image32,1.0/255,0);
		
	// Seed each accumulator from the previous image.
	// Each needs its own copy of the data; pointing both at image32's buffer would make them the same array.
	cvConvert(image32, avg1);
	cvConvert(image32, avg2);
	// *** MODIFICATION end
	
   // Our main data storage vessel..
   RASPISTILL_STATE state;

   MMAL_STATUS_T status = MMAL_SUCCESS;
   MMAL_PORT_T *camera_preview_port = NULL;
   MMAL_PORT_T *camera_video_port = NULL;
   MMAL_PORT_T *camera_still_port = NULL;
   MMAL_PORT_T *preview_input_port = NULL;
   MMAL_PORT_T *encoder_input_port = NULL;
   MMAL_PORT_T *encoder_output_port = NULL;

   bcm_host_init();

   // Register our application with the logging system
   vcos_log_register("fast", VCOS_LOG_CATEGORY);

   signal(SIGINT, signal_handler);

   default_status(&state);     
   
   if (state.verbose)
   {
      fprintf(stderr, "\n%s Camera App %s\n\n", basename(argv[0]), VERSION_STRING);      
   }

   // OK, we have a nice set of parameters. Now set up our components
   // We have three components. Camera, Preview and encoder.
   // Camera and encoder are different in stills/video, but preview
   // is the same so handed off to a separate module

   if ((status = create_camera_component(&state)) != MMAL_SUCCESS)
   {
      vcos_log_error("%s: Failed to create camera component", __func__);
   }
   else if ((status = raspipreview_create(&state.preview_parameters)) != MMAL_SUCCESS)
   {
      vcos_log_error("%s: Failed to create preview component", __func__);
      destroy_camera_component(&state);
   }
   else if ((status = create_encoder_component(&state)) != MMAL_SUCCESS)
   {
      vcos_log_error("%s: Failed to create encode component", __func__);
      raspipreview_destroy(&state.preview_parameters);
      destroy_camera_component(&state);
   }
   else
   {
      PORT_USERDATA callback_data;

      if (state.verbose)
         fprintf(stderr, "Starting component connection stage\n");
         
      camera_preview_port = state.camera_component->output[MMAL_CAMERA_PREVIEW_PORT];
      camera_video_port   = state.camera_component->output[MMAL_CAMERA_VIDEO_PORT];
      camera_still_port   = state.camera_component->output[MMAL_CAMERA_CAPTURE_PORT];
      preview_input_port  = state.preview_parameters.preview_component->input[0];
      encoder_input_port  = state.encoder_component->input[0];
      encoder_output_port = state.encoder_component->output[0];

      if (state.preview_parameters.wantPreview )
      {
         if (state.verbose)
         {
            fprintf(stderr, "Connecting camera preview port to preview input port\n");
            fprintf(stderr, "Starting video preview\n");
         }

         // *** USER: remove preview
         // Connect camera to preview
         //status = connect_ports(camera_preview_port, preview_input_port, &state.preview_connection);

      }
      else
      {
         status = MMAL_SUCCESS;
      }

      if (status == MMAL_SUCCESS)
      {
         VCOS_STATUS_T vcos_status;

         if (state.verbose)
            fprintf(stderr, "Connecting camera stills port to encoder input port\n");

         // Now connect the camera to the encoder
         status = connect_ports(camera_still_port, encoder_input_port, &state.encoder_connection);
         

         if (status != MMAL_SUCCESS)
         {
            vcos_log_error("%s: Failed to connect camera video port to encoder input", __func__);
            goto error;
         }

         // Set up our userdata - this is passed though to the callback where we need the information.
         // Null until we open our filename
         callback_data.file_handle = NULL;
         callback_data.pstate = &state;
         vcos_status = vcos_semaphore_create(&callback_data.complete_semaphore, "RaspiStill-sem", 0);

         vcos_assert(vcos_status == VCOS_SUCCESS);

         if (status != MMAL_SUCCESS)
         {
            vcos_log_error("Failed to setup encoder output");
            goto error;
         }
         
         FILE *output_file = NULL;
         
         int frame = 1;
         
         // Enable the encoder output port
         encoder_output_port->userdata = (struct MMAL_PORT_USERDATA_T *)&callback_data;
         
         if (state.verbose)
			fprintf(stderr, "Enabling encoder output port\n");
			
		// Enable the encoder output port and tell it its callback function
		status = mmal_port_enable(encoder_output_port, encoder_buffer_callback);
		
		// Create an empty matrix with the size of the buffer.
		CvMat* buf = cvCreateMat(1,60000,CV_8UC1);
		
		// Keep buffer that gets frames from queue.
		MMAL_BUFFER_HEADER_T *buffer;
		
		// Image to be displayed.
		IplImage* image;
		
		// Keep number of buffers and index for the loop.
		int num, q; 
		
		while(1) 
		{
			// Send all the buffers to the encoder output port
			num = mmal_queue_length(state.encoder_pool->queue);
			
			for (q=0;q<num;q++)
			{
				buffer = mmal_queue_get(state.encoder_pool->queue);
				
				if (!buffer)
					vcos_log_error("Unable to get a required buffer %d from pool queue", q);
					
				if (mmal_port_send_buffer(encoder_output_port, buffer)!= MMAL_SUCCESS)
					vcos_log_error("Unable to send a buffer to encoder output port (%d)", q);
			} // for
			
			if (mmal_port_parameter_set_boolean(camera_still_port, MMAL_PARAMETER_CAPTURE, 1) != MMAL_SUCCESS)
				vcos_log_error("%s: Failed to start capture", __func__);
			
			else
			{
				// Wait for capture to complete
				// For some reason vcos_semaphore_wait_timeout sometimes returns immediately with a bad parameter error
				// even though everything appears correct, so revert to the untimed wait until the erratic behaviour is understood
				vcos_semaphore_wait(&callback_data.complete_semaphore);
				if (state.verbose)
					fprintf(stderr, "Finished capture %d\n", frame);
			} // else
			
			// Copy buffer from camera to matrix.
			buf->data.ptr = buffer->data;
			
			// This workaround is needed for the code to work
			// *** TODO: investigate why.
			printf("Until here works\n");
			
			// Decode the image and display it.
			image = cvDecodeImage(buf, CV_LOAD_IMAGE_COLOR);
		
			// Destinations
			CvMat* res1 = cvCreateMat(image->height,image->width,CV_8UC3);
			CvMat* res2 = cvCreateMat(image->height,image->width,CV_8UC3);
		
			// Update running averages and then scale, calculate absolute values
			// and convert the result 8-bit.
			// *** USER:change the value of the weight.
			cvRunningAvg(image,avg2,0.0001, NULL);		
			cvConvertScaleAbs(avg2, res2, 1,0);
		
			cvRunningAvg(image,avg1,0.1, NULL);
			cvConvertScaleAbs(avg1, res1, 1,0);
				
			// Show images
			cvShowImage("img",image);
			cvShowImage("avg1",res1);
			cvShowImage("avg2",res2);
			cvWaitKey(20);
		
			// Update previous image.
			cvSaveImage("motion1.jpg", image, 0);
		} // end while 
		
		vcos_semaphore_delete(&callback_data.complete_semaphore);
         
      }
      else
      {
         mmal_status_to_int(status);
         vcos_log_error("%s: Failed to connect camera to preview", __func__);
      }

error:

      mmal_status_to_int(status);

      if (state.verbose)
         fprintf(stderr, "Closing down\n");

      // Disable all our ports that are not handled by connections
      check_disable_port(camera_video_port);
      check_disable_port(encoder_output_port);

      if (state.preview_parameters.wantPreview )
         mmal_connection_destroy(state.preview_connection);

      mmal_connection_destroy(state.encoder_connection);

      /* Disable components */
      if (state.encoder_component)
         mmal_component_disable(state.encoder_component);

      if (state.preview_parameters.preview_component)
         mmal_component_disable(state.preview_parameters.preview_component);

      if (state.camera_component)
         mmal_component_disable(state.camera_component);

      destroy_encoder_component(&state);
      raspipreview_destroy(&state.preview_parameters);
      destroy_camera_component(&state);

      if (state.verbose)
         fprintf(stderr, "Close down completed, all components disconnected, disabled and destroyed\n\n");
   }

   if (status != MMAL_SUCCESS)
      raspicamcontrol_check_configuration(128);
      
   return 0;
}
Example No. 20
void ImageProcessorCV::CalculateGradientImage(CByteImage *pInputImage, CByteImage *pOutputImage)
{
	if (pInputImage->width != pOutputImage->width || pInputImage->height != pOutputImage->height ||
		pOutputImage->type != CByteImage::eGrayScale)
		return;

	IplImage *pIplInputImage = IplImageAdaptor::Adapt(pInputImage);
	IplImage *pIplOutputImage = IplImageAdaptor::Adapt(pOutputImage);

	if (pInputImage->type == CByteImage::eGrayScale)
	{
		IplImage *diff = cvCreateImage(cvSize(pInputImage->width, pInputImage->height), IPL_DEPTH_16S, 1);
		IplImage *abs = cvCreateImage(cvSize(pInputImage->width, pInputImage->height), IPL_DEPTH_8U, 1);
		
		cvSmooth(pIplInputImage, abs, CV_GAUSSIAN, 3, 3);
		cvSobel(abs, diff, 1, 0, 3);
		cvConvertScaleAbs(diff, pIplOutputImage);
		cvSobel(abs, diff, 0, 1, 3);
		cvConvertScaleAbs(diff, abs);
		cvAdd(abs, pIplOutputImage, pIplOutputImage, 0);
		
		cvReleaseImage(&diff);
		cvReleaseImage(&abs);
	}
	else if (pInputImage->type == CByteImage::eRGB24)
	{
		//	Determine Gradient Image, by Irina Wächter.
		//	Instead of the true norm sqrt(x*x + y*y), use |x| + |y| because it is much faster.
		IplImage *singleChannel0 = cvCreateImage(cvSize(pInputImage->width,pInputImage->height), IPL_DEPTH_8U, 1);
		IplImage *singleChannel1 = cvCreateImage(cvSize(pInputImage->width,pInputImage->height), IPL_DEPTH_8U, 1);
		IplImage *singleChannel2 = cvCreateImage(cvSize(pInputImage->width,pInputImage->height), IPL_DEPTH_8U, 1);
		IplImage *diff = cvCreateImage(cvSize(pInputImage->width, pInputImage->height), IPL_DEPTH_16S, 1);
		IplImage *abs = cvCreateImage(cvSize(pInputImage->width, pInputImage->height), IPL_DEPTH_8U, 1);
		
		cvCvtPixToPlane(pIplInputImage, singleChannel0, singleChannel1, singleChannel2, NULL);
	
		cvSmooth(singleChannel0, singleChannel0, CV_GAUSSIAN, 3, 3);
		cvSobel(singleChannel0, diff, 1, 0, 3);
		cvConvertScaleAbs(diff, abs);
		cvSobel(singleChannel0, diff, 0, 1, 3);
		cvConvertScaleAbs(diff, singleChannel0);
		cvAdd(abs, singleChannel0, pIplOutputImage, 0);
	
		cvSmooth(singleChannel1, singleChannel1, CV_GAUSSIAN, 3, 3);
		cvSobel(singleChannel1, diff, 1, 0, 3);
		cvConvertScaleAbs(diff, abs);
		cvSobel(singleChannel1, diff, 0, 1, 3);
		cvConvertScaleAbs(diff, singleChannel1);
		cvAdd(abs, singleChannel1, singleChannel1, 0);
		cvMax(pIplOutputImage, singleChannel1, pIplOutputImage);
	
		cvSmooth(singleChannel2, singleChannel2, CV_GAUSSIAN, 3, 3);
		cvSobel(singleChannel2, diff, 1, 0, 3);
		cvConvertScaleAbs(diff, abs);
		cvSobel(singleChannel2, diff, 0, 1, 3);
		cvConvertScaleAbs(diff, singleChannel2);
		cvAdd(abs, singleChannel2, singleChannel2, 0);
		cvMax(pIplOutputImage, singleChannel2, pIplOutputImage);
	
		cvReleaseImage(&singleChannel0);
		cvReleaseImage(&singleChannel1);
		cvReleaseImage(&singleChannel2);
		cvReleaseImage(&diff);
		cvReleaseImage(&abs);
	}
	
	cvReleaseImageHeader(&pIplInputImage);
	cvReleaseImageHeader(&pIplOutputImage);
}
Example No. 21
int main(int argc, char *argv[])
{
	IplImage* img = 0;
	int height,width,step,channels;
	unsigned char *data;

	// load an image
	img= cvLoadImage("kantai.png");
	if(!img){
		printf("Could not load image file: kantai.png\n");
		exit(1);
	}

	// get the image data
	height    = img->height;
	width     = img->width;
	step      = img->widthStep;
	channels  = img->nChannels;
	data      = (uchar *)img->imageData;

	printf("Processing a %dx%d image with %d channels\n",height,width,channels);
	printf("step = %d\n", step);

	IplImage* imgGrayscale = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1); // 8-bit grayscale is enough.
	// convert to grayscale.
	cvCvtColor(img, imgGrayscale, CV_BGR2GRAY);

	// Create an image for the outputs
	IplImage* imgSobelX = cvCreateImage( cvGetSize(img), IPL_DEPTH_32F, 1 ); // to prevent overflow.
	IplImage* imgSobelY = cvCreateImage( cvGetSize(img), IPL_DEPTH_32F, 1 );
	IplImage* imgSobelAdded = cvCreateImage( cvGetSize(img), IPL_DEPTH_32F, 1 );
	IplImage* imgSobel = cvCreateImage( cvGetSize(img), IPL_DEPTH_8U, 1 ); // final image is enough to be an 8-bit plane.


	// Sobel
	cvSobel(imgGrayscale, imgSobelX, 1, 0, 3);
	cvSobel(imgGrayscale, imgSobelY, 0, 1, 3);
	cvAdd(imgSobelX, imgSobelY, imgSobelAdded); // note: adds the signed responses, so gradients of opposite sign can cancel
	cvConvertScaleAbs(imgSobelAdded, imgSobel); //scaled to 8-bit level; important for visibility.


	//----------------------- OUTLINE EXTRACTION -------------------------------
	// Normal diff
	IplImage* imgNormDiff = cvCreateImage(cvGetSize(img), 8, 1);
	cvCopy(imgGrayscale,imgNormDiff);
	norm_diff(imgNormDiff);

	// Roberts
	IplImage* imgRoberts = cvCreateImage(cvGetSize(img), 8, 1);
	cvCopy(imgGrayscale,imgRoberts);
	roberts(imgRoberts);

	// Sobel
	IplImage* imgSobel2 = cvCreateImage(cvGetSize(img), 8, 1);
	cvCopy(imgGrayscale,imgSobel2);
	sobel(imgSobel2);

	// Laplacian
	IplImage* imgLap = cvCreateImage(cvGetSize(img), 8, 1);
	cvCopy(imgGrayscale,imgLap);
	laplacian(imgLap);

	//--------------------------- ENHANCEMENT --------------------------------
	// Laplacian
	IplImage* imgLap2 = cvCreateImage(cvGetSize(img), 8, 3);
	IplImage* imgRed = cvCreateImage(cvGetSize(img), 8, 1);
	IplImage* imgGreen = cvCreateImage(cvGetSize(img), 8, 1);
	IplImage* imgBlue = cvCreateImage(cvGetSize(img), 8, 1);

	cvSplit(img, imgRed, imgGreen, imgBlue, NULL);	// note: OpenCV stores images as BGR, so the first plane is actually blue

	laplacian2(imgBlue);
	laplacian2(imgGreen);
	laplacian2(imgRed);
	cvMerge(imgRed,imgGreen,imgBlue, NULL, imgLap2);

	// Variant
	IplImage* imgVariant = cvCreateImage(cvGetSize(img), 8, 3);
	IplImage* imgRed2 = cvCreateImage(cvGetSize(img), 8, 1);
	IplImage* imgGreen2 = cvCreateImage(cvGetSize(img), 8, 1);
	IplImage* imgBlue2 = cvCreateImage(cvGetSize(img), 8, 1);

	cvSplit(img, imgRed2, imgGreen2, imgBlue2, NULL);

	variant(imgBlue2);
	variant(imgGreen2);
	variant(imgRed2);
	cvMerge(imgRed2,imgGreen2,imgBlue2, NULL, imgVariant);

	// Sobel
	IplImage* imgSobel3 = cvCreateImage(cvGetSize(img), 8, 3);
	IplImage* imgRed3 = cvCreateImage(cvGetSize(img), 8, 1);
	IplImage* imgGreen3 = cvCreateImage(cvGetSize(img), 8, 1);
	IplImage* imgBlue3 = cvCreateImage(cvGetSize(img), 8, 1);

	cvSplit(img, imgRed3, imgGreen3, imgBlue3, NULL);

	sobel2(imgBlue3);
	sobel2(imgGreen3);
	sobel2(imgRed3);
	cvMerge(imgRed3,imgGreen3,imgBlue3, NULL, imgSobel3);




	// create a window
	cvNamedWindow("Original", CV_WINDOW_KEEPRATIO);

	cvNamedWindow("Normal different line", CV_WINDOW_KEEPRATIO);
	cvNamedWindow("Roberts line", CV_WINDOW_FREERATIO);
	cvNamedWindow("Sobel line", CV_WINDOW_FREERATIO);
	cvNamedWindow("Laplacian line", CV_WINDOW_KEEPRATIO);

	cvNamedWindow("Laplacian Color", CV_WINDOW_KEEPRATIO);
	cvNamedWindow("Variant", CV_WINDOW_KEEPRATIO);
	cvNamedWindow("Sobel", CV_WINDOW_KEEPRATIO);
	/*cvNamedWindow( "Sobel-x" );
  cvNamedWindow( "Sobel-y" );
  cvNamedWindow( "Sobel-Added" );
  cvNamedWindow( "Sobel-Added (scaled)" );*/

	// show the image
	cvShowImage("Original", img);
	cvShowImage("Normal different line", imgNormDiff);
	cvShowImage("Roberts line",imgRoberts);
	cvShowImage("Sobel line", imgSobel2);
	cvShowImage("Laplacian line", imgLap);

	cvShowImage("Laplacian Color", imgLap2);
	cvShowImage("Variant", imgVariant);
	cvShowImage("Sobel", imgSobel3);

	/*cvShowImage("Sobel-x", imgSobelX);
  cvShowImage("Sobel-y", imgSobelY);
  cvShowImage("Sobel-Added", imgSobelAdded);
  cvShowImage("Sobel-Added (scaled)", imgSobel);*/

	// wait for a key
	cvWaitKey(0);

	// release the image
	cvReleaseImage(&img);
	cvReleaseImage(&imgGrayscale);
	cvReleaseImage(&imgNormDiff);
	cvReleaseImage(&imgRoberts);
	cvReleaseImage(&imgSobel2);
	cvReleaseImage(&imgLap);

	cvReleaseImage(&imgLap2);
	cvReleaseImage(&imgVariant);
	cvReleaseImage(&imgSobel3);

	cvReleaseImage(&imgSobelX);
	cvReleaseImage(&imgSobelY);
	cvReleaseImage(&imgSobelAdded);
	cvReleaseImage(&imgSobel);


	return 0;
}