Example #1
File: cd.c Project: j0sh/thesis
static int* block_coeffs(IplImage *img, int* plane_coeffs) {
    CvSize size = cvGetSize(img);
    IplImage *b = cvCreateImage(size, IPL_DEPTH_8U, 1);
    IplImage *g = cvCreateImage(size, IPL_DEPTH_8U, 1);
    IplImage *r = cvCreateImage(size, IPL_DEPTH_8U, 1);
    IplImage *trans = cvCreateImage(size, IPL_DEPTH_16S, 1);
    int dim = plane_coeffs[0] + plane_coeffs[1] + plane_coeffs[2];
    int sz = size.width*size.height/64*dim;
    int *buf = malloc(sizeof(int)*sz);
    unsigned *order_p0 = build_path(plane_coeffs[0], KERNS);
    unsigned *order_p1 = build_path(plane_coeffs[1], KERNS);
    unsigned *order_p2 = build_path(plane_coeffs[2], KERNS);

    cvSplit(img, b, g, r, NULL);

    wht2d(b, trans);
    quantize(trans, plane_coeffs[0], KERNS, order_p0, buf, dim);

    wht2d(g, trans);
    quantize(trans, plane_coeffs[1], KERNS, order_p1,
        buf+plane_coeffs[0], dim);

    wht2d(r, trans);
    quantize(trans, plane_coeffs[2], KERNS, order_p2,
        buf+plane_coeffs[0]+plane_coeffs[1], dim);

    cvReleaseImage(&trans);
    cvReleaseImage(&b);
    cvReleaseImage(&g);
    cvReleaseImage(&r);
    free(order_p0);
    free(order_p1);
    free(order_p2);

    return buf;
}
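A minimal caller sketch for block_coeffs, assuming a 3-channel 8-bit input whose dimensions are multiples of 8 (the function packs dim coefficients per 8x8 block); main, the image path and the per-plane counts are hypothetical, and the caller owns the returned malloc'd buffer:

#include <stdlib.h>
#include <opencv/cv.h>
#include <opencv/highgui.h>

int main(void)
{
    IplImage *img = cvLoadImage("input.png", CV_LOAD_IMAGE_COLOR); /* hypothetical path */
    int plane_coeffs[] = {4, 2, 2}; /* illustrative per-plane coefficient counts */
    if (!img) return 1;
    int *coeffs = block_coeffs(img, plane_coeffs); /* width*height/64 * (4+2+2) ints */
    /* ... use the packed coefficients ... */
    free(coeffs);           /* caller owns the buffer */
    cvReleaseImage(&img);
    return 0;
}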
Example #2
void CamShift::Track(IplImage *frame, CvRect &selection, bool calc_hist)
{
	int i, bin_w, c;

	cvCvtColor( frame, _hsv, CV_BGR2HSV );

	cvInRangeS( _hsv, cvScalar(0,_smin,MIN(_vmin,_vmax),0),
		cvScalar(180,256,MAX(_vmin,_vmax),0), _mask );
	cvSplit( _hsv, _hue, 0, 0, 0 );

	if(calc_hist)
	{
		float max_val = 0.f;
		cvSetImageROI( _hue, selection );
		cvSetImageROI( _mask, selection );
		cvCalcHist( &_hue, _hist, 0, _mask );
		cvGetMinMaxHistValue( _hist, 0, &max_val, 0, 0 );
		cvConvertScale( _hist->bins, _hist->bins, max_val ? 255. / max_val : 0., 0 );
		cvResetImageROI( _hue );
		cvResetImageROI( _mask );
		_track_window = selection; 
	}

	cvCalcBackProject( &_hue, _backproject, _hist );
	cvAnd( _backproject, _mask, _backproject, 0 );
	cvCamShift( _backproject, _track_window,
		cvTermCriteria( CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1 ),
		&_track_comp, &_track_box );
	_track_window = _track_comp.rect;

	if( frame->origin )
		_track_box.angle = -_track_box.angle;

	selection = cvRect(_track_box.center.x-_track_box.size.width/2, _track_box.center.y-_track_box.size.height/2,
		selection.width, selection.height);
}
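A sketch of the intended call pattern, assuming the CamShift instance allocates _hsv, _hue, _mask, _backproject and _hist to the frame size elsewhere; the capture loop and the initial selection rectangle are hypothetical:

CamShift tracker;                       /* members assumed pre-allocated elsewhere */
CvCapture *cap = cvCreateCameraCapture(0);
CvRect sel = cvRect(100, 100, 80, 80);  /* hypothetical initial selection */
bool first = true;
IplImage *frame;
while ((frame = cvQueryFrame(cap)) != NULL) {
    tracker.Track(frame, sel, first);   /* recompute the histogram only on the first frame */
    first = false;
}
cvReleaseCapture(&cap);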
Example #3
IplImage* EqualizeHistColorImage(IplImage *pImage)
{
    IplImage *pEquaImage = cvCreateImage(cvGetSize(pImage), pImage->depth, 3);

    // Split the original image into channels, equalize each channel, then merge
    // them back: histogram equalization of a color image
    const int MAX_CHANNEL = 4;
    IplImage *pImageChannel[MAX_CHANNEL] = {NULL};

    int i;
    for (i = 0; i < pImage->nChannels; i++)
        pImageChannel[i] = cvCreateImage(cvGetSize(pImage), pImage->depth, 1);

    cvSplit(pImage, pImageChannel[0], pImageChannel[1], pImageChannel[2], pImageChannel[3]);

    for (i = 0; i < pImage->nChannels; i++)
        cvEqualizeHist(pImageChannel[i], pImageChannel[i]);

    cvMerge(pImageChannel[0], pImageChannel[1], pImageChannel[2], pImageChannel[3], pEquaImage);

    for (i = 0; i < pImage->nChannels; i++)
        cvReleaseImage(&pImageChannel[i]);

    return pEquaImage;
}
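Note that equalizing the B, G and R channels independently, as this snippet does, can shift hues. A minimal usage sketch (the image path is hypothetical):

IplImage *src = cvLoadImage("input.jpg", CV_LOAD_IMAGE_COLOR); /* hypothetical path */
if (src) {
    IplImage *eq = EqualizeHistColorImage(src);
    cvNamedWindow("equalized", CV_WINDOW_AUTOSIZE);
    cvShowImage("equalized", eq);
    cvWaitKey(0);
    cvReleaseImage(&eq);
    cvReleaseImage(&src);
}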
Example #4
void sum_rgb(IplImage* src, IplImage* dst) {
	// Allocate individual image planes.
	IplImage* r = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
	IplImage* g = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
	IplImage* b = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);

	// Temporary storage.
	IplImage* s = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);

	// Split image into color planes. Note: cvSplit emits channels in storage
	// order, so for a BGR source these planes are actually B, G, R despite the
	// names; the equally weighted sum below is unaffected.
	cvSplit(src, r, g, b, NULL);

	// Add equally weighted rgb values.
	cvAddWeighted(r, 1. / 3., g, 1. / 3., 0.0, s);
	cvAddWeighted(s, 2. / 3., b, 1. / 3., 0.0, s);

	// Truncate values above 100.
	cvThreshold(s, dst, 100, 100, CV_THRESH_TRUNC);

	cvReleaseImage(&r);
	cvReleaseImage(&g);
	cvReleaseImage(&b);
	cvReleaseImage(&s);
}
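sum_rgb expects dst to be a pre-allocated single-channel 8-bit image of the same size as src; a short caller sketch under that assumption (the path is hypothetical):

IplImage *src = cvLoadImage("input.jpg", CV_LOAD_IMAGE_COLOR);  /* hypothetical path */
IplImage *dst = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1); /* single channel, as sum_rgb writes */
sum_rgb(src, dst);
cvNamedWindow("sum_rgb", CV_WINDOW_AUTOSIZE);
cvShowImage("sum_rgb", dst);
cvWaitKey(0);
cvReleaseImage(&dst);
cvReleaseImage(&src);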
Example #5
int convRGB(IplImage* srcRGB, IplImage* dstRGB, CvSize sizIm)
{
	// create the working images
	srcR = cvCreateImage( sizIm, IPL_DEPTH_8U, 1 );
	srcG = cvCreateImage( sizIm, IPL_DEPTH_8U, 1 );
	srcB = cvCreateImage( sizIm, IPL_DEPTH_8U, 1 );

	srcRR = cvCreateImage( sizIm, IPL_DEPTH_8U, 1 );
	srcGR = cvCreateImage( sizIm, IPL_DEPTH_8U, 1 );
	srcBR = cvCreateImage( sizIm, IPL_DEPTH_8U, 1 );

	// split into channels
	cvSplit(srcRGB, srcB, srcG, srcR, 0);
		
	// apply the range bounds to each channel
	cvInRangeS(srcR, cvScalar(Rmin), cvScalar(Rmax), srcRR);
	cvInRangeS(srcG, cvScalar(Gmin), cvScalar(Gmax), srcGR);
	cvInRangeS(srcB, cvScalar(Bmin), cvScalar(Bmax), srcBR);

	// "ñêëåèâàåì" êàíàëû
	cvAnd( srcRR, srcGR, dstRGB );
	cvAnd( dstRGB, srcBR, dstRGB );

	// show the resulting image in a window
	cvShowImage("RGBVideo", dstRGB);

	// free resources
	cvReleaseImage( &srcR );
	cvReleaseImage( &srcG );
	cvReleaseImage( &srcB );
	cvReleaseImage( &srcRR );
	cvReleaseImage( &srcGR );
	cvReleaseImage( &srcBR );
	
	return 0;
}
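convRGB relies on globals defined elsewhere in the project: the six scratch images and the per-channel bounds. A sketch of plausible declarations (the threshold values are illustrative assumptions, not the project's numbers):

/* hypothetical globals matching convRGB's references */
IplImage *srcR, *srcG, *srcB;    /* channel planes, reallocated every call */
IplImage *srcRR, *srcGR, *srcBR; /* per-channel range masks */
int Rmin = 150, Rmax = 255;      /* illustrative bounds for a reddish target */
int Gmin = 0,   Gmax = 100;
int Bmin = 0,   Bmax = 100;

Since the planes are recreated and released on every frame, a caller processing video could hoist the allocations out of the loop to avoid the churn.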
Example #6
void BoatDetecting::initilizeTracking(){
	cvInRangeS(hsv, cvScalar(0, smin, MIN(vmin, vmax), 0), cvScalar(180, 256, MAX(vmin, vmax), 0), mask);
	// 10,256,30
	
	cvSplit(hsv, hue, 0, 0, 0);
	if (!isTrackingInitialized){ // if the tracking window has not been initialized yet
		float max_val = 0.f;		
		cvSetImageROI(hue, selection);
		cvSetImageROI(mask, selection);		
		cvCalcHist(&hue, hist, 0, mask);
		cvGetMinMaxHistValue(hist, 0, &max_val, 0, 0);
		cvConvertScale(hist->bins, hist->bins, max_val ? 255. / max_val : 0., 0);
		cvResetImageROI(hue);
		cvResetImageROI(mask);
		trackWindow = selection;
		isTrackingInitialized = true;

	}

	cvCalcBackProject(&hue, backproject, hist);
	//cvShowImage("Hue Channel",backproject);
	
	cvAnd(backproject, mask, backproject, 0);
	
	cvCamShift(backproject, trackWindow, cvTermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 15, 2), &trackComp, 0); // once the window is initialized, trackWindow itself is used for tracking and is updated every frame
	

	//if (trackComp.rect.width<90 && trackComp.rect.y<200){
	//	trackWindow = trackComp.rect;
	//}
	//if (trackComp.rect.y>200)
	//{
	//	trackWindow = trackComp.rect;
	//}
	trackWindow = trackComp.rect;
}
Example #7
int main()
{
    const IplImage* im1 = cvLoadImage("302.png",0);
    const IplImage* im2 = cvLoadImage("303.png",0);
    //int w_s = 10;
    int w = im1->width;
    int h = im1->height;
    //printf("Width = %d\nHeight = %d\n",w,h);
    CvMat* vel = cvCreateMat(h,w,CV_32FC2);
    CvMat* velx = cvCreateMat(h,w,CV_32FC1);
    CvMat* vely = cvCreateMat(h,w,CV_32FC1);
    CvMat* u = cvCreateMat(h/10, w/10, CV_32FC1); // Averaged Optical flows
    CvMat* v = cvCreateMat(h/10, w/10, CV_32FC1);

   //printf("matDimU = %d %d\nMatDimVel = %d %d\n ",cvGetMatSize(u),cvGetMatSize(velx));
   //printf("Ptr = %d %d \n",im1->data.ptr,velx->data.ptr);
    //cvCalcOpticalFlowLK(im1,im2,cvSize(4,4),velx,vely);
    //cvCalcOpticalFlowFarneback(const CvArr* prev, const CvArr* next, CvArr* flow,
    //          double pyr_scale, int levels, int winsize, int iterations, int poly_n, double poly_sigma, int flags) flag means to use Gaussian smoothing
    cvCalcOpticalFlowFarneback(im1, im2, vel,0.5, 1, 2, 2, 2, 0.17, 0);//, iterations, poly_n, poly_sigma
    cvSplit(vel, velx, vely, NULL, NULL);
    average_flow(velx, u);
    average_flow(vely, v);

    /*//cvSave("u.xml", u);
    //cvSave("v.xml", v);*/
    saveMat(u,"ux.m");
    saveMat(v,"vy.m");

/*    CvMat* Big = cvCreateMat(50,50,CV_32FC1);
    cvSetIdentity(Big);
    CvMat* small = cvCreateMat(5,5,CV_32FC1);
    average_flow(Big,small);
    printMat(small);*/
    return 0;
}
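average_flow and saveMat are project helpers that are not shown. Given that u and v are h/10 x w/10, average_flow presumably averages each 10x10 cell of one flow component; a hypothetical implementation under that assumption:

/* hypothetical helper: averages each cell of the dense flow into one entry
   of the smaller matrix (both matrices are CV_32FC1) */
static void average_flow(const CvMat *flow, CvMat *avg)
{
    int bh = flow->rows / avg->rows, bw = flow->cols / avg->cols;
    for (int r = 0; r < avg->rows; r++) {
        for (int c = 0; c < avg->cols; c++) {
            CvMat cell;
            cvGetSubRect(flow, &cell, cvRect(c * bw, r * bh, bw, bh));
            cvmSet(avg, r, c, cvAvg(&cell, NULL).val[0]); /* mean over the cell */
        }
    }
}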
Example #8
static GstFlowReturn gst_gcs_transform_ip(GstBaseTransform * btrans, GstBuffer * gstbuf) 
{
  GstGcs *gcs = GST_GCS (btrans);

  GST_GCS_LOCK (gcs);

  //////////////////////////////////////////////////////////////////////////////
  // get image data from the input, which is RGBA or BGRA
  gcs->pImageRGBA->imageData = (char*)GST_BUFFER_DATA(gstbuf);
  cvSplit(gcs->pImageRGBA,   gcs->pImgCh1, gcs->pImgCh2, gcs->pImgCh3, gcs->pImgChX );
  cvCvtColor(gcs->pImageRGBA,  gcs->pImgRGB, CV_BGRA2BGR);


  //////////////////////////////////////////////////////////////////////////////
  ////////////////////////////////////////////////////////MOTION CUES INTEGR////
  //////////////////////////////////////////////////////////////////////////////

  //////////////////////////////////////////////////////////////////////////////
  // apply step 1. filtering using bilateral filter. Cannot happen in-place => scratch
  cvSmooth(gcs->pImgRGB, gcs->pImgScratch, CV_BILATERAL, 3, 50, 3, 0);
  // create GRAY image
  cvCvtColor(gcs->pImgScratch, gcs->pImgGRAY, CV_BGR2GRAY);

  // Frame difference the GRAY and the previous one
  // note the order: the frames are smoothed first, then differenced
  cvCopy( gcs->pImgGRAY,   gcs->pImgGRAY_copy,  NULL);
  cvCopy( gcs->pImgGRAY_1, gcs->pImgGRAY_1copy, NULL);
  get_frame_difference( gcs->pImgGRAY_copy, gcs->pImgGRAY_1copy, gcs->pImgGRAY_diff);
  cvErode( gcs->pImgGRAY_diff, gcs->pImgGRAY_diff, NULL, 3);
  cvDilate( gcs->pImgGRAY_diff, gcs->pImgGRAY_diff, NULL, 3);


  //////////////////////////////////////////////////////////////////////////////
  //////////////////////////////////////////////////////////////////////////////
  // ghost mapping
  gcs->dstTri[0].x = gcs->facepos.x - gcs->facepos.width/2 ;
  gcs->dstTri[0].y = gcs->facepos.y - gcs->facepos.height/2;
  gcs->dstTri[1].x = gcs->facepos.x - gcs->facepos.width/2;
  gcs->dstTri[1].y = gcs->facepos.y + gcs->facepos.height/2;
  gcs->dstTri[2].x = gcs->facepos.x + gcs->facepos.width/2;
  gcs->dstTri[2].y = gcs->facepos.y + gcs->facepos.height/2;

  if( gcs->ghostfilename){
    cvGetAffineTransform( gcs->srcTri, gcs->dstTri, gcs->warp_mat );
    cvWarpAffine( gcs->cvGhostBwResized, gcs->cvGhostBwAffined, gcs->warp_mat );
  }




  //////////////////////////////////////////////////////////////////////////////
  //////////////////////////////////////////////////////////////////////////////
  // GrabCut algorithm preparation and running

  gcs->facepos.x = gcs->facepos.x - gcs->facepos.width/2;
  gcs->facepos.y = gcs->facepos.y - gcs->facepos.height/2;

  // create an IplImage  with the skin colour pixels as 255
  compose_skin_matrix(gcs->pImgRGB, gcs->pImg_skin);
  // And the skin pixels with the movement mask
  cvAnd( gcs->pImg_skin,  gcs->pImgGRAY_diff,  gcs->pImgGRAY_diff);
  //cvErode( gcs->pImgGRAY_diff, gcs->pImgGRAY_diff, cvCreateStructuringElementEx(5, 5, 3, 3, CV_SHAPE_RECT,NULL), 1);
  cvDilate(gcs->pImgGRAY_diff, gcs->pImgGRAY_diff, cvCreateStructuringElementEx(7,7, 5,5, CV_SHAPE_RECT,NULL), 2);
  cvErode( gcs->pImgGRAY_diff, gcs->pImgGRAY_diff, cvCreateStructuringElementEx(5,5, 3,3, CV_SHAPE_RECT,NULL), 2);

  // if the incoming alpha is all 1's, ignore it: guards against having no
  // vibe (background segmentation) element upstream of us
  if((0.75*(gcs->width * gcs->height) <= cvCountNonZero(gcs->pImgChX)))
    cvZero(gcs->pImgChX);
  // OR the input Alpha
  cvOr( gcs->pImgChX,  gcs->pImgGRAY_diff,  gcs->pImgGRAY_diff);


  //////////////////////////////////////////////////////////////////////////////
  // try to consolidate a single mask from all the sub-patches
  cvDilate(gcs->pImgGRAY_diff, gcs->pImgGRAY_diff, cvCreateStructuringElementEx(7,7, 5,5, CV_SHAPE_RECT,NULL), 3);
  cvErode( gcs->pImgGRAY_diff, gcs->pImgGRAY_diff, cvCreateStructuringElementEx(5,5, 3,3, CV_SHAPE_RECT,NULL), 4);

  //////////////////////////////////////////////////////////////////////////////
  // use either Ghost or boxes-model to create a PR foreground starting point in gcs->grabcut_mask
  if( gcs->ghostfilename)
    compose_grabcut_seedmatrix3(gcs->grabcut_mask, gcs->cvGhostBwAffined, gcs->pImgGRAY_diff  );
  else{
    // toss it all to the bbox creation function, together with the face position and size
    compose_grabcut_seedmatrix2(gcs->grabcut_mask, gcs->facepos, gcs->pImgGRAY_diff, gcs->facefound );
  }


  //////////////////////////////////////////////////////////////////////////////
#ifdef KMEANS
  gcs->num_clusters = 18; // keep it even to simplify integer arithmetic
  cvCopy(gcs->pImgRGB, gcs->pImgRGB_kmeans, NULL);
  posterize_image(gcs->pImgRGB_kmeans);
  create_kmeans_clusters(gcs->pImgRGB_kmeans, gcs->kmeans_points, gcs->kmeans_clusters, 
                         gcs->num_clusters, gcs->num_samples);
  adjust_bodybbox_w_clusters(gcs->grabcut_mask, gcs->pImgRGB_kmeans, gcs->num_clusters, gcs->facepos);
#endif //KMEANS


  //////////////////////////////////////////////////////////////////////////////
  if( gcs->debug < 70)
    run_graphcut_iteration( &(gcs->GC), gcs->pImgRGB, gcs->grabcut_mask, &gcs->bbox_prev);



  // get a copy of GRAY for the next iteration
  cvCopy(gcs->pImgGRAY, gcs->pImgGRAY_1, NULL);

  //////////////////////////////////////////////////////////////////////////////
  // if we want to display, just overwrite the output
  if( gcs->display ){
    int outputimage = gcs->debug;
    switch( outputimage ){
    case 1: // output the GRAY difference
      cvCvtColor( gcs->pImgGRAY_diff, gcs->pImgRGB, CV_GRAY2BGR );
      break;
    case 50:// Ghost remapped
      cvCvtColor( gcs->cvGhostBwAffined, gcs->pImgRGB, CV_GRAY2BGR );
      break;
    case 51:// Ghost applied
      cvAnd( gcs->cvGhostBwAffined, gcs->pImgGRAY, gcs->pImgGRAY, NULL );
      cvCvtColor( gcs->pImgGRAY, gcs->pImgRGB, CV_GRAY2BGR );
      break;
    case 60:// Graphcut
      cvAndS(gcs->grabcut_mask, cvScalar(1), gcs->grabcut_mask, NULL);  // get only FG
      cvConvertScale( gcs->grabcut_mask, gcs->grabcut_mask, 127.0);
      cvCvtColor( gcs->grabcut_mask, gcs->pImgRGB, CV_GRAY2BGR );
      break;
    case 61:// Graphcut applied on input/output image
      cvAndS(gcs->grabcut_mask, cvScalar(1), gcs->grabcut_mask, NULL);  // get only FG, PR_FG
      cvConvertScale( gcs->grabcut_mask, gcs->grabcut_mask, 255.0);
      cvAnd( gcs->grabcut_mask,  gcs->pImgGRAY,  gcs->pImgGRAY, NULL);
      cvCvtColor( gcs->pImgGRAY, gcs->pImgRGB, CV_GRAY2BGR );

      cvRectangle(gcs->pImgRGB, cvPoint(gcs->bbox_now.x, gcs->bbox_now.y), 
                  cvPoint(gcs->bbox_now.x + gcs->bbox_now.width, gcs->bbox_now.y+gcs->bbox_now.height),
                  cvScalar(127,0.0), 1, 8, 0 );
     break;
    case 70:// bboxes
      cvZero( gcs->pImgGRAY );
      cvMul( gcs->grabcut_mask,  gcs->grabcut_mask,  gcs->pImgGRAY, 40.0 );
      cvCvtColor( gcs->pImgGRAY, gcs->pImgRGB, CV_GRAY2BGR );
      break;
    case 71:// bboxes applied on the original image
      cvAndS(gcs->grabcut_mask, cvScalar(1), gcs->grabcut_mask, NULL);  // get only FG, PR_FG
      cvMul( gcs->grabcut_mask,  gcs->pImgGRAY,  gcs->pImgGRAY, 1.0 );
      cvCvtColor( gcs->pImgGRAY, gcs->pImgRGB, CV_GRAY2BGR );
      break;
    case 72: // input alpha channel mapped to output
      cvCvtColor( gcs->pImgChX, gcs->pImgRGB, CV_GRAY2BGR );
      break;
#ifdef KMEANS
    case 80:// k-means output
      cvCopy(gcs->pImgRGB_kmeans, gcs->pImgRGB, NULL);
      break;
    case 81:// k-means output filtered with bbox/ghost mask
      cvSplit(gcs->pImgRGB_kmeans, gcs->pImgCh1, gcs->pImgCh2, gcs->pImgCh3, NULL        );
      cvAndS(gcs->grabcut_mask, cvScalar(1), gcs->grabcut_mask, NULL);  // get FG and PR_FG
      cvConvertScale( gcs->grabcut_mask, gcs->grabcut_mask, 255.0);     // scale any to 255.

      cvAnd( gcs->grabcut_mask,  gcs->pImgCh1,  gcs->pImgCh1, NULL );
      cvAnd( gcs->grabcut_mask,  gcs->pImgCh2,  gcs->pImgCh2, NULL );
      cvAnd( gcs->grabcut_mask,  gcs->pImgCh3,  gcs->pImgCh3, NULL );

      cvMerge(              gcs->pImgCh1, gcs->pImgCh2, gcs->pImgCh3, NULL, gcs->pImgRGB);
      break;
#endif //KMEANS
    default:
      break;
    }
  }

  //////////////////////////////////////////////////////////////////////////////
  // in any case, copy the fg/bg mask into the output image's alpha channel
  cvSplit(gcs->pImgRGB, gcs->pImgCh1, gcs->pImgCh2, gcs->pImgCh3, NULL        );
  cvAndS(gcs->grabcut_mask, cvScalar(1), gcs->grabcut_mask, NULL);  // get only FG and possible FG
  cvConvertScale( gcs->grabcut_mask, gcs->grabcut_mask, 255.0);
  gcs->pImgChA->imageData = (char*)gcs->grabcut_mask->data.ptr;

  cvMerge(              gcs->pImgCh1, gcs->pImgCh2, gcs->pImgCh3, gcs->pImgChA, gcs->pImageRGBA);

  gcs->numframes++;

  GST_GCS_UNLOCK (gcs);  
  
  return GST_FLOW_OK;
}
Example #9
CvMat* cvShowDFT1(IplImage* im, int dft_M, int dft_N,char* src)
{
    IplImage* realInput;
    IplImage* imaginaryInput;
    IplImage* complexInput;

    CvMat* dft_A, tmp;

    IplImage* image_Re;
    IplImage* image_Im;

    char str[80];

    double m, M;

    realInput = cvCreateImage( cvGetSize(im), IPL_DEPTH_64F, 1);
    imaginaryInput = cvCreateImage( cvGetSize(im), IPL_DEPTH_64F, 1);
    complexInput = cvCreateImage( cvGetSize(im), IPL_DEPTH_64F, 2);

    cvScale(im, realInput, 1.0, 0.0);
    cvZero(imaginaryInput);
    cvMerge(realInput, imaginaryInput, NULL, NULL, complexInput);

    dft_A = cvCreateMat( dft_M, dft_N, CV_64FC2 );
    image_Re = cvCreateImage( cvSize(dft_N, dft_M), IPL_DEPTH_64F, 1);
    image_Im = cvCreateImage( cvSize(dft_N, dft_M), IPL_DEPTH_64F, 1);

    // copy A to dft_A and pad dft_A with zeros
    cvGetSubRect( dft_A, &tmp, cvRect(0,0, im->width, im->height));
    cvCopy( complexInput, &tmp, NULL );
    if( dft_A->cols > im->width )
    {
        cvGetSubRect( dft_A, &tmp, cvRect(im->width,0, dft_A->cols - im->width, im->height));
        cvZero( &tmp );
    }

    // no need to pad the bottom part of dft_A with zeros because we
    // use the nonzero_rows parameter in the cvDFT() call below

    cvDFT( dft_A, dft_A, CV_DXT_FORWARD, complexInput->height );

    strcpy(str,"DFT -");
    strcat(str,src);
    cvNamedWindow(str, 0);

    // Split Fourier in real and imaginary parts
    cvSplit( dft_A, image_Re, image_Im, 0, 0 );

    // Compute the magnitude of the spectrum Mag = sqrt(Re^2 + Im^2)
    cvPow( image_Re, image_Re, 2.0);
    cvPow( image_Im, image_Im, 2.0);
    cvAdd( image_Re, image_Im, image_Re, NULL);
    cvPow( image_Re, image_Re, 0.5 );

    // Compute log(1 + Mag)
    cvAddS( image_Re, cvScalarAll(1.0), image_Re, NULL ); // 1 + Mag
    cvLog( image_Re, image_Re ); // log(1 + Mag)

    cvMinMaxLoc(image_Re, &m, &M, NULL, NULL, NULL);
    cvScale(image_Re, image_Re, 1.0/(M-m), 1.0*(-m)/(M-m));
    cvShowImage(str, image_Re);
    return(dft_A);
}
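A usage sketch for cvShowDFT1 (the image path is hypothetical); the returned spectrum matrix is owned by the caller:

IplImage *im = cvLoadImage("input.png", CV_LOAD_IMAGE_GRAYSCALE); /* hypothetical path */
int dft_M = cvGetOptimalDFTSize(im->height - 1);
int dft_N = cvGetOptimalDFTSize(im->width - 1);
CvMat *spectrum = cvShowDFT1(im, dft_M, dft_N, (char *)"input");
cvWaitKey(0);
cvReleaseMat(&spectrum); /* caller owns the returned DFT */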
Example #10
int main(int argc, char ** argv)
{
    int height,width,step,channels,depth;
    uchar* data1;

    CvMat *dft_A;
    CvMat *dft_B;

    CvMat *dft_C;
    IplImage* im;
    IplImage* im1;

    IplImage* image_ReB;
    IplImage* image_ImB;

    IplImage* image_ReC;
    IplImage* image_ImC;
    IplImage* complex_ImC;
    CvScalar val;

    IplImage* k_image_hdr;
    int i,j,k;

    FILE *fp;

    int dft_M,dft_N;
    int dft_M1,dft_N1;

    CvMat* cvShowDFT1(IplImage*, int, int,char*);
    void cvShowInvDFT1(IplImage*, CvMat*, int, int,char*);

    im1 = cvLoadImage( "../homer.jpg",1 );

    cvNamedWindow("Original-color", 0);
    cvShowImage("Original-color", im1);

    im = cvLoadImage( "../homer.jpg", CV_LOAD_IMAGE_GRAYSCALE );

    if( !im )
        return -1;

    cvNamedWindow("Original-gray", 0);
    cvShowImage("Original-gray", im);

    // Create a random noise matrix
    // (note: the buffer is 8-bit to match the CV_8UC1 header below, and the
    // indexing assumes an image width of 383)
    fp = fopen("test.txt","w+");
    uchar val_noise[357*383];
    for(i=0; i <im->height;i++){
        for(j=0;j<im->width;j++){
            fprintf(fp, "%d ",(383*i+j));
            val_noise[383*i+j] = rand() % 128;
        }
        fprintf(fp, "\n");
    }

    CvMat noise = cvMat(im->height,im->width, CV_8UC1,val_noise);

    // Add the random noise matrix to the image
    cvAdd(im,&noise,im, 0);

    cvNamedWindow("Original + Noise", 0);
    cvShowImage("Original + Noise", im);

    cvSmooth( im, im, CV_GAUSSIAN, 7, 7, 0.5, 0.5 );
    cvNamedWindow("Gaussian Smooth", 0);
    cvShowImage("Gaussian Smooth", im);

    // Create a blur kernel
    IplImage* k_image;
    float r = rad;
    float radius=((int)(r)*2+1)/2.0;

    int rowLength=(int)(2*radius);
    printf("rowlength %d/n",rowLength);
    float kernels[rowLength*rowLength];
    printf("rowl: %i",rowLength);
    int norm=0; //Normalization factor
    int x,y;
    CvMat kernel;
    for(x = 0; x < rowLength; x++)
        for (y = 0; y < rowLength; y++)
            if (sqrt((x - (int)(radius) ) * (x - (int)(radius) ) + (y - (int)(radius))* (y - (int)(radius))) <= (int)(radius))
                norm++;
    // Populate matrix
    for (y = 0; y < rowLength; y++) //populate array with values
    {
        for (x = 0; x < rowLength; x++) {
            if (sqrt((x - (int)(radius) ) * (x - (int)(radius) ) + (y - (int)(radius))
                     * (y - (int)(radius))) <= (int)(radius)) {
                //kernels[y * rowLength + x] = 255;
                kernels[y * rowLength + x] =1.0/norm;
                printf("%f ",1.0/norm);
            }
            else{
                kernels[y * rowLength + x] =0;
            }
        }
    }

    kernel= cvMat(rowLength, // number of rows
                  rowLength, // number of columns
                  CV_32FC1, // matrix data type
                  kernels); // kernel data (the array decays to a pointer)
    k_image_hdr = cvCreateImageHeader( cvSize(rowLength,rowLength), IPL_DEPTH_32F,1);
    k_image = cvGetImage(&kernel,k_image_hdr);

    height = k_image->height;
    width = k_image->width;
    step = k_image->widthStep/sizeof(float);
    depth = k_image->depth;

    channels = k_image->nChannels;
    //data1 = (float *)(k_image->imageData);
    data1 = (uchar *)(k_image->imageData);
    cvNamedWindow("blur kernel", 0);
    cvShowImage("blur kernel", k_image);

    dft_M = cvGetOptimalDFTSize( im->height - 1 );
    dft_N = cvGetOptimalDFTSize( im->width - 1 );

    //dft_M1 = cvGetOptimalDFTSize( im->height+99 - 1 );
    //dft_N1 = cvGetOptimalDFTSize( im->width+99 - 1 );

    dft_M1 = cvGetOptimalDFTSize( im->height+3 - 1 );
    dft_N1 = cvGetOptimalDFTSize( im->width+3 - 1 );

    printf("dft_N1=%d,dft_M1=%d/n",dft_N1,dft_M1);

    // Perform DFT of original image
    dft_A = cvShowDFT1(im, dft_M1, dft_N1,"original");
    //Perform inverse (check)
    //cvShowInvDFT1(im,dft_A,dft_M1,dft_N1, "original"); - Commented as it overwrites the DFT

    // Perform DFT of kernel
    dft_B = cvShowDFT1(k_image,dft_M1,dft_N1,"kernel");
    //Perform inverse of kernel (check)
    //cvShowInvDFT1(k_image,dft_B,dft_M1,dft_N1, "kernel");- Commented as it overwrites the DFT

    // Multiply numerator with complex conjugate
    dft_C = cvCreateMat( dft_M1, dft_N1, CV_64FC2 );

    printf("%d %d %d %d/n",dft_M,dft_N,dft_M1,dft_N1);

    // Multiply DFT(blurred image) * complex conjugate of blur kernel
    cvMulSpectrums(dft_A,dft_B,dft_C,CV_DXT_MUL_CONJ);
    //cvShowInvDFT1(im,dft_C,dft_M1,dft_N1,"blur1");

    // Split Fourier in real and imaginary parts
    image_ReC = cvCreateImage( cvSize(dft_N1, dft_M1), IPL_DEPTH_64F, 1);
    image_ImC = cvCreateImage( cvSize(dft_N1, dft_M1), IPL_DEPTH_64F, 1);
    complex_ImC = cvCreateImage( cvSize(dft_N1, dft_M1), IPL_DEPTH_64F, 2);

    printf("%d %d %d %d/n",dft_M,dft_N,dft_M1,dft_N1);

    cvSplit( dft_C, image_ReC, image_ImC, 0, 0 );

    // Compute A^2 + B^2 of denominator or blur kernel
    image_ReB = cvCreateImage( cvSize(dft_N1, dft_M1), IPL_DEPTH_64F, 1);
    image_ImB = cvCreateImage( cvSize(dft_N1, dft_M1), IPL_DEPTH_64F, 1);

    // Split Real and imaginary parts
    cvSplit( dft_B, image_ReB, image_ImB, 0, 0 );
    cvPow( image_ReB, image_ReB, 2.0);
    cvPow( image_ImB, image_ImB, 2.0);
    cvAdd(image_ReB, image_ImB, image_ReB,0);

    val = cvScalarAll(kappa);
    cvAddS(image_ReB,val,image_ReB,0);

    //Divide Numerator/A^2 + B^2
    cvDiv(image_ReC, image_ReB, image_ReC, 1.0);
    cvDiv(image_ImC, image_ReB, image_ImC, 1.0);

    // Merge Real and complex parts
    cvMerge(image_ReC, image_ImC, NULL, NULL, complex_ImC);

    // Perform Inverse
    cvShowInvDFT1(im, (CvMat *)complex_ImC,dft_M1,dft_N1,"O/p Wiener k=1 rad=2");

    cvWaitKey(-1);
    return 0;
}
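In symbols, the restoration step above is a Wiener-style deconvolution: with $G$ the DFT of the blurred image, $H$ the DFT of the blur kernel and $\kappa$ the kappa constant, the code computes

\[ \hat{F} = \frac{H^{*}\,G}{|H|^{2} + \kappa} \]

cvMulSpectrums with CV_DXT_MUL_CONJ forms the numerator $H^{*}G$, and the two cvDiv calls apply the real denominator $|H|^{2} + \kappa$ to the real and imaginary parts separately before the inverse DFT.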
Example #11
//=========================================
CvRect camKalTrack(IplImage* frame, camshift_kalman_tracker& camKalTrk) {
//=========================================
	if (!frame) {
		printf("Input frame empty!\n");
		return camKalTrk.trackWindow;
	}

	cvCopy(frame, camKalTrk.image, 0);
	cvCvtColor(camKalTrk.image, camKalTrk.hsv, CV_BGR2HSV); // BGR to HSV

	if (camKalTrk.trackObject) {
		int _vmin = vmin, _vmax = vmax;
		cvInRangeS(camKalTrk.hsv, cvScalar(0, smin, MIN(_vmin,_vmax), 0), cvScalar(180, 256, MAX(_vmin,_vmax), 0), camKalTrk.mask); // MASK
		cvSplit(camKalTrk.hsv, camKalTrk.hue, 0, 0, 0); //  HUE
		if (camKalTrk.trackObject < 0) {
			float max_val = 0.f;
			boundaryCheck(camKalTrk.originBox, frame->width, frame->height);
			cvSetImageROI(camKalTrk.hue, camKalTrk.originBox); // for ROI
			cvSetImageROI(camKalTrk.mask, camKalTrk.originBox); // for camKalTrk.mask
			cvCalcHist(&camKalTrk.hue, camKalTrk.hist, 0, camKalTrk.mask); //
			cvGetMinMaxHistValue(camKalTrk.hist, 0, &max_val, 0, 0);
			cvConvertScale(camKalTrk.hist->bins, camKalTrk.hist->bins, max_val ? 255. / max_val : 0., 0); //  bin  [0,255]
			cvResetImageROI(camKalTrk.hue); // remove ROI
			cvResetImageROI(camKalTrk.mask);
			camKalTrk.trackWindow = camKalTrk.originBox;
			camKalTrk.trackObject = 1;
			camKalTrk.lastpoint = camKalTrk.predictpoint = cvPoint(camKalTrk.trackWindow.x + camKalTrk.trackWindow.width / 2,
					camKalTrk.trackWindow.y + camKalTrk.trackWindow.height / 2);
			getCurrState(camKalTrk.kalman, camKalTrk.lastpoint, camKalTrk.predictpoint); // input current state
		}
		//(x,y,vx,vy),
		camKalTrk.prediction = cvKalmanPredict(camKalTrk.kalman, 0); // prediction = kalman->state_pre

		camKalTrk.predictpoint = cvPoint(cvRound(camKalTrk.prediction->data.fl[0]), cvRound(camKalTrk.prediction->data.fl[1]));

		camKalTrk.trackWindow = cvRect(camKalTrk.predictpoint.x - camKalTrk.trackWindow.width / 2, camKalTrk.predictpoint.y
				- camKalTrk.trackWindow.height / 2, camKalTrk.trackWindow.width, camKalTrk.trackWindow.height);

		camKalTrk.trackWindow = checkRectBoundary(cvRect(0, 0, frame->width, frame->height), camKalTrk.trackWindow);

		camKalTrk.searchWindow = cvRect(camKalTrk.trackWindow.x - region, camKalTrk.trackWindow.y - region, camKalTrk.trackWindow.width + 2
				* region, camKalTrk.trackWindow.height + 2 * region);

		camKalTrk.searchWindow = checkRectBoundary(cvRect(0, 0, frame->width, frame->height), camKalTrk.searchWindow);

		cvSetImageROI(camKalTrk.hue, camKalTrk.searchWindow);
		cvSetImageROI(camKalTrk.mask, camKalTrk.searchWindow);
		cvSetImageROI(camKalTrk.backproject, camKalTrk.searchWindow);

		cvCalcBackProject( &camKalTrk.hue, camKalTrk.backproject, camKalTrk.hist ); // back project

		cvAnd(camKalTrk.backproject, camKalTrk.mask, camKalTrk.backproject, 0);

		camKalTrk.trackWindow = cvRect(region, region, camKalTrk.trackWindow.width, camKalTrk.trackWindow.height);

		if (camKalTrk.trackWindow.height > 5 && camKalTrk.trackWindow.width > 5) {
			// calling CAMSHIFT
			cvCamShift(camKalTrk.backproject, camKalTrk.trackWindow, cvTermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1),
					&camKalTrk.trackComp, &camKalTrk.trackBox);

			/*cvMeanShift( camKalTrk.backproject, camKalTrk.trackWindow,
			 cvTermCriteria( CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1 ),
			 &camKalTrk.trackComp);*/
		}
		else {
			camKalTrk.trackComp.rect.x = 0;
			camKalTrk.trackComp.rect.y = 0;
			camKalTrk.trackComp.rect.width = 0;
			camKalTrk.trackComp.rect.height = 0;
		}

		cvResetImageROI(camKalTrk.hue);
		cvResetImageROI(camKalTrk.mask);
		cvResetImageROI(camKalTrk.backproject);
		camKalTrk.trackWindow = camKalTrk.trackComp.rect;
		camKalTrk.trackWindow = cvRect(camKalTrk.trackWindow.x + camKalTrk.searchWindow.x, camKalTrk.trackWindow.y
				+ camKalTrk.searchWindow.y, camKalTrk.trackWindow.width, camKalTrk.trackWindow.height);

		camKalTrk.measurepoint = cvPoint(camKalTrk.trackWindow.x + camKalTrk.trackWindow.width / 2, camKalTrk.trackWindow.y
				+ camKalTrk.trackWindow.height / 2);
		camKalTrk.realposition->data.fl[0] = camKalTrk.measurepoint.x;
		camKalTrk.realposition->data.fl[1] = camKalTrk.measurepoint.y;
		camKalTrk.realposition->data.fl[2] = camKalTrk.measurepoint.x - camKalTrk.lastpoint.x;
		camKalTrk.realposition->data.fl[3] = camKalTrk.measurepoint.y - camKalTrk.lastpoint.y;
		camKalTrk.lastpoint = camKalTrk.measurepoint;//keep the current real position

		//measurement x,y
		cvMatMulAdd( camKalTrk.kalman->measurement_matrix/*2x4*/, camKalTrk.realposition/*4x1*/,/*measurementstate*/0, camKalTrk.measurement );
		cvKalmanCorrect(camKalTrk.kalman, camKalTrk.measurement);

		cvRectangle(frame, cvPoint(camKalTrk.trackWindow.x, camKalTrk.trackWindow.y), cvPoint(camKalTrk.trackWindow.x
				+ camKalTrk.trackWindow.width, camKalTrk.trackWindow.y + camKalTrk.trackWindow.height), CV_RGB(255,128,0), 4, 8, 0);
	}
	// set new selection if it exists
	if (camKalTrk.selectObject && camKalTrk.selection.width > 0 && camKalTrk.selection.height > 0) {
		cvSetImageROI(camKalTrk.image, camKalTrk.selection);
		cvXorS(camKalTrk.image, cvScalarAll(255), camKalTrk.image, 0);
		cvResetImageROI(camKalTrk.image);
	}

	return camKalTrk.trackWindow;
}
Example #12
void E_Saturation_Value::Edit(ImgFile_Ptr pFile)
{
	m_pEditDialog->SetProgPos(0);

	//convert to HSV
	cvCvtColor( m_editImage, m_hsvImage, CV_BGR2HSV );
	m_pEditDialog->SetProgPos(10);

	//split into channels
	cvSplit( m_hsvImage, m_hueImage, m_saturationImage, m_valueImage, NULL );
	m_pEditDialog->SetProgPos(20);

	//add the offset to the saturation
	cvSet( m_addData, cvScalar(abs(s_)), NULL );
	m_pEditDialog->SetProgPos(25);
	if(s_ >=0){
		cvAdd( m_saturationImage, m_addData, m_saturationImage );
	}
	else{
		cvSub( m_saturationImage, m_addData, m_saturationImage );
	}
	m_pEditDialog->SetProgPos(35);

	//add the offset to the value (brightness)
	cvSet( m_addData, cvScalar(abs(v_)), NULL );
	m_pEditDialog->SetProgPos(45);
	if(v_ >= 0){
		cvAdd( m_valueImage, m_addData, m_valueImage );
	}
	else{
		cvSub( m_valueImage, m_addData, m_valueImage );
	}
	m_pEditDialog->SetProgPos(55);

	//merge the channels
	cvMerge( m_hueImage, m_saturationImage, m_valueImage, NULL, m_hsvImage);
	m_pEditDialog->SetProgPos(65);

	//convert from HSV back to BGR
	cvCvtColor( m_hsvImage, m_hsvImage, CV_HSV2BGR );
	m_pEditDialog->SetProgPos(75);

	ucvCvtColor(m_hsvImage, m_editedImage, CV_BGR2BGRA);

	//copy the result
	m_pEditNode->edit_img.ImgBlt(
		m_pEditNode->blt_rect.left - m_pEditNode->node_rect.left,
		m_pEditNode->blt_rect.top - m_pEditNode->node_rect.top,
		m_pEditNode->blt_rect.right - m_pEditNode->blt_rect.left,
		m_pEditNode->blt_rect.bottom - m_pEditNode->blt_rect.top,
		m_editedImage,
		0, 0,
		IPLEXT_RASTER_CODE::COPY,
		m_mask,
		0, 0);
	m_pEditDialog->SetProgPos(85);

	//
	m_pEditLayerHandle->Update( &(m_pEditNode->blt_rect ));
	m_pEditDialog->SetProgPos(100);
}
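The abs()-plus-branch pattern exists because cvAdd and cvSub saturate on 8-bit data, so a negative offset must be applied as a subtraction. A self-contained illustration (values are arbitrary):

/* saturating 8-bit arithmetic: why the sign of s_/v_ selects cvAdd vs cvSub */
IplImage *a = cvCreateImage(cvSize(1, 1), IPL_DEPTH_8U, 1);
IplImage *d = cvCreateImage(cvSize(1, 1), IPL_DEPTH_8U, 1);
cvSet(a, cvScalar(250), NULL);
cvSet(d, cvScalar(20), NULL);
cvAdd(a, d, a, NULL); /* 250 + 20 clamps to 255, not 270 */
cvSub(a, d, a, NULL); /* 255 - 20 = 235; 10 - 20 would clamp to 0 */
cvReleaseImage(&a);
cvReleaseImage(&d);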
Example #13
JNIEXPORT
jbooleanArray
JNICALL
Java_org_siprop_opencv_OpenCV_faceDetect(JNIEnv* env,
										jobject thiz,
										jintArray photo_data1,
										jintArray photo_data2,
										jint width,
										jint height) {
	LOGV("Load desp.");

	int i, x, y;
	int* pixels;
	IplImage *frameImage;
	
	IplImage *backgroundImage = cvCreateImage( cvSize(width, height), IPL_DEPTH_8U, 1 );
	IplImage *grayImage = cvCreateImage( cvSize(width, height), IPL_DEPTH_8U, 1 );
	IplImage *differenceImage = cvCreateImage( cvSize(width, height), IPL_DEPTH_8U, 1 );
	
	IplImage *hsvImage = cvCreateImage( cvSize(width, height), IPL_DEPTH_8U, 3 );
	IplImage *hueImage = cvCreateImage( cvSize(width, height), IPL_DEPTH_8U, 1 );
	IplImage *saturationImage = cvCreateImage( cvSize(width, height), IPL_DEPTH_8U, 1 );
	IplImage *valueImage = cvCreateImage( cvSize(width, height), IPL_DEPTH_8U, 1 );
	IplImage *thresholdImage1 = cvCreateImage( cvSize(width, height), IPL_DEPTH_8U, 1 );
	IplImage *thresholdImage2 = cvCreateImage( cvSize(width, height), IPL_DEPTH_8U, 1 );
	IplImage *thresholdImage3 = cvCreateImage( cvSize(width, height), IPL_DEPTH_8U, 1 );
	IplImage *faceImage = cvCreateImage( cvSize(width, height), IPL_DEPTH_8U, 1 );
	
	CvMoments moment;
	double m_00;
	double m_10;
	double m_01;
	int gravityX;
	int gravityY;

	jbooleanArray res_array;
	int imageSize;



	// Load Image
	pixels = env->GetIntArrayElements(photo_data1, 0);
	frameImage = loadPixels(pixels, width, height);
	if(frameImage == 0) {
		LOGV("Error loadPixels.");
		return 0;
	}
	
	
	cvCvtColor( frameImage, backgroundImage, CV_BGR2GRAY );
	
	
	pixels = env->GetIntArrayElements(photo_data2, 0);
	frameImage = loadPixels(pixels, width, height);
	if(frameImage == 0) {
		LOGV("Error loadPixels.");
		return 0;
	}
	cvCvtColor( frameImage, grayImage, CV_BGR2GRAY );
	cvAbsDiff( grayImage, backgroundImage, differenceImage );
	
	cvCvtColor( frameImage, hsvImage, CV_BGR2HSV );
	LOGV("Load cvCvtColor.");
	cvSplit( hsvImage, hueImage, saturationImage, valueImage, 0 );
	LOGV("Load cvSplit.");
	cvThreshold( hueImage, thresholdImage1, THRESH_BOTTOM, THRESHOLD_MAX_VALUE, CV_THRESH_BINARY );
	cvThreshold( hueImage, thresholdImage2, THRESH_TOP, THRESHOLD_MAX_VALUE, CV_THRESH_BINARY_INV );
	cvAnd( thresholdImage1, thresholdImage2, thresholdImage3, 0 );
	LOGV("Load cvAnd.");
	
	cvAnd( differenceImage, thresholdImage3, faceImage, 0 );
	
	cvMoments( faceImage, &moment, 0 );
	m_00 = cvGetSpatialMoment( &moment, 0, 0 );
	m_10 = cvGetSpatialMoment( &moment, 1, 0 );
	m_01 = cvGetSpatialMoment( &moment, 0, 1 );
	gravityX = m_10 / m_00;
	gravityY = m_01 / m_00;
	LOGV("Load cvMoments.");


	cvCircle( frameImage, cvPoint( gravityX, gravityY ), CIRCLE_RADIUS,
		 CV_RGB( 255, 0, 0 ), LINE_THICKNESS, LINE_TYPE, 0 );




	CvMat stub, *mat_image;
    int channels, ipl_depth;
    mat_image = cvGetMat( frameImage, &stub );
    channels = CV_MAT_CN( mat_image->type );

    ipl_depth = cvCvToIplDepth(mat_image->type);

	WLNonFileByteStream* m_strm = new WLNonFileByteStream();
    loadImageBytes(mat_image->data.ptr, mat_image->step, mat_image->width,
                             mat_image->height, ipl_depth, channels, m_strm);
	LOGV("Load loadImageBytes.");


	imageSize = m_strm->GetSize();
	res_array = env->NewBooleanArray(imageSize);
	LOGV("Load NewByteArray.");
    if (res_array == 0) {
        return 0;
    }
    env->SetBooleanArrayRegion(res_array, 0, imageSize, (jboolean*)m_strm->GetByte());
	LOGV("Load SetBooleanArrayRegion.");




	cvReleaseImage( &backgroundImage );
	cvReleaseImage( &grayImage );
	cvReleaseImage( &differenceImage );
	cvReleaseImage( &hsvImage );
	cvReleaseImage( &hueImage );
	cvReleaseImage( &saturationImage );
	cvReleaseImage( &valueImage );
	cvReleaseImage( &thresholdImage1 );
	cvReleaseImage( &thresholdImage2 );
	cvReleaseImage( &thresholdImage3 );
	cvReleaseImage( &faceImage );
	cvReleaseImage( &frameImage );
	m_strm->Close();
	SAFE_DELETE(m_strm);

	return res_array;

}
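The centroid extraction above uses the spatial moments: for a binary (or intensity) image the center of gravity is

\[ \bar{x} = \frac{M_{10}}{M_{00}}, \qquad \bar{y} = \frac{M_{01}}{M_{00}} \]

Note that $M_{00}$ is zero when no skin pixels survive the masks, so a robust caller would guard the division before drawing the circle.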
Example #14
void setLowThreshold( float scale ) {
	cvConvertScale( IdiffF, Iscratch, scale );
	cvAdd( Iscratch, IavgF, IlowF );
	cvSplit( IlowF, Ilow1, Ilow2, Ilow3, 0 );
}
Example #15
void setHighThreshold( float scale ) {
	cvConvertScale( IdiffF, Iscratch, scale );
	cvAdd( Iscratch, IavgF, IhiF );
	cvSplit( IhiF, Ihi1, Ihi2, Ihi3, 0 );
}
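setLowThreshold and setHighThreshold follow the running-average background model and assume float globals allocated elsewhere. A sketch of that allocation, under the assumption of a 3-channel 32-bit float pipeline (names follow the snippets; the allocation itself is illustrative, not the project's actual code):

/* hypothetical allocation of the globals used by the two helpers */
IplImage *IavgF, *IdiffF, *Iscratch, *IlowF, *IhiF;
IplImage *Ilow1, *Ilow2, *Ilow3, *Ihi1, *Ihi2, *Ihi3;

void AllocateImages(IplImage *I)
{
    CvSize sz = cvGetSize(I);
    IavgF    = cvCreateImage(sz, IPL_DEPTH_32F, 3); /* accumulated mean */
    IdiffF   = cvCreateImage(sz, IPL_DEPTH_32F, 3); /* accumulated frame-to-frame |diff| */
    Iscratch = cvCreateImage(sz, IPL_DEPTH_32F, 3);
    IlowF    = cvCreateImage(sz, IPL_DEPTH_32F, 3);
    IhiF     = cvCreateImage(sz, IPL_DEPTH_32F, 3);
    Ilow1 = cvCreateImage(sz, IPL_DEPTH_32F, 1);
    Ilow2 = cvCreateImage(sz, IPL_DEPTH_32F, 1);
    Ilow3 = cvCreateImage(sz, IPL_DEPTH_32F, 1);
    Ihi1  = cvCreateImage(sz, IPL_DEPTH_32F, 1);
    Ihi2  = cvCreateImage(sz, IPL_DEPTH_32F, 1);
    Ihi3  = cvCreateImage(sz, IPL_DEPTH_32F, 1);
}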
Example #16
static GstFlowReturn
gst_skin_detect_transform (GstOpencvVideoFilter * base, GstBuffer * buf,
    IplImage * img, GstBuffer * outbuf, IplImage * outimg)
{
  GstSkinDetect *filter = GST_SKIN_DETECT (base);



  filter->cvRGB->imageData = (char *) img->imageData;
  filter->cvSkin->imageData = (char *) outimg->imageData;

  /* SKIN COLOUR BLOB DETECTION */
  if (HSV == filter->method) {
    cvCvtColor (filter->cvRGB, filter->cvHSV, CV_RGB2HSV);
    cvCvtPixToPlane (filter->cvHSV, filter->cvH, filter->cvS, filter->cvV, 0);  /*  Extract the 3 color components. */

    /*  Detect which pixels in each of the H, S and V channels are probably skin pixels.
       Assume that skin has a Hue between 10 and 20 (out of 180), a Saturation above 48, and a Brightness above 80. */
    cvThreshold (filter->cvH, filter->cvH2, 10, UCHAR_MAX, CV_THRESH_BINARY);   /* (hue > 10) */
    cvThreshold (filter->cvH, filter->cvH, 20, UCHAR_MAX, CV_THRESH_BINARY_INV);        /* (hue < 20) */
    cvThreshold (filter->cvS, filter->cvS, 48, UCHAR_MAX, CV_THRESH_BINARY);    /* (sat > 48) */
    cvThreshold (filter->cvV, filter->cvV, 80, UCHAR_MAX, CV_THRESH_BINARY);    /* (val > 80) */

    /*  erode the HUE to get rid of noise. */
    cvErode (filter->cvH, filter->cvH, NULL, 1);

    /*  Combine all 3 thresholded color components, so that an output pixel will only 
       be white (255) if the H, S and V pixels were also white.
       imageSkin = (hue > 10) ^ (hue < 20) ^ (sat > 48) ^ (val > 80), where   ^ mean pixels-wise AND */
    cvAnd (filter->cvH, filter->cvS, filter->cvSkinPixels1, NULL);
    cvAnd (filter->cvSkinPixels1, filter->cvH2, filter->cvSkinPixels1, NULL);
    cvAnd (filter->cvSkinPixels1, filter->cvV, filter->cvSkinPixels1, NULL);

    cvCvtColor (filter->cvSkinPixels1, filter->cvRGB, CV_GRAY2RGB);
  } else if (RGB == filter->method) {
    cvCvtPixToPlane (filter->cvRGB, filter->cvR, filter->cvG, filter->cvB, 0);  /*  Extract the 3 color components. */
    cvAdd (filter->cvR, filter->cvG, filter->cvAll, NULL);
    cvAdd (filter->cvB, filter->cvAll, filter->cvAll, NULL);    /*  All = R + G + B */
    cvDiv (filter->cvR, filter->cvAll, filter->cvRp, 1.0);      /*  R' = R / ( R + G + B) */
    cvDiv (filter->cvG, filter->cvAll, filter->cvGp, 1.0);      /*  G' = G / ( R + G + B) */

    cvConvertScale (filter->cvR, filter->cvR2, 1.0, 0.0);
    cvCopy (filter->cvGp, filter->cvGp2, NULL);
    cvCopy (filter->cvRp, filter->cvRp2, NULL);

    cvThreshold (filter->cvR2, filter->cvR2, 60, UCHAR_MAX, CV_THRESH_BINARY);  /* (R > 60) */
    cvThreshold (filter->cvRp, filter->cvRp, 0.42, UCHAR_MAX, CV_THRESH_BINARY);        /* (R'> 0.4) */
    cvThreshold (filter->cvRp2, filter->cvRp2, 0.6, UCHAR_MAX, CV_THRESH_BINARY_INV);   /* (R'< 0.6) */
    cvThreshold (filter->cvGp, filter->cvGp, 0.28, UCHAR_MAX, CV_THRESH_BINARY);        /* (G'> 0.28) */
    cvThreshold (filter->cvGp2, filter->cvGp2, 0.4, UCHAR_MAX, CV_THRESH_BINARY_INV);   /* (G'< 0.4) */

    /*  Combine all the thresholded color components, so that an output pixel will only
       be white (255) if the R, R' and G' tests all passed. */

    cvAnd (filter->cvR2, filter->cvRp, filter->cvSkinPixels2, NULL);
    cvAnd (filter->cvRp, filter->cvSkinPixels2, filter->cvSkinPixels2, NULL);
    cvAnd (filter->cvRp2, filter->cvSkinPixels2, filter->cvSkinPixels2, NULL);
    cvAnd (filter->cvGp, filter->cvSkinPixels2, filter->cvSkinPixels2, NULL);
    cvAnd (filter->cvGp2, filter->cvSkinPixels2, filter->cvSkinPixels2, NULL);

    cvConvertScale (filter->cvSkinPixels2, filter->cvdraft, 1.0, 0.0);
    cvCvtColor (filter->cvdraft, filter->cvRGB, CV_GRAY2RGB);
  }

  /* After this we have a RGB Black and white image with the skin, in 
     filter->cvRGB. We can postprocess by applying 1 erode-dilate and 1
     dilate-erode, or alternatively 1 opening-closing all together, with
     the goal of removing small (spurious) skin spots and creating large
     connected areas */
  if (filter->postprocess) {
    cvSplit (filter->cvRGB, filter->cvChA, NULL, NULL, NULL);

    cvErode (filter->cvChA, filter->cvChA,
        cvCreateStructuringElementEx (3, 3, 1, 1, CV_SHAPE_RECT, NULL), 1);
    cvDilate (filter->cvChA, filter->cvChA,
        cvCreateStructuringElementEx (3, 3, 1, 1, CV_SHAPE_RECT, NULL), 2);
    cvErode (filter->cvChA, filter->cvChA,
        cvCreateStructuringElementEx (3, 3, 1, 1, CV_SHAPE_RECT, NULL), 1);

    cvCvtColor (filter->cvChA, filter->cvRGB, CV_GRAY2RGB);
  }

  cvCopy (filter->cvRGB, filter->cvSkin, NULL);

  return GST_FLOW_OK;
}
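For the HSV branch, the four thresholds combine by pixel-wise AND; the equivalent single-pixel predicate, written out as a sketch:

/* per-pixel equivalent of the HSV skin test above (illustrative only) */
static int is_skin_hsv(unsigned char h, unsigned char s, unsigned char v)
{
    return (h > 10) && (h < 20) && (s > 48) && (v > 80);
}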
Example #17
void CvAdaptiveSkinDetector::process(IplImage *inputBGRImage, IplImage *outputHueMask)
{
    IplImage *src = inputBGRImage;

    int h, v, i, l;
    bool isInit = false;

    nFrameCount++;

    if (imgHueFrame == NULL)
    {
        isInit = true;
        initData(src, nSamplingDivider, nSamplingDivider);
    }

    unsigned char *pShrinked, *pHueFrame, *pMotionFrame, *pLastGrayFrame, *pFilteredFrame, *pGrayFrame;
    pShrinked = (unsigned char *)imgShrinked->imageData;
    pHueFrame = (unsigned char *)imgHueFrame->imageData;
    pMotionFrame = (unsigned char *)imgMotionFrame->imageData;
    pLastGrayFrame = (unsigned char *)imgLastGrayFrame->imageData;
    pFilteredFrame = (unsigned char *)imgFilteredFrame->imageData;
    pGrayFrame = (unsigned char *)imgGrayFrame->imageData;

    if ((src->width != imgHueFrame->width) || (src->height != imgHueFrame->height))
    {
        cvResize(src, imgShrinked);
        cvCvtColor(imgShrinked, imgHSVFrame, CV_BGR2HSV);
    }
    else
    {
        cvCvtColor(src, imgHSVFrame, CV_BGR2HSV);
    }

    cvSplit(imgHSVFrame, imgHueFrame, imgSaturationFrame, imgGrayFrame, 0);

    cvSetZero(imgMotionFrame);
    cvSetZero(imgFilteredFrame);

    l = imgHueFrame->height * imgHueFrame->width;

    for (i = 0; i < l; i++)
    {
        v = (*pGrayFrame);
        if ((v >= GSD_INTENSITY_LT) && (v <= GSD_INTENSITY_UT))
        {
            h = (*pHueFrame);
            if ((h >= GSD_HUE_LT) && (h <= GSD_HUE_UT))
            {
                if ((h >= nSkinHueLowerBound) && (h <= nSkinHueUpperBound))
                    ASD_INTENSITY_SET_PIXEL(pFilteredFrame, h);

                if (ASD_IS_IN_MOTION(pLastGrayFrame, v, 7))
                    ASD_INTENSITY_SET_PIXEL(pMotionFrame, h);
            }
        }
        pShrinked += 3;
        pGrayFrame++;
        pLastGrayFrame++;
        pMotionFrame++;
        pHueFrame++;
        pFilteredFrame++;
    }

    if (isInit)
        cvCalcHist(&imgHueFrame, skinHueHistogram.fHistogram);

    cvCopy(imgGrayFrame, imgLastGrayFrame);

    cvErode(imgMotionFrame, imgTemp);  // eliminate scattered pixels caused by camera noise
    cvDilate(imgTemp, imgMotionFrame);

    cvCalcHist(&imgMotionFrame, histogramHueMotion.fHistogram);

    skinHueHistogram.mergeWith(&histogramHueMotion, fHistogramMergeFactor);

    skinHueHistogram.findCurveThresholds(nSkinHueLowerBound, nSkinHueUpperBound, 1 - fHuePercentCovered);

    switch (nMorphingMethod)
    {
        case MORPHING_METHOD_ERODE :
            cvErode(imgFilteredFrame, imgTemp);
            cvCopy(imgTemp, imgFilteredFrame);
            break;
        case MORPHING_METHOD_ERODE_ERODE :
            cvErode(imgFilteredFrame, imgTemp);
            cvErode(imgTemp, imgFilteredFrame);
            break;
        case MORPHING_METHOD_ERODE_DILATE :
            cvErode(imgFilteredFrame, imgTemp);
            cvDilate(imgTemp, imgFilteredFrame);
            break;
    }

    if (outputHueMask != NULL)
        cvCopy(imgFilteredFrame, outputHueMask);
}
Example #18
void THISCLASS::OnStep() {
	IplImage *inputimage = mCore->mDataStructureImageColor.mImage;
	//Check the images
	if (! inputimage)
	{
		AddError(wxT("No input Image"));
		return;
	}
	if (inputimage->nChannels != 3)
	{
		AddError(wxT("Input image has not 3 channels."));
		return;
	}
	if (! mBackgroundImage)
	{
		AddError(wxT("Background image not accessible"));
		return;
	}
	if ((cvGetSize(inputimage).height != cvGetSize(mBackgroundImage).height) || (cvGetSize(inputimage).width != cvGetSize(mBackgroundImage).width))
	{
		AddError(wxT("Input and background images have not the same dimension"));
		return;
	}

	//Check the color system of the input image (the loaded image is BGR, the OpenCV default) and convert the background accordingly
	if (strncmp(mCore->mDataStructureImageColor.mImage->channelSeq, mBackgroundImage->channelSeq, 3))
	{
		//Make a temporary clone of the image in 3 separate channels
		IplImage* tmpImage[3];
		for (int i = 0;i < 3;i++)
			tmpImage[i] = cvCreateImage(cvGetSize(mBackgroundImage), 8, 1);
		cvSplit(mBackgroundImage, tmpImage[0], tmpImage[1], tmpImage[2], NULL);
		CvScalar tmpBackgroundMean = mBackgroundImageMean;
		//Modify the sequence of the channels in the background
		for (int i = 0;i < 3;i++)
			//If the channel is not the same, search for the corresponding channel to copy, else copy the channel directly
			if (inputimage->channelSeq[i] != mBackgroundImage->channelSeq[i])
				for (int j = 0;j < 3;j++)
					if (inputimage->channelSeq[i] == mBackgroundImage->channelSeq[j])
					{
						cvSetImageCOI(mBackgroundImage, i + 1);
						cvCopy(tmpImage[j], mBackgroundImage);
						//Remove the COI
						cvSetImageCOI(mBackgroundImage, 0);
						mBackgroundImageMean.val[i] = tmpBackgroundMean.val[j];
					}
		strcpy(mBackgroundImage->channelSeq, inputimage->channelSeq);
		for (int i = 0; i < 3;i++)
			cvReleaseImage(&(tmpImage[i]));
	}

	try {
		// Correct the input image for the difference in image mean
		if (mCorrectMean)
		{
			CvScalar tmpScalar = cvAvg(inputimage);
			cvAddS(inputimage, cvScalar(mBackgroundImageMean.val[0] - tmpScalar.val[0], mBackgroundImageMean.val[1] - tmpScalar.val[1], mBackgroundImageMean.val[2] - tmpScalar.val[2]), inputimage);
		}

		// Background subtraction
		cvAbsDiff(inputimage, mBackgroundImage, inputimage);
	} catch (...) {
		AddError(wxT("Background subtraction failed."));
	}

	// Set the display
	DisplayEditor de(&mDisplayOutput);
	if (de.IsActive()) {
		de.SetMainImage(inputimage);
	}
}
Example #19
File: main.c Project: mafraba/weedo
int main(int argc, char** argv)
{
    // Load and display original image
    puts("Loading image...");
    CvMat* img = cvLoadImageM(PATH, CV_LOAD_IMAGE_COLOR);
    CvMat* orig = cvCloneMat(img);
    cvCvtColor(img, img, CV_BGR2Lab);
    if (SMOOTH_ORIGINAL)
    {
        cvSmooth(img, img, CV_GAUSSIAN, SMOOTH_ORIGINAL, 0, 0, 0);
    }
    
    //chromacity(img);

    //show(ORIGINAL_IMAGE_WINDOW_NAME, orig);
    //show(PRETREATED_IMAGE_WINDOW_NAME, img);

    // Generate a Gabor filter bank
    puts("Generating Gabor filter bank...");
    FilterBank filter_bank;
    generate_gabor_filter_bank(&filter_bank,
                               N_BANDWIDTHS, bandwidths,
                               N_FREQS, spatial_frequencies,
                               N_ORIENTATIONS, orientations);

    // Separate each channel
    puts("Separating channels...");
    CvMat *ch1 = cvCreateMat(img->rows, img->cols, CV_8UC1);
    CvMat *ch2 = cvCreateMat(img->rows, img->cols, CV_8UC1);
    CvMat *ch3 = cvCreateMat(img->rows, img->cols, CV_8UC1);
    cvSplit(img, ch1, ch2, ch3, NULL);

    // Apply the filter bank on each one of them
    puts("Applying filters...");
    CvMat **results = (CvMat**) malloc(3 * filter_bank.size * sizeof (CvMat*));
    CvMat **filtered_channel_1 = results;
    apply_filter_bank(&filter_bank, ch1, filtered_channel_1);
    CvMat **filtered_channel_2 = results + filter_bank.size;
    apply_filter_bank(&filter_bank, ch2, filtered_channel_2);
    CvMat **filtered_channel_3 = results + 2 * filter_bank.size;
    apply_filter_bank(&filter_bank, ch3, filtered_channel_3);

    // Now sort the samples
    puts("Sorting...");
    int n_channels = (IGNORAR_L ? 2 : 3);
    results = (IGNORAR_L ? filtered_channel_2 : results);
    CvMat *samples;
    sort_samples(n_channels * filter_bank.size, results, &samples);
    printf("Samples: %d(x%d)", samples->rows, samples->cols);
    fflush(stdout);

    // And cluster them
    printf("Clustering... ");
    CvScalar color_tab[8];
    color_tab[0] = CV_RGB(255, 0, 0);
    color_tab[1] = CV_RGB(0, 255, 0);
    color_tab[2] = CV_RGB(0, 0, 255);
    color_tab[3] = CV_RGB(0, 255, 255);
    color_tab[4] = CV_RGB(255, 0, 255);
    color_tab[5] = CV_RGB(255, 255, 0);
    color_tab[6] = CV_RGB(255, 255, 255);
    color_tab[7] = CV_RGB(0, 0, 0);

    CvMat *labels = cvCreateMat(samples->rows, 1, CV_32SC1);
    cvKMeans2(samples, K_CLUSTERS, labels,
              cvTermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 10, 1.0),
              10, NULL, 0, NULL, NULL);
    puts("done");
    fflush(stdout);

    CvMat *color_labels = cvCreateMat(img->rows, img->cols, CV_8UC3);
    CvMat **classes = malloc(K_CLUSTERS * sizeof (CvMat*));
    for (int i = 0; i < K_CLUSTERS; i++)
    {
        classes[i] = cvCreateMat(img->rows, img->cols, CV_8UC1);
        cvZero(classes[i]);
    }
    img_from_labels(labels, classes, color_labels, color_tab);
    //show("Labels", labeled_img);

    CvMat *mix = cvClone(img);
    cvAddWeighted(orig, 0.7, color_labels, 0.3, 0, mix);

    //
    puts("Outputting...");
    char out_file_name[256];
    sprintf(out_file_name, "%s/%s.png", OUTPUT_PATH, "original");
    cvSaveImage(out_file_name, orig, NULL);
    output_base_channels(img);
    if (!IGNORAR_L)
    {
        output_filtered_images("CH1", filter_bank.size, filtered_channel_1);
    }
    output_filtered_images("CH2", filter_bank.size, filtered_channel_2);
    output_filtered_images("CH3", filter_bank.size, filtered_channel_3);
    output_filter_bank(&filter_bank);
    // output labels
    output_classes(classes, orig);
    // output colored and mix
    sprintf(out_file_name, "%s/%s.png", OUTPUT_PATH, "coloured");
    cvSaveImage(out_file_name, color_labels, NULL);
    sprintf(out_file_name, "%s/%s.png", OUTPUT_PATH, "mix");
    cvSaveImage(out_file_name, mix, NULL);

    //show("Mix", mix);
    //    cvWaitKey(0);
    //    cvWaitKey(0);
    //    cvWaitKey(0);
    // Should do some cleanup here... :_(

    return (EXIT_SUCCESS);
}
Example #20
int main(int argc, char** argv) {
  bool isStop = false;
  CvCapture *capture = NULL;
  capture = cvCreateCameraCapture(0);
  //capture = cvCaptureFromAVI("dnp-shokai1.avi");
  if(capture == NULL){
    printf("capture device not found!!");
    return -1;
  }

  IplImage *img = NULL;
  img = cvQueryFrame(capture);
  const int w = img->width;
  const int h = img->height;

  IplImage *imgBg = cvCreateImage(cvSize(w, h), IPL_DEPTH_8U, 1);
  IplImage *imgGray = cvCreateImage(cvSize(w, h), IPL_DEPTH_8U, 1);
  IplImage *imgDiff = cvCreateImage(cvSize(w, h), IPL_DEPTH_8U, 1);
  IplImage *imgHsv = cvCreateImage(cvSize(w, h), IPL_DEPTH_8U, 3);
  IplImage *imgHue = cvCreateImage(cvSize(w, h), IPL_DEPTH_8U, 1);
  IplImage *imgSat = cvCreateImage(cvSize(w, h), IPL_DEPTH_8U, 1);
  IplImage *imgVal = cvCreateImage(cvSize(w, h), IPL_DEPTH_8U, 1);
  IplImage *imgThreshold1 = cvCreateImage(cvSize(w, h), IPL_DEPTH_8U, 1);
  IplImage *imgThreshold2 = cvCreateImage(cvSize(w, h), IPL_DEPTH_8U, 1);
  IplImage *imgThreshold3 = cvCreateImage(cvSize(w, h), IPL_DEPTH_8U, 1);
  IplImage *imgSkin = cvCreateImage(cvSize(w, h), IPL_DEPTH_8U, 1);

  char winNameCapture[] = "Capture";
  char winNameDiff[] = "Difference";
  char winNameBg[] = "Background";
  char winNameSkin[] = "Skin";

  cvNamedWindow(winNameCapture, CV_WINDOW_AUTOSIZE);
  cvNamedWindow(winNameDiff, CV_WINDOW_AUTOSIZE);
  cvNamedWindow(winNameBg, CV_WINDOW_AUTOSIZE);
  cvNamedWindow(winNameSkin, CV_WINDOW_AUTOSIZE);

  img = cvQueryFrame(capture);
  cvCvtColor(img, imgBg, CV_BGR2GRAY);  

  int waitKey;
  while (1) {
    if(!isStop){
      if((img = cvQueryFrame(capture)) == NULL) break;
      cvCvtColor(img, imgGray, CV_BGR2GRAY);
      cvAbsDiff(imgGray, imgBg, imgDiff);

      cvCvtColor(img, imgHsv, CV_BGR2HSV);
      cvSplit(imgHsv, imgHue, imgSat, imgVal, NULL);
      cvThreshold(imgHue, imgThreshold1, COLOR_BOTTOM, 255, CV_THRESH_BINARY);
      cvThreshold(imgHue, imgThreshold2, COLOR_TOP, 255, CV_THRESH_BINARY_INV);
      cvAnd(imgThreshold1, imgThreshold2, imgThreshold3, NULL);

      cvAnd(imgDiff, imgThreshold3, imgSkin, NULL);

      cvShowImage(winNameCapture, img);
      cvShowImage(winNameDiff, imgDiff);
      cvShowImage(winNameBg, imgBg);
      cvShowImage(winNameSkin, imgSkin);
    }

    waitKey = cvWaitKey(33);
    if(waitKey == 'q') break;
    if(waitKey == 'b'){ // re-capture the background
      img = cvQueryFrame(capture);
      cvCvtColor(img, imgBg, CV_BGR2GRAY);
    }
    if(waitKey == ' '){
      isStop = !isStop;
      if(isStop) printf("stop\n");
      else printf("start\n");
    }
  }
  
  cvReleaseCapture(&capture);
  cvDestroyWindow(winNameCapture);
  cvDestroyWindow(winNameDiff);
  cvDestroyWindow(winNameBg);
  cvDestroyWindow(winNameSkin);
  return 0;
}
Example #21
void
icvCrossCorr( const CvArr* _img, const CvArr* _templ, CvArr* _corr,
              CvPoint anchor, double delta, int borderType )
{
    // disable OpenMP in the case of Visual Studio,
    // otherwise the performance drops significantly
#undef USE_OPENMP
#if !defined _MSC_VER || defined CV_ICC
    #define USE_OPENMP 1
#endif

    const double block_scale = 4.5;
    const int min_block_size = 256;
    cv::Ptr<CvMat> dft_img[CV_MAX_THREADS];
    cv::Ptr<CvMat> dft_templ;
    std::vector<uchar> buf[CV_MAX_THREADS];
    int k, num_threads = 0;
    
    CvMat istub, *img = (CvMat*)_img;
    CvMat tstub, *templ = (CvMat*)_templ;
    CvMat cstub, *corr = (CvMat*)_corr;
    CvSize dftsize, blocksize;
    int depth, templ_depth, corr_depth, max_depth = CV_32F,
        cn, templ_cn, corr_cn, buf_size = 0,
        tile_count_x, tile_count_y, tile_count;

    img = cvGetMat( img, &istub );
    templ = cvGetMat( templ, &tstub );
    corr = cvGetMat( corr, &cstub );

    if( CV_MAT_DEPTH( img->type ) != CV_8U &&
        CV_MAT_DEPTH( img->type ) != CV_16U &&
        CV_MAT_DEPTH( img->type ) != CV_32F &&
        CV_MAT_DEPTH( img->type ) != CV_64F )
        CV_Error( CV_StsUnsupportedFormat,
        "The function supports only 8u, 16u, 32f and 64f data types" );

    if( !CV_ARE_DEPTHS_EQ( img, templ ) && CV_MAT_DEPTH( templ->type ) != CV_32F )
        CV_Error( CV_StsUnsupportedFormat,
        "Template (kernel) must be of the same depth as the input image, or be 32f" );
    
    if( !CV_ARE_DEPTHS_EQ( img, corr ) && CV_MAT_DEPTH( corr->type ) != CV_32F &&
        CV_MAT_DEPTH( corr->type ) != CV_64F )
        CV_Error( CV_StsUnsupportedFormat,
        "The output image must have the same depth as the input image, or be 32f/64f" );

    if( (!CV_ARE_CNS_EQ( img, corr ) || CV_MAT_CN(templ->type) > 1) &&
        (CV_MAT_CN( corr->type ) > 1 || !CV_ARE_CNS_EQ( img, templ)) )
        CV_Error( CV_StsUnsupportedFormat,
        "The output must have the same number of channels as the input (when the template has 1 channel), "
        "or the output must have 1 channel when the input and the template have the same number of channels" );

    depth = CV_MAT_DEPTH(img->type);
    cn = CV_MAT_CN(img->type);
    templ_depth = CV_MAT_DEPTH(templ->type);
    templ_cn = CV_MAT_CN(templ->type);
    corr_depth = CV_MAT_DEPTH(corr->type);
    corr_cn = CV_MAT_CN(corr->type);

    CV_Assert( corr_cn == 1 || delta == 0 );

    max_depth = MAX( max_depth, templ_depth );
    max_depth = MAX( max_depth, depth );
    max_depth = MAX( max_depth, corr_depth );
    if( depth > CV_8U )
        max_depth = CV_64F;

    /*if( img->cols < templ->cols || img->rows < templ->rows )
        CV_Error( CV_StsUnmatchedSizes,
        "Such a combination of image and template/filter size is not supported" );*/

    if( corr->rows > img->rows + templ->rows - 1 ||
        corr->cols > img->cols + templ->cols - 1 )
        CV_Error( CV_StsUnmatchedSizes,
        "output image should not be greater than (W + w - 1)x(H + h - 1)" );

    blocksize.width = cvRound(templ->cols*block_scale);
    blocksize.width = MAX( blocksize.width, min_block_size - templ->cols + 1 );
    blocksize.width = MIN( blocksize.width, corr->cols );
    blocksize.height = cvRound(templ->rows*block_scale);
    blocksize.height = MAX( blocksize.height, min_block_size - templ->rows + 1 );
    blocksize.height = MIN( blocksize.height, corr->rows );

    dftsize.width = cvGetOptimalDFTSize(blocksize.width + templ->cols - 1);
    if( dftsize.width == 1 )
        dftsize.width = 2;
    dftsize.height = cvGetOptimalDFTSize(blocksize.height + templ->rows - 1);
    if( dftsize.width <= 0 || dftsize.height <= 0 )
        CV_Error( CV_StsOutOfRange, "the input arrays are too big" );

    // recompute block size
    blocksize.width = dftsize.width - templ->cols + 1;
    blocksize.width = MIN( blocksize.width, corr->cols );
    blocksize.height = dftsize.height - templ->rows + 1;
    blocksize.height = MIN( blocksize.height, corr->rows );

    dft_templ = cvCreateMat( dftsize.height*templ_cn, dftsize.width, max_depth );

#ifdef USE_OPENMP
    num_threads = cvGetNumThreads();
#else
    num_threads = 1;
#endif

    for( k = 0; k < num_threads; k++ )
        dft_img[k] = cvCreateMat( dftsize.height, dftsize.width, max_depth );

    if( templ_cn > 1 && templ_depth != max_depth )
        buf_size = templ->cols*templ->rows*CV_ELEM_SIZE(templ_depth);

    if( cn > 1 && depth != max_depth )
        buf_size = MAX( buf_size, (blocksize.width + templ->cols - 1)*
            (blocksize.height + templ->rows - 1)*CV_ELEM_SIZE(depth));

    if( (corr_cn > 1 || cn > 1) && corr_depth != max_depth )
        buf_size = MAX( buf_size, blocksize.width*blocksize.height*CV_ELEM_SIZE(corr_depth));

    if( buf_size > 0 )
    {
        for( k = 0; k < num_threads; k++ )
            buf[k].resize(buf_size);
    }

    // compute DFT of each template plane
    for( k = 0; k < templ_cn; k++ )
    {
        CvMat dstub, *src, *dst, temp;
        CvMat* planes[] = { 0, 0, 0, 0 };
        int yofs = k*dftsize.height;

        src = templ;
        dst = cvGetSubRect( dft_templ, &dstub, cvRect(0,yofs,templ->cols,templ->rows));
    
        if( templ_cn > 1 )
        {
            planes[k] = templ_depth == max_depth ? dst :
                cvInitMatHeader( &temp, templ->rows, templ->cols, templ_depth, &buf[0][0] );
            cvSplit( templ, planes[0], planes[1], planes[2], planes[3] );
            src = planes[k];
            planes[k] = 0;
        }

        if( dst != src )
            cvConvert( src, dst );

        if( dft_templ->cols > templ->cols )
        {
            cvGetSubRect( dft_templ, dst, cvRect(templ->cols, yofs,
                          dft_templ->cols - templ->cols, templ->rows) );
            cvZero( dst );
        }
        cvGetSubRect( dft_templ, dst, cvRect(0,yofs,dftsize.width,dftsize.height) );
        cvDFT( dst, dst, CV_DXT_FORWARD + CV_DXT_SCALE, templ->rows );
    }
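    /* dft_templ now holds the forward DFT of every template plane, stacked
       vertically: plane k occupies rows [k*dftsize.height, (k+1)*dftsize.height). */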

    tile_count_x = (corr->cols + blocksize.width - 1)/blocksize.width;
    tile_count_y = (corr->rows + blocksize.height - 1)/blocksize.height;
    tile_count = tile_count_x*tile_count_y;

#if defined _OPENMP && defined USE_OPENMP
    #pragma omp parallel for num_threads(num_threads) schedule(dynamic)
#endif
    // calculate correlation by blocks
    for( k = 0; k < tile_count; k++ )
    {
#ifdef USE_OPENMP
        int thread_idx = cvGetThreadNum();
#else
        int thread_idx = 0;
#endif
        int x = (k%tile_count_x)*blocksize.width;
        int y = (k/tile_count_x)*blocksize.height;
        int i, yofs;
        CvMat sstub, dstub, *src, *dst, temp;
        CvMat* planes[] = { 0, 0, 0, 0 };
        CvMat* _dft_img = dft_img[thread_idx];
        uchar* _buf = buf_size > 0 ? &buf[thread_idx][0] : 0;
        CvSize csz = { blocksize.width, blocksize.height }, isz;
        int x0 = x - anchor.x, y0 = y - anchor.y;
        int x1 = MAX( 0, x0 ), y1 = MAX( 0, y0 ), x2, y2;
        csz.width = MIN( csz.width, corr->cols - x );
        csz.height = MIN( csz.height, corr->rows - y );
        isz.width = csz.width + templ->cols - 1;
        isz.height = csz.height + templ->rows - 1;
        x2 = MIN( img->cols, x0 + isz.width );
        y2 = MIN( img->rows, y0 + isz.height );
        
        for( i = 0; i < cn; i++ )
        {
            CvMat dstub1, *dst1;
            yofs = i*dftsize.height;

            src = cvGetSubRect( img, &sstub, cvRect(x1,y1,x2-x1,y2-y1) );
            dst = cvGetSubRect( _dft_img, &dstub,
                cvRect(0,0,isz.width,isz.height) );
            dst1 = dst;
            
            if( x2 - x1 < isz.width || y2 - y1 < isz.height )
                dst1 = cvGetSubRect( _dft_img, &dstub1,
                    cvRect( x1 - x0, y1 - y0, x2 - x1, y2 - y1 ));

            if( cn > 1 )
            {
                planes[i] = dst1;
                if( depth != max_depth )
                    planes[i] = cvInitMatHeader( &temp, y2 - y1, x2 - x1, depth, _buf );
                cvSplit( src, planes[0], planes[1], planes[2], planes[3] );
                src = planes[i];
                planes[i] = 0;
            }

            if( dst1 != src )
                cvConvert( src, dst1 );

            if( dst != dst1 )
                cvCopyMakeBorder( dst1, dst, cvPoint(x1 - x0, y1 - y0), borderType );

            if( dftsize.width > isz.width )
            {
                cvGetSubRect( _dft_img, dst, cvRect(isz.width, 0,
                      dftsize.width - isz.width,dftsize.height) );
                cvZero( dst );
            }

            cvDFT( _dft_img, _dft_img, CV_DXT_FORWARD, isz.height );
            cvGetSubRect( dft_templ, dst,
                cvRect(0,(templ_cn>1?yofs:0),dftsize.width,dftsize.height) );

            cvMulSpectrums( _dft_img, dst, _dft_img, CV_DXT_MUL_CONJ );
            cvDFT( _dft_img, _dft_img, CV_DXT_INVERSE, csz.height );

            src = cvGetSubRect( _dft_img, &sstub, cvRect(0,0,csz.width,csz.height) );
            dst = cvGetSubRect( corr, &dstub, cvRect(x,y,csz.width,csz.height) );

            if( corr_cn > 1 )
            {
                planes[i] = src;
                if( corr_depth != max_depth )
                {
                    planes[i] = cvInitMatHeader( &temp, csz.height, csz.width,
                                                 corr_depth, _buf );
                    cvConvertScale( src, planes[i], 1, delta );
                }
                cvMerge( planes[0], planes[1], planes[2], planes[3], dst );
                planes[i] = 0;                    
            }
            else
            {
                if( i == 0 )
                    cvConvertScale( src, dst, 1, delta );
                else
                {
                    if( max_depth > corr_depth )
                    {
                        cvInitMatHeader( &temp, csz.height, csz.width,
                                         corr_depth, _buf );
                        cvConvert( src, &temp );
                        src = &temp;
                    }
                    cvAcc( src, dst );
                }
            }
        }
    }
}
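
This block-DFT routine appears to be the correlation helper behind cvMatchTemplate in OpenCV 1.x. A minimal sketch of reaching it through the public API, assuming 8-bit inputs; cvMatchTemplate requires a single-channel 32F result of size (W-w+1) x (H-h+1):

#include <cv.h>

void match_demo(IplImage* img, IplImage* templ)
{
    CvSize rsz = cvSize(img->width - templ->width + 1,
                        img->height - templ->height + 1);
    IplImage* result = cvCreateImage(rsz, IPL_DEPTH_32F, 1);
    double minv, maxv;
    CvPoint minloc, maxloc;

    cvMatchTemplate(img, templ, result, CV_TM_CCORR_NORMED);
    cvMinMaxLoc(result, &minv, &maxv, &minloc, &maxloc, NULL);
    /* for CV_TM_CCORR_NORMED the best match is at maxloc */
    cvReleaseImage(&result);
}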
Exemplo n.º 22
Arquivo: map.cpp Projeto: Metalab/CGSG
const PolygonList& ViennaMap::loadFragment(int fragX, int fragY) {
  //TODO: keep track of the images that are being loaded so we don't issue
  //two load requests for the same picture. this will be important if 
  //we are used in a multi-threaded environment
  IplImage *img = getImage(fragX, fragY);

  if (SDL_mutexP(cvRessourcesGuard) == -1)
    throw "could not acquire cvRessourcesGuard";

  //get one color channel and set white to zero
  cvSplit(img, tempBinarizedImage, NULL, NULL, NULL);
  cvThreshold(tempBinarizedImage, tempBinarizedImage, 250, 255, CV_THRESH_TOZERO_INV);
  
  //find polygons
  CvSeq *contours, *polys;
  cvFindContours(tempBinarizedImage, cvMemStorage, &contours, sizeof(CvContour),
                  CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
  polys = cvApproxPoly(contours, sizeof(CvContour), cvMemStorage, CV_POLY_APPROX_DP, 1, 1);

  //create MapFragment
  MapFragment *frag = new MapFragment();

  //read polygons
  for (; polys; polys = polys->h_next) {
    if (polys->total < 3) continue;
    Polygon* polygon = new Polygon(polys->total); 
    bool incomplete = false;

    CvPoint *point;

    for (int i=0; i < polys->total; i++) {
      point = (CvPoint*)cvGetSeqElem(polys, i);
      int x, y;
      x = point->x + fragX*fragmentImageWidth;
      y = point->y + fragY*fragmentImageHeight;
      (*polygon)[i].x = x;
      (*polygon)[i].y = y;

      //compare fragment-local coordinates: x and y already include the
      //fragment offset, so testing them against the fragment size would
      //only work for fragment (0,0)
      if (point->x == 1 || point->y == 1 ||
          point->x == fragmentImageWidth-2 || point->y == fragmentImageHeight-2)
        incomplete = true;
    }

    if (!incomplete)
      frag->polygons.push_back(polygon);
    else
      frag->incompletePolygons.push_back(polygon);
  }

  //clean up
  cvClearMemStorage(cvMemStorage);
  cvReleaseImage(&img);

  if (SDL_mutexV(cvRessourcesGuard) == -1)
    throw "could not release cvRessourcesGuard";

  if (SDL_mutexP(fragmentGuard) == -1)
    throw "could not acquire fragmentGuard";

  //TODO: tryCompletePolygons
  //throw "not implemented";
  
  //add map fragment to list
  fragments.push_back(frag);

  if (SDL_mutexV(fragmentGuard) == -1)
    throw "could not release fragmentGuard";

  return frag->polygons;
}
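
Here cvSplit is called with a single non-NULL destination, which copies only the first plane; for a BGR image that is the blue channel, and the CV_THRESH_TOZERO_INV pass then zeroes everything brighter than 250 so the white map background drops out. A small sketch, assuming an 8-bit BGR input, of grabbing an arbitrary channel the same way:

/* cvSplit only fills the destinations that are non-NULL */
IplImage* grab_channel(IplImage* bgr, int ch)   /* ch: 0=B, 1=G, 2=R */
{
    IplImage* plane = cvCreateImage(cvGetSize(bgr), IPL_DEPTH_8U, 1);
    cvSplit(bgr,
            ch == 0 ? plane : NULL,
            ch == 1 ? plane : NULL,
            ch == 2 ? plane : NULL,
            NULL);
    return plane;
}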
Exemplo n.º 23
int main(int argc, char ** argv)
{
int height,width,step,channels;
uchar* data;
uchar* data1;
int i,j,k;
float s;

CvMat *dft_A;
CvMat *dft_B;
CvMat *dft_C;
IplImage* im;
IplImage* im1;

IplImage* image_ReB;
IplImage* image_ImB;

IplImage* image_ReC;
IplImage* image_ImC;
IplImage* complex_ImC;

IplImage* image_ReDen;
IplImage* image_ImDen;

FILE *fp;
fp = fopen("test.txt","w+");

int dft_M,dft_N;
int dft_M1,dft_N1;
/* prototypes inferred from the call sites below */
CvMat* cvShowDFT(IplImage* im, int dft_M, int dft_N, const char* src);
void cvShowInvDFT(IplImage* im, CvArr* dft, int dft_M, int dft_N, FILE* fp, const char* src);

im1 = cvLoadImage( "kutty-1.jpg",1 );
cvNamedWindow("original-color", 0);
cvShowImage("original-color", im1);
im = cvLoadImage( "kutty-1.jpg", CV_LOAD_IMAGE_GRAYSCALE );
if( !im )
return -1;

cvNamedWindow("original-gray", 0);
cvShowImage("original-gray", im);
// Create blur kernel (non-blind)
//float vals[]={.000625,.000625,.000625,.003125,.003125,.003125,.000625,.000625,.000625};
//float vals[]={-0.167,0.333,0.167,-0.167,.333,.167,-0.167,.333,.167};

float vals[]={.055,.055,.055,.222,.222,.222,.055,.055,.055};
CvMat kernel = cvMat(3, // number of rows
3, // number of columns
CV_32FC1, // matrix data type
vals);
IplImage* k_image_hdr;
IplImage* k_image;

// the kernel matrix is CV_32FC1, so the header must be 32F with 1 channel;
// the image that used to be created here was leaked when cvGetImage overwrote it
k_image_hdr = cvCreateImageHeader(cvSize(3,3),IPL_DEPTH_32F,1);
k_image = cvGetImage(&kernel,k_image_hdr);

/*IplImage* k_image;
k_image = cvLoadImage( "kernel4.bmp",0 );*/
cvNamedWindow("blur kernel", 0);

height = k_image->height;
width = k_image->width;
step = k_image->widthStep;

channels = k_image->nChannels;
//data1 = (float *)(k_image->imageData);
data1 = (uchar *)(k_image->imageData);

cvShowImage("blur kernel", k_image);

dft_M = cvGetOptimalDFTSize( im->height - 1 );
dft_N = cvGetOptimalDFTSize( im->width - 1 );

//dft_M1 = cvGetOptimalDFTSize( im->height+99 - 1 );
//dft_N1 = cvGetOptimalDFTSize( im->width+99 - 1 );

dft_M1 = cvGetOptimalDFTSize( im->height+3 - 1 );
dft_N1 = cvGetOptimalDFTSize( im->width+3 - 1 );

// Perform DFT of original image
dft_A = cvShowDFT(im, dft_M1, dft_N1,"original");
//Perform inverse (check & comment out) - Commented as it overwrites dft_A
//cvShowInvDFT(im,dft_A,dft_M1,dft_N1,fp, "original");

// Perform DFT of kernel
dft_B = cvShowDFT(k_image,dft_M1,dft_N1,"kernel");
//Perform inverse of kernel (check & comment out) - commented as it overwrites dft_B
//cvShowInvDFT(k_image,dft_B,dft_M1,dft_N1,fp, "kernel");

// Multiply numerator with complex conjugate
dft_C = cvCreateMat( dft_M1, dft_N1, CV_64FC2 );

printf("%d %d %d %d\n",dft_M,dft_N,dft_M1,dft_N1);

// Multiply DFT(blurred image) * complex conjugate of blur kernel
cvMulSpectrums(dft_A,dft_B,dft_C,CV_DXT_MUL_CONJ);

// Split Fourier in real and imaginary parts
image_ReC = cvCreateImage( cvSize(dft_N1, dft_M1), IPL_DEPTH_64F, 1);
image_ImC = cvCreateImage( cvSize(dft_N1, dft_M1), IPL_DEPTH_64F, 1);
complex_ImC = cvCreateImage( cvSize(dft_N1, dft_M1), IPL_DEPTH_64F, 2);

printf("%d %d %d %d\n",dft_M,dft_N,dft_M1,dft_N1);

cvSplit( dft_C, image_ReC, image_ImC, 0, 0 );

// Compute A^2 + B^2 of denominator or blur kernel
image_ReB = cvCreateImage( cvSize(dft_N1, dft_M1), IPL_DEPTH_64F, 1);
image_ImB = cvCreateImage( cvSize(dft_N1, dft_M1), IPL_DEPTH_64F, 1);

// Split Real and imaginary parts
cvSplit( dft_B, image_ReB, image_ImB, 0, 0 );
cvPow( image_ReB, image_ReB, 2.0);
cvPow( image_ImB, image_ImB, 2.0);
cvAdd(image_ReB, image_ImB, image_ReB,0);

//Divide Numerator/A^2 + B^2
cvDiv(image_ReC, image_ReB, image_ReC, 1.0);
cvDiv(image_ImC, image_ReB, image_ImC, 1.0);

// Merge real and imaginary parts
cvMerge(image_ReC, image_ImC, NULL, NULL, complex_ImC);

// Perform Inverse
cvShowInvDFT(im, complex_ImC,dft_M1,dft_N1,fp,"deblur");
cvWaitKey(-1);
return 0;
}
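
The division by image_ReB implements plain inverse filtering: the cross-spectrum G*conj(H) is divided by |H|^2, which blows up wherever the kernel spectrum is near zero. A hedged Wiener-style variant adds a small constant to the denominator first; K below is an assumed tuning value, not taken from this example:

/* Wiener-style regularization sketch: divide by |H|^2 + K instead of |H|^2.
   image_ReB holds Re(H)^2 + Im(H)^2 as computed above; K is hypothetical. */
double K = 0.002;
cvAddS(image_ReB, cvScalarAll(K), image_ReB, NULL); /* |H|^2 + K */
cvDiv(image_ReC, image_ReB, image_ReC, 1.0);
cvDiv(image_ImC, image_ReB, image_ImC, 1.0);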
Exemplo n.º 24
int cam() //calling main
{
    int hdims = 16;
    printf("I am main");
    CvCapture* capture = cvCreateCameraCapture(1); //determining usb camera
    CvHistogram *hist = 0;
    CvMemStorage* g_storage = NULL;
    Display *display=construct_display();
    int x,y, tmpx=0, tmpy=0, chk=0;
    IplImage* image=0;
    IplImage* lastimage1=0;
    IplImage* lastimage=0;
    IplImage* diffimage;
    IplImage* bitimage;
    IplImage* src=0,*hsv=0,*hue=0,*backproject=0;
    IplImage* hsv1=0,*hue1=0,*histimg=0,*frame=0,*edge=0;
    float* hranges;
    cvNamedWindow( "CA", CV_WINDOW_AUTOSIZE ); //display window 3
    //Calculation of Histogram//
    cvReleaseImage(&src);
    src= cvLoadImage("images/skin.jpg"); //taking patch
    while(1)
    {
        frame = cvQueryFrame( capture ); //taking frame by frame for image processing
        int j=0;
        float avgx=0;
        float avgy=0;
        if( !frame ) break;
        //#########################Background Subtraction#########################//
        if(!image)
        {
            image=cvCreateImage(cvSize(frame->width,frame->height),frame->depth,1);
            bitimage=cvCreateImage(cvSize(frame->width,frame->height),frame->depth,1);
            diffimage=cvCreateImage(cvSize(frame->width,frame->height),frame->depth,1);
            lastimage=cvCreateImage(cvSize(frame->width,frame->height),frame->depth,1);
        }
        cvCvtColor(frame,image,CV_BGR2GRAY);
        if(!lastimage1)
        {
            lastimage1=cvLoadImage("images/img.jpg");
        }
        cvCvtColor(lastimage1,lastimage,CV_BGR2GRAY);
        cvAbsDiff(image,lastimage,diffimage);
        cvThreshold(diffimage,bitimage,65,225,CV_THRESH_BINARY);
        cvInRangeS(bitimage,cvScalar(0),cvScalar(30),bitimage);
        cvSet(frame,cvScalar(0,0,0),bitimage);
        cvReleaseImage(&hsv);
        hsv= cvCreateImage( cvGetSize(src), 8, 3 );
        cvReleaseImage(&hue);
        hue= cvCreateImage( cvGetSize(src), 8, 1);
        cvCvtColor(src,hsv,CV_BGR2HSV);
        cvSplit(hsv,hue,0,0,0);
        float hranges_arr[] = {0,180};
        hranges = hranges_arr;
        hist = cvCreateHist( 1, &hdims, CV_HIST_ARRAY, &hranges, 1 );
        cvCalcHist(&hue, hist, 0, 0 );
        cvThreshHist( hist, 100 );
        //#############################Display histogram##############################//
        cvReleaseImage(&histimg);
        histimg = cvCreateImage( cvSize(320,200), 8, 3 );
        cvZero( histimg );
        int bin_w = histimg->width / hdims;
        //#### Calculating the probability of finding the skin with the in-built method ###//
        cvReleaseImage(&backproject);
        backproject= cvCreateImage( cvGetSize(frame), 8, 1 );
        cvReleaseImage(&hsv1);
        hsv1 = cvCreateImage( cvGetSize(frame), 8, 3);
        cvReleaseImage(&hue1);
        hue1 = cvCreateImage( cvGetSize(frame), 8, 1);
        cvCvtColor(frame,hsv1,CV_BGR2HSV);
        cvSplit(hsv1,hue1,0,0,0);
        cvCalcBackProject( &hue1, backproject, hist );
        cvSmooth(backproject,backproject,CV_GAUSSIAN);
        cvSmooth(backproject,backproject,CV_MEDIAN);
        if( g_storage == NULL )
        g_storage = cvCreateMemStorage(0);
        else
        cvClearMemStorage( g_storage );
        CvSeq* contours=0;
        CvSeq* result =0;
        cvFindContours(backproject, g_storage, &contours );
        if(contours)
        {
            result=cvApproxPoly(contours, sizeof(CvContour), g_storage,
            CV_POLY_APPROX_DP, 7, 1);
        }
        cvZero( backproject);
        for( ; result != 0; result = result->h_next )
        {
            double area = cvContourArea( result );
            cvDrawContours( backproject,result, CV_RGB(255,255, 255), CV_RGB(255,0, 255)
            , -1,CV_FILLED, 8 );
            for( int i=1; i<result->total; i++ )
            {
                if(i>=1 and abs(area)>300)
                {
                    CvPoint* p2 = CV_GET_SEQ_ELEM( CvPoint, result, i );
                    if(1)
                    {
                        avgx=avgx+p2->x;
                        avgy=avgy+p2->y;
                        j=j+1;
                        cvCircle(backproject,cvPoint(p2->x,p2->y ),10,
                        cvScalar(255,255,255));
                    }
                }
            }
        }
        if( j > 0 ) //guard against division by zero when no contour points were found
        {
            cvCircle( backproject, cvPoint(avgx/j, avgy/j ), 40, cvScalar(255,255,255) );
            x = ( avgx/j );
            y = ( avgy/j );
            x = ( (x*1240)/640 )-20; //map from camera coordinates to screen coordinates
            y = ( (y*840)/480 )-20;
        }
        if ( (abs(tmpx-x)>6 or abs(tmpy-y)>6 ) and j )
        {
            tmpx = x;
            tmpy = y;
            chk=0;
        }
        else chk++;
        mouse_move1( tmpx, tmpy, display );
        if ( chk==10 )
        {
            mouse_click( 5, 2, display );
            mouse_click( 5, 3, display );
        }
        cvSaveImage( "final.jpg", frame );
        cvSaveImage( "final1.jpg", backproject );
        cvShowImage( "CA", backproject );
        char c = cvWaitKey(33);
        if( c == 27 )
        break; //function break and destroying windows if press <escape> key
    }
    cvReleaseCapture( &capture );
    cvDestroyWindow( "CA" );
    return 0; //cam() is declared to return int
}
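
The hand centroid above is the mean of the approximated contour vertices. An alternative sketch that weights every back-projection pixel instead, using image moments (cvMoments with binary=1 treats all nonzero pixels equally):

/* centroid of the back-projection via image moments:
   m10/m00 and m01/m00 are the center of mass */
CvMoments m;
cvMoments(backproject, &m, 1);
if (m.m00 > 0) {
    int cx = (int)(m.m10 / m.m00);
    int cy = (int)(m.m01 / m.m00);
    cvCircle(backproject, cvPoint(cx, cy), 40, cvScalar(255,255,255));
}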
Exemplo n.º 25
RTC::ReturnCode_t Sepia::onExecute(RTC::UniqueId ec_id)
{
    // Common CV actions
    // check for new data
    if (m_image_origIn.isNew()) 
    {
        // read the InPort data
        m_image_origIn.read();

        // recreate the buffers only when the size has changed
        if(m_in_height != m_image_orig.height || m_in_width != m_image_orig.width)
        {
            printf("[onExecute] Size of input image does not match!\n");

            m_in_height = m_image_orig.height;
            m_in_width  = m_image_orig.width;
            
            if(m_image_buff       != NULL)
                cvReleaseImage(&m_image_buff);
            if(m_hsvImage         != NULL)
                cvReleaseImage(&m_hsvImage);
            if(m_hueImage         != NULL)
                cvReleaseImage(&m_hueImage);
            if(m_saturationImage  != NULL)
                cvReleaseImage(&m_saturationImage);
            if(m_valueImage       != NULL)
                cvReleaseImage(&m_valueImage);
            if(m_mergeImage       != NULL)
                cvReleaseImage(&m_mergeImage);
            if(m_destinationImage != NULL)
                cvReleaseImage(&m_destinationImage);

            // allocate temporary images for the conversion
            m_image_buff       = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 3);

	        m_hsvImage         = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 3);

            m_hueImage         = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 1);
	        m_saturationImage  = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 1);
	        m_valueImage       = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 1);

	        m_mergeImage       = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 3);
	        m_destinationImage = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 3);
        }

        // copy the InPort image data into the IplImage's imageData
        memcpy(m_image_buff->imageData,(void *)&(m_image_orig.pixels[0]),m_image_orig.pixels.length());

        // Alternative actions

        //	convert BGR to HSV
        cvCvtColor(m_image_buff, m_hsvImage, CV_BGR2HSV);

        //	split the HSV image into H, S and V planes
        cvSplit(m_hsvImage, m_hueImage, m_saturationImage, m_valueImage, NULL); 

        //	set H and S to fixed values
        cvSet(m_hueImage,        cvScalar( m_nHue ),        NULL);
        cvSet(m_saturationImage, cvScalar( m_nSaturation ), NULL);

        //	merge the three channels
        cvMerge(m_hueImage, m_saturationImage, m_valueImage, NULL, m_mergeImage);

        //	convert HSV back to BGR
        cvCvtColor(m_mergeImage, m_destinationImage, CV_HSV2BGR);

        // get the size of the image data
        int len = m_destinationImage->nChannels * m_destinationImage->width * m_destinationImage->height;
                
        // set the image size information
        m_image_sepia.pixels.length(len);        
        m_image_sepia.width  = m_destinationImage->width;
        m_image_sepia.height = m_destinationImage->height;

        // copy the processed image data to the OutPort
        memcpy((void *)&(m_image_sepia.pixels[0]), m_destinationImage->imageData,len);

        // write the processed image to the OutPort
        m_image_sepiaOut.write();
    }

    return RTC::RTC_OK;
}
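
The sepia effect comes from overwriting the H and S planes with constants while leaving V (the brightness) untouched. A sketch with assumed constants, using OpenCV's 8-bit HSV convention (hue 0..180, saturation 0..255); the values are illustrative, not read from this component's configuration:

// hypothetical sepia constants; hue around 15-22 gives the brownish tone
int nHue        = 18;
int nSaturation = 100;
cvSet(m_hueImage,        cvScalar(nHue),        NULL);
cvSet(m_saturationImage, cvScalar(nSaturation), NULL);
// the V plane is left untouched, so brightness and detail are preserved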
Exemplo n.º 26
int main(int argc, char* argv[]){
	CCmdLine cmdLine;
	
	cmdLine.SplitLine(argc, argv);

	if ( !(cmdLine.HasSwitch("-i") && cmdLine.HasSwitch("-o") && (cmdLine.HasSwitch("-pos") || cmdLine.HasSwitch("-f") || cmdLine.HasSwitch("-pbOnly"))) ){
		fprintf(stderr, "usage: %s -i <image> -o <output-directory> < -pos <x> <y> | -f <fixation-points-file> > [ -pb <probabilistic-boundary-prefix ] [ -flow <optical-flow-file> ] [ -sobel ]\n",argv[0]);
		fprintf(stderr, "OR \t %s -pbOnly -i <image> -o <output-probabilistic-boundary-prefix>\n",argv[0]);
		exit(1);
	}
	class segLayer frame1;
	char tmp[80];
	strcpy (tmp, cmdLine.GetArgument("-i", 0).c_str());

	int64 tic1,tic2,tic3,tic4;
	double ticFrequency = cvGetTickFrequency()*1000000;
	tic1=cvGetTickCount();

	IplImage *im=cvLoadImage(tmp), *im2;
	int maxWidth=640; //same limit whether or not CUDA support is compiled in
	bool resized=false;
	float scale=1;
	if(cvGetSize(im).width>maxWidth){
		scale=maxWidth/(double)(cvGetSize(im).width);
		printf("Image too big, resizing it for the segmentation...\n");
	    	int newHeight=(int)(cvGetSize(im).height*scale);
	    	im2=cvCreateImage( cvSize(maxWidth,newHeight), IPL_DEPTH_8U, 3 );
	    	cvResize(im,im2);
		resized=true;
	}else{
		im2=im;
	}
  	frame1.setImage(im2);

	if (cmdLine.HasSwitch("-pb")){
		strcpy (tmp, cmdLine.GetArgument("-pb", 0).c_str());
		frame1.readPbBoundary(tmp);
	}else{

		// Edge detection!
		if (cmdLine.HasSwitch("-sobel"))
	  		frame1.edgeSobel();
		else{
	#ifdef CUDA_SUPPORT
			if(!get_lock()){
				fprintf(stderr,"Impossible to get the lock...\n");
				exit(1);
			}
			frame1.edgeGPU(false);
			if(!release_lock()){
				fprintf(stderr,"Impossible to release the lock...\n");
				exit(1);
			}
	#else
			frame1.edgeCGTG();
	#endif
		}

		tic2=cvGetTickCount();

		if (cmdLine.HasSwitch("-flow")){
			strcpy (tmp, cmdLine.GetArgument("-flow", 0).c_str());
			IplImage *flow=cvLoadImage(tmp);
			IplImage *flow32 = cvCreateImage(cvGetSize(flow), IPL_DEPTH_32F,3);
			IplImage *flowU = cvCreateImage(cvGetSize(flow), IPL_DEPTH_32F,1);
	  		IplImage *flowV = cvCreateImage(cvGetSize(flow), IPL_DEPTH_32F,1);
			cvConvertScale(flow, flow32, 40/255.,-20);
			cvSplit(flow32,flowU,flowV,NULL,NULL); //both channels in one call
			frame1.setU(flowU);
			frame1.setV(flowV);
			cvReleaseImage(&flow);
			cvReleaseImage(&flow32);
		}


		frame1.generatePbBoundary();
	}
	
	if (cmdLine.HasSwitch("-pbOnly")){
		strcpy (tmp, cmdLine.GetArgument("-o", 0).c_str());
		frame1.savePbBoundary(tmp);
	}else{
		frame1.allocateMemForContours();// Don't forget to allocate memory to store the region contours.
		//select fixation point!
		if(cmdLine.HasSwitch("-pos")){
			float x,y;
			sscanf(cmdLine.GetArgument("-pos", 0).c_str(),"%f",&x);
			sscanf(cmdLine.GetArgument("-pos", 1).c_str(),"%f",&y);
			frame1.assignFixPt((int)(x*scale), (int)(y*scale));
		}else{
			strcpy (tmp, cmdLine.GetArgument("-f", 0).c_str());
			frame1.readFixPts(tmp,scale);
		}
		//segment
		frame1.segmentAllFixs();		
		
		tic3=cvGetTickCount();

		//display!
		//frame1.displayCurrSegs(-1);
		strcpy (tmp, cmdLine.GetArgument("-o", 0).c_str());
		//sprintf(tmp,"%s/",tmp);
		if(resized)
			frame1.saveResizedRegions(tmp,cvGetSize(im).width,cvGetSize(im).height);
		else
			frame1.saveRegions(tmp);
		//release memory!
		frame1.deallocateMemForContours();
	}

	tic4=cvGetTickCount();
	printf("\n\nTotal time = %f\n",(tic4-tic1)/ticFrequency);
	if(!cmdLine.HasSwitch("-pb"))
		printf("\t edges detection = %f\n",(tic2-tic1)/ticFrequency);
	if(!cmdLine.HasSwitch("-pbOnly"))
		printf("\t segmentation = %f\n",(tic3-tic2)/ticFrequency);
  	return 0;
	
}
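
cvConvertScale computes dst = src*scale + shift, so the 40/255. and -20 above decode the 8-bit flow image into displacements in [-20, 20] pixels. A sketch deriving those parameters from an assumed encoding range:

/* map the 8-bit flow encoding [0,255] onto [fmin, fmax] pixels */
double fmin = -20.0, fmax = 20.0;     /* assumed encoding range */
double scale = (fmax - fmin) / 255.0; /* = 40/255, as above */
double shift = fmin;                  /* = -20 */
cvConvertScale(flow, flow32, scale, shift);
cvSplit(flow32, flowU, flowV, NULL, NULL);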
Exemplo n.º 27
int main(int argc, char ** argv)
{
    const char* filename = argc >=2 ? argv[1] : "lena.jpg";
    IplImage * im;

    IplImage * realInput;
    IplImage * imaginaryInput;
    IplImage * complexInput;
    int dft_M, dft_N;
    CvMat* dft_A, tmp;
    IplImage * image_Re;
    IplImage * image_Im;
    double m, M;

    im = cvLoadImage( filename, CV_LOAD_IMAGE_GRAYSCALE );
    if( !im )
        return -1;

    realInput = cvCreateImage( cvGetSize(im), IPL_DEPTH_64F, 1);
    imaginaryInput = cvCreateImage( cvGetSize(im), IPL_DEPTH_64F, 1);
    complexInput = cvCreateImage( cvGetSize(im), IPL_DEPTH_64F, 2);

    cvScale(im, realInput, 1.0, 0.0);
    cvZero(imaginaryInput);
    cvMerge(realInput, imaginaryInput, NULL, NULL, complexInput);

    dft_M = cvGetOptimalDFTSize( im->height - 1 );
    dft_N = cvGetOptimalDFTSize( im->width - 1 );

    dft_A = cvCreateMat( dft_M, dft_N, CV_64FC2 );
    image_Re = cvCreateImage( cvSize(dft_N, dft_M), IPL_DEPTH_64F, 1);
    image_Im = cvCreateImage( cvSize(dft_N, dft_M), IPL_DEPTH_64F, 1);

    // copy A to dft_A and pad dft_A with zeros
    cvGetSubRect( dft_A, &tmp, cvRect(0,0, im->width, im->height));
    cvCopy( complexInput, &tmp, NULL );
    if( dft_A->cols > im->width )
    {
        cvGetSubRect( dft_A, &tmp, cvRect(im->width,0, dft_A->cols - im->width, im->height));
        cvZero( &tmp );
    }

    // no need to pad the bottom part of dft_A with zeros because the
    // nonzero_rows parameter is used in the cvDFT() call below

    cvDFT( dft_A, dft_A, CV_DXT_FORWARD, complexInput->height );

    cvNamedWindow("win", 0);
    cvNamedWindow("magnitude", 0);
    cvShowImage("win", im);

    // Split Fourier in real and imaginary parts
    cvSplit( dft_A, image_Re, image_Im, 0, 0 );

    // Compute the magnitude of the spectrum Mag = sqrt(Re^2 + Im^2)
    cvPow( image_Re, image_Re, 2.0);
    cvPow( image_Im, image_Im, 2.0);
    cvAdd( image_Re, image_Im, image_Re, NULL);
    cvPow( image_Re, image_Re, 0.5 );

    // Compute log(1 + Mag)
    cvAddS( image_Re, cvScalarAll(1.0), image_Re, NULL ); // 1 + Mag
    cvLog( image_Re, image_Re ); // log(1 + Mag)


    // Rearrange the quadrants of Fourier image so that the origin is at
    // the image center
    cvShiftDFT( image_Re, image_Re );

    cvMinMaxLoc(image_Re, &m, &M, NULL, NULL, NULL);
    cvScale(image_Re, image_Re, 1.0/(M-m), 1.0*(-m)/(M-m));
    cvShowImage("magnitude", image_Re);

    cvWaitKey(-1);
    return 0;
}
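
cvShiftDFT is not part of the OpenCV C API; it comes from this sample's support code and swaps the diagonal quadrants so that the zero-frequency term ends up at the image center. A sketch of one way to implement such a swap in place, assuming even width and height:

/* swap quadrants 1<->4 and 2<->3 through a temporary quarter-size matrix */
void shift_dft_inplace(CvArr* img)
{
    CvMat stub, *mat = cvGetMat(img, &stub, NULL, 0);
    int cx = mat->cols / 2, cy = mat->rows / 2;
    CvMat q1, q2, q3, q4;
    CvMat* tmp = cvCreateMat(cy, cx, cvGetElemType(mat));

    cvGetSubRect(mat, &q1, cvRect(0,  0,  cx, cy));  /* top-left     */
    cvGetSubRect(mat, &q2, cvRect(cx, 0,  cx, cy));  /* top-right    */
    cvGetSubRect(mat, &q3, cvRect(0,  cy, cx, cy));  /* bottom-left  */
    cvGetSubRect(mat, &q4, cvRect(cx, cy, cx, cy));  /* bottom-right */

    cvCopy(&q1, tmp, NULL); cvCopy(&q4, &q1, NULL); cvCopy(tmp, &q4, NULL);
    cvCopy(&q2, tmp, NULL); cvCopy(&q3, &q2, NULL); cvCopy(tmp, &q3, NULL);
    cvReleaseMat(&tmp);
}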
Exemplo n.º 28
ReturnType HandsMotionTracking::onExecute()
{
	// fetch the image from the InPort
	opros_any *pData = ImageIn.pop();
	RawImage result;

	// output data
	std::vector<PositionDataType> data;


	if(pData != NULL){
		
		// get the image from the port
		RawImage Image = ImageIn.getContent(*pData);
		RawImageData *RawImage = Image.getImage();


		// get the size of the current image
		m_in_width = RawImage->getWidth();
		m_in_height = RawImage->getHeight();

		// release the old buffers first
		if(m_image_buff			!= NULL)
			cvReleaseImage(&m_image_buff);
		if(m_image_dest         != NULL)
			cvReleaseImage(&m_image_dest);
		if(m_image_dest2        != NULL)
			cvReleaseImage(&m_image_dest2);

		if(m_image_th			!= NULL)
			cvReleaseImage(&m_image_th);
		if(m_image_th2			!= NULL)
			cvReleaseImage(&m_image_th2);

		// allocate the image buffers
        m_image_buff   = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 3);//original image
		m_image_dest   = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 3);
		m_image_dest2  = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 3);

		m_image_th     = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 1);//region extraction image
		m_image_th2    = cvCreateImage(cvSize(m_in_width, m_in_height), IPL_DEPTH_8U, 1);//region extraction image
		
		
		if(!video_flag)
		{
			std::string cpath = getProperty("opros.component.dir");
			std::string file = getProperty("VideoFile");
			if (file == "") file = "sample.avi";

			std::string path = cpath + file;

			m_video	= NULL;
			m_video = cvCreateFileCapture(path.c_str()); //video
			video_flag = true;// prevent the video from being reopened every cycle
			
		}

		// copy the image data with memcpy
		memcpy(m_image_buff->imageData, RawImage->getData(), RawImage->getSize());

		// copy for output
		cvCopy(m_image_buff, m_image_dest, 0);

		// images for color channel separation
		IplImage* m_image_YCrCb = cvCreateImage(cvGetSize(m_image_buff), IPL_DEPTH_8U, 3);
		IplImage* m_Y  = cvCreateImage(cvGetSize(m_image_buff), IPL_DEPTH_8U, 1);
		IplImage* m_Cr = cvCreateImage(cvGetSize(m_image_buff), IPL_DEPTH_8U, 1);
		IplImage* m_Cb = cvCreateImage(cvGetSize(m_image_buff), IPL_DEPTH_8U, 1);

		cvCvtColor(m_image_buff, m_image_YCrCb, CV_RGB2YCrCb);   //RGB -> YCrCb conversion

		cvSplit(m_image_YCrCb, m_Y, m_Cr, m_Cb, NULL);   //split the channels

		//pixel values for the region that needs to be extracted
		unsigned char m_Cr_val = 0;			
		unsigned char m_Cb_val = 0;

		// skin color extraction
		for(int i=0;i<m_image_buff->height;i++)            
		{
			for(int j=0;j<m_image_buff->width;j++)
			{  
				//read the Cr and Cb values
				m_Cr_val = (unsigned char)m_Cr->imageData[i*m_Cr->widthStep+j];
				m_Cb_val = (unsigned char)m_Cb->imageData[i*m_Cb->widthStep+j];

				//check whether this pixel falls in the skin color range
				if( (77 <= m_Cr_val) && (m_Cr_val <= 127) && (133 <= m_Cb_val) && (m_Cb_val <= 173) )
				{
					// skin pixels become white
					m_image_buff->imageData[i*m_image_buff->widthStep+j*3+0] = (unsigned char)255; 
					m_image_buff->imageData[i*m_image_buff->widthStep+j*3+1] = (unsigned char)255;
					m_image_buff->imageData[i*m_image_buff->widthStep+j*3+2] = (unsigned char)255; 
				}
				else
				{ 
					// everything else becomes black
					m_image_buff->imageData[i*m_image_buff->widthStep+j*3+0]= 0;
					m_image_buff->imageData[i*m_image_buff->widthStep+j*3+1]= 0;
					m_image_buff->imageData[i*m_image_buff->widthStep+j*3+2]= 0;
				}
			}
		}
		
		//binarize the skin-extracted image
		cvCvtColor(m_image_buff, m_image_th, CV_RGB2GRAY); 

		//morphological operations to remove noise
		cvDilate (m_image_th, m_image_th, NULL, 2);//dilate
		cvErode  (m_image_th, m_image_th, NULL, 2);//erode

		//initialize variables and image buffers
		int temp_num = 0;
		int StartX , StartY, EndX , EndY;
		int nNumber = 0;
		m_nThreshold	= 100;

		if( m_rec_out != NULL )
		{
			delete m_rec_out;

			m_rec_out	= NULL;
			m_nBlobs_out	= _DEF_MAX_BLOBS;
		}
		else
		{
			m_rec_out	= NULL;
			m_nBlobs_out	= _DEF_MAX_BLOBS;
		}
	
		if( m_image_th2 != NULL )	
			cvReleaseImage( &m_image_th2 );

		//create a separate image for labeling
		m_image_th2			= cvCloneImage( m_image_th );

		//store the size of the image to label
		int nWidth	= m_image_th2->width;
		int nHeight = m_image_th2->height;

		//allocate a buffer matching the image size
		unsigned char* tmpBuf = new unsigned char [nWidth * nHeight];

		for(int j=0; j<nHeight ;j++)	
			for(int i=0; i<nWidth ;i++)	
				//visit every pixel
				tmpBuf[j*nWidth+i] = (unsigned char)m_image_th2->imageData[j*m_image_th2->widthStep+i];
		
////// initialize the visited points used by the labeling

		m_vPoint_out = new Visited [nWidth * nHeight];
		
		for(int nY = 0; nY < nHeight; nY++)
		{
			for(int nX = 0; nX < nWidth; nX++)
			{
				m_vPoint_out[nY * nWidth + nX].bVisitedFlag		= FALSE;
				m_vPoint_out[nY * nWidth + nX].ptReturnPoint.x	= nX;
				m_vPoint_out[nY * nWidth + nX].ptReturnPoint.y	= nY;
			}
		}

////// run the labeling
		for(int nY = 0; nY < nHeight; nY++)
		{
			for(int nX = 0; nX < nWidth; nX++)
			{
				if(tmpBuf[nY * nWidth + nX] == 255)		// Is this a new component?, 255 == Object
				{
					temp_num++;

					tmpBuf[nY * nWidth + nX] = temp_num;
					
					StartX = nX, StartY = nY, EndX = nX, EndY= nY;

					__NRFIndNeighbor(tmpBuf, nWidth, nHeight, nX, nY, &StartX, &StartY, &EndX, &EndY, m_vPoint_out);

					if(__Area(tmpBuf, StartX, StartY, EndX, EndY, nWidth, temp_num) < m_nThreshold)
					{
		 				for(int k = StartY; k <= EndY; k++)
						{
							for(int l = StartX; l <= EndX; l++)
							{
								if(tmpBuf[k * nWidth + l] == temp_num)
									tmpBuf[k * nWidth + l] = 0;
							}
						}
						--temp_num;

						if(temp_num > 250)
							temp_num = 0;
					}
				}
			}
		}
		// release the point memory
		delete m_vPoint_out;

		//keep the result
		nNumber = temp_num;
		
		//create one rect per label
		if( nNumber != _DEF_MAX_BLOBS )		
			m_rec_out = new CvRect [nNumber];
	
		//build the rects
			if( nNumber != 0 )	
				DetectLabelingRegion(nNumber, tmpBuf, nWidth, nHeight,m_rec_out);

				for(int j=0; j<nHeight; j++)
					for(int i=0; i<nWidth ; i++)
						m_image_th2->imageData[j*m_image_th2->widthStep+i] = tmpBuf[j*nWidth+i];
		
				delete tmpBuf;
	
		//keep the number of labels
		m_nBlobs_out = nNumber;
	
		//filter the labeled regions
		int nMaxWidth	= m_in_width  * 9 / 10;	   // drop labels wider than 90% of the image
		int nMaxHeight	= m_in_height * 9 / 10;	   // drop labels taller than 90% of the image

		//set the minimum and maximum region sizes - depends on the frame size
		_BlobSmallSizeConstraint( 5, 150, m_rec_out, &m_nBlobs_out);
		_BlobBigSizeConstraint(nMaxWidth, nMaxHeight,m_rec_out, &m_nBlobs_out);

		//allocate the storage used below
		storage1 = cvCreateMemStorage(0);
		storage2 = cvCreateMemStorage(0);

		//initialize variables
		CvPoint point;
		CvSeq* seq[10];
		CvSeq* hull;
		CvPoint end_pt;
		CvPoint center;

		//initialize the outgoing data
		outData[0].x = 0, outData[0].y  = 0;
		outData[1].x = 0, outData[1].y  = 0;
		outData[2].x = 0, outData[2].y  = 0;

		int num = 0;
		int temp_x = 0;
		int temp_y = 0;
		int rect = 0;
		
		//make sure we do not exceed the size of the prepared sequence array
		//for now, restrict processing to a single region
		if(m_nBlobs_out > 1)
		{
			m_nBlobs_out = 1;
		}

		//process the inside of each labeled region
		for( int i=0; i <  m_nBlobs_out; i++ )
		{
			//two corner points used to draw the rectangle
			CvPoint	pt1 = cvPoint(	m_rec_out[i].x, m_rec_out[i].y );
			CvPoint pt2 = cvPoint(	pt1.x + m_rec_out[i].width,pt1.y + m_rec_out[i].height );

			// set the color
			CvScalar color	= cvScalar( 0, 0, 255 );

			//draw the label rectangle - for debugging
			//cvDrawRect( m_image_dest, pt1, pt2, color);
			
			//create the images that will hold the label's region of interest
			temp_mask = cvCreateImage(cvSize(m_rec_out[i].width, m_rec_out[i].height),8,1);
			temp_mask2 = cvCreateImage(cvSize(m_rec_out[i].width, m_rec_out[i].height),8,1);
			
			//set the ROI
			cvSetImageROI(m_image_th, m_rec_out[i]);
				
			//copy out the ROI
			cvCopy(m_image_th, temp_mask, 0);

			//reset the ROI
			cvResetImageROI(m_image_th);

			
			//create a sequence for processing the object inside the ROI
			seq[i] = cvCreateSeq(CV_SEQ_KIND_GENERIC | CV_32SC2,sizeof(CvContour),sizeof(CvPoint), storage1);
			
			//build the sequence from the white pixels of the extracted ROI
			for(int j =0; j < temp_mask ->height ; j++)
			{
				for(int k = 0; k < temp_mask ->width; k++)
				{				
					if((unsigned char)temp_mask->imageData[j*temp_mask->widthStep+k] == 255)
					{
						point.x = k;		//store the white pixel's x coordinate
						point.y = j;		//store the white pixel's y coordinate
						cvSeqPush(seq[i], &point);	//push the coordinate into the sequence
						temp_x += point.x; //accumulate the coordinates
						temp_y += point.y; //accumulate the coordinates
						num++;             //count the pixels

					}	
				}
			} 
			
			//reset the coordinates
			point.x				= 0;
			point.y				= 0;
			end_pt.x			= 0;
			end_pt.y			= 0;
			center.x			= 0;
			center.y			= 0;
			
			CvPoint dist_pt;			//hull point farthest from the center
			double fMaxDist		= 0;    //maximum distance from the center
			double fDist		= 0;	//used for the distance computation
		
	
			//find the center - the mean of the pixel coordinates
			if(num != 0)
			{
				center.x			= (int)temp_x/num; //mean coordinate
				center.y			= (int)temp_y/num; //mean coordinate
			}

			//set the ROI
			cvSetImageROI(m_image_dest, m_rec_out[i]);

///////// draw the convex hull ////////
			if(seq[i]->total !=0)
			{	
				//compute the convex hull
				hull = cvConvexHull2(seq[i], 0, CV_COUNTER_CLOCKWISE, 0);	
				point = **CV_GET_SEQ_ELEM(CvPoint*, hull,hull->total-1);

				//walk along the hull points
				for(int x = 0; x < hull->total; x++)
				{
					CvPoint hull_pt = **CV_GET_SEQ_ELEM(CvPoint*, hull,x);

					//draw a hull line
					//cvLine(m_image_dest, point, hull_pt, CV_RGB(255, 255, 0 ),2, 8);
					point = hull_pt;

					//find the maximum distance
					dist_pt =  **CV_GET_SEQ_ELEM(CvPoint*, hull,x);

					fDist = sqrt((double)((center.x - dist_pt.x) * (center.x - dist_pt.x) 
						+ (center.y - dist_pt.y) * (center.y - dist_pt.y)));

					if(fDist > fMaxDist)
					{
						max_pt = dist_pt;
						fMaxDist = fDist;
					}
				}
			}
			

			//draw the center point
			cvCircle(m_image_dest,center,5, CV_RGB(0,0,255), 5);

			//store the center point for output
			outData[0].x = center.x;
			outData[0].y = center.y;
	
			
//////// build the mask ///////

			//create the mask image drawn around the center point
			circle_mask = cvCreateImage(cvGetSize(temp_mask), 8, 1);
			
			//black background
			cvSetZero(circle_mask);
			
			//white circle - drawn on the binary image for combining with the hand image
			int radi = (int)m_rec_out[i].height/2.9; // circle size tuned by hand..

			//draw the white circle and white rectangle that make up the mask image
			cvCircle(circle_mask, center, radi, CV_RGB(255,255,255),CV_FILLED);
			cvDrawRect(circle_mask, cvPoint(center.x - radi, center.y),cvPoint(center.x + radi, pt2.y),
				 CV_RGB(255,255,255),CV_FILLED);

			//subtract the mask (removes the palm)
			cvSub(temp_mask, circle_mask, temp_mask, 0);

	
/////// labeling inside the ROI - fingertip extraction //////

			//initialize variables and image buffers
			int temp_num_in = 0;
			int StartX_in , StartY_in, EndX_in , EndY_in;
			int nNumber_in = 0;
			m_nThreshold_in	= 10;

			if( m_rec_in != NULL )
			{
				delete m_rec_in;

				m_rec_in	= NULL;
				m_nBlobs_in	= _DEF_MAX_BLOBS;
			}
			else
			{
				m_rec_in	= NULL;
				m_nBlobs_in	= _DEF_MAX_BLOBS;
			}

			if( temp_mask2 != NULL )	
				cvReleaseImage( &temp_mask2 );

			temp_mask2			= cvCloneImage( temp_mask );	

			//store the size of the incoming image
			int nWidth	= temp_mask2->width;
			int nHeight = temp_mask2->height;
		
			//allocate a buffer matching the image size
			unsigned char* tmpBuf_in = new unsigned char [nWidth * nHeight];

			for(int j=0; j<nHeight ;j++)	
				for(int i=0; i<nWidth ;i++)
					//visit every pixel
					tmpBuf_in[j*nWidth+i] = (unsigned char)temp_mask2->imageData[j*temp_mask2->widthStep+i];
				
	
		/////// initialize the visited points used by the labeling ////////
			
			m_vPoint_in = new Visited [nWidth * nHeight];

			for(int nY = 0; nY < nHeight; nY++)
			{
				for(int nX = 0; nX < nWidth; nX++)
				{
					m_vPoint_in[nY * nWidth + nX].bVisitedFlag		= FALSE;
					m_vPoint_in[nY * nWidth + nX].ptReturnPoint.x	= nX;
					m_vPoint_in[nY * nWidth + nX].ptReturnPoint.y	= nY;
				}
			}

			////run the labeling
			for(int nY = 0; nY < nHeight; nY++)
			{
				for(int nX = 0; nX < nWidth; nX++)
				{
					if(tmpBuf_in[nY * nWidth + nX] == 255)		// Is this a new component?, 255 == Object
					{
						temp_num_in++;

						tmpBuf_in[nY * nWidth + nX] = temp_num_in;
						
						StartX_in = nX, StartY_in = nY, EndX_in = nX, EndY_in= nY;

						__NRFIndNeighbor(tmpBuf_in, nWidth, nHeight, nX, nY, 
							&StartX_in, &StartY_in, &EndX_in, &EndY_in,m_vPoint_in);

						if(__Area(tmpBuf_in, StartX_in, StartY_in, EndX_in, EndY_in, nWidth, temp_num_in) < m_nThreshold_in)
						{
		 					for(int k = StartY_in; k <= EndY_in; k++)
							{
								for(int l = StartX_in; l <= EndX_in; l++)
								{
									if(tmpBuf_in[k * nWidth + l] == temp_num_in)
										tmpBuf_in[k * nWidth + l] = 0;
								}
							}
							--temp_num_in;

							if(temp_num_in > 250)
								temp_num_in = 0;
						}
					}
				}
			}
			// release the point memory
			delete m_vPoint_in;

			//keep the number of labels
			nNumber_in = temp_num_in;

			if( nNumber_in != _DEF_MAX_BLOBS )		
				m_rec_in = new CvRect [nNumber_in];
		
				if( nNumber_in != 0 )	
					DetectLabelingRegion(nNumber_in, tmpBuf_in, nWidth, nHeight,m_rec_in);
				
				for(int j=0; j<nHeight; j++)
					for(int i=0; i<nWidth ; i++)
						temp_mask2->imageData[j*temp_mask2->widthStep+i] = tmpBuf_in[j*nWidth+i];
			
					delete tmpBuf_in;
				
			m_nBlobs_in = nNumber_in;

			//set the minimum and maximum region sizes
			_BlobSmallSizeConstraint( 5, 5, m_rec_in, &m_nBlobs_in);
			_BlobBigSizeConstraint( temp_mask2->width, temp_mask2->height,m_rec_in, &m_nBlobs_in);

			//declare and initialize
			CvPoint center_in;
			CvPoint point_in;
			
			point_in.x = 0;
			point_in.y = 0;
			center_in.x = 0;
			center_in.y = 0;
			CvSeq* seq_in[20];

			//make sure we do not exceed the size of the prepared sequence array
			if(m_nBlobs_in > 20)
			{
				m_nBlobs_in =20;
			}

			for( int ni =0; ni <  m_nBlobs_in; ni++ )
			{		
				//two corner points used to draw the rectangle
				CvPoint	pt1 = cvPoint(	m_rec_in[ni].x, m_rec_in[ni].y );
				CvPoint pt2 = cvPoint(	pt1.x + m_rec_in[ni].width,pt1.y + m_rec_in[ni].height );

				//set the color
				CvScalar color	= cvScalar( 255,0 , 255 );
				
				//draw the label rectangle
				//cvDrawRect( m_image_dest, pt1, pt2, color);
				
				//allocate the fingertip mask to process
				in_mask = cvCreateImage(cvSize(m_rec_in[ni].width, m_rec_in[ni].height),8,1);

				//set the ROI
				cvSetImageROI(temp_mask, m_rec_in[ni]);
				
				//copy the needed region
				cvCopy(temp_mask, in_mask, 0);

				//reset the ROI
				cvResetImageROI(temp_mask);

				//create a sequence for processing the object inside the ROI
				seq_in[ni] = cvCreateSeq(CV_SEQ_KIND_GENERIC | CV_32SC2,sizeof(CvContour),sizeof(CvPoint), storage2);

				//initialize
				int temp_x_in = 0;
				int temp_y_in = 0;	
				int num_in = 0;
				
				//build the sequence from the white pixels of the extracted ROI
				for(int j =0; j < in_mask ->height ; j++)
				{
					for(int k = 0; k < in_mask ->width; k++)
					{				
						if((unsigned char)in_mask->imageData[j*in_mask->widthStep+k] == 255)
						{
							point_in.x = k;		//store the white pixel's x coordinate
							point_in.y = j;		//store the white pixel's y coordinate
							cvSeqPush(seq_in[ni], &point_in);	//push the coordinate into the sequence
							temp_x_in += point_in.x; //accumulate the coordinates
							temp_y_in += point_in.y; //accumulate the coordinates
							num_in++;             //count the pixels

						}

					}
				}
				
				//initialize
				max_pt_in.x = 0;
				max_pt_in.y = 0;					
				double fMaxDist_in	= 0;
				double fDist_in	= 0;
			
				//find the center - the mean of the pixel coordinates
				if(num_in != 0)
				{
					center_in.x			= (int)temp_x_in/num_in + pt1.x; //mean coordinate
					center_in.y			= (int)temp_y_in/num_in + pt1.y; //mean coordinate
					
				}

				//for now, handle only the case of exactly two fingertips..
				if(m_nBlobs_in == 2)  
				{	
					//initialize
					finger_pt[ni].x = 0;
					finger_pt[ni].y = 0;

					if(seq_in[ni]->total !=0)
					{	
						//compute the convex hull - gives the contour point coordinates
						CvSeq* hull_in = cvConvexHull2(seq_in[ni], 0, CV_COUNTER_CLOCKWISE, 0);	
						//point_in = **CV_GET_SEQ_ELEM(CvPoint*, hull_in,hull_in->total-1);


						//walk along the hull points
						for(int nx = 0; nx < hull_in->total; nx++)
						{
							CvPoint hull_pt_in = **CV_GET_SEQ_ELEM(CvPoint*, hull_in,nx);
							hull_pt_in.x = hull_pt_in.x  + pt1.x;
							hull_pt_in.y = hull_pt_in.y + pt1.y;

							//distance between the hand center and this hull point
							fDist_in = sqrt((double)((center.x - hull_pt_in.x) * (center.x - hull_pt_in.x) 
								+ (center.y - hull_pt_in.y) * (center.y - hull_pt_in.y)));

							//keep the farthest point
							if(fDist_in > fMaxDist_in)
							{
								max_pt_in = hull_pt_in;
								fMaxDist_in = fDist_in;
								
							}
						}
					}				
				
					//keep the farthest point
					finger_pt[ni].x = max_pt_in.x ;
					finger_pt[ni].y = max_pt_in.y ;
						
					//shift back to full-image coordinates
					finger_pt[ni].x = finger_pt[ni].x + m_rec_out[i].x;
					finger_pt[ni].y = finger_pt[ni].y + m_rec_out[i].y;		
				}
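
The circle-plus-rectangle mask above removes the palm from the skin blob so that only the finger regions survive labeling; each fingertip is then the convex-hull point farthest from the palm center. The core of that distance test, condensed into a sketch (hull and center as in the code above, sqrt from <math.h>):

/* fingertip = hull point with maximum distance from the palm center;
   hull elements are CvPoint* because cvConvexHull2 was called with
   return_points == 0 */
double best = 0.0;
CvPoint tip = cvPoint(0, 0);
for (int n = 0; n < hull->total; n++) {
    CvPoint p = **CV_GET_SEQ_ELEM(CvPoint*, hull, n);
    double dx = p.x - center.x, dy = p.y - center.y;
    double d = sqrt(dx*dx + dy*dy);
    if (d > best) { best = d; tip = p; }
}
/* 'tip' is the fingertip candidate for this blob */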
Exemplo n.º 29
//---------------------------------------------------------------
// Function : cv_ColorExtraction
// Summary  : color extraction
// Args     : src_img   = input image (8bit, 3ch)
//          : dst_img   = output image (8bit, 3ch)
//          : code      = color space conversion code (CV_BGR2HSV, CV_BGR2Lab, etc.)
//          : ch1_lower = lower threshold for ch1
//          : ch1_upper = upper threshold for ch1
//          : ch2_lower = lower threshold for ch2
//          : ch2_upper = upper threshold for ch2
//          : ch3_lower = lower threshold for ch3
//          : ch3_upper = upper threshold for ch3
// Returns  : none
// Notes    : if lower <= upper, the range lower..upper is extracted;
//          : if lower >  upper, the wrapped range (<= upper or >= lower) is extracted.
//---------------------------------------------------------------
void cv_ColorExtraction(IplImage* src_img, IplImage* dst_img,
                        int code,
                        int ch1_lower, int ch1_upper,
                        int ch2_lower, int ch2_upper,
                        int ch3_lower, int ch3_upper
                       ) {

    int i, k;

    IplImage *Color_img;
    IplImage *ch1_img, *ch2_img, *ch3_img;
    IplImage *Mask_img;

    int lower[3];
    int upper[3];
    int val[3];

    CvMat *lut;

    //color conversion according to 'code'
    Color_img = cvCreateImage(cvGetSize(src_img), src_img->depth, src_img->nChannels);
    cvCvtColor(src_img, Color_img, code);

    //build the 3-channel LUT
    lut    = cvCreateMat(256, 1, CV_8UC3);

    lower[0] = ch1_lower;
    lower[1] = ch2_lower;
    lower[2] = ch3_lower;

    upper[0] = ch1_upper;
    upper[1] = ch2_upper;
    upper[2] = ch3_upper;

    for (i = 0; i < 256; i++) {
        for (k = 0; k < 3; k++) {
            if (lower[k] <= upper[k]) {
                if ((lower[k] <= i) && (i <= upper[k])) {
                    val[k] = 255;
                } else {
                    val[k] = 0;
                }
            } else {
                if ((i <= upper[k]) || (lower[k] <= i)) {
                    val[k] = 255;
                } else {
                    val[k] = 0;
                }
            }
        }
        //set the LUT entry
        cvSet1D(lut, i, cvScalar(val[0], val[1], val[2]));
    }

    //apply the LUT per channel (binarizes each channel)
    cvLUT(Color_img, Color_img, lut);
    cvReleaseMat(&lut);

    //allocate one IplImage per channel
    ch1_img = cvCreateImage(cvGetSize(Color_img), Color_img->depth, 1);
    ch2_img = cvCreateImage(cvGetSize(Color_img), Color_img->depth, 1);
    ch3_img = cvCreateImage(cvGetSize(Color_img), Color_img->depth, 1);

    //split the per-channel binarized image into its separate channels
    cvSplit(Color_img, ch1_img, ch2_img, ch3_img, NULL);

    //AND all three channels together to build the mask image
    Mask_img = cvCreateImage(cvGetSize(Color_img), Color_img->depth, 1);
    cvAnd(ch1_img, ch2_img, Mask_img);
    cvAnd(Mask_img, ch3_img, Mask_img);

    //copy the masked region of src_img into dst_img
    cvZero(dst_img);
    cvCopy(src_img, dst_img, Mask_img);

    //release
    cvReleaseImage(&Color_img);
    cvReleaseImage(&ch1_img);
    cvReleaseImage(&ch2_img);
    cvReleaseImage(&ch3_img);
    cvReleaseImage(&Mask_img);

}
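
Because lower > upper is treated as a wrapped interval, the function can extract colors whose hue straddles zero, such as red in HSV. A sketch of such a call; the thresholds are illustrative and src_img/dst_img are assumed to be allocated 8-bit 3-channel images:

/* extract red in HSV: hue (0..180 in OpenCV) wraps around 0,
   so ch1_lower=170 > ch1_upper=10 selects h >= 170 or h <= 10 */
cv_ColorExtraction(src_img, dst_img, CV_BGR2HSV,
                   170, 10,    /* hue: wrapped red range */
                   80,  255,   /* saturation             */
                   50,  255);  /* value                  */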
Exemplo n.º 30
void GuiInit(char *guiFilename) {
	GtkBuilder      *builder; 
	GtkWidget       *window;
   	GtkLabel	*labelStatus;

	int 		numBins = 256;
	float 		range[] = {0, 255};
	float 		*ranges[] = { range };

    	builder = gtk_builder_new();
    	gtk_builder_add_from_file(builder, guiFilename, NULL);
	//Assign all the widgets
    	window = GTK_WIDGET(gtk_builder_get_object(builder, "window"));
    	imageMain = GTK_WIDGET(gtk_builder_get_object(builder, "imageMain"));
    	
    	imageRedHist = GTK_WIDGET(gtk_builder_get_object(builder, "imageRedHist"));
    	imageGreenHist = GTK_WIDGET(gtk_builder_get_object(builder, "imageGreenHist"));
    	imageBlueHist = GTK_WIDGET(gtk_builder_get_object(builder, "imageBlueHist"));

	labelStatus = GTK_LABEL(gtk_builder_get_object(builder, "labelStatus")); /* GTK_LABEL, since labelStatus is a GtkLabel* */
    	adjHistLow = GTK_WIDGET(gtk_builder_get_object(builder, "adjHistLow"));
    	adjHistHigh = GTK_WIDGET(gtk_builder_get_object(builder, "adjHistHigh"));
	txtMinHist = GTK_WIDGET(gtk_builder_get_object(builder, "txtMinHist"));
	txtMaxHist = GTK_WIDGET(gtk_builder_get_object(builder, "txtMaxHist"));
	//Assign all the function calls
	gtk_builder_connect_signals(builder, NULL);
	//unreference the builder object
    	g_object_unref(G_OBJECT(builder));

	gtk_widget_show(window);               

	gtk_label_set_text(labelStatus, ""); 
	//convert stacked image to 8bit image
	imgStack8bit = AdjustHistogram(imgStack, (float)0.0, (float)255.0);

	//Generate histogram from 8bit stacked image
	
	hist = cvCreateHist(1, &numBins, CV_HIST_ARRAY, ranges, 1);
	cvClearHist(hist);
	
	imgRed = cvCreateImage(cvGetSize(imgStack8bit), 8, 1);
	imgGreen = cvCreateImage(cvGetSize(imgStack8bit), 8, 1);
	imgBlue = cvCreateImage(cvGetSize(imgStack8bit), 8, 1);

	cvSplit(imgStack8bit, imgBlue, imgGreen, imgRed, NULL);

	cvCalcHist(&imgRed, hist, 0, 0);
	imgHistRed = DrawHistogram(hist,1,1);
	cvClearHist(hist);

	cvCalcHist(&imgGreen, hist, 0, 0);
	imgHistGreen = DrawHistogram(hist,1,1);
	cvClearHist(hist);
 
	cvCalcHist(&imgBlue, hist, 0, 0);
	imgHistBlue = DrawHistogram(hist,1,1);
 	cvClearHist(hist);
	
	//Display the histogram images
	UpdateRedHistImage(imgHistRed);
	UpdateGreenHistImage(imgHistGreen);	 
	UpdateBlueHistImage(imgHistBlue);	 

	//Update the main image with the scaled stack image
	UpdateMainImage(imgStack8bit);	 

		 
	//Enter the GUI main
	gtk_main();
}
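
DrawHistogram is defined elsewhere in this project; for reference, a minimal sketch of what such a helper can look like, drawing a 256-bin 1-D histogram as filled bars (the real implementation may differ):

/* illustrative only: render a 1-D histogram into a new 8-bit image */
IplImage* DrawHistogramSketch(CvHistogram* hist, int scaleX, int scaleY)
{
	float max = 0.f;
	cvGetMinMaxHistValue(hist, 0, &max, 0, 0);

	IplImage* img = cvCreateImage(cvSize(256*scaleX, 64*scaleY), 8, 1);
	cvZero(img);
	for (int i = 0; i < 256; i++) {
		float v = cvQueryHistValue_1D(hist, i);
		int h = max > 0 ? cvRound(v/max * (img->height - 1)) : 0;
		cvRectangle(img,
		            cvPoint(i*scaleX, img->height - 1),
		            cvPoint((i+1)*scaleX - 1, img->height - 1 - h),
		            cvScalarAll(255), CV_FILLED, 8, 0);
	}
	return img;
}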