コード例 #1
0
ファイル: TamatarVision.cpp プロジェクト: etoy/TamatarTracker
// Grab the next camera frame and, when a new one arrived, run the configured
// preprocessing pipeline on it: grayscale conversion, then optional histogram
// equalization, morphological opening, Gaussian smoothing, thresholding and
// Canny edges, followed by optional Hough-circle and contour extraction.
// All toggles (doHistEqualize, doMorphEx, ...) and images are class members.
void TamatarVision::update() {
    vidGrabber.grabFrame();
    if (vidGrabber.isFrameNew()) {
        // load image from videograbber
        colorImg.setFromPixels(vidGrabber.getPixels(), camWidth, camHeight);
        // convert to grayscale
        cvCvtColor( colorImg.getCvImage(), grayImg.getCvImage(), CV_RGB2GRAY );
        grayImg.flagImageChanged();
        
        // equalize histogram
        if (doHistEqualize) {
            cvEqualizeHist(grayImg.getCvImage(), grayImg.getCvImage() );
        }
        
        // `morphological opening`
        if (doMorphEx) {
            int anchor = morphExRadius / 2;
            // NOTE(review): the structuring element created here is never
            // released (no cvReleaseStructuringElement anywhere in view), so
            // this leaks once per frame while doMorphEx is on — confirm and
            // release the previous element before creating a new one.
            structure = cvCreateStructuringElementEx(morphExRadius, morphExRadius, anchor, anchor, CV_SHAPE_ELLIPSE);
            cvCopy(grayImg.getCvImage(), grayImg2.getCvImage());
            cvMorphologyEx(grayImg2.getCvImage(), grayImg.getCvImage(), NULL, structure, CV_MOP_OPEN);
        }
        
        if (doSmoothing) {
            //grayImg2 = grayImg;
            //smoothSigmaColor=20;
            //smoothSigmaSpatial=20;
            //cvSmooth(grayImg2.getCvImage(), grayImg.getCvImage(), CV_BILATERAL, 9, 9, smoothSigmaColor, smoothSigmaSpatial);
            cvSmooth(grayImg.getCvImage(), grayImg.getCvImage(), CV_GAUSSIAN, 3, 3, 2, 2);
        }
        
        //grayImg.threshold(120);
        
        // threshold (TOZERO: pixels below `threshold` become 0, others kept)
        if (doThreshold) {
            //            grayImg.threshold(threshold);
            grayImg2 = grayImg;
            cvThreshold(grayImg2.getCvImage(), grayImg.getCvImage(), threshold, thresholdMax, CV_THRESH_TOZERO);
            //   cvAdaptiveThreshold(grayImg2.getCvImage(), grayImg.getCvImage(), threshold, CV_ADAPTIVE_THRESH_MEAN_C, CV_THRESH_, 3, 5);
        }
        
        if (doCanny) {
            cvCanny(grayImg.getCvImage(), grayImg.getCvImage(), cannyThres1, cannyThres2, 3);
        }
        
        //cvCanny5grayImg.getCvImage(),grayImg.getCvImage(), 120, 180, 3);
        //cvSobel(grayImg.getCvImage(), grayImg.getCvImage(), 1, 1, 3);
        if (doCircles) {
            // NOTE(review): a fresh CvMemStorage is created every frame and
            // never released; `circles` points into it, so the previous
            // frame's storage leaks here — verify and release when safe.
            CvMemStorage* storage = cvCreateMemStorage(0);
            circles = cvHoughCircles(grayImg.getCvImage(), storage, CV_HOUGH_GRADIENT, 2, grayImg.getHeight()/4, circleEdgeThres, circleAccThres, circleMinRadius, circleMaxRadius);
        }
        
        if (doContours) {
            contourFinder.findContours(grayImg, 10, (camWidth*camHeight)/2, 20, false, true);
        }
    }        
}
コード例 #2
0
// Detect faces in srcImg and store the largest face's bounding box (scaled
// back to source-image coordinates) in roi.
//
// Returns false when no face is detected. Relies on the file-scope
// `storage` and `cascade` globals being initialised by the caller.
bool detect_and_draw( IplImage* srcImg, CvRect& roi)
{
    double scale = 1.1;
    IplImage* gray = cvCreateImage( cvSize(srcImg->width,srcImg->height), 8, 1 );
    IplImage* small_img = cvCreateImage( cvSize( cvRound (srcImg->width/scale),
                                         cvRound (srcImg->height/scale)),
                                         8, 1 );
    bool found = true;

    cvCvtColor( srcImg, gray, CV_BGR2GRAY );
    cvResize( gray, small_img, CV_INTER_LINEAR );
    cvEqualizeHist( small_img, small_img );
    cvClearMemStorage( storage );

    if( cascade )
    {
        CvSeq* faces = cvHaarDetectObjects( small_img, cascade, storage,
                                            1.1, 2, 0/*CV_HAAR_DO_CANNY_PRUNING*/,
                                            cvSize(20, 20) );

        int index = 0;
        if (faces->total == 0)
        {
            // BUG FIX: the original returned false here without releasing
            // gray/small_img, leaking both images on every failed call.
            printf("\n--Error with the face image: no face detected!\n");
            found = false;
        }
        else
        {
            if (faces->total > 1)
            {
                printf("\n--Warning with the face image: more than one face detected!\n");

                // keep the detection with the largest area
                double area = 0.0;
                for (int i = 0; i < faces->total; i++)
                {
                    CvRect* tr = (CvRect*)cvGetSeqElem( faces, i );
                    if(tr->height*tr->width > area)
                    {
                        area = tr->height*tr->width;
                        index = i;
                    }
                }
            }
            // map the detection back into source-image coordinates
            CvRect* r = (CvRect*)cvGetSeqElem( faces, index );
            roi.x = r->x*scale;
            roi.y = r->y*scale;
            roi.width = r->width*scale;
            roi.height = r->height*scale;
        }
    }
    cvReleaseImage( &gray );
    cvReleaseImage( &small_img );
    return found;
}
コード例 #3
0
ファイル: MEHistogram.cpp プロジェクト: panpeter90/bgslibuse
// Equalize the histogram of `image` in place. Grayscale images are
// equalized directly; RGB images are split into planes, each plane is
// equalized independently, and the planes are merged back. Other layer
// counts are left untouched.
void MEHistogramTransform::HistogramEqualize(MEImage& image)
{
  DiscreteStretchingDone = false;

  const int layers = image.GetLayers();
  const CvSize size = cvSize(image.GetWidth(), image.GetHeight());

  if (layers == 1)
  {
    // Grayscale: equalize into a fresh 8-bit buffer, then hand it back.
    IplImage* equalized = cvCreateImage(size, 8, 1);
    cvEqualizeHist((IplImage*)image.GetIplImage(), equalized);
    image.SetIplImage((void*)equalized);
    cvReleaseImage(&equalized);
  }
  else if (layers == 3)
  {
    // RGB: per-plane equalization.
    IplImage* merged = cvCreateImage(size, 8, 3);
    IplImage* plane1 = cvCreateImage(size, 8, 1);
    IplImage* plane2 = cvCreateImage(size, 8, 1);
    IplImage* plane3 = cvCreateImage(size, 8, 1);

    cvSplit((IplImage*)image.GetIplImage(), plane1, plane2, plane3, NULL);
    cvEqualizeHist(plane1, plane1);
    cvEqualizeHist(plane2, plane2);
    cvEqualizeHist(plane3, plane3);
    cvMerge(plane1, plane2, plane3, NULL, merged);

    image.SetIplImage((void*)merged);
    cvReleaseImage(&plane1);
    cvReleaseImage(&plane2);
    cvReleaseImage(&plane3);
    cvReleaseImage(&merged);
  }
}
コード例 #4
0
ファイル: porndetect.c プロジェクト: nuarlyss/porndetect
// Run the global Haar cascade over `img`, fill every detection with a
// colored circle, and (when muncul == 1) show the annotated image.
void detect_and_draw( IplImage* img, int muncul )
{
    // Palette cycled through when marking detections.
    static CvScalar colors[] = 
    {
        {{0,0,255}},
        {{0,128,255}},
        {{0,255,255}},
        {{0,255,0}},
        {{255,128,0}},
        {{255,255,0}},
        {{255,0,0}},
        {{255,0,255}}
    };

    double scale = 1.3;

    // Downscaled, equalized grayscale working copy for the detector.
    IplImage* gray = cvCreateImage( cvSize(img->width,img->height), 8, 1 );
    IplImage* small_img = cvCreateImage( cvSize( cvRound (img->width/scale),
                                                 cvRound (img->height/scale)), 8, 1 );
    cvCvtColor( img, gray, CV_BGR2GRAY );
    cvResize( gray, small_img, CV_INTER_LINEAR );
    cvEqualizeHist( small_img, small_img );
    cvClearMemStorage( storage );

    if( cascade )
    {
        double t = (double)cvGetTickCount();
        CvSeq* faces = cvHaarDetectObjects( small_img, cascade, storage,
                                            1.1, 2, 0/*CV_HAAR_DO_CANNY_PRUNING*/,
                                            cvSize(30, 30) );
        t = (double)cvGetTickCount() - t;
        printf( "%s detected area = %d\n", input_name, faces->total);

        int total = faces ? faces->total : 0;
        for( int i = 0; i < total; i++ )
        {
            // Scale the detection back up and fill it with a circle.
            CvRect* r = (CvRect*)cvGetSeqElem( faces, i );
            CvPoint center = cvPoint(cvRound((r->x + r->width*0.5)*scale),
                                     cvRound((r->y + r->height*0.5)*scale));
            int radius = cvRound((r->width + r->height)*0.25*scale);
            cvCircle( img, center, radius, colors[i%8], CV_FILLED, 8, 0 );
        }
    }

    if( muncul == 1 )
        cvShowImage( "result", img );

    cvReleaseImage( &gray );
    cvReleaseImage( &small_img );
}
コード例 #5
0
ファイル: main.cpp プロジェクト: MiaoChaoran/shiyan
// Detect objects in `img` with the global cascade, outline each detection
// with a rectangle and a circle, and display the result.
void detect_and_draw(IplImage* img )
{
    double scale=1.2;

    // Palette used when drawing detections.
    static CvScalar colors[] = {
        {{0,0,255}},{{0,128,255}},{{0,255,255}},{{0,255,0}},
        {{255,128,0}},{{255,255,0}},{{255,0,0}},{{255,0,255}}
    };

    // Build a downscaled, histogram-equalized grayscale copy for the detector.
    IplImage* gray = cvCreateImage(cvSize(img->width,img->height),8,1);
    IplImage* small_img = cvCreateImage(cvSize(cvRound(img->width/scale),
                                               cvRound(img->height/scale)),8,1);
    cvCvtColor(img, gray, CV_BGR2GRAY);
    cvResize(gray, small_img, CV_INTER_LINEAR);
    cvEqualizeHist(small_img, small_img); // histogram equalization

    // Run the detector and time it.
    cvClearMemStorage(storage);
    double t = (double)cvGetTickCount();
    CvSeq* objects = cvHaarDetectObjects(small_img,
                                         cascade,
                                         storage,
                                         1.1,
                                         2,
                                         0/*CV_HAAR_DO_CANNY_PRUNING*/,
                                         cvSize(30,30));
    t = (double)cvGetTickCount() - t;
    printf( "detection time = %gms\n", t/((double)cvGetTickFrequency()*1000.) );

    int total = objects ? objects->total : 0;

    // Pass 1: rectangles, scaled back to the original image.
    for( int i = 0; i < total; ++i )
    {
        CvRect* r = (CvRect*)cvGetSeqElem(objects, i);
        cvRectangle(img, cvPoint(r->x*scale, r->y*scale),
                    cvPoint((r->x+r->width)*scale, (r->y+r->height)*scale),
                    colors[i%8]);
    }

    // Pass 2: circles centered on each detection.
    for( int i = 0; i < total; ++i )
    {
        CvRect* r = (CvRect*)cvGetSeqElem(objects, i);
        CvPoint center = cvPoint(cvRound((r->x + r->width*0.5)*scale),
                                 cvRound((r->y + r->height*0.5)*scale));
        int radius = cvRound((r->width + r->height)*0.25*scale);
        cvCircle( img, center, radius, colors[i%8], 3, 8, 0 );
    }

    cvShowImage( "result", img );
    cvReleaseImage(&gray);
    cvReleaseImage(&small_img);
}
コード例 #6
0
ファイル: facevue.cpp プロジェクト: VIVAlab/FaceVue
// Return the regions of all detected faces. `frame` is converted to an
// equalized grayscale image in `input` (caller-provided buffer), which is
// then fed to the detection model. Also resets the tracked-face index.
vector<Rect> FaceVue::detect_ALLFacesROI(const IplImage *frame,IplImage* input)
{
    std::vector<Rect> detections;

    // Grayscale + histogram equalization before running the detector.
    cvCvtColor(frame, input, CV_RGB2GRAY);
    cvEqualizeHist(input, input);

    // NOTE(review): 1.25 = scale step, 2 = min neighbours,
    // Size(30,30) = minimum object size — confirm against the model's API.
    detection_Model->detectMultiScale( input, detections, 1.25, 2, Size(30, 30) );

    target_Face->index = -1;

    return detections;
}
コード例 #7
0
ファイル: adaboostDetect.cpp プロジェクト: Riseley/Drone
// Detect objects in `img`, draw a green rectangle around each, and return
// the number found. `*regions` receives a malloc'd array of the detected
// rectangles (caller frees), or NULL when nothing was detected.
int adaboostDetect::detectAndDraw(IplImage* img, CvRect** regions) {
    double t = (double) cvGetTickCount();

    // Downscaled, equalized grayscale working copy for the detector.
    IplImage* gray = cvCreateImage(cvSize(img->width, img->height), 8, 1);
    IplImage* smallImg = cvCreateImage( cvSize( cvRound (img->width/scaleFactor),
                                               cvRound (img->height/scaleFactor)), 8, 1 );
    cvCvtColor(img, gray, CV_BGR2GRAY);
    cvResize(gray, smallImg, CV_INTER_LINEAR);
    cvEqualizeHist(smallImg, smallImg);
    cvClearMemStorage(storage);

    // BUG FIX: nR was uninitialized, so with zero detections *regions
    // received a garbage pointer (undefined behavior for the caller).
    // realloc(NULL, n) acts as malloc, collapsing the malloc/realloc
    // special-casing into one growth path.
    CvRect* nR = NULL;
    int fii = 0;

    if (!cascade) {
        // BUG FIX: this early return used to leak gray/smallImg and left
        // *regions untouched.
        *regions = NULL;
        cvReleaseImage(&gray);
        cvReleaseImage(&smallImg);
        return 0;
    }

    CvSeq* faces = cvHaarDetectObjects( smallImg, cascade, storage, scaleFactor, minNeighbours, flags, minSize);
    for (int i=0; i<(faces ? faces->total : 0); i++) {
        CvRect* grown = (CvRect*) realloc(nR, (i+1) * sizeof(CvRect));
        if (!grown) break;              // out of memory: keep what we have
        nR = grown;

        CvRect* r = (CvRect*) cvGetSeqElem(faces, i);
        // Scale the detection back to source-image coordinates,
        // record it, and draw its outline.
        int nx1 = cvRound(r->x * scaleFactor);
        int ny1 = cvRound(r->y * scaleFactor);
        int nx2 = cvRound((r->x + r->width) * scaleFactor);
        int ny2 = cvRound((r->y + r->height) * scaleFactor);
        nR[fii] = cvRect(nx1, ny1, nx2-nx1, ny2-ny1);
        cvRectangle(img, cvPoint(nx1, ny1), cvPoint(nx2, ny2), CV_RGB(0, 255, 0));
        fii++;
    }

    *regions = nR;

    cvShowImage("result", img);
    cvReleaseImage(&gray);
    cvReleaseImage(&smallImg);
    t = (double) cvGetTickCount() - t;
    printf( "detection time = %gms\n", t/((double)cvGetTickFrequency()*1000.) );
    return fii;
}
コード例 #8
0
// Render src's distance map to grayscale in dst: project the distance to an
// RGB image, blur it, collapse to one channel, then (per build flavor)
// equalize on desktop or binarize at `minDistance` on mobile.
int maskByDistance2Grayscale(IplImage *src, IplImage *dst, int minDistance) {
	// Distance map rendered as RGB, then smoothed before the collapse.
	IplImage *distanceRgb = cvCreateImage(cvGetSize(src), src->depth, 3);
	ocvDistance2Grayscale(src, distanceRgb);
	cvSmooth(distanceRgb, distanceRgb, CV_GAUSSIAN, 9, 0, 0, 0);
	cvCvtColor(distanceRgb, dst, CV_RGB2GRAY);
	cvReleaseImage(&distanceRgb);
	#if DIP_DESKTOP
	cvEqualizeHist(dst, dst);
	#endif
	#if DIP_MOBILE
	cvThreshold(dst, dst, minDistance, 255, CV_THRESH_BINARY);
	#endif
	return 0;
}
コード例 #9
0
// Crop `face` out of `img`, scale it to the canonical 100x100 patch,
// convert to grayscale, and equalize. Returns a newly allocated image
// that the caller must release.
IplImage* preprocessFace(IplImage *img, CvRect *face) {
  // Crop the face region and scale it to 100x100.
  cvSetImageROI(img, *face);
  IplImage *cropped = cvCreateImage(cvSize(100,100), img->depth, img->nChannels);
  cvResize(img, cropped);
  cvResetImageROI(img);

  // Collapse to a single channel and normalize the contrast.
  IplImage *normalized = cvCreateImage(cvGetSize(cropped), IPL_DEPTH_8U, 1);
  cvCvtColor(cropped, normalized, CV_BGR2GRAY);
  cvEqualizeHist(normalized, normalized);

  cvReleaseImage(&cropped);
  return normalized;
}
コード例 #10
0
ファイル: Tracker.cpp プロジェクト: caomw/MILTracker1.01
// Initialize the tracker state from the last acceptable face detection in
// `frame`. Fills params->_initstate with {x, y, width, height+10}.
// Returns false when the cascade cannot be loaded or no usable face is found.
bool			Tracker::initFace(TrackerParams* params, Matrixu &frame)
{
	const char* cascade_name = "haarcascade_frontalface_alt_tree.xml";
	const int minsz = 20;
	if( Tracker::facecascade == NULL )
		Tracker::facecascade = (CvHaarClassifierCascade*)cvLoad( cascade_name, 0, 0, 0 );
	// Guard against a missing cascade file: cvHaarDetectObjects would
	// otherwise be handed a NULL cascade.
	if( Tracker::facecascade == NULL )
		return false;

	frame.createIpl();
	IplImage *img = frame.getIpl();
	IplImage* gray = cvCreateImage( cvSize(img->width, img->height), IPL_DEPTH_8U, 1 );
	cvCvtColor(img, gray, CV_BGR2GRAY );
	frame.freeIpl();
	cvEqualizeHist(gray, gray);

	CvMemStorage* storage = cvCreateMemStorage(0);
	cvClearMemStorage(storage);
	CvSeq* faces = cvHaarDetectObjects(gray, Tracker::facecascade, storage, 1.05, 3, CV_HAAR_DO_CANNY_PRUNING ,cvSize(minsz, minsz));

	// Walk from the last detection backwards until one fits inside the frame.
	// BUG FIX: the original decremented the index with no lower bound;
	// cvGetSeqElem interprets negative indices as counting from the end, so
	// rejecting every candidate could spin forever over wrapped elements.
	int index = faces->total - 1;
	CvRect* r = (index >= 0) ? (CvRect*)cvGetSeqElem( faces, index ) : NULL;
	while( r && (r->width<minsz || r->height<minsz || (r->y+r->height+10)>frame.rows() || (r->x+r->width)>frame.cols() ||
		r->y<0 || r->x<0) ){
		--index;
		r = (index >= 0) ? (CvRect*)cvGetSeqElem( faces, index ) : NULL;
	}

	bool found = (r != NULL);
	if( found ){
		params->_initstate.resize(4);
		params->_initstate[0]	= (float)r->x;
		params->_initstate[1]	= (float)r->y;
		params->_initstate[2]	= (float)r->width;
		params->_initstate[3]	= (float)r->height+10;
	}

	// BUG FIX: both the scratch image and the detection storage were leaked
	// on every call in the original.
	cvReleaseImage(&gray);
	cvReleaseMemStorage(&storage);

	return found;
}
コード例 #11
0
// Detect the first face in `img` and return a newly allocated region
// describing it in full-resolution coordinates (caller owns it), or NULL
// when nothing is found. In debug mode the region is also drawn on `img`.
CybRegionTrackInfo *CybHaarTracker::detect(IplImage *img) {

	const double scale = 1.3;
	CybRegionTrackInfo *region = NULL;

	// Downscaled, equalized grayscale copy for the detector.
	IplImage* grayFull = cvCreateImage(cvSize(img->width, img->height), 8, 1);
	IplImage* grayScaled = cvCreateImage(cvSize(cvRound(img->width/scale),
			cvRound(img->height/scale)), 8, 1);

	cvCvtColor(img, grayFull, CV_BGR2GRAY);
	cvResize(grayFull, grayScaled, CV_INTER_LINEAR);
	cvEqualizeHist(grayScaled, grayScaled);
	cvClearMemStorage(storage);

	if (cascade) {
		double t = (double)cvGetTickCount();
		CvSeq* faces = cvHaarDetectObjects(grayScaled, cascade, storage, 1.1, 2,
				0/*CV_HAAR_DO_CANNY_PRUNING*/, cvSize(30, 30) );
		t = (double)cvGetTickCount() - t;

		if (faces && (faces->total > 0)) {
			// Only the first detection is tracked; map it back to
			// full-resolution coordinates.
			CvRect* r = (CvRect*)cvGetSeqElem(faces, 0);
			region = new CybRegionTrackInfo((int)((r->x)*scale),
					(int)((r->x + r->width)*scale),
					(int)((r->y)*scale),
					(int)((r->y + r->height)*scale));

			if (dbg_mode) {
				// Mark the region center and outline for debugging.
				int centerX = (region->getMaxX() + region->getMinX())/2;
				int centerY = (region->getMaxY() + region->getMinY())/2;
				cvLine(img, cvPoint(centerX, centerY), cvPoint(centerX, centerY), 
				CV_RGB(50, 50 , 50), 4, 8, 0);
				cvRectangle(img, cvPoint((int)(region->getMinX()),
						(int)(region->getMinY())), cvPoint(
						(int)(region->getMaxX()), (int) (region->getMaxY())), 
				CV_RGB(150, 150, 150), 2, 8, 0);
			}
		}
	}

	cvReleaseImage( &grayFull);
	cvReleaseImage( &grayScaled);

	return region;
}
コード例 #12
0
ファイル: camera.c プロジェクト: electricface/dde
/* Detect the biggest face in `frame`. A face must stay visible for at least
 * recognition_info.DELAY_TIME seconds (tracked via a GTimer) before the
 * detections are drawn and the recognizer is started. */
static void detect(IplImage* frame)
{
    double const scale = 1.3;

    IplImage* gray = cvCreateImage(cvSize(frame->width, frame->height), 8, 1);
    IplImage* small_img = cvCreateImage(cvSize(cvRound(frame->width/scale),
                                               cvRound(frame->height/scale)),
                                        8, 1);
    cvCvtColor(frame, gray, CV_RGB2GRAY);
    cvResize(gray, small_img, CV_INTER_LINEAR);
    cvEqualizeHist(small_img, small_img);

    /* Lazily create the shared detector state. */
    if (storage == NULL)
        storage = cvCreateMemStorage(0);

    if (cascade == NULL)
        cascade = (CvHaarClassifierCascade*)cvLoad(CASCADE_NAME, 0, 0, 0);

    cvClearMemStorage(storage);
    CvSeq* objects = cvHaarDetectObjects(small_img, cascade, storage, scale,
                                         3, 0
                                         | CV_HAAR_FIND_BIGGEST_OBJECT
                                         , cvSize(0, 0), cvSize(0, 0));

    if (objects == NULL || objects->total == 0) {
        /* Nothing found: restart the "face continuously present" timer. */
        g_timer_start(recognition_info.timer);
    } else {
        g_timer_stop(recognition_info.timer);
        double held = g_timer_elapsed(recognition_info.timer, NULL);

        /* Only act once the face has been visible long enough. */
        if (held >= recognition_info.DELAY_TIME) {
            for (int i = 0; i < objects->total; ++i) {
                CvRect* r = (CvRect*)cvGetSeqElem(objects, i);
                cvRectangle(frame, cvPoint(r->x * scale, r->y * scale),
                            cvPoint((r->x + r->width) * scale, (r->y + r->height) * scale),
                            cvScalar(0, 0xff, 0xff, 0), 4, 8, 0);
            }

            recognition_info.reco_state = START_RECOGNIZING;
        }
    }

    cvReleaseImage(&gray);
    cvReleaseImage(&small_img);
}
コード例 #13
0
//////////////////////////////////
// loadFaceImgArray()
//
int EigenFace::loadFaceImgArray(char * filename)
{
	FILE * imgListFile = 0;
	char imgFilename[512];
	int iFace, nFaces=0;
  
  
	// open the input file
	if( !(imgListFile = fopen(filename, "r")) )
	{
		LOGE("Can\'t open file %s\n", filename);
		return 0;
	}
  
	// count the number of faces
	while( fgets(imgFilename, 512, imgListFile) ) ++nFaces;
	rewind(imgListFile);
  
	// allocate the face-image array and person number matrix
	faceImgArr        = (IplImage **)cvAlloc( nFaces*sizeof(IplImage *) );
	personNumTruthMat = cvCreateMat( 1, nFaces, CV_32SC1 );
  
	// store the face images in an array
	for(iFace=0; iFace<nFaces; iFace++)
	{
		// read person number and name of image file
		fscanf(imgListFile,
           "%d %s", personNumTruthMat->data.i+iFace, imgFilename);
    
		// load the face image
    LOGD("load from %s", imgFilename);
    IplImage* tmpImage = cvLoadImage(imgFilename, CV_LOAD_IMAGE_GRAYSCALE);
    faceImgArr[iFace] = cvCloneImage(tmpImage);
    cvEqualizeHist(tmpImage, faceImgArr[iFace]);
    
		if( !faceImgArr[iFace] )
		{
			LOGE("Can\'t load image from %s\n", imgFilename);
			return 0;
		}
	}
  
	fclose(imgListFile);
  
	return nFaces;
}
コード例 #14
0
// Detect frontal faces in `src`. On success, *rects receives a calloc'd,
// zero-terminated array of face rectangles scaled to source coordinates
// (caller frees) and the detection time in ms is written to *t.
// Returns the number of faces found, or -1 when the cascade fails to load.
int findFaces(CvArr* src, CvRect **rects, double scale, double *t)
{
	CvSize srcSize = cvGetSize(src);
	IplImage *gray = cvCreateImage(srcSize, IPL_DEPTH_8U, 1);
	cvCvtColor(src, gray, CV_BGR2GRAY);

	// Shrink by 1/scale for the detector.
	double fx = 1 / scale;
	srcSize.width = (int)cvRound(fx * srcSize.width);
	srcSize.height = (int)cvRound(fx * srcSize.height);

	IplImage *smallImg = cvCreateImage(srcSize, IPL_DEPTH_8U, 1);
	cvResize(gray, smallImg, CV_INTER_LINEAR);
	cvReleaseImage(&gray);

	cvEqualizeHist(smallImg, smallImg);

	char frontalFaceFile[] = RESOURCES "haarcascade_frontalface_alt.xml";

	cv::CascadeClassifier cascade;
	if (!cascade.load(frontalFaceFile)) {
		printf("ERROR: Could not load classifier cascade\r");
		cvReleaseImage(&smallImg);   // BUG FIX: was leaked on this error path
		return -1;
	}
	std::vector<cv::Rect> faces;
	*t = (double)cvGetTickCount();
	cascade.detectMultiScale(smallImg, faces,
		1.1, 2, 0
		//|CASCADE_FIND_BIGGEST_OBJECT
		//|CASCADE_DO_ROUGH_SEARCH
		| cv::CASCADE_SCALE_IMAGE,
		cv::Size(30, 30));

	*t = (double)cvGetTickCount() - *t;
	*t /= ((double)cvGetTickFrequency() * 1000);

	// BUG FIX: smallImg was never released in the original.
	cvReleaseImage(&smallImg);

	// Extra zeroed slot terminates the array.
	*rects = (CvRect *)calloc(faces.size() + 1, sizeof(CvRect));
	for (size_t i = 0; i < faces.size(); i++) {
		CvRect face = faces[i];
		face = cvRect(cvRound(face.x * scale), cvRound(face.y * scale), cvRound(face.width * scale), cvRound(face.height * scale));
		(*rects)[i] = face;
	}

	return (int)faces.size();
}
コード例 #15
0
// Display loop: repeatedly detect faces in `img`, draw an ellipse around
// each, and show the frame until ESC (key 27) is pressed.
void display(IplImage& img) {
    do {
      std::vector<Rect> faces;

      // BUG FIX: `grey` was an uninitialized pointer handed straight to
      // cvCvtColor (undefined behavior). Allocate a real single-channel
      // image and release it each iteration.
      IplImage* grey = cvCreateImage(cvGetSize(&img), IPL_DEPTH_8U, 1);
      cvCvtColor(&img, grey, CV_BGR2GRAY);
      cvEqualizeHist(grey, grey);

      // The C++ cascade API expects a cv::Mat, so wrap the IplImage
      // headers without copying pixel data.
      faceCascade.detectMultiScale(cv::cvarrToMat(grey), faces, 1.1, 5, 0|CV_HAAR_SCALE_IMAGE, Size(30, 30));

      cv::Mat imgMat = cv::cvarrToMat(&img);
      for (size_t i = 0; i < faces.size(); i++) {
        Point center( faces[i].x + faces[i].width*0.5, faces[i].y + faces[i].height*0.5 );
        ellipse( imgMat, center, Size( faces[i].width*0.5, faces[i].height*0.5), 0, 0, 360, Scalar( 255, 0, 255 ), 4, 8, 0 );
      }

      cvReleaseImage(&grey);
      cvShowImage("Display", &img);
      pressedKey = cvWaitKey(1);
    } while (pressedKey != 27);
}
コード例 #16
0
ファイル: teste.cpp プロジェクト: RenatoPoulicer/HR
int teste()
{
	int i, c;
	IplImage *src_img = 0, *src_gray = 0;
	const char *cascade_name = "/opt/local/share/opencv/haarcascades/haarcascade_frontalface_default.xml";
	CvHaarClassifierCascade *cascade = 0;
	CvMemStorage *storage = 0;
	CvSeq *faces;

	cascade = (CvHaarClassifierCascade *)cvLoad(cascade_name, 0, 0, 0);
	cvNamedWindow("Capture", CV_WINDOW_AUTOSIZE);
	CvCapture *capture = cvCreateCameraCapture(0);
	assert(capture != NULL);

	while (1) {
		src_img = cvQueryFrame(capture);
		src_gray = cvCreateImage(cvGetSize(src_img), IPL_DEPTH_8U, 1);

		storage = cvCreateMemStorage(0);
		cvClearMemStorage(storage);
		cvCvtColor(src_img, src_gray, CV_BGR2GRAY);
		cvEqualizeHist(src_gray, src_gray);

		faces = cvHaarDetectObjects(src_gray, cascade, storage,
			1.11, 4, 0, cvSize(40, 40));
		for (i = 0; i < (faces ? faces->total : 0); i++) {
			CvRect *r = (CvRect *)cvGetSeqElem(faces, i);
			doMosaic(src_img, r->x, r->y, r->width, r->height, 20);
		}

		cvShowImage("Capture", src_img);
		cvReleaseImage(&src_gray);

		c = cvWaitKey(2);
		if (c == '\x1b')
			break;
	}

	cvReleaseCapture(&capture);
	cvDestroyWindow("Capture");

	return 0;
}
コード例 #17
0
ファイル: camera.cpp プロジェクト: JackFan-Z/MetroVision
/** Detects and draws The Objects that have been defined in the .xml
 	cascade file.
 */
// Detect the objects described by the loaded cascade in `img` and outline
// each one with a colored rectangle drawn directly on `img`.
void Camera::detectAndDrawHaar(IplImage* img, double scale){
    // Color palette cycled through for successive detections.
    static CvScalar colors[] = {
        {{0,0,255}}, {{0,128,255}}, {{0,255,255}}, {{0,255,0}},
        {{255,128,0}}, {{255,255,0}}, {{255,0,0}}, {{255,0,255}} 
    };

    // The detector wants a single-channel, downscaled, equalized input.
    IplImage* gray = cvCreateImage( cvSize(img->width, img->height), 8,1);
    IplImage* small_img = cvCreateImage( cvSize( cvRound(img->width/scale),
                                                 cvRound(img->height/scale)), 8, 1);
    cvCvtColor( img, gray, CV_BGR2GRAY);
    cvResize( gray, small_img, CV_INTER_LINEAR);
    cvEqualizeHist( small_img, small_img);

    // Run the cascade over the prepared image.
    cvClearMemStorage(storageCascade);
    CvSeq* objects = cvHaarDetectObjects(small_img, cascade, storageCascade,
                                         1.1, 2, 0, cvSize(30,30));

    // Outline every detection on the original image.
    int found = objects ? objects->total : 0;
    for( int i = 0; i < found; i++){
        CvRect* r = (CvRect*) cvGetSeqElem( objects, i);
        cvRectangle(img,
                    cvPoint(r->x, r->y),
                    cvPoint(r->x + r->width, r->y + r->height),
                    colors[i%8]);
    }

    cvReleaseImage( &gray);
    cvReleaseImage( &small_img);
}
コード例 #18
0
ファイル: testdetecet.cpp プロジェクト: xiaoque/Qrobot
// Detect faces in `img` with the supplied cascade, draw each via draw(),
// mark the frame center with a circle, and display the annotated image.
void detect( IplImage* img ,CvMemStorage* storage,CvHaarClassifierCascade* cascade)
{
    double scale = 2;

    // Downscaled grayscale working copy for the detector.
    IplImage* gray = cvCreateImage( cvSize(img->width,img->height), IPL_DEPTH_8U, 1 );
    IplImage* small_img = cvCreateImage( cvSize( cvRound (img->width/scale),
                                                 cvRound (img->height/scale)),
                                         IPL_DEPTH_8U, 1 );

    cvCvtColor( img, gray, CV_BGR2GRAY );
    cvResize( gray, small_img, CV_INTER_LINEAR );
    cvEqualizeHist( small_img, small_img );
    //cvShowImage("small",small_img);
    cvClearMemStorage( storage );

    // Mark the center of the frame with a fixed-radius circle.
    CvPoint frame_center = cvPoint(cvRound(img->width*0.5),
                                   cvRound(img->height*0.5));
    cvCircle( img, frame_center, 64, cvScalar(255,0,0), 2, 8, 0 );

    if( cascade )
    {
        double t = (double)cvGetTickCount();
        CvSeq* faces = cvHaarDetectObjects( small_img, cascade, storage,
                                            1.3, 1, CV_HAAR_DO_CANNY_PRUNING,
                                            cvSize(30, 30) );

        show_detecttime(t);

        int total = faces ? faces->total : 0;
        for( int i = 0; i < total; i++ )
            draw( img, faces, i, scale );
    }

    cvShowImage( "result", img );
    cvReleaseImage( &gray );
    cvReleaseImage( &small_img );
}
コード例 #19
0
// Detect a face in `src` and store a 100x100 single-channel, histogram-
// equalized version of it in *dest (any previous *dest is released).
// Throws std::string on failure: no face found, or image allocation failed.
void PreProcess( IplImage* src, IplImage** dest )
{
    if ( *dest )
        cvReleaseImage(dest);

    // BUG FIX: the detector was allocated with `new` and never deleted,
    // leaking on every call (and on every thrown exception). A stack
    // instance gives identical behavior with automatic cleanup; the
    // original catch(...){throw;} was a no-op and is dropped.
    FaceDetector fd(src, false);
    fd.Detect(true);

    if ( fd.GetFaceVec().empty() )
        throw std::string("FaceDetector could not find face");

    // get the first face from the face detector
    IplImage* face = fd.GetFaceVec()[0];

    const int width = 100;
    const int height = 100;

    *dest = cvCreateImage(cvSize(width, height), src->depth, 1);
    if ( !*dest )
        throw std::string("PreProcess could not create dest image");

    // NOTE(review): the channel test inspects `src` but converts `face`;
    // this mirrors the original logic — confirm it is intentional.
    if ( src->nChannels != 1 )
        ConvertToGreyScale(face, face);

    Resize(face, *dest);

    // do histogram equalization on the found face
    cvEqualizeHist(*dest, *dest);
}
コード例 #20
0
ファイル: adaboostDetect.cpp プロジェクト: Riseley/Drone
// Detect objects in `img` and return how many passed the maxSize filter.
// *regions receives a malloc'd array of the accepted rectangles scaled to
// source coordinates (caller frees), or NULL when nothing was accepted.
int adaboostDetect::detectObject(IplImage* img, CvRect** regions) {
    int nHeads = 0;

    // Downscaled, equalized grayscale working copy for the detector.
    IplImage* gray = cvCreateImage( cvSize(img->width,img->height), 8, 1 );
    IplImage* smallImg = cvCreateImage( cvSize( cvRound (img->width/scaleFactor),
                                                cvRound (img->height/scaleFactor)), 8, 1 );
    cvCvtColor(img, gray, CV_BGR2GRAY);
    cvResize(gray, smallImg, CV_INTER_LINEAR);
    cvEqualizeHist(smallImg, smallImg);
    cvClearMemStorage(storage);

    // BUG FIX: nR was uninitialized, so with zero accepted detections
    // *regions received a garbage pointer (undefined behavior for the
    // caller). realloc(NULL, n) acts as malloc, which also removes the
    // first-iteration special case.
    CvRect* nR = NULL;
    assert(cascade);

    CvSeq* faces = cvHaarDetectObjects( smallImg, cascade, storage, scaleFactor, minNeighbours, flags, minSize);
    for (int i=0; i<(faces ? faces->total : 0); i++) {
        CvRect* r = (CvRect*) cvGetSeqElem(faces, i);

        // skip detections larger than the configured maximum
        if ((r->width > maxSize.width) || (r->height > maxSize.height))
            continue;

        CvRect* grown = (CvRect*) realloc(nR, (nHeads+1) * sizeof(CvRect));
        if (!grown) break;          // out of memory: return what we have
        nR = grown;

        // scale the detection back to the original image size
        int nx1 = cvRound(r->x * scaleFactor);
        int ny1 = cvRound(r->y * scaleFactor);
        int nx2 = cvRound((r->x + r->width) * scaleFactor);
        int ny2 = cvRound((r->y + r->height) * scaleFactor);
        nR[nHeads] = cvRect(nx1, ny1, nx2-nx1, ny2-ny1);
        nHeads++;
    }
    
    *regions = nR;
    
    cvReleaseImage(&gray);
    cvReleaseImage(&smallImg);
    
    return nHeads;
}
コード例 #21
0
// Convert a capW x capH buffer of packed 0x00RRGGBB pixels into a BGR
// IplImage, then run the member Haar cascade over a downscaled, equalized
// grayscale copy. The detection sequence is stored in the pFaces member.
void CFaceDetect::check(int* pixels)
{
	IplImage* img = cvCreateImage(cvSize(capW,capH),IPL_DEPTH_8U,3);

	// Unpack each 0x00RRGGBB word into the BGR byte layout IplImage uses.
	const int total = capW*capH;
	const int stride = img->widthStep;
	const int channels = img->nChannels;

	for (int i = 0; i < total; i++) {
		unsigned int px = pixels[i];
		int row = i / img->width;
		int col = i % img->width;
		unsigned char* dst = &((unsigned char*)(img->imageData + row*stride))[col*channels];

		dst[0] = (px) & 0x000000ff;       // blue
		dst[1] = (px>>8) & 0x000000ff;    // green
		dst[2] = (px>>16) & 0x000000ff;   // red
	}

	// Grayscale, downscale, equalize — then detect.
	IplImage* grayImg = cvCreateImage(cvSize(img->width,img->height),IPL_DEPTH_8U,1);
	IplImage* smallImg = cvCreateImage(cvSize(cvRound(img->width/scale),cvRound(img->height/scale)),
		IPL_DEPTH_8U,1);

	cvCvtColor(img,grayImg,CV_BGR2GRAY);
	cvResize(grayImg,smallImg,CV_INTER_LINEAR);
	cvEqualizeHist(smallImg, smallImg);
	cvClearMemStorage(pStorage);

	pFaces = NULL;
	pFaces = cvHaarDetectObjects(smallImg, pCascade, pStorage,
		1.2, 2, CV_HAAR_DO_CANNY_PRUNING, cvSize(30,30));

	cvReleaseImage(&img);
	cvReleaseImage(&grayImg);
	cvReleaseImage(&smallImg);
}
コード例 #22
0
// Equalize the brightness of a 3-channel image in place while preserving
// "marker" pixels whose channel bytes are (255, 0, 0) in memory order.
// NOTE(review): with BGR channel order that marker is pure blue — confirm
// against the color the callers actually paint the mask with.
void
equalize_image2(IplImage *img)
{
	IplImage *img_gray = cvCreateImage(cvSize(img->width, img->height), IPL_DEPTH_8U, 1);

	// Pass 1: build a grayscale image from channel 0, forcing marker
	// pixels to 255 so equalization treats them as already saturated.
	int i, j;
	for (j = 0; j < img->height; j++)
	{
		for (i = 0; i < img->width; i++)
		{
			if ((unsigned char)img->imageData[j * img->widthStep + 3 * i] == 255 &&
				(unsigned char)img->imageData[j * img->widthStep + 3 * i + 1] == 0 &&
				(unsigned char)img->imageData[j * img->widthStep + 3 * i + 2] == 0)
				img_gray->imageData[j * img_gray->widthStep + i] = 255;
			else
				img_gray->imageData[j * img_gray->widthStep + i] = img->imageData[j * img->widthStep + 3 * i];
		}
	}
	cvEqualizeHist(img_gray,img_gray);

	// Pass 2: write the equalized values back into all three channels,
	// except for marker pixels, which keep channel 0 at 255 and leave
	// channels 1 and 2 untouched (so the marker survives equalization).
	for (j = 0; j < img->height; j++)
	{
		for (i = 0; i < img->width; i++)
		{
			if ((unsigned char)img->imageData[j * img->widthStep + 3 * i] == 255 &&
				(unsigned char)img->imageData[j * img->widthStep + 3 * i + 1] == 0 &&
				(unsigned char)img->imageData[j * img->widthStep + 3 * i + 2] == 0)
				img->imageData[j * img->widthStep + 3 * i] = 255;
			else
			{
				img->imageData[j * img->widthStep + 3 * i] = img_gray->imageData[j * img_gray->widthStep + i];
				img->imageData[j * img->widthStep + 3 * i + 1] = img_gray->imageData[j * img_gray->widthStep + i];
				img->imageData[j * img->widthStep + 3 * i + 2] = img_gray->imageData[j * img_gray->widthStep + i];
			}
		}
	}

	cvReleaseImage(&img_gray);
}
コード例 #23
0
// Detect faces in `img` and return their bounding boxes scaled back to
// source-image coordinates. Returns an empty vector (after logging) if the
// detection pipeline throws.
vector<CvRect> FaceDetector::detect(const IplImage *img) {
	vector<CvRect> result;
	// BUG FIX: declared outside the try so the catch can release them —
	// the original leaked both images whenever the detector threw.
	IplImage* gray = NULL;
	IplImage* small_img = NULL;
	try {
		double scale = 1.3;
		gray = cvCreateImage( cvSize(img->width,img->height), 8, 1 );
		small_img = cvCreateImage( cvSize( cvRound (img->width/scale),
				cvRound (img->height/scale)), 8, 1 );

		cvCvtColor( img, gray, CV_BGR2GRAY );
		cvResize( gray, small_img, CV_INTER_LINEAR );
		cvEqualizeHist( small_img, small_img );
		cvClearMemStorage( storage );

		CvSeq* faces =
			cvHaarDetectObjects( small_img, cascade, storage,
					1.1, 2, 0/*CV_HAAR_DO_CANNY_PRUNING*/,
					cvSize(30, 30) );

		// Map each detection back into full-resolution coordinates.
		for( int i = 0; i < (faces ? faces->total : 0); i++ ) {
			CvRect *rect = (CvRect*)cvGetSeqElem( faces, i );
			result.push_back(cvRect((int)(rect->x * scale),
					(int)(rect->y * scale),
					(int)(rect->width * scale),
					(int)(rect->height * scale)));
		}

		cvReleaseImage(&gray);
		cvReleaseImage(&small_img);
		return result;
	}
	catch (std::exception &ex) {
		cout << ex.what() << endl;
		// cvReleaseImage is a no-op on a NULL image, so this is safe even
		// when the throw happened before allocation.
		cvReleaseImage(&gray);
		cvReleaseImage(&small_img);
		return result;
	}
}
コード例 #24
0
ファイル: ShapeDetect.cpp プロジェクト: hemprasad/DetectCar
// Histogram-equalize a color image: split it into per-channel planes,
// equalize each plane, and merge them back. Returns a newly allocated
// 3-channel image the caller must release.
IplImage* ShapeDetect::EqualizeHistColorImage(IplImage *pImage){
	IplImage *pEquaImage = cvCreateImage(cvGetSize(pImage), pImage->depth, 3);

	// Unused plane slots stay NULL, which cvSplit/cvMerge treat as "skip".
	const int MAX_CHANNEL = 4;
	IplImage *planes[MAX_CHANNEL] = {NULL};

	for (int i = 0; i < pImage->nChannels; i++)
		planes[i] = cvCreateImage(cvGetSize(pImage), pImage->depth, 1);

	cvSplit(pImage, planes[0], planes[1], planes[2], planes[3]);

	for (int i = 0; i < pImage->nChannels; i++)
		cvEqualizeHist(planes[i], planes[i]);

	cvMerge(planes[0], planes[1], planes[2], planes[3], pEquaImage);

	for (int i = 0; i < pImage->nChannels; i++)
		cvReleaseImage(&planes[i]);

	return pEquaImage;
}
コード例 #25
0
ファイル: image_utils.cpp プロジェクト: ulyssesrr/carmen_lcad
// Histogram-equalize each of the three channels of `src` independently and
// write the merged result into `dst`. `src` and `dst` must have the same
// size and 3 channels of 8-bit depth; `dst` is fully overwritten.
void image_utils_equalize(IplImage *src, IplImage *dst)
{
  IplImage *plane[3];

  // Seed dst with the source pixels; cvMerge below overwrites all channels.
  cvCopy(src, dst, NULL);

  for (int ch = 0; ch < 3; ch++)
  {
    plane[ch] = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
    // Select channel ch+1 via the COI so cvCopy extracts a single plane,
    // then reset the COI so later whole-image operations see all channels.
    cvSetImageCOI(src, ch + 1);
    cvCopy(src, plane[ch], NULL);
    cvSetImageCOI(src, 0);
    cvEqualizeHist(plane[ch], plane[ch]);
  }

  cvMerge(plane[0], plane[1], plane[2], NULL, dst);

  for (int ch = 0; ch < 3; ch++)
    cvReleaseImage(&plane[ch]);
}
コード例 #26
0
ファイル: faceDetector.cpp プロジェクト: cou929/pointing
// Detect faces in `img` and report the center and radius of a detected face.
// Returns the number of faces found (0 if the cascade is not loaded).
//
// NOTE(review): uses member state — `smallImg` (pre-allocated downscaled
// buffer), `storage`, `cascade`, and `scale`. The loop overwrites *center
// and *radius on every iteration, so only the LAST detected face's values
// survive when several faces are found — presumably intentional for a
// single-subject pointing application; confirm with callers.
// NOTE(review): cvEqualizeHist requires a single-channel image, so `img`
// (and hence `smallImg`) is assumed grayscale here — TODO confirm.
int faceDetector::faceDetect( IplImage* img, CvPoint *center, int *radius )
{
  int i = 0, ret = 0;

  // Downscale + equalize before detection (speed / lighting robustness).
  cvResize( img, smallImg, CV_INTER_LINEAR );
  cvEqualizeHist( smallImg, smallImg );
  cvClearMemStorage( storage );

  if( cascade )
    {
      CvSeq* faces = cvHaarDetectObjects( smallImg, cascade, storage, 1.1, 2, 0/*CV_HAAR_DO_CANNY_PRUNING*/, cvSize(30, 30) );
      ret = (faces ? faces->total : 0);   

      for( i = 0; i < ret; i++ )
	{
	  CvRect* r = (CvRect*)cvGetSeqElem( faces, i );
	  // Map from smallImg coordinates back to the original image.
	  center->x = cvRound((r->x + r->width*0.5)*scale);
	  center->y = cvRound((r->y + r->height*0.5)*scale);
	  // Radius approximates half the average of width and height.
	  *radius = cvRound((r->width + r->height)*0.25*scale);
	}
    }

  return ret;
}
コード例 #27
0
// Segment candidate "garbage" objects in `src` by thresholding the equalized
// HSV saturation channel, cleaning the mask with dilate/erode, extracting
// contours, and keeping those that pass the perimeter / aspect / box-area /
// histogram-matching filters against `model`. Returns (a copy of) the member
// list `garbages`; previously accumulated Garbage objects are deleted first.
//
// Fix: the IplConvKernel returned by cvCreateStructuringElementEx was never
// released, leaking the kernel on every call — cvReleaseStructuringElement
// is now called once the morphology passes are done.
std::list<utils::Garbage*> GarbageRecognition::garbageList(IplImage * src, IplImage * model){
	// Drop results from the previous frame; we own the Garbage pointers.
	std::list<utils::Garbage*>::iterator it;
	for ( it=garbages.begin() ; it != garbages.end() ; it++ )
		delete *it;
	garbages.clear();

	// Hue/Saturation histogram of the model image, used by the
	// histogram-matching filter below.
	utils::Histogram * h = new Histogram(HIST_H_BINS,HIST_S_BINS);
	CvHistogram * testImageHistogram = h->getHShistogramFromRGB(model);

	CvSize srcSize = cvGetSize(src);

	// Work images for the HSV conversion.
	IplImage* hsv = cvCreateImage( srcSize, 8, 3 );
	IplImage* h_plane = cvCreateImage( srcSize, 8, 1 );
	IplImage* s_plane = cvCreateImage( srcSize, 8, 1 );
	IplImage* v_plane = cvCreateImage( srcSize, 8, 1 );

	// Single-channel stages of the pipeline.
	IplImage * threshImage=cvCreateImage(srcSize,8,1);    // thresholded mask
	IplImage * equalizedImage=cvCreateImage(srcSize,8,1); // equalized S channel
	IplImage * morphImage=cvCreateImage(srcSize,8,1);     // after dilate/erode
	IplImage * smoothImage=cvCreateImage(srcSize,8,1);    // after Gaussian blur

	// Color image used to draw accepted contours / bounding boxes.
	IplImage * contourImage=cvCreateImage(srcSize,8,3);

	int cont_index=0;

	// Convolution kernel for the morphological operations.
	IplConvKernel* element;

	CvRect boundingRect;

	// Contour list produced by myFindContours.
	CvSeq * contours;

	// --- pipeline: HSV -> equalize S -> threshold -> morph -> smooth ---
	cvCvtColor( src, hsv, CV_BGR2HSV );
	cvCvtPixToPlane( hsv, h_plane, s_plane, v_plane, 0 );

	// Equalize the saturation channel (objects of interest are saturated).
	cvEqualizeHist(s_plane,equalizedImage);

	// Binary-threshold the equalized saturation channel.
	cvThreshold(equalizedImage,threshImage,THRESHOLD_VALUE,255,
	CV_THRESH_BINARY);

	// Morphological close-like pass: dilate to fill gaps, erode to restore size.
	element = cvCreateStructuringElementEx( MORPH_KERNEL_SIZE*2+1,
		MORPH_KERNEL_SIZE*2+1, MORPH_KERNEL_SIZE, MORPH_KERNEL_SIZE,
		CV_SHAPE_RECT, NULL);

	cvDilate(threshImage,morphImage,element,MORPH_DILATE_ITER);
	cvErode(morphImage,morphImage,element,MORPH_ERODE_ITER);

	// FIX: release the structuring element (previously leaked every call).
	cvReleaseStructuringElement(&element);

	// Gaussian smoothing before contour extraction.
	cvSmooth(morphImage,smoothImage,CV_GAUSSIAN,3,0,0,0);

	// Extract all contours from the cleaned mask.
	contours = myFindContours(smoothImage);

	cont_index=0;
	cvCopy(src,contourImage,0);

	while(contours!=NULL){
		CvSeq * aContour=getPolygon(contours);
		utils::Contours * ct = new Contours(aContour);

		// Candidate filters: each returns nonzero when the contour passes.
		int pf = ct->perimeterFilter(MINCONTOUR_PERIMETER,MAXCONTOUR_PERIMETER);

		int raf = ct->rectangularAspectFilter(CONTOUR_RECTANGULAR_MIN_RATIO, CONTOUR_RECTANGULAR_MAX_RATIO);

		// int af = ct->areaFilter(MINCONTOUR_AREA,MAXCONTOUR_AREA);
		int baf = ct->boxAreaFilter(BOXFILTER_TOLERANCE);

		int hmf = ct->histogramMatchingFilter(src,testImageHistogram, HIST_H_BINS,HIST_S_BINS,HIST_MIN);

		// A contour must pass every enabled filter to become a Garbage.
		if( pf && raf && baf && hmf	){

				// Visualize the accepted contour and its bounding box.
				ct->printContour(3,cvScalar(127,127,0,0),
					contourImage);

				boundingRect=cvBoundingRect(ct->getContour(),0);
				cvRectangle(contourImage,cvPoint(boundingRect.x,boundingRect.y),
						cvPoint(boundingRect.x+boundingRect.width,
						boundingRect.y+boundingRect.height),
						_GREEN,1,8,0);

				// Record the detection; ownership of `r` passes to Garbage,
				// and ownership of the Garbage to the member list.
				utils::MinimalBoundingRectangle * r = new utils::MinimalBoundingRectangle(boundingRect.x,
					boundingRect.y,boundingRect.width,boundingRect.height);

				utils::Garbage * aGarbage = new utils::Garbage(r);

				garbages.push_back(aGarbage);

			}

		delete ct;
		// Each polygon approximation owns its own storage.
		cvReleaseMemStorage( &aContour->storage );
		contours=contours->h_next;
		cont_index++;
	}

	delete h;

	cvReleaseHist(&testImageHistogram);
	cvReleaseImage(&threshImage);
	cvReleaseImage(&equalizedImage);
	cvReleaseImage(&morphImage);
	cvReleaseImage(&smoothImage);
	cvReleaseImage(&contourImage);

	cvReleaseImage(&hsv);
	cvReleaseImage(&h_plane);
	cvReleaseImage(&s_plane);
	cvReleaseImage(&v_plane);

	return garbages;
}
コード例 #28
0
ファイル: Image.cpp プロジェクト: jeremt/emotionDetector
// Convert `image` to grayscale and histogram-equalize it. The returned
// single-channel 8-bit image is newly allocated; the caller must release
// it with cvReleaseImage.
IplImage *equalizeImage(IplImage *image) {
  IplImage *gray = cvCreateImage(cvGetSize(image), IPL_DEPTH_8U, 1);
  cvCvtColor(image, gray, CV_RGB2GRAY);
  cvEqualizeHist(gray, gray);
  return gray;
}
コード例 #29
0
// Run the Haar cascade over `img` and return a heap-allocated array of
// `objs_found` rectangles in small-image coordinates (caller must delete[]).
// `calc_time` receives the detection time in milliseconds. Returns NULL
// when no cascade is loaded.
//
// Fix: `objs_found = faces->total` dereferenced `faces` without a null
// check even though the copy loop below guarded with `faces ? …`, and
// cvClearSeq(faces) was likewise unguarded — both are now consistent.
//
// NOTE(review): the `storage` parameter is immediately overwritten with a
// freshly created memory storage that is released before returning, so the
// caller's argument is never used — consider dropping the parameter.
CvRect* QOpenCvImageBox::detect_objs( IplImage* img, CvMemStorage* storage, CvHaarClassifierCascade* cascade, int image_scale, int &objs_found, int &calc_time, double scale_factor, int min_neighbors )
{
    IplImage *gray, *small_img;
    int i;

    if( cascade )
    {
        storage = cvCreateMemStorage(0);

        // Detection runs on a downscaled, equalized grayscale copy.
        gray = cvCreateImage( cvSize(img->width,img->height), 8, 1 );
        small_img = cvCreateImage( cvSize( cvRound (img->width/image_scale), cvRound (img->height/image_scale)), 8, 1 );

        cvCvtColor( img, gray, CV_RGB2GRAY );
        cvResize( gray, small_img, CV_INTER_LINEAR );
        cvEqualizeHist( small_img, small_img );
        cvClearMemStorage( storage );

        // Time the detection call with the OpenCV tick counter.
        double t = (double)cvGetTickCount();
        CvSeq* faces = cvHaarDetectObjects( small_img, cascade, storage,
                                            scale_factor,
                                            min_neighbors,
                                            0
                                            //|CV_HAAR_FIND_BIGGEST_OBJECT
                                            //|CV_HAAR_DO_ROUGH_SEARCH
                                            |CV_HAAR_DO_CANNY_PRUNING
                                            //|CV_HAAR_SCALE_IMAGE
                                            ,
                                            cvSize(0, 0) );
        t = (double)cvGetTickCount() - t;

        calc_time=t/((double)cvGetTickFrequency()*1000.);

        // FIX: guard against a NULL sequence before reading ->total.
        objs_found = faces ? faces->total : 0;

        cvReleaseImage( &gray );
        cvReleaseImage( &small_img );

        // Copy the rectangles out of `storage` before it is released.
        CvRect* farray=new CvRect[objs_found];
        for( i = 0; i < objs_found; i++ )
        {
            CvRect* r = (CvRect*)cvGetSeqElem( faces, i );

            farray[i].x=r->x;
            farray[i].y=r->y;
            farray[i].width=r->width;
            farray[i].height=r->height;
        }

        if( faces )
            cvClearSeq(faces);
        cvReleaseMemStorage(&storage);

        return farray;
    }

    return NULL;
}
コード例 #30
0
ファイル: FaceDetection.cpp プロジェクト: friedvan/snake
// Capture frames from the default camera, detect faces with a Haar cascade,
// draw a circle around each detection, and track the most recent face center
// in the member `point` (previous value saved in `lastPoint`). Runs until a
// key is pressed or the capture ends.
//
// Fix: `gray` and `small_img` were allocated once per call and never
// released — they are now released before the window is destroyed.
//
// NOTE(review): `storage` and `cascade` are static but re-created/re-loaded
// on every call, leaking the previous instances — TODO confirm whether the
// statics are meant to cache across calls, then either load once or release.
void FaceDetection::detection()
{
	static CvMemStorage* storage = 0;
	static CvHaarClassifierCascade* cascade = 0;
	const char* cascade_name ="haarcascade_frontalface_alt.xml";

    CvCapture* capture = NULL;
    IplImage *frame, *frame_copy = 0;

	// Per-face drawing colors, cycled with i % 8.
	static CvScalar colors[] = 
    {
        {{0,0,255}},
        {{0,128,255}},
        {{0,255,255}},
        {{0,255,0}},
        {{255,128,0}},
        {{255,255,0}},
        {{255,0,0}},
        {{255,0,255}}
    };	

	// The alt2 cascade overrides the name initialized above.
	cascade_name = "haarcascade_frontalface_alt2.xml";	
    cascade = (CvHaarClassifierCascade*)cvLoad( cascade_name, 0, 0, 0 );	
    storage = cvCreateMemStorage(0);	

	// Capture from the default camera (index -1 = any available).
	capture = cvCaptureFromCAM(-1);
	cvNamedWindow( "result", 1 );  
	cvSetCaptureProperty(capture,CV_CAP_PROP_POS_FRAMES,10);
	// Grab one frame up front so the work images can be sized to it.
	IplImage* img=cvQueryFrame(capture);
	double scale = 1.3;
    IplImage* gray = cvCreateImage( cvSize(img->width,img->height), 8, 1 );
    IplImage* small_img = cvCreateImage( cvSize( cvRound (img->width/scale),
		cvRound (img->height/scale)),
		8, 1 );
    int i;	

    if( capture )
    {

        for(;;)
        {	
			cvGrabFrame( capture );
            frame = cvRetrieveFrame( capture );
            if( !frame )
                break;
            if( !img )
                img = cvCreateImage( cvSize(frame->width,frame->height),IPL_DEPTH_8U, frame->nChannels );
            // Normalize frame orientation into `img` (flip bottom-up frames).
            if( frame->origin != IPL_ORIGIN_TL )
                cvCopy( frame, img, 0 );
            else
                cvFlip( frame, img, 0 );

			// Grayscale + downscale + equalize before detection.
			cvCvtColor( img, gray, CV_BGR2GRAY );
			cvResize( gray, small_img, CV_INTER_LINEAR );
			cvEqualizeHist( small_img, small_img );
			cvClearMemStorage( storage );

			if( cascade )
			{
				double t = (double)cvGetTickCount();
				CvSeq* faces = cvHaarDetectObjects( small_img, cascade, storage,
					1.1, 2, 0/*CV_HAAR_DO_CANNY_PRUNING*/,
					cvSize(30, 30) );
				t = (double)cvGetTickCount() - t;
				printf( "detection time = %gms\n", t/((double)cvGetTickFrequency()*1000.) );
				for( i = 0; i < (faces ? faces->total : 0); i++ )
				{
					CvRect* r = (CvRect*)cvGetSeqElem( faces, i );
					CvPoint center;
					int radius;
					// Map from small_img coordinates back to img coordinates.
					center.x = cvRound((r->x + r->width*0.5)*scale);
					center.y = cvRound((r->y + r->height*0.5)*scale);

					// Track face motion: remember the previous center.
					// Note: with multiple faces only the last one survives.
					lastPoint = point;

					point.x=center.x;
					point.y=center.y;

					radius = cvRound((r->width + r->height)*0.25*scale);
					cvCircle( img, center, radius, colors[i%8], 3, 8, 0 );
					cvCircle( img, center, 1, colors[i%8], 3, 8, 0 );
				}
			}

			// Flip back for display (undoes the orientation flip above).
			cvFlip( img, img, 0 );
			cvShowImage( "result", img );

            if( cvWaitKey( 1 ) >= 0 )
                break;
        }

        cvReleaseImage( &frame_copy );
        cvReleaseCapture( &capture );
    }

	// FIX: release the per-call work images (previously leaked).
	cvReleaseImage( &gray );
	cvReleaseImage( &small_img );

    cvDestroyWindow("result");
}