vector<VisionRecognitionResult> KITECH_ERSPViPRObjectRecognitionComp::Recognize(vector<unsigned char> image,int width,int height,int pixelBytes)
{
	//PrintMessage("SUCCESS:KITECH_ERSPViPRObjectRecognitionComp::Recognize()\n");

	vector<VisionRecognitionResult> _recognitionResult(0);

	IplImage *cvImage = cvCreateImageHeader( cvSize(width, height), 8, pixelBytes );
	cvImage->imageData = (char *)&image[0];

	Evolution::Matrix<int> e_img;
	Evolution::Image capture_img;

	if (ImageConversion (&e_img, (BYTE *)cvImage->imageData, width, height, pixelBytes) < 0) {
		cvReleaseImageHeader(&cvImage);	// don't leak the header on early return
		return _recognitionResult;
	}

	capture_img.copy_matrix(e_img);		// copy the converted matrix into an Evolution image

	// extract feature points from the captured image
	Evolution::ObjRecKeypointList* capture_features = new Evolution::ObjRecKeypointList ();
	if (capture_features->extract (&capture_img) != Evolution::RESULT_SUCCESS) {
		capture_features->remove_ref ();
		cvReleaseImageHeader(&cvImage);	// don't leak the header on early return
		return _recognitionResult;
	}

	// recognize objects by matching the extracted feature points against the database
	Evolution::ObjRecQuery* query = new Evolution::ObjRecQuery ();
	query->recognize (_vipr->objrecDB, capture_features);

	// The image features are no longer needed once recognition has run,
	// so release them here.
	capture_features->remove_ref ();

	int n = query->get_num_matches ();
	_recognitionResult.resize(n);

	for( int i = 0  ;  i < n  ;  i++ )
	{
		Evolution::ObjRecQuery::MatchInfo info;
		query->get_match_info (i, &info);

		_recognitionResult[i].name = info.label;
		// the y coordinates are flipped (height - y) to convert to a top-left origin
		_recognitionResult[i].point1X = (long)info.rectangle[0][0];
		_recognitionResult[i].point1Y = height-(long)info.rectangle[0][1];
		_recognitionResult[i].point2X = (long)info.rectangle[1][0];
		_recognitionResult[i].point2Y = height-(long)info.rectangle[1][1];
		_recognitionResult[i].point3X = (long)info.rectangle[2][0];
		_recognitionResult[i].point3Y = height-(long)info.rectangle[2][1];
		_recognitionResult[i].point4X = (long)info.rectangle[3][0];
		_recognitionResult[i].point4Y = height-(long)info.rectangle[3][1];
	}
	query->remove_ref (); // clean up the query object

	cvReleaseImageHeader(&cvImage);


	return _recognitionResult;
}
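// A minimal usage sketch for this component (hypothetical caller; the frame
// source is an assumption — the only requirement visible in this file is a
// packed 8-bit BGR buffer of width*height*pixelBytes bytes):
#if 0
void ExampleViPRQuery(KITECH_ERSPViPRObjectRecognitionComp &comp,
                      vector<unsigned char> &bgrFrame, int width, int height)
{
	vector<VisionRecognitionResult> results = comp.Recognize(bgrFrame, width, height, 3);
	for (size_t i = 0; i < results.size(); ++i) {
		PrintMessage("found '%s' at (%d,%d)\n", results[i].name.c_str(),
			(int)results[i].point1X, (int)results[i].point1Y);
	}
}
#endif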
vector<VisionRecognitionResult> KITECH_HSVColorRecognitionComp::Recognize(vector<unsigned char> image,int width,int height,int pixelBytes)
{
	//PrintMessage("SUCCESS:KITECH_HSVColorRecognitionComp::Recognize()");

	vector<VisionRecognitionResult> _recognitionResult(0);
	VisionRecognitionResult tmpResult;
	
	IplImage *cvImage = cvCreateImageHeader( cvSize(width, height), 8, pixelBytes );
	cvImage->imageData = (char *)&image[0];

	IplImage *hsvImage = cvCreateImage( cvGetSize(cvImage), 8, pixelBytes );
	cvCvtColor(cvImage, hsvImage, CV_BGR2HSV);

	IplImage *cvMask = cvCreateImage( cvSize(width, height), IPL_DEPTH_8U, 1 );

	for( unsigned int p = 0  ;  p < _colorRange.size()  ;  p++ ) {

		cvInRangeS( hsvImage,
				cvScalar(_colorRange[p].min1,_colorRange[p].min2,_colorRange[p].min3,0),
				cvScalar(_colorRange[p].max1,_colorRange[p].max2,_colorRange[p].max3,255),
				cvMask );

		CBlobLabeling blob;
		blob.SetParam( cvMask, width*height/1000 );
		blob.DoLabeling();

		for( int i = 0  ;  i < blob.m_nBlobs  ;  i++ ) {
			tmpResult.name = _colorRange[p].name;

			tmpResult.point1X = blob.m_recBlobs[i].x;
			tmpResult.point1Y = blob.m_recBlobs[i].y;
			tmpResult.point2X = blob.m_recBlobs[i].x + blob.m_recBlobs[i].width;
			tmpResult.point2Y = blob.m_recBlobs[i].y;
			tmpResult.point3X = blob.m_recBlobs[i].x + blob.m_recBlobs[i].width;
			tmpResult.point3Y = blob.m_recBlobs[i].y + blob.m_recBlobs[i].height;
			tmpResult.point4X = blob.m_recBlobs[i].x;
			tmpResult.point4Y = blob.m_recBlobs[i].y + blob.m_recBlobs[i].height;
			_recognitionResult.push_back(tmpResult);
			//PrintMessage("SUCCESS:KITECH_HSVColorRecognitionComp::Recognize() -> I found %d data.(%d=%s)\n", blob.m_nBlobs, i, tmpResult.name.c_str());
		}
	}

	//cvSaveImage("KITECH_HSVColorRecognitionComp.jpg", cvImage);

	cvReleaseImage(&cvMask);
	cvReleaseImage(&hsvImage);
	cvReleaseImageHeader(&cvImage);

	return _recognitionResult;
}
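// How one entry of _colorRange might be populated (sketch; the ColorRange type
// name is an assumption — only the fields used above are known from this file).
// OpenCV's 8-bit HSV encoding keeps hue in [0,180), so e.g. the upper red band:
#if 0
ColorRange red;
red.name = "Red";
red.min1 = 170;  red.min2 = 100;  red.min3 = 100;	// lower H, S, V
red.max1 = 180;  red.max2 = 255;  red.max3 = 255;	// upper H, S, V
_colorRange.push_back(red);
// Note: hue wraps around, so full red coverage needs a second entry near H = 0.
#endif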
vector<VisionRecognitionResult> KitechSkinColorFaceRecognitionComp::Recognize(vector<unsigned char> image,int width,int height,int pixelBytes)
{
	vector<VisionRecognitionResult> _recognitionResult(0);

	IplImage *cvImage = cvCreateImageHeader( cvSize(width, height), 8, pixelBytes );
	cvImage->imageData = (char *)&image[0];

	IplImage *ycrcbImage = cvCreateImage( cvGetSize(cvImage), 8, pixelBytes );
	cvCvtColor(cvImage, ycrcbImage, CV_BGR2YCrCb);

	IplImage *cvMask = cvCreateImage( cvSize(width, height), IPL_DEPTH_8U, 1 );

	// classic YCrCb skin thresholds: Cr 133..173, Cb 77..127; Y left unconstrained
	cvInRangeS( ycrcbImage, cvScalar(0,133,77), cvScalar(255,173,127), cvMask );

	CBlobLabeling blob;
	blob.SetParam( cvMask, width*height/100 );
	blob.DoLabeling();

	_recognitionResult.resize(blob.m_nBlobs);
	for( int i = 0  ;  i < blob.m_nBlobs  ;  i++ ) {
		_recognitionResult[i].name = "Face";

		_recognitionResult[i].point1X = blob.m_recBlobs[i].x;
		_recognitionResult[i].point1Y = blob.m_recBlobs[i].y;
		_recognitionResult[i].point2X = blob.m_recBlobs[i].x + blob.m_recBlobs[i].width;
		_recognitionResult[i].point2Y = blob.m_recBlobs[i].y;
		_recognitionResult[i].point3X = blob.m_recBlobs[i].x + blob.m_recBlobs[i].width;
		_recognitionResult[i].point3Y = blob.m_recBlobs[i].y + blob.m_recBlobs[i].height;
		_recognitionResult[i].point4X = blob.m_recBlobs[i].x;
		_recognitionResult[i].point4Y = blob.m_recBlobs[i].y + blob.m_recBlobs[i].height;
		//PrintMessage("SUCCESS:KitechSkinColorFaceRecognitionComp::recognize() -> I found %d data.(%d=%s)\n", blob.m_nBlobs, i, _recognitionResult[i].name.c_str());
	}

	//cvSaveImage("KitechSkinColorFaceRecognitionComp.jpg", cvImage);

	cvReleaseImage(&cvMask);
	cvReleaseImage(&ycrcbImage);
	cvReleaseImageHeader(&cvImage);

	return _recognitionResult;
}
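// Per-pixel equivalent of the cvInRangeS call above, for illustration only
// (the function name is hypothetical; the 1.x C API documents cvInRangeS's
// upper bound as exclusive):
#if 0
inline bool IsSkinYCrCb(unsigned char y, unsigned char cr, unsigned char cb)
{
	// Y is effectively unconstrained; the classic skin band keeps
	// Cr in [133,173) and Cb in [77,127).
	return (133 <= cr && cr < 173) && (77 <= cb && cb < 127);
}
#endif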
vector<VisionRecognitionResult> KitechSurfObjectRecognitionComp::Recognize(vector<unsigned char> image,int width,int height,int pixelBytes)
{
	vector<VisionRecognitionResult> _recognitionResult(0);

	IplImage *cvImage = cvCreateImageHeader( cvSize(width, height), 8, pixelBytes );
	cvImage->imageData = (char *)&image[0];

	IplImage *grayImage = cvCreateImage( cvGetSize(cvImage), 8, 1 );
	cvCvtColor( cvImage, grayImage, CV_BGR2GRAY );

	CvMemStorage *imageStorage = cvCreateMemStorage(0);
	CvSeq *imageKeypoints, *imageDescriptors;

	cvExtractSURF( grayImage, 0, &imageKeypoints, &imageDescriptors, imageStorage, cvSURFParams(500,1) );

	CvPoint src_corners[4] = {{0,0}, {_orgWidth,0}, {_orgWidth, _orgHeight}, {0, _orgHeight}};
	CvPoint dst_corners[4];

	if( LocatePlanarObject( _objectKeypoints, _objectDescriptors, imageKeypoints, imageDescriptors, src_corners, dst_corners ) ) {
		_recognitionResult.resize(1);

		_recognitionResult[0].name = _objName;
		_recognitionResult[0].point1X = dst_corners[0].x;
		_recognitionResult[0].point1Y = dst_corners[0].y;
		_recognitionResult[0].point2X = dst_corners[1].x;
		_recognitionResult[0].point2Y = dst_corners[1].y;
		_recognitionResult[0].point3X = dst_corners[2].x;
		_recognitionResult[0].point3Y = dst_corners[2].y;
		_recognitionResult[0].point4X = dst_corners[3].x;
		_recognitionResult[0].point4Y = dst_corners[3].y;
		//PrintMessage("KitechSurfObjectRecognitionComp::recognize() -> I found data.(%s)\n", _recognitionResult[0].name.c_str());
	}

	cvReleaseMemStorage( &imageStorage );

	cvReleaseImage( &grayImage );
	cvReleaseImageHeader( &cvImage );

	return _recognitionResult;
}
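// The model-side state used above (_objectKeypoints, _objectDescriptors,
// _orgWidth, _orgHeight, _objName) must be prepared from a template image
// elsewhere; a minimal sketch of that step with the same SURF parameters
// (the LoadSurfModel name and the _objectStorage member are assumptions):
#if 0
bool KitechSurfObjectRecognitionComp::LoadSurfModel(const char *path, const char *name)
{
	IplImage *model = cvLoadImage(path, CV_LOAD_IMAGE_GRAYSCALE);
	if (model == NULL) return false;

	_objName   = name;
	_orgWidth  = model->width;
	_orgHeight = model->height;

	// The keypoint/descriptor sequences live in this storage, so it must
	// outlive every later LocatePlanarObject call.
	_objectStorage = cvCreateMemStorage(0);
	cvExtractSURF( model, 0, &_objectKeypoints, &_objectDescriptors,
		_objectStorage, cvSURFParams(500, 1) );

	cvReleaseImage(&model);
	return true;
}
#endif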
vector<VisionRecognitionResult> IPEL_Haar2FaceEyeDetectionComp::Recognize(vector<unsigned char> image,int width,int height,int pixelBytes)
{
	//PrintMessage("SUCCESS:IPEL_Haar2FaceEyeDetectionComp::Recognize()\n");

	vector<VisionRecognitionResult> _recognitionResult(0);

	IplImage *cvImage = cvCreateImageHeader( cvSize(width, height), 8, pixelBytes );
	cvImage->imageData = (char *)&image[0];

	if( _storage ) cvClearMemStorage( _storage );

	if( _cascade_f ) {
		/* detect faces */
		CvSeq *faces = cvHaarDetectObjects(cvImage, _cascade_f, _storage,
			1.1, 3, CV_HAAR_DO_CANNY_PRUNING, cvSize( 30, 30 ) );

		if( faces && faces->total>0) {
			/* Get region of face */
			int nfaces = faces->total; // cache the count: faces->total can change once the detector runs again
			_recognitionResult.resize (nfaces);
			CvRect *fr = new CvRect[nfaces];

			for( int i = 0; i < nfaces; i++ ) {
				/* draw a rectangle */
				CvRect *r = (CvRect*)cvGetSeqElem(faces, i);
				memcpy(&fr[i],r,sizeof(CvRect));					

				//rec.type = 1;
				_recognitionResult[i].name = "Face";
				/*- upper-left corner -*/
				_recognitionResult[i].point1X = r->x;
				_recognitionResult[i].point1Y = r->y;
				/*- upper-right corner -*/
				_recognitionResult[i].point2X = r->x + r->width;
				_recognitionResult[i].point2Y = r->y;
				/*- lower-right corner -*/
				_recognitionResult[i].point3X = r->x + r->width;
				_recognitionResult[i].point3Y = r->y + r->height;
				/*- lower-left corner -*/
				_recognitionResult[i].point4X = r->x;
				_recognitionResult[i].point4Y = r->y + r->height;
			}

			// Running the Haar detector again can alter or invalidate the earlier
			// results, which is why the face rectangles were copied into fr[].
			for( int i = 0; i < nfaces; i++ ) {
				/* reset buffer for the next object detection */
				cvClearMemStorage(_storage);

				/* Set the Region of Interest: estimate the eyes' position */
				cvSetImageROI(cvImage, cvRect(fr[i].x, fr[i].y + (int)(fr[i].height/5.5), fr[i].width, (int)(fr[i].height/3.0) ) );

				/* detect eyes */
				CvSeq* eyes = cvHaarDetectObjects(cvImage, _cascade_e, _storage,
					1.15, 3, CV_HAAR_DO_CANNY_PRUNING, cvSize(25, 15));

				/* draw a rectangle for each eye found */
				for(int j = 0; j < (eyes ? eyes->total : 0); j++ ) {
					if( j > 1 ) break;	// keep at most two eyes per face
					CvRect *er = (CvRect*) cvGetSeqElem( eyes, j );
					cvRectangle(cvImage,
						cvPoint(er->x, er->y), 
						cvPoint(er->x + er->width, er->y + er->height),
						CV_RGB(255, 0, 0), 1, 8, 0);

				}

				cvResetImageROI(cvImage);
			}

			delete [] fr;	// allocated with new[]
		}
	}

#if 0
	if( _recognitionResult.size() ) {
		for( std::vector<VisionRecognitionResult>::iterator it = _recognitionResult.begin()  ;  it != _recognitionResult.end()  ;  it++ ) {
			cvLine(cvImage,
				cvPoint(it->point1X,it->point1Y),
				cvPoint(it->point2X,it->point2Y),
				CV_RGB(0, 255, 0));
			cvLine(cvImage,
				cvPoint(it->point2X,it->point2Y),
				cvPoint(it->point3X,it->point3Y),
				CV_RGB(0, 255, 0));
			cvLine(cvImage,
				cvPoint(it->point3X,it->point3Y),
				cvPoint(it->point4X,it->point4Y),
				CV_RGB(0, 255, 0));
			cvLine(cvImage,
				cvPoint(it->point4X,it->point4Y),
				cvPoint(it->point1X,it->point1Y),
				CV_RGB(0, 255, 0));
		}
	}
#endif

	cvReleaseImageHeader( &cvImage );

	return _recognitionResult;
}
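// This component assumes _cascade_f, _cascade_e, and _storage were created
// beforehand; a minimal initialization sketch (the method name and the
// cascade XML paths are placeholders, not taken from this file):
#if 0
bool IPEL_Haar2FaceEyeDetectionComp::InitDetector(void)
{
	_cascade_f = (CvHaarClassifierCascade *)cvLoad("haarcascade_frontalface_alt.xml", 0, 0, 0);
	_cascade_e = (CvHaarClassifierCascade *)cvLoad("haarcascade_eye.xml", 0, 0, 0);
	_storage   = cvCreateMemStorage(0);
	return _cascade_f != NULL && _cascade_e != NULL && _storage != NULL;
}
#endif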