CLIFIntegralResult clifGrayscaleIntegral(const IplImage* source, CLIFEnvironmentData* data, const cl_bool use_opencl)
{
    CLIFIntegralResult ret;

    if(!use_opencl) {
        IplImage* grayscale = cvCreateImage(cvSize(source->width, source->height), IPL_DEPTH_8U, 1);
        cvCvtColor(source, grayscale, CV_BGR2GRAY);
        ret.image = cvCreateMat(source->height + 1, source->width + 1, CV_32SC1);
        ret.square_image = cvCreateMat(source->height + 1, source->width + 1, CV_64FC1);
        cvIntegral(grayscale, ret.image, ret.square_image);
        cvReleaseImage(&grayscale);
        return ret;
    }

    cl_int error = CL_SUCCESS;

    // Init buffer
    error = clEnqueueWriteBuffer(data->environment.queue, data->bgr_to_gray_data.buffers[0], CL_FALSE, 0,
                                 source->widthStep * source->height, source->imageData, 0, NULL, NULL);
    clCheckOrExit(error);

    // Run the BGR-to-gray kernel
    error = clEnqueueNDRangeKernel(data->environment.queue, data->environment.kernels[0], 2, NULL,
                                   data->bgr_to_gray_data.global_size, data->bgr_to_gray_data.local_size,
                                   0, NULL, NULL);
    clCheckOrExit(error);

    // Set the grayscale output as the input of the integral kernels
    error = clSetKernelArg(data->environment.kernels[1], 0, sizeof(cl_mem), &(data->bgr_to_gray_data.buffers[1]));
    clCheckOrExit(error);

    // Run the sum-rows kernel
    error = clEnqueueNDRangeKernel(data->environment.queue, data->environment.kernels[1], 1, NULL,
                                   &(data->integral_image_data.global_size[0]),
                                   &(data->integral_image_data.local_size[0]), 0, NULL, NULL);
    clCheckOrExit(error);

    // Run the sum-cols kernel
    error = clEnqueueNDRangeKernel(data->environment.queue, data->environment.kernels[2], 1, NULL,
                                   &(data->integral_image_data.global_size[1]),
                                   &(data->integral_image_data.local_size[1]), 0, NULL, NULL);
    clCheckOrExit(error);

    // Read the result (blocking maps)
    cl_uint* result = (cl_uint*)clEnqueueMapBuffer(data->environment.queue, data->integral_image_data.buffers[3],
                                                   CL_TRUE, CL_MAP_READ, 0,
                                                   (source->width + 1) * (source->height + 1) * sizeof(cl_uint),
                                                   0, NULL, NULL, &error);
    clCheckOrExit(error);
    cl_ulong* square_result = (cl_ulong*)clEnqueueMapBuffer(data->environment.queue, data->integral_image_data.buffers[4],
                                                            CL_TRUE, CL_MAP_READ, 0,
                                                            (source->width + 1) * (source->height + 1) * sizeof(cl_ulong),
                                                            0, NULL, NULL, &error);
    clCheckOrExit(error);
    data->integral_image_data.ptr = result;

    // Wrap the mapped buffers in CvMat headers. The steps passed to
    // cvSetData are in bytes. NOTE: the squared sums are 64-bit integers
    // exposed through a CV_64F header; callers must reinterpret the data.
    ret.image = cvCreateMatHeader(source->height + 1, source->width + 1, CV_32SC1);
    cvSetData(ret.image, result, (source->width + 1) * sizeof(cl_uint));
    ret.square_image = cvCreateMatHeader(source->height + 1, source->width + 1, CV_64FC1);
    cvSetData(ret.square_image, square_result, (source->width + 1) * sizeof(cl_ulong));
    return ret;
}
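/*
 * `clCheckOrExit` is used throughout these listings but never defined in
 * them. A minimal sketch, assuming it simply aborts on any non-success
 * OpenCL status code (the real helper may log more context):
 */
#include <stdio.h>
#include <stdlib.h>
#include <CL/cl.h>

static void clCheckOrExit(cl_int error)
{
    if (error != CL_SUCCESS) {
        fprintf(stderr, "OpenCL error %d\n", (int)error);
        exit(EXIT_FAILURE);
    }
}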
void extractHaarFeatures(const IplImage* img, Mat& haar)
{
    CvSize size = cvSize(IMAGE_RESIZE, IMAGE_RESIZE);
    CvSize size2 = cvSize(INTEGRAL_SIZE, INTEGRAL_SIZE);
    CvSize img_size = cvGetSize(img);

    // Convert to single-channel grayscale if necessary
    IplImage* ipl = cvCreateImage(img_size, IPL_DEPTH_8U, 1);
    if(img->nChannels == 3)
        cvCvtColor(img, ipl, CV_BGR2GRAY);
    else
        cvCopy(img, ipl, 0);

    // Resize to IMAGE_RESIZE x IMAGE_RESIZE if necessary
    if((size.width != img_size.width) || (size.height != img_size.height)) {
        IplImage* tmpsize = cvCreateImage(size, IPL_DEPTH_8U, 1);
        cvResize(ipl, tmpsize, CV_INTER_LINEAR);
        cvReleaseImage(&ipl);
        ipl = tmpsize;
    }

    // Normalize pixel values to [0, 1]
    IplImage* temp = cvCreateImage(size, IPL_DEPTH_64F, 1);
    cvCvtScale(ipl, temp);
    cvNormalize(temp, temp, 0, 1, CV_MINMAX);

    haar.release();
    haar = Mat::zeros(1, NUM_HAAR_FEATURES, CV_32FC1);

    IplImage* integral = cvCreateImage(size2, IPL_DEPTH_64F, 1);
    CvMat* sqSum = cvCreateMat(temp->height + 1, temp->width + 1, CV_64FC1);
    cvIntegral(temp, integral, sqSum);
    cvReleaseMat(&sqSum);

    // Enumerate all rectangles aligned to a 10-pixel grid
    // (assumes IMAGE_RESIZE == 100 and INTEGRAL_SIZE == IMAGE_RESIZE + 1)
    int actualSize = 0;
    for(int i = 0; i < 100; i += 10) {               // top-left corner
        for(int j = 0; j < 100; j += 10) {
            for(int m = i + 10; m <= 100; m += 10) { // bottom-right corner
                for(int n = j + 10; n <= 100; n += 10) {
                    haar.at<float>(0, actualSize++) = getIntegralRectValue(integral, i, j, m, n);
                }
            }
        }
    }

    cvReleaseImage(&ipl);
    cvReleaseImage(&temp);
    cvReleaseImage(&integral);
}
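/*
 * `getIntegralRectValue` is not shown in this listing. A minimal sketch
 * of the standard four-corner lookup, assuming (i, j) is the top-left and
 * (m, n) the bottom-right corner of the rectangle, with the first index
 * taken as the row (the actual coordinate convention may differ):
 */
static double getIntegralRectValue(const IplImage* integral, int i, int j, int m, int n)
{
    // cvGetReal2D(arr, row, col). The integral image has one extra row and
    // column, so corner (r, c) of the source maps directly to entry (r, c).
    return cvGetReal2D(integral, m, n)
         - cvGetReal2D(integral, i, n)
         - cvGetReal2D(integral, m, j)
         + cvGetReal2D(integral, i, j);
}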
void getBriefDescriptors2(int descriptors[getNumberOfKeypoints()][DESC_LEN],
                          const CvPoint kpts[getNumberOfKeypoints()], IplImage* img)
{
    unsigned int i;

    // Make sure that the input image contains only one color channel:
    assert(img->nChannels == 1);

    // Check that all the keypoints are inside the subimage:
    assert(validateKeypoints(kpts, img->width, img->height));

    // Allocate space for and calculate the integral image:
    allocateIntegralImage(img);
    cvIntegral(img, integralImage, NULL, NULL);

    printf("NumberOfKeypoints : %d\n", getNumberOfKeypoints());

    // Integral-image layout; both values are loop-invariant:
    int inWS = integralImage->step / CV_ELEM_SIZE(integralImage->type);
    int* iD = integralImage->data.i;

    // Iterate over keypoints:
    for (i = 0; i < getNumberOfKeypoints(); i++) {
        int j;
        for (j = 0; j < DESC_LEN; ++j) {
            const pair tL = testLocations[j];
            const CvPoint p1 = CV_POINT_PLUS(kpts[i], tL.point1);
            const CvPoint p2 = CV_POINT_PLUS(kpts[i], tL.point2);

            // Box-filter responses at the two test locations, computed with
            // the four-corner integral-image lookup:
            const int intensity1 = GET_PIXEL_NW(iD, p1, inWS) - GET_PIXEL_NE(iD, p1, inWS)
                                 - GET_PIXEL_SW(iD, p1, inWS) + GET_PIXEL_SE(iD, p1, inWS);
            const int intensity2 = GET_PIXEL_NW(iD, p2, inWS) - GET_PIXEL_NE(iD, p2, inWS)
                                 - GET_PIXEL_SW(iD, p2, inWS) + GET_PIXEL_SE(iD, p2, inWS);

            // Each descriptor bit is a brightness comparison of the two patches:
            descriptors[i][j] = (intensity1 < intensity2) ? 1 : 0;
        }
    }
}
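/*
 * The GET_PIXEL_* macros and CV_POINT_PLUS are not shown in this listing.
 * A minimal sketch of plausible definitions, assuming the box filter spans
 * a patch of half-width PATCH_HALF around the test location (PATCH_HALF is
 * a hypothetical name; the real constant and corner offsets may differ):
 */
#include <opencv/cv.h>

#define PATCH_HALF 4
#define CV_POINT_PLUS(a, b)      cvPoint((a).x + (b).x, (a).y + (b).y)
#define GET_PIXEL_NW(iD, p, ws)  ((iD)[((p).y - PATCH_HALF) * (ws) + ((p).x - PATCH_HALF)])
#define GET_PIXEL_NE(iD, p, ws)  ((iD)[((p).y - PATCH_HALF) * (ws) + ((p).x + PATCH_HALF)])
#define GET_PIXEL_SW(iD, p, ws)  ((iD)[((p).y + PATCH_HALF) * (ws) + ((p).x - PATCH_HALF)])
#define GET_PIXEL_SE(iD, p, ws)  ((iD)[((p).y + PATCH_HALF) * (ws) + ((p).x + PATCH_HALF)])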
// Parallel version
void OMP_eliminateJunction(IplImage* iplEdge, uint8 edgC, uint8 backC)
{
    int W = iplEdge->width;
    int H = iplEdge->height;
    Image<uint8> iplEdgeT(iplEdge);

    IplImage* iplIntImg = cvCreateImage(cvSize(W + 1, H + 1), IPL_DEPTH_32S, 1);
    cvIntegral(iplEdge, iplIntImg, NULL, NULL);
    Image<int> iplIntImgT(iplIntImg);

    int nthreads = OMP_getNbThread();
    int tid;
    #pragma omp parallel private(tid)
    {
        tid = omp_get_thread_num();
        int bg = tid + 1;
        printf("[%d] Starting at %d\n", tid, bg);

        // Rows are distributed cyclically over the threads.
        for (int y = bg; y < iplEdge->height - 1; y += nthreads) {
            for (int x = 1; x < iplEdge->width - 1; x++) {
                // Count the current pixel as an edge even if it is not
                // marked (a junction can have no edge!). Starting from
                // 255 - pixel is faster than an explicit comparison with
                // edgC: if there is no edge here we add 255, otherwise we
                // start at 0 and the pixel is counted by the box sum below.
                unsigned int nbEdge = 255 - iplEdgeT[y][x];

                // Add the 3x3 neighbourhood sum, read from the integral
                // image with the four-corner lookup.
                nbEdge += (iplIntImgT[y - 1][x - 1] + iplIntImgT[y + 2][x + 2]
                         - iplIntImgT[y + 2][x - 1] - iplIntImgT[y - 1][x + 2]);

                // Four or more edge pixels (counting the centre) mean a
                // junction: erase it.
                if (nbEdge >= 4 * 255)
                    iplEdgeT[y][x] = backC;
            }
        }
    } // end of #pragma omp parallel private(tid)
}
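/*
 * `OMP_getNbThread` is referenced above but not defined in this listing.
 * A minimal sketch, assuming it just reports the OpenMP thread count:
 */
#include <omp.h>

static int OMP_getNbThread(void)
{
    return omp_get_max_threads();
}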
/**
 * Returns Haar feature values.
 * Precondition: the image must be 64x64, because the Haar features are
 * defined on 64x64 images.
 */
void HaarFeatures::getFeatureValues(vector<double>& feature_values, const IplImage* img) const
{
    assert(img->width == 64);
    assert(img->height == 64);

    IplImage* iImage = cvCreateImage(cvSize(img->width + 1, img->height + 1), IPL_DEPTH_32S, 1);
    cvIntegral(img, iImage);

    for(vector<HaarFeature>::size_type i = 0; i < features.size(); i++) {
        double feature_value = computeFeatureValueIntegral(iImage, features[i]);
        feature_values.push_back(feature_value);
        //printFeature(features[i]);
        //cerr << "Feature value [" << i << "]: " << feature_value << endl;
    }

    cvReleaseImage(&iImage);
}
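/*
 * `computeFeatureValueIntegral` is not shown here, but whatever the
 * HaarFeature layout is, it must reduce to box sums over the integral
 * image. A minimal sketch of that primitive (the helper name `rectSum`
 * is hypothetical), assuming `rect` is given in source-image coordinates:
 */
static double rectSum(const IplImage* iImage, CvRect rect)
{
    const int* data = (const int*)iImage->imageData;
    const int ws = iImage->widthStep / sizeof(int);
    const int r0 = rect.y, c0 = rect.x;
    const int r1 = rect.y + rect.height, c1 = rect.x + rect.width;
    // Four-corner lookup; the integral image has one extra row and column,
    // so these indices stay in bounds for rectangles inside the source.
    return (double)(data[r1 * ws + c1] - data[r0 * ws + c1]
                  - data[r1 * ws + c0] + data[r0 * ws + c0]);
}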
CLIFIntegralResult clifIntegral(const IplImage* source, CLIFEnvironmentData* data, const cl_bool use_opencl)
{
    CLIFIntegralResult ret;

    if(!use_opencl) {
        ret.image = cvCreateMat(source->height + 1, source->width + 1, CV_32SC1);
        ret.square_image = cvCreateMat(source->height + 1, source->width + 1, CV_64FC1);
        cvIntegral(source, ret.image, ret.square_image);
        return ret;
    }

    cl_int error = CL_SUCCESS;

    // Init buffer: upload the pixel data, not the IplImage struct
    // (assumes a tightly packed single-channel image)
    error = clEnqueueWriteBuffer(data->environment.queue, data->integral_image_data.buffers[0], CL_FALSE, 0,
                                 source->width * source->height, source->imageData, 0, NULL, NULL);
    clCheckOrExit(error);

    // Run the sum-rows kernel
    error = clEnqueueNDRangeKernel(data->environment.queue, data->environment.kernels[1], 1, NULL,
                                   &(data->integral_image_data.global_size[0]),
                                   &(data->integral_image_data.local_size[0]), 0, NULL, NULL);
    clCheckOrExit(error);

    // Run the sum-cols kernel
    error = clEnqueueNDRangeKernel(data->environment.queue, data->environment.kernels[2], 1, NULL,
                                   &(data->integral_image_data.global_size[1]),
                                   &(data->integral_image_data.local_size[1]), 0, NULL, NULL);
    clCheckOrExit(error);

    // Read the result (blocking maps)
    cl_uint* result = (cl_uint*)clEnqueueMapBuffer(data->environment.queue, data->integral_image_data.buffers[3],
                                                   CL_TRUE, CL_MAP_READ, 0,
                                                   (source->width + 1) * (source->height + 1) * sizeof(cl_uint),
                                                   0, NULL, NULL, &error);
    clCheckOrExit(error);
    cl_ulong* square_result = (cl_ulong*)clEnqueueMapBuffer(data->environment.queue, data->integral_image_data.buffers[4],
                                                            CL_TRUE, CL_MAP_READ, 0,
                                                            (source->width + 1) * (source->height + 1) * sizeof(cl_ulong),
                                                            0, NULL, NULL, &error);
    clCheckOrExit(error);
    data->integral_image_data.ptr = result;

    // Wrap the mapped buffers in CvMat headers (cvCreateMatHeader, so no
    // extra allocation is made; the steps are in bytes)
    ret.image = cvCreateMatHeader(source->height + 1, source->width + 1, CV_32SC1);
    cvSetData(ret.image, result, (source->width + 1) * sizeof(cl_uint));
    ret.square_image = cvCreateMatHeader(source->height + 1, source->width + 1, CV_64FC1);
    cvSetData(ret.square_image, square_result, (source->width + 1) * sizeof(cl_ulong));
    return ret;
}
// Template matching: icvCrossCorr computes the raw cross-correlation;
// integral images (cvIntegral) then provide the per-window sums used to
// shift and normalize it according to `method`.
CV_IMPL void
cvMatchTemplate( const CvArr* _img, const CvArr* _templ, CvArr* _result, int method )
{
    cv::Ptr<CvMat> sum, sqsum;

    int coi1 = 0, coi2 = 0;
    int depth, cn;
    int i, j, k;
    CvMat stub, *img = (CvMat*)_img;
    CvMat tstub, *templ = (CvMat*)_templ;
    CvMat rstub, *result = (CvMat*)_result;
    CvScalar templ_mean = cvScalarAll(0);
    double templ_norm = 0, templ_sum2 = 0;

    int idx = 0, idx2 = 0;
    double *p0, *p1, *p2, *p3;
    double *q0, *q1, *q2, *q3;
    double inv_area;
    int sum_step, sqsum_step;
    int num_type = method == CV_TM_CCORR || method == CV_TM_CCORR_NORMED ? 0 :
                   method == CV_TM_CCOEFF || method == CV_TM_CCOEFF_NORMED ? 1 : 2;
    int is_normed = method == CV_TM_CCORR_NORMED ||
                    method == CV_TM_SQDIFF_NORMED ||
                    method == CV_TM_CCOEFF_NORMED;

    img = cvGetMat( img, &stub, &coi1 );
    templ = cvGetMat( templ, &tstub, &coi2 );
    result = cvGetMat( result, &rstub );

    if( CV_MAT_DEPTH( img->type ) != CV_8U &&
        CV_MAT_DEPTH( img->type ) != CV_32F )
        CV_Error( CV_StsUnsupportedFormat,
                  "The function supports only 8u and 32f data types" );

    if( !CV_ARE_TYPES_EQ( img, templ ))
        CV_Error( CV_StsUnmatchedSizes, "image and template should have the same type" );

    if( CV_MAT_TYPE( result->type ) != CV_32FC1 )
        CV_Error( CV_StsUnsupportedFormat, "output image should have 32f type" );

    if( img->rows < templ->rows || img->cols < templ->cols )
    {
        CvMat* t;
        CV_SWAP( img, templ, t );
    }

    if( result->rows != img->rows - templ->rows + 1 ||
        result->cols != img->cols - templ->cols + 1 )
        CV_Error( CV_StsUnmatchedSizes, "output image should be (W - w + 1)x(H - h + 1)" );

    if( method < CV_TM_SQDIFF || method > CV_TM_CCOEFF_NORMED )
        CV_Error( CV_StsBadArg, "unknown comparison method" );

    depth = CV_MAT_DEPTH(img->type);
    cn = CV_MAT_CN(img->type);

    icvCrossCorr( img, templ, result );

    if( method == CV_TM_CCORR )
        return;

    inv_area = 1./((double)templ->rows * templ->cols);

    sum = cvCreateMat( img->rows + 1, img->cols + 1, CV_MAKETYPE( CV_64F, cn ));
    if( method == CV_TM_CCOEFF )
    {
        cvIntegral( img, sum, 0, 0 );
        templ_mean = cvAvg( templ );
        q0 = q1 = q2 = q3 = 0;
    }
    else
    {
        CvScalar _templ_sdv = cvScalarAll(0);
        sqsum = cvCreateMat( img->rows + 1, img->cols + 1, CV_MAKETYPE( CV_64F, cn ));
        cvIntegral( img, sum, sqsum, 0 );
        cvAvgSdv( templ, &templ_mean, &_templ_sdv );

        templ_norm = CV_SQR(_templ_sdv.val[0]) + CV_SQR(_templ_sdv.val[1]) +
                     CV_SQR(_templ_sdv.val[2]) + CV_SQR(_templ_sdv.val[3]);

        if( templ_norm < DBL_EPSILON && method == CV_TM_CCOEFF_NORMED )
        {
            cvSet( result, cvScalarAll(1.) );
            return;
        }

        templ_sum2 = templ_norm +
                     CV_SQR(templ_mean.val[0]) + CV_SQR(templ_mean.val[1]) +
                     CV_SQR(templ_mean.val[2]) + CV_SQR(templ_mean.val[3]);

        if( num_type != 1 )
        {
            templ_mean = cvScalarAll(0);
            templ_norm = templ_sum2;
        }

        templ_sum2 /= inv_area;
        templ_norm = sqrt(templ_norm);
        templ_norm /= sqrt(inv_area); // care of accuracy here

        q0 = (double*)sqsum->data.ptr;
        q1 = q0 + templ->cols*cn;
        q2 = (double*)(sqsum->data.ptr + templ->rows*sqsum->step);
        q3 = q2 + templ->cols*cn;
    }

    // p0..p3 (and q0..q3) point at the four integral-image corners of the
    // template-sized window anchored at the current result position.
    p0 = (double*)sum->data.ptr;
    p1 = p0 + templ->cols*cn;
    p2 = (double*)(sum->data.ptr + templ->rows*sum->step);
    p3 = p2 + templ->cols*cn;

    sum_step = sum ? sum->step / sizeof(double) : 0;
    sqsum_step = sqsum ? sqsum->step / sizeof(double) : 0;

    for( i = 0; i < result->rows; i++ )
    {
        float* rrow = (float*)(result->data.ptr + i*result->step);
        idx = i * sum_step;
        idx2 = i * sqsum_step;

        for( j = 0; j < result->cols; j++, idx += cn, idx2 += cn )
        {
            double num = rrow[j], t;
            double wnd_mean2 = 0, wnd_sum2 = 0;

            if( num_type == 1 ) // CCOEFF*: subtract the window/template means
            {
                for( k = 0; k < cn; k++ )
                {
                    t = p0[idx+k] - p1[idx+k] - p2[idx+k] + p3[idx+k];
                    wnd_mean2 += CV_SQR(t);
                    num -= t*templ_mean.val[k];
                }
                wnd_mean2 *= inv_area;
            }

            if( is_normed || num_type == 2 )
            {
                for( k = 0; k < cn; k++ )
                {
                    t = q0[idx2+k] - q1[idx2+k] - q2[idx2+k] + q3[idx2+k];
                    wnd_sum2 += t;
                }

                if( num_type == 2 ) // SQDIFF*: rewrite as sums of squares
                    num = wnd_sum2 - 2*num + templ_sum2;
            }

            if( is_normed )
            {
                t = sqrt(MAX(wnd_sum2 - wnd_mean2,0))*templ_norm;
                if( fabs(num) < t )
                    num /= t;
                else if( fabs(num) < t*1.125 )
                    num = num > 0 ? 1 : -1;
                else
                    num = method != CV_TM_SQDIFF_NORMED ? 0 : 1;
            }

            rrow[j] = (float)num;
        }
    }
}
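/*
 * A minimal usage sketch for cvMatchTemplate: the result matrix must be
 * single-channel 32f and sized (W - w + 1) x (H - h + 1), exactly as the
 * checks above enforce. The file names are placeholders, and error
 * checking of the loads is omitted for brevity.
 */
#include <stdio.h>
#include <opencv/cv.h>
#include <opencv/highgui.h>

int main(void)
{
    IplImage* img    = cvLoadImage("scene.png",    CV_LOAD_IMAGE_GRAYSCALE);
    IplImage* templ  = cvLoadImage("template.png", CV_LOAD_IMAGE_GRAYSCALE);
    IplImage* result = cvCreateImage(cvSize(img->width  - templ->width  + 1,
                                            img->height - templ->height + 1),
                                     IPL_DEPTH_32F, 1);

    cvMatchTemplate(img, templ, result, CV_TM_CCOEFF_NORMED);

    // For correlation-based methods the best match is the global maximum.
    double min_val, max_val;
    CvPoint min_loc, max_loc;
    cvMinMaxLoc(result, &min_val, &max_val, &min_loc, &max_loc, NULL);
    printf("best match at (%d, %d), score %f\n", max_loc.x, max_loc.y, max_val);

    cvReleaseImage(&img);
    cvReleaseImage(&templ);
    cvReleaseImage(&result);
    return 0;
}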
CV_IMPL void
cvMatchTemplate( const CvArr* _img, const CvArr* _templ, CvArr* _result, int method )
{
    CvMat* sum = 0;
    CvMat* sqsum = 0;

    CV_FUNCNAME( "cvMatchTemplate" );

    __BEGIN__;

    int coi1 = 0, coi2 = 0;
    int depth, cn;
    int i, j, k;
    CvMat stub, *img = (CvMat*)_img;
    CvMat tstub, *templ = (CvMat*)_templ;
    CvMat rstub, *result = (CvMat*)_result;
    CvScalar templ_mean = cvScalarAll(0);
    double templ_norm = 0, templ_sum2 = 0;

    int idx = 0, idx2 = 0;
    double *p0, *p1, *p2, *p3;
    double *q0, *q1, *q2, *q3;
    double inv_area;
    int sum_step, sqsum_step;
    int num_type = method == CV_TM_CCORR || method == CV_TM_CCORR_NORMED ? 0 :
                   method == CV_TM_CCOEFF || method == CV_TM_CCOEFF_NORMED ? 1 : 2;
    int is_normed = method == CV_TM_CCORR_NORMED ||
                    method == CV_TM_SQDIFF_NORMED ||
                    method == CV_TM_CCOEFF_NORMED;

    CV_CALL( img = cvGetMat( img, &stub, &coi1 ));
    CV_CALL( templ = cvGetMat( templ, &tstub, &coi2 ));
    CV_CALL( result = cvGetMat( result, &rstub ));

    if( CV_MAT_DEPTH( img->type ) != CV_8U &&
        CV_MAT_DEPTH( img->type ) != CV_32F )
        CV_ERROR( CV_StsUnsupportedFormat,
                  "The function supports only 8u and 32f data types" );

    if( !CV_ARE_TYPES_EQ( img, templ ))
        CV_ERROR( CV_StsUnmatchedSizes, "image and template should have the same type" );

    if( CV_MAT_TYPE( result->type ) != CV_32FC1 )
        CV_ERROR( CV_StsUnsupportedFormat, "output image should have 32f type" );

    if( img->rows < templ->rows || img->cols < templ->cols )
    {
        CvMat* t;
        CV_SWAP( img, templ, t );
    }

    if( result->rows != img->rows - templ->rows + 1 ||
        result->cols != img->cols - templ->cols + 1 )
        CV_ERROR( CV_StsUnmatchedSizes, "output image should be (W - w + 1)x(H - h + 1)" );

    if( method < CV_TM_SQDIFF || method > CV_TM_CCOEFF_NORMED )
        CV_ERROR( CV_StsBadArg, "unknown comparison method" );

    depth = CV_MAT_DEPTH(img->type);
    cn = CV_MAT_CN(img->type);

    if( is_normed && cn == 1 && templ->rows > 8 && templ->cols > 8 &&
        img->rows > templ->cols && img->cols > templ->cols )
    {
        CvTemplMatchIPPFunc ipp_func =
            depth == CV_8U ?
            (method == CV_TM_SQDIFF_NORMED ? (CvTemplMatchIPPFunc)icvSqrDistanceValid_Norm_8u32f_C1R_p :
             method == CV_TM_CCORR_NORMED ? (CvTemplMatchIPPFunc)icvCrossCorrValid_Norm_8u32f_C1R_p :
             (CvTemplMatchIPPFunc)icvCrossCorrValid_NormLevel_8u32f_C1R_p) :
            (method == CV_TM_SQDIFF_NORMED ? (CvTemplMatchIPPFunc)icvSqrDistanceValid_Norm_32f_C1R_p :
             method == CV_TM_CCORR_NORMED ? (CvTemplMatchIPPFunc)icvCrossCorrValid_Norm_32f_C1R_p :
             (CvTemplMatchIPPFunc)icvCrossCorrValid_NormLevel_32f_C1R_p);

        if( ipp_func )
        {
            CvSize img_size = cvGetMatSize(img), templ_size = cvGetMatSize(templ);

            IPPI_CALL( ipp_func( img->data.ptr, img->step ? img->step : CV_STUB_STEP,
                                 img_size, templ->data.ptr,
                                 templ->step ? templ->step : CV_STUB_STEP,
                                 templ_size, result->data.ptr,
                                 result->step ? result->step : CV_STUB_STEP ));
            for( i = 0; i < result->rows; i++ )
            {
                float* rrow = (float*)(result->data.ptr + i*result->step);
                for( j = 0; j < result->cols; j++ )
                {
                    if( fabs(rrow[j]) > 1. )
                        rrow[j] = rrow[j] < 0 ? -1.f : 1.f;
                }
            }
            EXIT;
        }
    }

    CV_CALL( icvCrossCorr( img, templ, result ));

    if( method == CV_TM_CCORR )
        EXIT;

    inv_area = 1./((double)templ->rows * templ->cols);

    CV_CALL( sum = cvCreateMat( img->rows + 1, img->cols + 1, CV_MAKETYPE( CV_64F, cn )));
    if( method == CV_TM_CCOEFF )
    {
        CV_CALL( cvIntegral( img, sum, 0, 0 ));
        CV_CALL( templ_mean = cvAvg( templ ));
        q0 = q1 = q2 = q3 = 0;
    }
    else
    {
        CvScalar _templ_sdv = cvScalarAll(0);
        CV_CALL( sqsum = cvCreateMat( img->rows + 1, img->cols + 1, CV_MAKETYPE( CV_64F, cn )));
        CV_CALL( cvIntegral( img, sum, sqsum, 0 ));
        CV_CALL( cvAvgSdv( templ, &templ_mean, &_templ_sdv ));

        templ_norm = CV_SQR(_templ_sdv.val[0]) + CV_SQR(_templ_sdv.val[1]) +
                     CV_SQR(_templ_sdv.val[2]) + CV_SQR(_templ_sdv.val[3]);

        if( templ_norm < DBL_EPSILON && method == CV_TM_CCOEFF_NORMED )
        {
            cvSet( result, cvScalarAll(1.) );
            EXIT;
        }

        templ_sum2 = templ_norm +
                     CV_SQR(templ_mean.val[0]) + CV_SQR(templ_mean.val[1]) +
                     CV_SQR(templ_mean.val[2]) + CV_SQR(templ_mean.val[3]);

        if( num_type != 1 )
        {
            templ_mean = cvScalarAll(0);
            templ_norm = templ_sum2;
        }

        templ_sum2 /= inv_area;
        templ_norm = sqrt(templ_norm);
        templ_norm /= sqrt(inv_area); // care of accuracy here

        q0 = (double*)sqsum->data.ptr;
        q1 = q0 + templ->cols*cn;
        q2 = (double*)(sqsum->data.ptr + templ->rows*sqsum->step);
        q3 = q2 + templ->cols*cn;
    }

    p0 = (double*)sum->data.ptr;
    p1 = p0 + templ->cols*cn;
    p2 = (double*)(sum->data.ptr + templ->rows*sum->step);
    p3 = p2 + templ->cols*cn;

    sum_step = sum ? sum->step / sizeof(double) : 0;
    sqsum_step = sqsum ? sqsum->step / sizeof(double) : 0;

    for( i = 0; i < result->rows; i++ )
    {
        float* rrow = (float*)(result->data.ptr + i*result->step);
        idx = i * sum_step;
        idx2 = i * sqsum_step;

        for( j = 0; j < result->cols; j++, idx += cn, idx2 += cn )
        {
            double num = rrow[j], t;
            double wnd_mean2 = 0, wnd_sum2 = 0;

            if( num_type == 1 )
            {
                for( k = 0; k < cn; k++ )
                {
                    t = p0[idx+k] - p1[idx+k] - p2[idx+k] + p3[idx+k];
                    wnd_mean2 += CV_SQR(t);
                    num -= t*templ_mean.val[k];
                }
                wnd_mean2 *= inv_area;
            }

            if( is_normed || num_type == 2 )
            {
                for( k = 0; k < cn; k++ )
                {
                    t = q0[idx2+k] - q1[idx2+k] - q2[idx2+k] + q3[idx2+k];
                    wnd_sum2 += t;
                }

                if( num_type == 2 )
                    num = wnd_sum2 - 2*num + templ_sum2;
            }

            if( is_normed )
            {
                t = sqrt(MAX(wnd_sum2 - wnd_mean2,0))*templ_norm;
                if( t > DBL_EPSILON )
                {
                    num /= t;
                    if( fabs(num) > 1. )
                        num = num > 0 ? 1 : -1;
                }
                else
                    num = method != CV_TM_SQDIFF_NORMED || num < DBL_EPSILON ? 0 : 1;
            }

            rrow[j] = (float)num;
        }
    }

    __END__;

    cvReleaseMat( &sum );
    cvReleaseMat( &sqsum );
}
int main(int argc, char *argv[])
{
    DSP_cvStartDSP();
    CvCapture* capture;
    IplImage *videoFrame, *convFrame, *convOpencvFrame, *unsignedFrame;
    IplImage *dataImage, *integralImage;
    int key = 0; /* initialized: the loops below test it before the first cvWaitKey */
    int *ptr;
    float *flPtr;
    int i, j;
    float tempFloat = 0.0;
    float *floatDataPtr;
    float *floatOutPtr;

    /* Data to test cvIntegral() (16 x 4) */
    unsigned char intdata[] = {
        151,  57, 116, 170,   9, 247, 208, 140, 150,  60,  88,  77,   4,   6, 162,   6,
         31, 143, 178,   3, 135,  91,  54, 154, 193, 161,  20, 162, 137, 150, 128, 224,
        214, 113,   9,  28,  53, 211,  98, 217, 149, 233, 231, 127, 115, 203, 177,  42,
         62, 155,   3, 103, 127,  16, 135, 131, 211, 158,   9,   2, 106, 227, 249, 255
    };

    if (argc < 2) {
        printf("Usage: ./remote_ti_platforms_evm3530_opencv.xv5T [option]\n");
        printf("option:\ni. integral\ns. sobel\nd. dft\n");
        printf("All usages:\n");
        printf("./remote_ti_platforms_evm3530_opencv.xv5T i dsp  (test the integral-image algorithm on the DSP; input from webcam; requires VLIB)\n");
        printf("./remote_ti_platforms_evm3530_opencv.xv5T i arm  (test the integral-image algorithm on the ARM; input from webcam)\n");
        printf("./remote_ti_platforms_evm3530_opencv.xv5T i test (test the integral-image algorithm on the test data built into the app)\n");
        printf("./remote_ti_platforms_evm3530_opencv.xv5T d dsp  (test the DFT algorithm on the DSP)\n");
        printf("./remote_ti_platforms_evm3530_opencv.xv5T d arm  (test the DFT algorithm on the ARM)\n");
        printf("./remote_ti_platforms_evm3530_opencv.xv5T s tree.avi dsp (test the Sobel algorithm on the movie clip tree.avi on the DSP)\n");
        printf("./remote_ti_platforms_evm3530_opencv.xv5T s tree.avi arm (test the Sobel algorithm on the movie clip tree.avi on the ARM)\n");
        printf("./remote_ti_platforms_evm3530_opencv.xv5T s webcam dsp   (test the Sobel algorithm on the webcam image on the DSP)\n");
        printf("./remote_ti_platforms_evm3530_opencv.xv5T s webcam arm   (test the Sobel algorithm on the webcam image on the ARM)\n");
        printf("./remote_ti_platforms_evm3530_opencv.xv5T rgb2gray       (test RGB-to-gray on the webcam image)\n");
        return -1;
    }

    if (*argv[1] == 's' && argc < 3) {
        printf("Usage: ./remote_ti_platforms_evm3530_opencv.xv5T s tree.avi\n");
        printf("Usage: ./remote_ti_platforms_evm3530_opencv.xv5T s webcam\n");
        return -1;
    }

    switch (*argv[1]) {
    case 'i': /* integral-image tests */
        switch (*argv[2]) {
        case 'd': { /* 'd' for DSP-accelerated */
            cvNamedWindow("video", CV_WINDOW_AUTOSIZE);
            capture = cvCaptureFromCAM(-1);
            if (!capture) {
                printf("Error: Video capture initialization failed.\n");
                break;
            }
            videoFrame = cvQueryFrame(capture);
            if (!videoFrame) {
                printf("**Error reading from webcam\n");
                break;
            }
            /* grayscale version of the frame */
            convFrame = cvCreateImage(cvSize(videoFrame->width, videoFrame->height), IPL_DEPTH_8U, 1);
            /* integral image (one extra row and column) */
            convOpencvFrame = cvCreateImage(cvSize(convFrame->width + 1, convFrame->height + 1), IPL_DEPTH_32S, 1);

            /* Process the first frame outside the loop */
            DSP_cvCvtColor(videoFrame, convFrame, CV_RGB2GRAY);
            DSP_cvSyncDSP();

            while (key != 'q') {
                /* Time and benchmark the DSP-based integral image */
                Time_reset(&sTime);
                Time_delta(&sTime, &time);

                /* Find the integral image */
                DSP_cvIntegral(convFrame, convOpencvFrame, NULL, NULL);

                /* Get the next frame */
                videoFrame = cvQueryFrame(capture);
                if (!videoFrame) {
                    printf("***The End***\n");
                    break;
                }
                DSP_cvSyncDSP();

                /* Do the color conversion */
                DSP_cvCvtColor(videoFrame, convFrame, CV_RGB2GRAY);

                /* Show the image. Since VLIB computes the integral image,
                   its output has the same width and height as the source,
                   so shrink the header temporarily for display. */
                convOpencvFrame->width -= 1;
                convOpencvFrame->height -= 1;
                convOpencvFrame->widthStep -= sizeof(int) * convOpencvFrame->nChannels;
                cvShowImage("video", convOpencvFrame);
                convOpencvFrame->width += 1;
                convOpencvFrame->height += 1;
                convOpencvFrame->widthStep += sizeof(int) * convOpencvFrame->nChannels;

                /* Sync with the DSP */
                DSP_cvSyncDSP();

                Time_delta(&sTime, &time);
                printf("Total execution time = %dus\n", (unsigned int)time);

                key = cvWaitKey(15);
            }
            cvDestroyWindow("video");
            /* Frames returned by cvQueryFrame are owned by the capture
               and must not be released by the caller. */
            cvReleaseImage(&convFrame);
            cvReleaseImage(&convOpencvFrame);
            cvReleaseCapture(&capture);
        } break; /* end of DSP integral-image test */

        case 'a': { /* 'a' for the ARM side */
            cvNamedWindow("video", CV_WINDOW_AUTOSIZE);
            capture = cvCaptureFromCAM(-1);
            if (!capture) {
                printf("Error: Video capture initialization failed.\n");
                break;
            }
            videoFrame = cvQueryFrame(capture);
            if (!videoFrame) {
                printf("**Error reading from webcam\n");
                break;
            }
            /* grayscale version of the frame */
            convFrame = cvCreateImage(cvSize(videoFrame->width, videoFrame->height), IPL_DEPTH_8U, 1);
            /* integral image (one extra row and column) */
            convOpencvFrame = cvCreateImage(cvSize(convFrame->width + 1, convFrame->height + 1), IPL_DEPTH_32S, 1);

            /* Process the first frame outside the loop */
            cvCvtColor(videoFrame, convFrame, CV_RGB2GRAY);

            while (key != 'q') {
                /* Time and benchmark the ARM-based integral image */
                Time_reset(&sTime);
                Time_delta(&sTime, &time);

                /* Find the integral image */
                cvIntegral(convFrame, convOpencvFrame, NULL, NULL);

                /* Get the next frame */
                videoFrame = cvQueryFrame(capture);
                if (!videoFrame) {
                    printf("***The End***\n");
                    break;
                }

                /* Do the color conversion */
                cvCvtColor(videoFrame, convFrame, CV_RGB2GRAY);

                /* Show the image */
                cvShowImage("video", convOpencvFrame);

                Time_delta(&sTime, &time);
                printf("Total execution time = %dus\n", (unsigned int)time);

                key = cvWaitKey(15);
            }
            cvDestroyWindow("video");
            cvReleaseImage(&convFrame);
            cvReleaseImage(&convOpencvFrame);
            cvReleaseCapture(&capture);
        } break; /* end of ARM integral-image test */

        case 't': { /* test the consistency of the algorithm on fixed data */
            dataImage = cvCreateImageHeader(cvSize(16, 4), IPL_DEPTH_8U, 1);
            integralImage = cvCreateImageHeader(cvSize(17, 5), IPL_DEPTH_32S, 1);
            unsigned char *data = (unsigned char*)cvAlloc(16 * 4 * sizeof(char));
            unsigned int *sum = (unsigned int*)cvAlloc(17 * 5 * sizeof(int));
            memcpy(data, &intdata[0], 16 * 4 * sizeof(char));
            memset(sum, 0, 17 * 5 * sizeof(int));
            dataImage->imageData = (char*)data;
            integralImage->imageData = (char*)sum;

            /* DSP-based integral (the VLIB output is 16 x 4, the source size) */
            DSP_cvIntegral(dataImage, integralImage, NULL, NULL);
            ptr = (int*)integralImage->imageData;
            printf("The integral image is:\n");
            for (i = 0; i < 4; i++) {
                for (j = 0; j < 16; j++)
                    printf("%d \t ", ptr[i * 16 + j]);
                printf("\n");
            }

            /* ARM-based cvIntegral() (the output is 17 x 5) */
            cvIntegral(dataImage, integralImage, NULL, NULL);
            ptr = (int*)integralImage->imageData;
            printf("The integral image is:\n");
            for (i = 0; i < 5; i++) {
                for (j = 0; j < 17; j++)
                    printf("%d \t ", ptr[i * 17 + j]);
                printf("\n");
            }

            cvFree(&data);
            cvFree(&sum);
            cvReleaseImageHeader(&dataImage);
            cvReleaseImageHeader(&integralImage);
        } break; /* end of integral-image consistency test */

        default:
            printf("Input argument error:\nUsage:\n"
                   "./remote_ti_platforms_evm3530_opencv.xv5T i dsp\n"
                   "./remote_ti_platforms_evm3530_opencv.xv5T i arm\n"
                   "./remote_ti_platforms_evm3530_opencv.xv5T i test\n");
            break;
        }
        break;

    case 'r': { /* RGB-to-Y test */
        cvNamedWindow("video", CV_WINDOW_AUTOSIZE);
        capture = cvCaptureFromCAM(-1);
        if (!capture) {
            printf("Error: Video capture initialization failed.\n");
            break;
        }
        videoFrame = cvQueryFrame(capture);
        if (!videoFrame) {
            printf("**Error reading from webcam\n");
            break;
        }
        /* grayscale version of the frame */
        convFrame = cvCreateImage(cvSize(videoFrame->width, videoFrame->height), IPL_DEPTH_8U, 1);

        while (key != 'q') {
            /* Time and benchmark the DSP-based color conversion */
            Time_reset(&sTime);
            Time_delta(&sTime, &time);

            DSP_cvCvtColor(videoFrame, convFrame, CV_RGB2GRAY);

            /* Get the next frame */
            videoFrame = cvQueryFrame(capture);
            if (!videoFrame) {
                printf("***The End***\n");
                break;
            }
            DSP_cvSyncDSP();
            cvShowImage("video", convFrame);

            Time_delta(&sTime, &time);
            printf("Total execution time1 = %dus\n", (unsigned int)time);
            key = cvWaitKey(15);
        }
        cvDestroyWindow("video");
        cvReleaseImage(&convFrame);
        cvReleaseCapture(&capture);
    } break; /* end of RGB-to-Y test */

    case 's': /* Sobel tests */
        switch (*argv[2]) {
        case 't': /* 't' for tree.avi */
            switch (*argv[3]) {
            case 'd': { /* 'd' for DSP-accelerated */
                cvNamedWindow("video", CV_WINDOW_AUTOSIZE);
                capture = cvCreateFileCapture(argv[2]);
                if (!capture) {
                    printf("Error: Video not found\n");
                    break;
                }
                videoFrame = cvQueryFrame(capture);
                if (!videoFrame) {
                    printf("**Error reading video\n");
                    break;
                }
                /* grayscale version of the frame */
                convFrame = cvCreateImage(cvSize(videoFrame->width, videoFrame->height), IPL_DEPTH_8U, 1);
                /* Sobel-filtered image */
                convOpencvFrame = cvCreateImage(cvSize(convFrame->width, convFrame->height), IPL_DEPTH_16S, 1);

                /* Process the first frame outside the loop */
                DSP_cvCvtColor(videoFrame, convFrame, CV_RGB2GRAY);
                DSP_cvSyncDSP();

                while (key != 'q') {
                    /* Time and benchmark the DSP-based Sobel filter */
                    Time_reset(&sTime);
                    Time_delta(&sTime, &time);

                    DSP_cvSobel(convFrame, convOpencvFrame, 1, 1, 3);

                    /* Get the next frame */
                    videoFrame = cvQueryFrame(capture);
                    if (!videoFrame) {
                        printf("***The End***\n");
                        break;
                    }

                    /* Sync with the DSP */
                    DSP_cvSyncDSP();

                    /* Start the RGB-to-Y conversion of the next frame */
                    DSP_cvCvtColor(videoFrame, convFrame, CV_RGB2GRAY);

                    /* Display the filtered image */
                    cvShowImage("video", convOpencvFrame);

                    /* Sync with the DSP */
                    DSP_cvSyncDSP();

                    Time_delta(&sTime, &time);
                    printf("Total execution time = %dus\n", (unsigned int)time);
                    key = cvWaitKey(15);
                }
                cvDestroyWindow("video");
                cvReleaseImage(&convFrame);
                cvReleaseImage(&convOpencvFrame);
                cvReleaseCapture(&capture);
            } break; /* end of DSP Sobel test */

            case 'a': { /* 'a' for the ARM side */
                cvNamedWindow("video", CV_WINDOW_AUTOSIZE);
                capture = cvCreateFileCapture(argv[2]);
                if (!capture) {
                    printf("Error: Video not found\n");
                    break;
                }
                videoFrame = cvQueryFrame(capture);
                if (!videoFrame) {
                    printf("**Error reading video\n");
                    break;
                }
                /* grayscale version of the frame */
                convFrame = cvCreateImage(cvSize(videoFrame->width, videoFrame->height), IPL_DEPTH_8U, 1);
                /* Sobel-filtered image */
                convOpencvFrame = cvCreateImage(cvSize(convFrame->width, convFrame->height), IPL_DEPTH_16S, 1);

                /* Process the first frame outside the loop */
                cvCvtColor(videoFrame, convFrame, CV_RGB2GRAY);

                while (key != 'q') {
                    /* Time and benchmark the ARM-based Sobel filter */
                    Time_reset(&sTime);
                    Time_delta(&sTime, &time);

                    cvSobel(convFrame, convOpencvFrame, 1, 1, 3);

                    /* Get the next frame */
                    videoFrame = cvQueryFrame(capture);
                    if (!videoFrame) {
                        printf("***The End***\n");
                        break;
                    }

                    /* Start the RGB-to-Y conversion of the next frame */
                    cvCvtColor(videoFrame, convFrame, CV_RGB2GRAY);

                    /* Display the filtered image */
                    cvShowImage("video", convOpencvFrame);

                    Time_delta(&sTime, &time);
                    printf("Total execution time = %dus\n", (unsigned int)time);
                    key = cvWaitKey(15);
                }
                cvDestroyWindow("video");
                cvReleaseImage(&convFrame);
                cvReleaseImage(&convOpencvFrame);
                cvReleaseCapture(&capture);
            } break; /* end of ARM Sobel test */

            default:
                printf("Input argument error:\nUsage:\n"
                       "./remote_ti_platforms_evm3530_opencv.xv5T s tree.avi dsp\n"
                       "./remote_ti_platforms_evm3530_opencv.xv5T s tree.avi arm\n");
            }
            break;

        case 'w': /* 'w' for webcam */
            switch (*argv[3]) {
            case 'd': { /* 'd' for DSP-accelerated */
                cvNamedWindow("video", CV_WINDOW_AUTOSIZE);
                capture = cvCaptureFromCAM(-1);
                if (!capture) {
                    printf("Error: Video capture initialization failed.\n");
                    break;
                }
                videoFrame = cvQueryFrame(capture);
                if (!videoFrame) {
                    printf("**Error reading from webcam\n");
                    break;
                }
                /* grayscale version of the frame */
                convFrame = cvCreateImage(cvSize(videoFrame->width, videoFrame->height), IPL_DEPTH_8U, 1);
                /* Sobel-filtered image */
                convOpencvFrame = cvCreateImage(cvSize(convFrame->width, convFrame->height), IPL_DEPTH_16S, 1);
                /* unsigned image for display */
                unsignedFrame = cvCreateImage(cvSize(videoFrame->width, videoFrame->height), IPL_DEPTH_8U, 1);

                /* Process the first frame outside the loop */
                DSP_cvCvtColor(videoFrame, convFrame, CV_RGB2GRAY);
                DSP_cvSyncDSP();

                while (key != 'q') {
                    /* Time and benchmark the DSP-based Sobel filter */
                    Time_reset(&sTime);
                    Time_delta(&sTime, &time);

                    DSP_cvSobel(convFrame, convOpencvFrame, 1, 1, 3);

                    /* Get the next frame */
                    videoFrame = cvQueryFrame(capture);
                    if (!videoFrame) {
                        printf("***The End***\n");
                        break;
                    }

                    /* Sync with the DSP */
                    DSP_cvSyncDSP();

                    /* Start the RGB-to-Y conversion of the next frame */
                    DSP_cvCvtColor(videoFrame, convFrame, CV_RGB2GRAY);

                    /* Convert the signed image to unsigned for better clarity */
                    cvConvert(convOpencvFrame, unsignedFrame);

                    /* Display the filtered image */
                    cvShowImage("video", unsignedFrame);

                    /* Sync with the DSP */
                    DSP_cvSyncDSP();

                    Time_delta(&sTime, &time);
                    printf("Total execution time = %dus\n", (unsigned int)time);
                    key = cvWaitKey(15);
                }
                cvDestroyWindow("video");
                cvReleaseImage(&convFrame);
                cvReleaseImage(&convOpencvFrame);
                cvReleaseImage(&unsignedFrame);
                cvReleaseCapture(&capture);
            } break; /* end of DSP Sobel test */

            case 'a': { /* 'a' for the ARM side */
                cvNamedWindow("video", CV_WINDOW_AUTOSIZE);
                capture = cvCaptureFromCAM(-1);
                if (!capture) {
                    printf("Error: Video capture initialization failed.\n");
                    break;
                }
                videoFrame = cvQueryFrame(capture);
                if (!videoFrame) {
                    printf("**Error reading from webcam\n");
                    break;
                }
                /* grayscale version of the frame */
                convFrame = cvCreateImage(cvSize(videoFrame->width, videoFrame->height), IPL_DEPTH_8U, 1);
                /* Sobel-filtered image */
                convOpencvFrame = cvCreateImage(cvSize(convFrame->width, convFrame->height), IPL_DEPTH_16S, 1);
                /* unsigned image for display */
                unsignedFrame = cvCreateImage(cvSize(videoFrame->width, videoFrame->height), IPL_DEPTH_8U, 1);

                /* Process the first frame outside the loop */
                cvCvtColor(videoFrame, convFrame, CV_RGB2GRAY);

                while (key != 'q') {
                    /* Time and benchmark the ARM-based Sobel filter */
                    Time_reset(&sTime);
                    Time_delta(&sTime, &time);

                    cvSobel(convFrame, convOpencvFrame, 1, 1, 3);

                    /* Get the next frame */
                    videoFrame = cvQueryFrame(capture);
                    if (!videoFrame) {
                        printf("***The End***\n");
                        break;
                    }

                    /* Start the RGB-to-Y conversion of the next frame */
                    cvCvtColor(videoFrame, convFrame, CV_RGB2GRAY);

                    /* Convert the signed image to unsigned for better clarity */
                    cvConvert(convOpencvFrame, unsignedFrame);

                    /* Display the filtered image */
                    cvShowImage("video", unsignedFrame);

                    Time_delta(&sTime, &time);
                    printf("Total execution time = %dus\n", (unsigned int)time);
                    key = cvWaitKey(15);
                }
                cvDestroyWindow("video");
                cvReleaseImage(&convFrame);
                cvReleaseImage(&convOpencvFrame);
                cvReleaseImage(&unsignedFrame);
                cvReleaseCapture(&capture);
            } break; /* end of ARM Sobel test */

            default:
                printf("Input argument error:\nUsage:\n"
                       "./remote_ti_platforms_evm3530_opencv.xv5T s webcam dsp\n"
                       "./remote_ti_platforms_evm3530_opencv.xv5T s webcam arm\n");
            }
            break;

        default:
            printf("Input argument error:\nUsage:\n"
                   "./remote_ti_platforms_evm3530_opencv.xv5T s tree.avi\n"
                   "./remote_ti_platforms_evm3530_opencv.xv5T s webcam\n");
            break;
        }
        break;

    case 'd': /* DFT tests */
        switch (*argv[2]) {
        case 'd': { /* 'd' for DSP-accelerated */
            int row = 63;
            int col = 63;
            floatDataPtr = (float*)cvAlloc(sizeof(float) * row * col * 2);
            floatOutPtr = (float*)cvAlloc(sizeof(float) * row * col * 2);
            /* two channels: interleaved real/imaginary parts */
            dataImage = cvCreateImageHeader(cvSize(col, row), IPL_DEPTH_32F, 2);
            dataImage->imageData = (char*)floatDataPtr;
            /* the integralImage header is reused here as the DFT output */
            integralImage = cvCreateImageHeader(cvSize(col, row), IPL_DEPTH_32F, 2);
            integralImage->imageData = (char*)floatOutPtr;

            for (i = 0; i < row * col * 2; i += 2) {
                tempFloat += 1.0;
                floatDataPtr[i] = tempFloat; /* real part */
                floatDataPtr[i + 1] = 0.0;   /* imaginary part */
            }

            Time_reset(&sTime);
            Time_delta(&sTime, &time);
            DSP_cvDFT(dataImage, integralImage, CV_DXT_FORWARD, 0);
            Time_delta(&sTime, &time);
            DSP_cvSyncDSP();

            /* Print the DFT output, five entries per line */
            flPtr = (float*)integralImage->imageData;
            printf("The DFT output is:\n");
            for (i = 0; i < integralImage->height; i++) {
                key = 0;
                for (j = 0; j < integralImage->width * 2; j += 2) {
                    key++;
                    printf("%f + i%f\t",
                           flPtr[(i * integralImage->width * 2) + j],
                           flPtr[(i * integralImage->width * 2) + j + 1]);
                    if ((key % 5) == 0)
                        printf("\n");
                }
                printf("\n");
            }
            printf("DSP_cvDFT Total execution time = %dus\n", (unsigned int)time);
            cvFree(&floatDataPtr);
            cvFree(&floatOutPtr);
            cvReleaseImageHeader(&dataImage);
            cvReleaseImageHeader(&integralImage);
        } break;

        case 'a': { /* 'a' for the ARM side */
            int row = 63;
            int col = 63;
            floatDataPtr = (float*)cvAlloc(sizeof(float) * row * col * 2);
            floatOutPtr = (float*)cvAlloc(sizeof(float) * row * col * 2);
            dataImage = cvCreateImageHeader(cvSize(col, row), IPL_DEPTH_32F, 2);
            dataImage->imageData = (char*)floatDataPtr;
            integralImage = cvCreateImageHeader(cvSize(col, row), IPL_DEPTH_32F, 2);
            integralImage->imageData = (char*)floatOutPtr;

            for (i = 0; i < row * col * 2; i += 2) {
                tempFloat += 1.0;
                floatDataPtr[i] = tempFloat; /* real part */
                floatDataPtr[i + 1] = 0.0;   /* imaginary part */
            }

            Time_reset(&sTime);
            Time_delta(&sTime, &time);
            cvDFT(dataImage, integralImage, CV_DXT_FORWARD, 0);
            Time_delta(&sTime, &time);

            /* Print the DFT output, five entries per line */
            flPtr = (float*)integralImage->imageData;
            printf("The DFT output is:\n");
            for (i = 0; i < integralImage->height; i++) {
                key = 0;
                for (j = 0; j < integralImage->width * 2; j += 2) {
                    key++;
                    printf("%f + i%f\t",
                           flPtr[(i * integralImage->width * 2) + j],
                           flPtr[(i * integralImage->width * 2) + j + 1]);
                    if ((key % 5) == 0)
                        printf("\n");
                }
                printf("\n");
            }
            printf("cvDFT Total execution time = %dus\n", (unsigned int)time);
            cvFree(&floatDataPtr);
            cvFree(&floatOutPtr);
            cvReleaseImageHeader(&dataImage);
            cvReleaseImageHeader(&integralImage);
        } break;

        default:
            printf("Input argument error:\nUsage:\n"
                   "./remote_ti_platforms_evm3530_opencv.xv5T d dsp\n"
                   "./remote_ti_platforms_evm3530_opencv.xv5T d arm\n");
        } /* end of switch (*argv[2]) for the DFT tests */
        break;

    default:
        return -1;
    }

    DSP_cvEndDSP();
    return 0;
}
int main(int argc, char** argv)
{
    int wcam = -1;
    CvCapture* capture = 0;
    IplImage* frame = 0;
    IplImage* frame_resized = 0;
    IplImage* frame_resized2 = 0;

    CvSize min_window_size, max_window_size;
    min_window_size.width = 40;
    min_window_size.height = 40;
    max_window_size.width = 0;
    max_window_size.height = 0;

    cvNamedWindow(win_face, 1);

    // Load the file with the information on what to detect
    cascade = (CvHaarClassifierCascade*)cvLoad(file_xml, 0, 0, 0);
    // Allocate the memory used while processing
    storage = cvCreateMemStorage(0);

    /*if(!(capture = cvCaptureFromCAM(wcam))) {
        printf("Unable to open the webcam.\n");
        return -1;
    }*/

    CvSize window_size = cvSize(640, 480);
    frame = cvLoadImage("/Users/Gabriele/Desktop/jobs.jpeg");
    frame_resized = cvCreateImage(window_size, frame->depth, 3);
    frame_resized2 = cvCreateImage(window_size, frame->depth, 3);
    cvResize(frame, frame_resized);

    CLODEnvironmentData* data = clodInitEnvironment(0);
    clifInitBuffers(data->clif, frame_resized->width, frame_resized->height, frame_resized->widthStep, 3);
    clodInitBuffers(data, &window_size);
    ElapseTime t;

    /* Test grayscale + integral: compare the OpenCV result with the OpenCL one */
    IplImage* grayscale = cvCreateImage(cvSize(frame_resized->width, frame_resized->height), IPL_DEPTH_8U, 1);
    cvCvtColor(frame_resized, grayscale, CV_BGR2GRAY);
    CvMat* sim = cvCreateMat(frame_resized->height + 1, frame_resized->width + 1, CV_32SC1);
    CvMat* sqim = cvCreateMat(frame_resized->height + 1, frame_resized->width + 1, CV_64FC1);
    cvIntegral(grayscale, sim, sqim);
    double temp = sqim->data.db[2000];

    CLIFIntegralResult r = clifIntegral(frame_resized, data->clif, CL_TRUE);
    // The OpenCL squared sums are 64-bit integers behind a CV_64F header,
    // so the data must be reinterpreted:
    cl_ulong temp2 = ((unsigned long*)r.square_image->data.db)[2000];

    cvCopyImage(frame_resized, frame_resized2);
    t.start();
    find_faces_rect_opencv(frame_resized2, min_window_size, max_window_size);
    printf("OpenCV:             %8.4f ms\n", t.get());
    cvShowImage("Sample OpenCV", frame_resized2);

    cvCopyImage(frame_resized, frame_resized2);
    t.start();
    find_faces_rect_opencl(frame_resized2, data, min_window_size, max_window_size,
                           CLOD_PER_STAGE_ITERATIONS | CLOD_PRECOMPUTE_FEATURES, CL_FALSE);
    printf("OpenCL (optimized): %8.4f ms\n", t.get());
    cvShowImage("Sample OpenCL (optimized)", frame_resized2);

    cvCopyImage(frame_resized, frame_resized2);
    t.start();
    find_faces_rect_opencl(frame_resized2, data, min_window_size, max_window_size,
                           CLOD_PRECOMPUTE_FEATURES, CL_TRUE);
    printf("                    %8.4f ms (block)\n", t.get());
    cvShowImage("Sample OpenCL (optimized, block)", frame_resized2);

    cvCopyImage(frame_resized, frame_resized2);
    t.start();
    find_faces_rect_opencl(frame_resized2, data, min_window_size, max_window_size,
                           CLOD_PRECOMPUTE_FEATURES | CLOD_PER_STAGE_ITERATIONS, CL_FALSE);
    printf("OpenCL (per-stage): %8.4f ms\n", t.get());
    cvShowImage("Sample OpenCL (per-stage)", frame_resized2);

    cvCopyImage(frame_resized, frame_resized2);
    t.start();
    find_faces_rect_opencl(frame_resized2, data, min_window_size, max_window_size,
                           CLOD_PRECOMPUTE_FEATURES | CLOD_PER_STAGE_ITERATIONS, CL_TRUE);
    printf("                    %8.4f ms (block)\n", t.get());
    cvShowImage("Sample OpenCL (per-stage, block)", frame_resized2);

    /*
    while(1) {
        // Search for the faces and draw the frame rate
        char fps[1024] = { 0 };
        sprintf(fps, "FPS: %4.2f", 1000.0 / (double)(end - start));
        CvFont font;
        cvInitFont(&font, CV_FONT_HERSHEY_PLAIN, 1.0f, 1.0f);
        CvPoint point;
        point.x = 10;
        point.y = frame->height - 10;
        CvScalar scalar = { 255, 255, 255, 1 };
        cvPutText(frame, fps, point, &font, scalar);
        cvShowImage(win_face, frame);
        frame = cvQueryFrame(capture);
        if( (cvWaitKey(10) & 255) == 27 )
            break;
    }
    */

    clodReleaseBuffers(data);
    clodReleaseEnvironment(data);
    free(data);
    cvReleaseImage(&frame);
    cvReleaseImage(&frame_resized);
    cvReleaseCapture(&capture);
    cvDestroyWindow(win_face);
    return 0;
}