/*! Transform the grid with the given homography and average colors over
 *  triangles. */
void LightCollector::averageImage(IplImage *im, CvMat *_homography)
{
    // Reallocate the per-triangle average buffer if the channel count changed.
    if (avgChannels != im->nChannels) {
        if (avgChannels < im->nChannels) {
            // Existing buffer is too small for the new channel count; drop it.
            delete[] avg;
            avg = 0;
        }
        avgChannels = im->nChannels;
    }
    if (!avg)
        avg = new float[avgChannels*nbTri];

    // apply the homography to every mesh vertex
    if (_homography)
        cvMatMul(_homography, vertices, transformed);
    else
        cvCopy(vertices, transformed);

    // Perspective divide in place: rows 0 and 1 (x, y) divided by row 2 (w).
    CvMat r1,r2,r3;
    cvGetRow(transformed, &r1, 0);
    cvGetRow(transformed, &r2, 1);
    cvGetRow(transformed, &r3, 2);
    cvDiv(&r1,&r3,&r1);
    cvDiv(&r2,&r3,&r2);

    // Accumulate color statistics for every triangle of the warped mesh.
    nbPix=0;
    for (int t=0; t<nbTri;t++) {
        int pts[3][2];
        for (int i=0; i<3; i++) {
            assert(triangles[t*3+i] < transformed->cols);
            pts[i][0] = cvRound(CV_MAT_ELEM(*transformed, float, 0, triangles[t*3+i]));
            pts[i][1] = cvRound(CV_MAT_ELEM(*transformed, float, 1, triangles[t*3+i]));
        }
        nbPix+=stat_triangle(im, pts, avg+t*avgChannels);
    }
}
/**
 * Load a previously saved decision tree from "tree1.xml" into @p dtree and
 * report its hit-rate on the training database.
 * @param dtree     tree object to load into; also returned.
 * @param data      one sample per row.
 * @param missing   missing-value mask with the same geometry as @p data.
 * @param responses per-sample class label ('p' = poisonous).
 * @param p_weight  unused here (priors are baked into the saved model); kept
 *                  for interface compatibility with mushroom_create_dtree.
 * @return @p dtree after loading.
 */
CvDTree* mushroom_read_dtree(CvDTree* dtree, const CvMat* data, const CvMat* missing,
                             const CvMat* responses, float p_weight)
{
    int i, hr1 = 0, hr2 = 0, p_total = 0;
    (void)p_weight;

    dtree->clear();
    dtree->load("tree1.xml");

    // compute hit-rate on the training database, demonstrates predict usage.
    for (i = 0; i < data->rows; i++)
    {
        CvMat sample, mask;
        cvGetRow(data, &sample, i);
        cvGetRow(missing, &mask, i);
        double r = dtree->predict(&sample, &mask)->value;
        int d = fabs(r - responses->data.fl[i]) >= FLT_EPSILON;
        if (d)
        {
            if (r != 'p')
                hr1++;   // poisonous sample predicted edible
            else
                hr2++;   // edible sample predicted poisonous (false alarm)
        }
        p_total += responses->data.fl[i] == 'p';
    }

    // Guard the percentages against division by zero when one class is absent.
    int n_total = data->rows - p_total;
    printf( "Results on the training database:\n"
            "\tPoisonous mushrooms mis-predicted: %d (%g%%)\n"
            "\tFalse-alarms: %d (%g%%)\n",
            hr1, p_total ? (double)hr1*100/p_total : 0.,
            hr2, n_total ? (double)hr2*100/n_total : 0. );

    return dtree;
}
/// <summary> /// Reads the sample images and associated charaters into trainClasses and trainData respectively. /// </summary> /// <returns> Nothing. </returns> void OCR::getData() { IplImage* src_image; IplImage prs_image; CvMat row,data; char file[255]; char dataFile[255]; std::ifstream labelStream; std::ostringstream outStringStream; char ch; int i,j; for(i = 0; i < classes; i++) { //26 //Read the corresponding character for current sample being processed into ch. sprintf(dataFile,"%s%d/data.txt",file_path, i); labelStream.open(dataFile); labelStream >> ch; labelStream.close(); for( j = 0; j< train_samples; j++) { //3 //Load file //get the path of image for training into file. if(j<10) sprintf(file,"%s%d/%d0%d.pbm",file_path, i, i, j); else sprintf(file,"%s%d/%d%d.pbm",file_path, i, i, j); src_image = cvLoadImage(file,0); if(!src_image) { printf("Error: Cant load image %s\n", file); //exit(-1); } //process file prs_image = preprocessing(src_image, size, size); //Set class label cvGetRow(trainClasses, &row, i*train_samples + j); cvSet(&row, cvRealScalar(ch)); //Set data cvGetRow(trainData, &row, i*train_samples + j); IplImage* img = cvCreateImage( cvSize( size, size ), IPL_DEPTH_32F, 1 ); //convert 8 bits image to 32 float image cvConvertScale(&prs_image, img, 0.0039215, 0); cvGetSubRect(img, &data, cvRect(0,0, size,size)); CvMat row_header, *row1; //convert data matrix sizexsize to vecor row1 = cvReshape( &data, &row_header, 0, 1 ); cvCopy(row1, &row, NULL); } } }
/**
 * Train a decision tree on the mushroom data set and report its hit-rate on
 * the training data.
 * @param data      one sample per row, categorical features.
 * @param missing   missing-value mask with the same geometry as @p data.
 * @param responses per-sample class label ('p' = poisonous).
 * @param p_weight  prior weight of the poisonous class; larger values bias
 *                  the tree towards predicting "poisonous".
 * @return the trained tree; the caller owns it.
 */
CvDTree* mushroom_create_dtree( const CvMat* data, const CvMat* missing,
                                const CvMat* responses, float p_weight )
{
    CvDTree* dtree;
    CvMat* var_type;
    int i, hr1 = 0, hr2 = 0, p_total = 0;
    float priors[] = { 1, p_weight };

    var_type = cvCreateMat( data->cols + 1, 1, CV_8U );
    cvSet( var_type, cvScalarAll(CV_VAR_CATEGORICAL) ); // all the variables are categorical

    dtree = new CvDTree;

    dtree->train( data, CV_ROW_SAMPLE, responses, 0, 0, var_type, missing,
                  CvDTreeParams( 8, // max depth
                                 10, // min sample count
                                 0, // regression accuracy: N/A here
                                 true, // compute surrogate split, as we have missing data
                                 15, // max number of categories (use sub-optimal algorithm for larger numbers)
                                 10, // the number of cross-validation folds
                                 true, // use 1SE rule => smaller tree
                                 true, // throw away the pruned tree branches
                                 priors // the array of priors, the bigger p_weight, the more attention
                                 // to the poisonous mushrooms
                                 // (a mushroom will be judjed to be poisonous with bigger chance)
                                 ));

    // compute hit-rate on the training database, demonstrates predict usage.
    for( i = 0; i < data->rows; i++ )
    {
        CvMat sample, mask;
        cvGetRow( data, &sample, i );
        cvGetRow( missing, &mask, i );
        double r = dtree->predict( &sample, &mask )->value;
        // misprediction when the predicted value differs from the response
        int d = fabs(r - responses->data.fl[i]) >= FLT_EPSILON;
        if( d )
        {
            if( r != 'p' )
                hr1++; // poisonous sample predicted edible
            else
                hr2++; // edible sample predicted poisonous (false alarm)
        }
        p_total += responses->data.fl[i] == 'p';
    }

    printf( "Results on the training database:\n"
            "\tPoisonous mushrooms mis-predicted: %d (%g%%)\n"
            "\tFalse-alarms: %d (%g%%)\n", hr1, (double)hr1*100/p_total,
            hr2, (double)hr2*100/(data->rows - p_total) );

    cvReleaseMat( &var_type );

    return dtree;
}
/**
 * Compute the prediction error of @p svm on @p _data.
 * @param type CV_TEST_ERROR or CV_TRAIN_ERROR selects which sample subset is scored.
 * @param resp optional out: per-sample predictions.
 * @return classification: misclassification rate in percent;
 *         regression: mean squared error; -FLT_MAX when there are no samples.
 */
float svm_calc_error( CvSVM* svm, CvMLData* _data, int type, vector<float> *resp )
{
    svm_check_data(_data);
    float err = 0;
    const CvMat* values = _data->get_values();
    const CvMat* response = _data->get_responses();
    const CvMat* sample_idx = (type == CV_TEST_ERROR) ? _data->get_test_sample_idx() : _data->get_train_sample_idx();
    const CvMat* var_types = _data->get_var_types();
    int* sidx = sample_idx ? sample_idx->data.i : 0;
    // element step for non-continuous response matrices
    int r_step = CV_IS_MAT_CONT(response->type) ? 1 : response->step / CV_ELEM_SIZE(response->type);
    // the last var-type entry describes the response: categorical => classification
    bool is_classifier = var_types->data.ptr[var_types->cols-1] == CV_VAR_CATEGORICAL;
    int sample_count = sample_idx ? sample_idx->cols : 0;
    // train error with no explicit index set means "score all rows"
    sample_count = (type == CV_TRAIN_ERROR && sample_count == 0) ? values->rows : sample_count;
    float* pred_resp = 0;
    if( resp && (sample_count > 0) )
    {
        resp->resize( sample_count );
        pred_resp = &((*resp)[0]);
    }
    if ( is_classifier )
    {
        for( int i = 0; i < sample_count; i++ )
        {
            CvMat sample;
            int si = sidx ? sidx[i] : i;
            cvGetRow( values, &sample, si );
            float r = svm->predict( &sample );
            if( pred_resp )
                pred_resp[i] = r;
            // count a miss when the prediction differs beyond FLT_EPSILON
            int d = fabs((double)r - response->data.fl[si*r_step]) <= FLT_EPSILON ? 0 : 1;
            err += d;
        }
        err = sample_count ? err / (float)sample_count * 100 : -FLT_MAX;
    }
    else
    {
        for( int i = 0; i < sample_count; i++ )
        {
            CvMat sample;
            int si = sidx ? sidx[i] : i;
            cvGetRow( values, &sample, si );
            float r = svm->predict( &sample );
            if( pred_resp )
                pred_resp[i] = r;
            float d = r - response->data.fl[si*r_step];
            err += d*d; // accumulate squared error
        }
        err = sample_count ? err / (float)sample_count : -FLT_MAX;
    }
    return err;
}
/**
 * Compute the prediction error of a k-nearest-neighbour model on @p _data.
 * @param k    number of neighbours passed to find_nearest.
 * @param type CV_TEST_ERROR or CV_TRAIN_ERROR selects the sample subset.
 * @param resp optional out: per-sample predictions.
 * @return classification: misclassification rate in percent;
 *         regression: mean squared error; -FLT_MAX when there are no samples.
 */
float knearest_calc_error( CvKNearest* knearest, CvMLData* _data, int k, int type, vector<float> *resp )
{
    float err = 0;
    const CvMat* response = _data->get_responses();
    const CvMat* sample_idx = (type == CV_TEST_ERROR) ? _data->get_test_sample_idx() : _data->get_train_sample_idx();
    int* sidx = sample_idx ? sample_idx->data.i : 0;
    // element step for non-continuous response matrices
    int r_step = CV_IS_MAT_CONT(response->type) ? 1 : response->step / CV_ELEM_SIZE(response->type);
    // an ordered response variable means regression, categorical means classification
    bool is_regression = _data->get_var_type( _data->get_response_idx() ) == CV_VAR_ORDERED;
    CvMat predictors;
    knearest_check_data_and_get_predictors( _data, &predictors );
    int sample_count = sample_idx ? sample_idx->cols : 0;
    // train error with no explicit index set means "score all rows"
    sample_count = (type == CV_TRAIN_ERROR && sample_count == 0) ? predictors.rows : sample_count;
    float* pred_resp = 0;
    if( resp && (sample_count > 0) )
    {
        resp->resize( sample_count );
        pred_resp = &((*resp)[0]);
    }
    if ( !is_regression )
    {
        for( int i = 0; i < sample_count; i++ )
        {
            CvMat sample;
            int si = sidx ? sidx[i] : i;
            cvGetRow( &predictors, &sample, si );
            float r = knearest->find_nearest( &sample, k );
            if( pred_resp )
                pred_resp[i] = r;
            // count a miss when the prediction differs beyond FLT_EPSILON
            int d = fabs((double)r - response->data.fl[si*r_step]) <= FLT_EPSILON ? 0 : 1;
            err += d;
        }
        err = sample_count ? err / (float)sample_count * 100 : -FLT_MAX;
    }
    else
    {
        for( int i = 0; i < sample_count; i++ )
        {
            CvMat sample;
            int si = sidx ? sidx[i] : i;
            cvGetRow( &predictors, &sample, si );
            float r = knearest->find_nearest( &sample, k );
            if( pred_resp )
                pred_resp[i] = r;
            float d = r - response->data.fl[si*r_step];
            err += d*d; // accumulate squared error
        }
        err = sample_count ? err / (float)sample_count : -FLT_MAX;
    }
    return err;
}
bool LightCollector::genGrid(float corners[4][2], int nx, int ny) { if (nx<1 || ny<1) return false; if (avg) delete[] avg; avg=0; if (vertices) cvReleaseMat(&vertices); if (transformed) cvReleaseMat(&transformed); // generate vertices vertices = cvCreateMat(3, (nx+1)*(ny+1), CV_32FC1); transformed = cvCreateMat(3, vertices->cols, CV_32FC1); for (int y=0; y<(ny+1); ++y) for (int x=0; x<(nx+1); ++x) { CV_MAT_ELEM(*vertices, float, 0, y*(nx+1)+x) = float(x)/float(nx); CV_MAT_ELEM(*vertices, float, 1, y*(nx+1)+x) = float(y)/float(ny); CV_MAT_ELEM(*vertices, float, 2, y*(nx+1)+x) = 1; } // generate triangles nbTri = nx*ny*2; triangles = new int[nbTri*3]; int *tri = triangles; for (int y=0; y<ny; ++y) for (int x=0; x<nx; ++x) { tri[0] = y*(nx+1)+x; tri[1] = y*(nx+1)+x+1; tri[2] = (y+1)*(nx+1)+x; tri+=3; tri[0] = y*(nx+1)+x+1; tri[1] = (y+1)*(nx+1)+x+1; tri[2] = (y+1)*(nx+1)+x; tri+=3; } homography H; if (!H.estimate(0, 0, corners[0][0], corners[0][1], 1, 0, corners[1][0], corners[1][1], 1, 1, corners[2][0], corners[2][1], 0, 1, corners[3][0], corners[3][1])) return false; cvMatMul(&H, vertices, transformed); CvMat r1,r2,r3, d1, d2; cvGetRow(transformed, &r1, 0); cvGetRow(transformed, &r2, 1); cvGetRow(transformed, &r3, 2); cvGetRow(vertices, &d1, 0); cvGetRow(vertices, &d2, 1); cvDiv(&r1,&r3,&d1); cvDiv(&r2,&r3,&d2); return true; }
/**
 * Shift texture parameters from one age group to another:
 * newParam = curParam + (group mean of newAgeG - group mean of curAgeG).
 */
void FacePredict::CalcNewTextureParams(CvMat* curParam, CvMat* newParam, int curAgeG, int newAgeG)
{
    // Use stack headers for the group-mean rows. The original allocated full
    // matrices with cvCreateMat and then passed them to cvGetRow, which
    // overwrites the header and leaks the allocated data buffer.
    CvMat curClassicP, newClassicP;
    cvGetRow(__TextureParamGroups, &curClassicP, curAgeG);
    cvGetRow(__TextureParamGroups, &newClassicP, newAgeG);

    CvMat* diff = cvCreateMat(1, __nTextureModes, CV_64FC1);
    cvSub(&newClassicP, &curClassicP, diff);
    cvAdd(curParam, diff, newParam);

    cvReleaseMat(&diff);
}
void basicOCR::getData() { IplImage* src_image; IplImage prs_image; CvMat row,data; char file[255]; int i,j; //for(i =0; i<classes; i++) for (i = 32; i < 32 + classes; i++) { for ( j = 0; j < train_samples; j++) { //加载pbm格式图像,作为训练 /*if(j < 10) sprintf(file,"%s%d/%d0%d.pbm",file_path, i - 48, i - 48 , j); else sprintf(file,"%s%d/%d%d.pbm",file_path, i - 48, i - 48 , j);*/ if (i >= 48 && i <= 57) sprintf(file,"%s%d/%d.pbm",file_path, i, j); else sprintf(file,"%s%d/%d.bmp",file_path, i, j); src_image = cvLoadImage(file,0); if(!src_image) { //printf("Error: Cant load image %s\n", file); continue; //exit(-1); } //process file prs_image = preprocessing(src_image, size, size); //Set class label cvGetRow(trainClasses, &row, (i - 32)*train_samples + j); cvSet(&row, cvRealScalar(i)); //Set data cvGetRow(trainData, &row, (i - 32)*train_samples + j); IplImage* img = cvCreateImage( cvSize( size, size ), IPL_DEPTH_32F, 1 ); //convert 8 bits image to 32 float image cvConvertScale(&prs_image, img, 0.0039215, 0); cvGetSubRect(img, &data, cvRect(0,0, size,size)); CvMat row_header, *row1; //convert data matrix sizexsize to vecor row1 = cvReshape( &data, &row_header, 0, 1 ); cvCopy(row1, &row, NULL); } } }
//============================================================================ void AAM_Basic::CalcCVectors(const std::vector<AAM_Shape>& AllShapes, const std::vector<IplImage*>& AllImages, CvMat* CParams) { int npixels = __cam.__texture.nPixels(); int npointsby2 = __cam.__shape.nPoints()*2; int nfeatures = __cam.nParameters(); CvMat* a = cvCreateMat(1, nfeatures, CV_64FC1);//appearance vector CvMat* s = cvCreateMat(1, npointsby2, CV_64FC1);//shape vector CvMat* t = cvCreateMat(1, npixels, CV_64FC1);//texture vector for(int i = 0; i < AllShapes.size(); i++) { //calculate current shape and texture vector AllShapes[i].Point2Mat(s); __cam.__paw.FasterGetWarpTextureFromMatShape(s, AllImages[i], t, true); __cam.__texture.AlignTextureToRef(__cam.__MeanG, t); //convert shape and texture vector to appearance vector __cam.ShapeTexture2Combined(s, t, a); //calculate appearance parameters by project to appearance spaces CvMat c; cvGetRow(CParams, &c, i); cvProjectPCA(a, __cam.__MeanAppearance, __cam.__AppearanceEigenVectors, &c); } cvReleaseMat(&s); cvReleaseMat(&t); cvReleaseMat(&a); }
int RandomTrees::test(const char* sample_filename, const char* model_filename, double &test_error) { CvMat* data = 0; CvMat* responses = 0; int ok = read_num_class_data( sample_filename, this->number_of_features_, &data, &responses ); int nsamples_all = 0; int i = 0; if( !ok ) { printf( "Could not read the sample file %s\n", sample_filename ); return -1; } printf( "The sample file %s is loaded.\n", sample_filename ); nsamples_all = data->rows; // compute prediction error on train and test data for( i = 0; i < nsamples_all; i++ ) { CvMat sample; cvGetRow( data, &sample, i ); this->loadModel(model_filename); this->classify(&sample); } cvReleaseMat( &data ); cvReleaseMat( &responses ); return 0; }
/// <summary> /// Finds min and max Y of the data present in given image. /// </summary> /// <params name="imsSrc"> /// Source image for which min and max Y has to be found. /// </params> /// <params name="min"> /// Int pointer where the min Y has to saved. /// </params> /// <params name="max"> /// Int pointer where the max Y has to saved. /// </params> /// <returns> Nothing. </returns> void OCR::findY(IplImage* imgSrc,int* min, int* max) { int i; int minFound=0; CvMat data; CvScalar maxVal=cvRealScalar(imgSrc->width * 255); CvScalar val=cvRealScalar(0); //For each col sum, if sum < width*255 then we find the min //then continue to end to search the max, if sum< width*255 then is new max for (i=0; i< imgSrc->height; i++) { val = cvRealScalar(0); cvGetRow(imgSrc, &data, i); val= cvSum(&data); if(val.val[0] < maxVal.val[0]) { *max=i; if(!minFound) { *min= i; minFound= 1; } } } }
/**
 * Compute the misclassification rate (percent) of a normal Bayes classifier
 * on @p _data.
 * @param type CV_TEST_ERROR or CV_TRAIN_ERROR selects the sample subset.
 * @param resp optional out: per-sample predictions.
 * @return error rate in percent, or -FLT_MAX when there are no samples.
 */
float nbayes_calc_error( CvNormalBayesClassifier* nbayes, CvMLData* _data, int type, vector<float> *resp )
{
    float err = 0;
    nbayes_check_data( _data );
    const CvMat* values = _data->get_values();
    const CvMat* response = _data->get_responses();
    const CvMat* sample_idx = (type == CV_TEST_ERROR) ? _data->get_test_sample_idx() : _data->get_train_sample_idx();
    int* sidx = sample_idx ? sample_idx->data.i : 0;
    // element step for non-continuous response matrices
    int r_step = CV_IS_MAT_CONT(response->type) ? 1 : response->step / CV_ELEM_SIZE(response->type);
    int sample_count = sample_idx ? sample_idx->cols : 0;
    // train error with no explicit index set means "score all rows"
    sample_count = (type == CV_TRAIN_ERROR && sample_count == 0) ? values->rows : sample_count;
    float* pred_resp = 0;
    if( resp && (sample_count > 0) )
    {
        resp->resize( sample_count );
        pred_resp = &((*resp)[0]);
    }
    for( int i = 0; i < sample_count; i++ )
    {
        CvMat sample;
        int si = sidx ? sidx[i] : i;
        cvGetRow( values, &sample, si );
        float r = (float)nbayes->predict( &sample, 0 );
        if( pred_resp )
            pred_resp[i] = r;
        // count a miss when the prediction differs beyond FLT_EPSILON
        int d = fabs((double)r - response->data.fl[si*r_step]) <= FLT_EPSILON ? 0 : 1;
        err += d;
    }
    err = sample_count ? err / (float)sample_count * 100 : -FLT_MAX;
    return err;
}
/**
 * Compute how wide the fixed world road-width W appears in pixels at world
 * height Y, using rows 0 and 2 of the projection matrix @p P.
 * @return the pixel width rounded to the nearest integer.
 */
int calculateWidthInPixels(CvMat* P, float Y){
    const float W = 0.10f; // road width in world units
                           // NOTE(review): the original comment said
                           // "20cm ~ 0.2m" but the value is 0.10 — confirm units.
    CvMat tmp;

    // P_1 = row 0 of P, P_3 = row 2 of P
    CvMat *P_1 = cvCreateMat(1,4,CV_32FC1);
    cvGetRow(P,&tmp,0);  //row 0
    cvCopy(&tmp,P_1,NULL);
    CvMat *P_3 = cvCreateMat(1,4,CV_32FC1);
    cvGetRow(P,&tmp,2);  //row 2
    cvCopy(&tmp,P_3,NULL);

    // Two homogeneous world points at height Y: one at x=W, one at x=0.
    CvMat* X_1 = cvCreateMat(4,1,CV_32FC1);
    CvMat* X_2 = cvCreateMat(4,1,CV_32FC1);
    CvMat* P_1_times_X_1 = cvCreateMat(1,1,CV_32FC1);
    CvMat* P_3_times_X_1 = cvCreateMat(1,1,CV_32FC1);
    CvMat* P_1_times_X_2 = cvCreateMat(1,1,CV_32FC1);
    CvMat* P_3_times_X_2 = cvCreateMat(1,1,CV_32FC1);

    cvmSet(X_1,0,0,W);
    cvmSet(X_1,1,0,Y);
    cvmSet(X_1,2,0,0.0);
    cvmSet(X_1,3,0,1.0);

    cvmSet(X_2,0,0,0);
    cvmSet(X_2,1,0,Y);
    cvmSet(X_2,2,0,0);
    cvmSet(X_2,3,0,1);

    cvMatMul(P_1,X_1,P_1_times_X_1);
    cvMatMul(P_3,X_1,P_3_times_X_1);
    cvMatMul(P_1,X_2,P_1_times_X_2);
    cvMatMul(P_3,X_2,P_3_times_X_2);

    // Perspective-divide each projected point and take the x difference.
    float w = ((cvmGet(P_1_times_X_1,0,0) / cvmGet(P_3_times_X_1,0,0) )
             - (cvmGet(P_1_times_X_2,0,0) / cvmGet(P_3_times_X_2,0,0) ));

    // Release every temporary (all eight matrices were previously leaked).
    cvReleaseMat(&P_1);
    cvReleaseMat(&P_3);
    cvReleaseMat(&X_1);
    cvReleaseMat(&X_2);
    cvReleaseMat(&P_1_times_X_1);
    cvReleaseMat(&P_3_times_X_1);
    cvReleaseMat(&P_1_times_X_2);
    cvReleaseMat(&P_3_times_X_2);

    return int(w+0.5);
}
/**
 * Classify the first row of @p data with the loaded boosted classifier using
 * the unrolled multi-class scheme: each candidate class label is appended to
 * the sample and the per-class weak-vote sums are collected.
 * @return map from character label (j + FIRST_LABEL) to its vote sum.
 * NOTE(review): this function releases @p data before returning — callers
 * must not use the matrix afterwards; confirm this ownership transfer is
 * intended.
 */
LabelMap AdaBoost::classify(CvMat* data)
{
    if( !is_modelfile_loaded_ ) {
        printf("no model file is loaded");
        exit(0);
    }

    LabelMap classification_result;
    LabelMap::iterator iter;

    CvMat* responses = 0;
    CvMat* var_type = 0;
    CvMat* temp_sample = 0;
    CvMat* weak_responses = 0;

    int var_count=0;
    int j=0, k=0;

    var_count = data->cols;
    // temp_sample holds the feature vector plus the candidate class label
    // appended as the last element.
    temp_sample = cvCreateMat( 1, var_count + 1, CV_32F );
    weak_responses = cvCreateMat( 1, this->classifier_.get_weak_predictors()->total, CV_32F );

    int best_class = 0;
    double max_sum = -DBL_MAX;
    CvMat sample;
    cvGetRow( data, &sample, 0 );
    // the input row is double precision; copy into the float sample buffer
    for( k = 0; k < var_count; k++ )
        temp_sample->data.fl[k] = (float)sample.data.db[k];

    for( j = 0; j < this->number_of_classes_; j++ )
    {
        temp_sample->data.fl[var_count] = (float)j;
        this->classifier_.predict( temp_sample, 0,weak_responses );
        // sum of weak-classifier votes for candidate class j
        double sum = cvSum( weak_responses ).val[0];
        classification_result[((char)(j + FIRST_LABEL))] = sum;
        if( max_sum < sum )
        {
            max_sum = sum;
            best_class = j + FIRST_LABEL;
        }
    }

    cvReleaseMat( &temp_sample );
    cvReleaseMat( &weak_responses );
    cvReleaseMat( &var_type );   // no-op: var_type is always 0 here
    cvReleaseMat( &data );
    cvReleaseMat( &responses );  // no-op: responses is always 0 here
    return classification_result;
}
//============================================================================ void AAM_IC::CalcModifiedSD(CvMat* SD, const CvMat* dTx, const CvMat* dTy, const CvMat* Jx, const CvMat* Jy) { int i, j; //create steepest descent images double* _x = dTx->data.db; double* _y = dTy->data.db; double temp; for(i = 0; i < __shape.nModes()+4; i++) { for(j = 0; j < __paw.nPix(); j++) { temp = _x[3*j ]*cvmGet(Jx,j,i) +_y[3*j ]*cvmGet(Jy,j,i); cvmSet(SD,i,3*j,temp); temp = _x[3*j+1]*cvmGet(Jx,j,i) +_y[3*j+1]*cvmGet(Jy,j,i); cvmSet(SD,i,3*j+1,temp); temp = _x[3*j+2]*cvmGet(Jx,j,i) +_y[3*j+2]*cvmGet(Jy,j,i); cvmSet(SD,i,3*j+2,temp); } } //project out appearance variation (and linear lighting parameters) const CvMat* B = __texture.GetBases(); CvMat* V = cvCreateMat(4+__shape.nModes(), __texture.nModes(), CV_64FC1); CvMat SDMat, BMat; cvGEMM(SD, B, 1., NULL, 1., V, CV_GEMM_B_T); // Equation (63),(64) for(i = 0; i < __shape.nModes()+4; i++) { for(j = 0; j < __texture.nModes(); j++) { cvGetRow(SD, &SDMat, i); cvGetRow(B, &BMat, j); cvScaleAdd(&BMat, cvScalar(-cvmGet(V,i,j)), &SDMat, &SDMat); } } cvReleaseMat(&V); }
/*! \fn CvFaceSVMClassifier::Training_error(CvMat * train_data, CvMat * labels) const */
/**
 * Evaluate the classifier on labelled data (label 1.0 = positive,
 * label 2.0 = negative), one sample per row of @p train_data.
 * @return cvScalar(error rate, true-positive rate, false-positive rate, 0).
 */
CvScalar CvFaceSVMClassifier::Training_error(CvMat * train_data, CvMat * labels) const
{
    CvSize size = cvGetSize(labels);
    int nsamples = size.width * size.height; // labels may be a row or column vector

    // count positives (1.0) and negatives (2.0)
    int numpositive = 0, numnegative = 0;
    for(int i = 0; i < nsamples; i++)
    {
        double value = cvGetReal1D( labels, i );
        if ( value == 1.0 ) numpositive++;
        else if( value == 2.0 ) numnegative++;
    }
    assert((numpositive+numnegative) == nsamples );

    size = cvGetSize( train_data );
    // NOTE(review): the sample length is taken from size.height (row count)
    // while samples are fetched row-wise below, which suggests size.width
    // (the feature count) was intended — confirm the layout of train_data.
    int nelements = size.height;
    CvMat * sample = cvCreateMat(1, nelements, CV_32FC1);

    int numerror = 0, numtruepositive = 0, numfalsepositive = 0;
    for(int i = 0; i < nsamples; i++)
    {
        cvGetRow( train_data, sample, i );
        double pre_label = Predict( sample );
        double label = cvGetReal1D( labels, i );
        if((pre_label == 1.0)&&(label == 1.0))
        {
            numtruepositive++;
        }
        if(pre_label != label)
        {
            if((pre_label == 1.0)&&(label == 2.0))
            {
                numfalsepositive++;
            }
            //printf("%d ", i);
            numerror++;
        }
    }
    printf("\n\n");

    double error = (double)numerror/(double)nsamples;
    double tp_rate = (double)numtruepositive/(double)numpositive;
    double fp_rate = (double)numfalsepositive/(double)numnegative;

    cvReleaseMat(&sample);
    CvScalar scalar = cvScalar(error, tp_rate, fp_rate, 0);
    return scalar;
}
/*! \fn CvFaceSVMClassifier::Training_error(CvGaborResponseData & gabordata, CvGaborFeaturePool & new_features) const */
/**
 * Build the training matrix and labels from the Gabor feature pool, then
 * evaluate the classifier on them (label 1.0 = positive, 2.0 = negative).
 * @return cvScalar(error rate, true-positive rate, false-positive rate, 0).
 */
CvScalar CvFaceSVMClassifier::Training_error(CvGaborResponseData & gabordata,
                                             CvGaborFeaturePool & new_features) const
{
    CvMat * train_data = GetDataFromFeatures( gabordata, new_features );
    CvMat * labels = GetLabelsFromFeatures( gabordata, new_features );

    CvSize size = cvGetSize(labels);
    int nsamples = size.width * size.height; // labels may be a row or column vector

    // count positives (1.0) and negatives (2.0)
    int numpositive = 0, numnegative = 0;
    for(int i = 0; i < nsamples; i++)
    {
        double value = cvGetReal1D( labels, i );
        if ( value == 1.0 ) numpositive++;
        else if( value == 2.0 ) numnegative++;
    }
    assert((numpositive+numnegative) == nsamples );

    int nelements = new_features.getSize();
    CvMat * sample = cvCreateMat(1, nelements, CV_32FC1);

    int numerror = 0, numtruepositive = 0, numfalsepositive = 0;
    for(int i = 0; i < nsamples; i++)
    {
        cvGetRow( train_data, sample, i );
        double pre_label = Predict( sample );
        double label = cvGetReal1D( labels, i );
        if((pre_label == 1.0)&&(label == 1.0))
        {
            numtruepositive++;
        }
        if(pre_label != label)
        {
            if((pre_label == 1.0)&&(label == 2.0))
                numfalsepositive++;
            numerror++;
        }
    }

    double error = (double)numerror/(double)nsamples;
    double tp_rate = (double)numtruepositive/(double)numpositive;
    double fp_rate = (double)numfalsepositive/(double)numnegative;

    cvReleaseMat(&sample);
    cvReleaseMat(&train_data);
    cvReleaseMat(&labels);
    CvScalar scalar = cvScalar(error, tp_rate, fp_rate, 0);
    return scalar;
}
/**
 * Re-project the calibration object points with each view's estimated
 * extrinsics and the shared intrinsics, and compare against the measured
 * image points.
 * @param per_view_errors optional out: average L1 error per view.
 * @return average per-point L1 reprojection error over all views.
 */
double compute_reprojection_error( const CvMat* object_points,
        const CvMat* rot_vects, const CvMat* trans_vects,
        const CvMat* camera_matrix, const CvMat* dist_coeffs,
        const CvMat* image_points, const CvMat* point_counts,
        CvMat* per_view_errors )
{
    CvMat* image_points2 = cvCreateMat( image_points->rows,
        image_points->cols, image_points->type );
    int i, image_count = rot_vects->rows, points_so_far = 0;
    double total_err = 0, err;

    for( i = 0; i < image_count; i++ )
    {
        // Views are stored concatenated column-wise; slice out this view.
        CvMat object_points_i, image_points_i, image_points2_i;
        int point_count = point_counts->data.i[i];
        CvMat rot_vect, trans_vect;
        cvGetCols( object_points, &object_points_i,
            points_so_far, points_so_far + point_count );
        cvGetCols( image_points, &image_points_i,
            points_so_far, points_so_far + point_count );
        cvGetCols( image_points2, &image_points2_i,
            points_so_far, points_so_far + point_count );
        points_so_far += point_count;
        cvGetRow( rot_vects, &rot_vect, i );
        cvGetRow( trans_vects, &trans_vect, i );

        // Project the object points with this view's rotation/translation.
        cvProjectPoints2( &object_points_i, &rot_vect, &trans_vect,
                          camera_matrix, dist_coeffs, &image_points2_i,
                          0, 0, 0, 0, 0 );
        err = cvNorm( &image_points_i, &image_points2_i, CV_L1 );
        if( per_view_errors )
            per_view_errors->data.db[i] = err/point_count;
        total_err += err;
    }

    cvReleaseMat( &image_points2 );
    return total_err/points_so_far;
}
/**
 * Warp the grid by @p _homography and draw every triangle edge into @p im.
 * Note: overwrites 'transformed' in place (including the perspective divide).
 */
void LightCollector::drawGrid(IplImage *im, CvMat *_homography)
{
    // apply the homography to every mesh vertex
    cvMatMul(_homography, vertices, transformed);

    // Perspective divide in place: rows 0 and 1 (x, y) divided by row 2 (w).
    CvMat r1,r2,r3;
    cvGetRow(transformed, &r1, 0);
    cvGetRow(transformed, &r2, 1);
    cvGetRow(transformed, &r3, 2);
    cvDiv(&r1,&r3,&r1);
    cvDiv(&r2,&r3,&r2);

    // Draw the three edges of every triangle in white.
    for (int t=0; t<nbTri;t++) {
        int pts[3][2];
        for (int i=0; i<3; i++) {
            pts[i][0] = cvRound(CV_MAT_ELEM(*transformed, float, 0, triangles[t*3+i]));
            pts[i][1] = cvRound(CV_MAT_ELEM(*transformed, float, 1, triangles[t*3+i]));
        }
        cvLine(im, cvPoint(pts[0][0], pts[0][1]), cvPoint(pts[1][0], pts[1][1]), cvScalarAll(255), 1,4,0);
        cvLine(im, cvPoint(pts[1][0], pts[1][1]), cvPoint(pts[2][0], pts[2][1]), cvScalarAll(255), 1,4,0);
        cvLine(im, cvPoint(pts[2][0], pts[2][1]), cvPoint(pts[0][0], pts[0][1]), cvScalarAll(255), 1,4,0);
    }
}
/// Run serial GBT prediction over the samples in [range.begin(), range.end()),
/// writing one result per index into 'predictions'.
virtual void operator()(const cv::BlockedRange& range) const
{
    CvMat sampleRow;
    CvMat missingRow;
    const int first = range.begin();
    const int last = range.end();

    for (int i = first; i < last; ++i)
    {
        // 'idx' optionally remaps the logical position to a sample row.
        const int sampleIdx = idx ? idx->data.i[i] : i;
        cvGetRow(samples, &sampleRow, sampleIdx);

        // Pass the matching missing-value mask row only when one exists.
        CvMat* missPtr = 0;
        if (missing)
        {
            cvGetRow(missing, &missingRow, sampleIdx);
            missPtr = &missingRow;
        }
        predictions[i] = gbt->predict_serial(&sampleRow, missPtr, 0, slice);
    }
} // Sample_predictor::operator()
/**
 * Read samples from @p file_name, run the trained random-trees model on
 * every row, and print the success/failure counts and the success rate.
 */
void CvRTDriver::CvRTClassifyDriver(string &file_name)
{
    cv::Mat data, responses, missing;
    readData(file_name, data, responses, missing);

    // Copy the cv::Mat inputs into legacy C-API matrices for predict().
    CvMat *data_m = cvCreateMat(data.rows, data.cols, data.type());
    CvMat tmp = data;
    cvCopy(&tmp, data_m);
    CvMat *missing_m = cvCreateMat(missing.rows, missing.cols, missing.type());
    tmp = missing;
    cvCopy(&tmp, missing_m);
    CvMat *responses_m = cvCreateMat(responses.rows, responses.cols, responses.type());
    tmp = responses;
    cvCopy(&tmp, responses_m);

    // compute hit-rate on the training database, demonstrates predict usage.
    int success = 0;
    int fail = 0;
    for (int i = 0; i < data_m->rows; i++) {
        CvMat sample, mask;
        cvGetRow(data_m, &sample, i);
        cvGetRow(missing_m, &mask, i);
        float r = rtree->predict(&sample, &mask);
#ifdef CLASSIFY
        if (r == responses_m->data.i[i])
            success++;
        else
            fail++;
#else
        if (r == responses_m->data.fl[i])
            success++;
        else
            fail++;
#endif // CLASSIFY
    }

    cout << "Among " << success + fail << " cases, there are " << success
         << " success and " << fail << " fail" << endl;
    cout << "The rate is " << ((double)success) / (success + fail) << endl;

    // Release the C-API copies (previously leaked).
    cvReleaseMat(&data_m);
    cvReleaseMat(&missing_m);
    cvReleaseMat(&responses_m);
}
/**
 * Compute the classification error (percent) of an MLP on @p _data.
 * The network has one output per class; the predicted class is the arg-max
 * output, compared against the stored response mapped through @p cls_map.
 * @param resp_labels optional out: per-sample predicted class indices.
 * @return error rate in percent, or -FLT_MAX when there are no samples.
 */
float ann_calc_error( CvANN_MLP* ann, CvMLData* _data, map<int, int>& cls_map, int type , vector<float> *resp_labels )
{
    float err = 0;
    const CvMat* responses = _data->get_responses();
    const CvMat* sample_idx = (type == CV_TEST_ERROR) ? _data->get_test_sample_idx() : _data->get_train_sample_idx();
    int* sidx = sample_idx ? sample_idx->data.i : 0;
    // element step for non-continuous response matrices
    int r_step = CV_IS_MAT_CONT(responses->type) ? 1 : responses->step / CV_ELEM_SIZE(responses->type);
    CvMat predictors;
    ann_check_data_and_get_predictors( _data, &predictors );
    int sample_count = sample_idx ? sample_idx->cols : 0;
    // train error with no explicit index set means "score all rows"
    sample_count = (type == CV_TRAIN_ERROR && sample_count == 0) ? predictors.rows : sample_count;
    float* pred_resp = 0;
    vector<float> innresp;
    if( sample_count > 0 )
    {
        if( resp_labels )
        {
            resp_labels->resize( sample_count );
            pred_resp = &((*resp_labels)[0]);
        }
        else
        {
            // internal fallback buffer so pred_resp is always writable below
            innresp.resize( sample_count );
            pred_resp = &(innresp[0]);
        }
    }
    int cls_count = (int)cls_map.size();
    Mat output( 1, cls_count, CV_32FC1 );
    CvMat _output = CvMat(output);
    map<int, int>::iterator b_it = cls_map.begin(); // NOTE(review): unused
    for( int i = 0; i < sample_count; i++ )
    {
        CvMat sample;
        int si = sidx ? sidx[i] : i;
        cvGetRow( &predictors, &sample, si );
        ann->predict( &sample, &_output );
        // predicted class = column of the maximum network output
        CvPoint best_cls = {0,0};
        cvMinMaxLoc( &_output, 0, 0, 0, &best_cls, 0 );
        int r = cvRound(responses->data.fl[si*r_step]);
        // stored responses are expected to be (near-)integral class labels
        CV_DbgAssert( fabs(responses->data.fl[si*r_step]-r) < FLT_EPSILON );
        r = cls_map[r];
        int d = best_cls.x == r ? 0 : 1;
        err += d;
        pred_resp[i] = (float)best_cls.x;
    }
    err = sample_count ? err / (float)sample_count * 100 : -FLT_MAX;
    return err;
}
void FacePredict::CalcMeanTextureParams(const CvMat* GroupTextures, int group) { int nSamples = GroupTextures->rows; CvMat mParams; cvGetRow(__TextureParamGroups, &mParams, group); //resize the mParams CvMat* lamda = cvCreateMat(1, __nTextureModes, CV_64FC1); CvMat* oneTexture = cvCreateMat(1, GroupTextures->cols, CV_64FC1); for (int i = 0; i < nSamples; i++) { cvGetRow(GroupTextures, oneTexture, i); __texture.CalcParams(oneTexture, lamda); cvAdd(&mParams, lamda, &mParams); } CvMat * size = cvCreateMat(1, __nTextureModes, CV_64FC1); for (int i = 0; i < __nTextureModes; i++) cvmSet(size, 0, i, nSamples); cvDiv(&mParams, size, &mParams); cvReleaseMat(&lamda); cvReleaseMat(&size); cvReleaseMat(&oneTexture); }
/*
Calculates a planar homography from point correspondeces using the direct
linear transform.  Intended for use as a ransac_xform_fn.

@param pts array of points
@param mpts array of corresponding points; each pts[i], i=0..n-1, corresponds
	to mpts[i]
@param n number of points in both pts and mpts; must be at least 4

@return Returns the 3x3 planar homography matrix that transforms points
	in pts to their corresponding points in mpts or NULL if fewer than 4
	correspondences were provided
*/
CvMat* dlt_homog( CvPoint2D64f* pts, CvPoint2D64f* mpts, int n )
{
    CvMat* H, * A, * VT, * D, h, v9;
    double _h[9];
    int i;

    if( n < 4 )
        return NULL;

    /* set up matrices so we can unstack homography into h; Ah = 0 */
    A = cvCreateMat( 2*n, 9, CV_64FC1 );
    cvZero( A );
    for( i = 0; i < n; i++ )
    {
        /* two DLT constraint rows per correspondence */
        cvmSet( A, 2*i, 3, -pts[i].x );
        cvmSet( A, 2*i, 4, -pts[i].y );
        cvmSet( A, 2*i, 5, -1.0  );
        cvmSet( A, 2*i, 6, mpts[i].y * pts[i].x );
        cvmSet( A, 2*i, 7, mpts[i].y * pts[i].y );
        cvmSet( A, 2*i, 8, mpts[i].y );
        cvmSet( A, 2*i+1, 0, pts[i].x );
        cvmSet( A, 2*i+1, 1, pts[i].y );
        cvmSet( A, 2*i+1, 2, 1.0  );
        cvmSet( A, 2*i+1, 6, -mpts[i].x * pts[i].x );
        cvmSet( A, 2*i+1, 7, -mpts[i].x * pts[i].y );
        cvmSet( A, 2*i+1, 8, -mpts[i].x );
    }
    D = cvCreateMat( 9, 9, CV_64FC1 );
    VT = cvCreateMat( 9, 9, CV_64FC1 );
    /* h is the right singular vector of the smallest singular value,
       i.e. the last row of V^T */
    cvSVD( A, D, NULL, VT, CV_SVD_MODIFY_A + CV_SVD_V_T );
    v9 = cvMat( 1, 9, CV_64FC1, NULL );
    cvGetRow( VT, &v9, 8 );
    h = cvMat( 1, 9, CV_64FC1, _h );
    cvCopy( &v9, &h, NULL );
    /* reinterpret the 9-vector _h as a 3x3 matrix */
    h = cvMat( 3, 3, CV_64FC1, _h );
    H = cvCreateMat( 3, 3, CV_64FC1 );
    cvConvert( &h, H );

    cvReleaseMat( &A );
    cvReleaseMat( &D );
    cvReleaseMat( &VT );
    return H;
}
//============================================================================ void AAM_TDM::AlignTextures(CvMat* AllTextures) { LOGD("Align textures to minimize the lighting variation ...\n"); int nsamples = AllTextures->rows; int npixels = AllTextures->cols; CvMat* meanTexture = cvCreateMat(1, npixels, CV_64FC1); CvMat* lastMeanEstimate = cvCreateMat(1, npixels, CV_64FC1); CvMat* constmeanTexture = cvCreateMat(1, npixels, CV_64FC1); CvMat ti; // calculate the mean texture AAM_TDM::CalcMeanTexture(AllTextures, meanTexture); AAM_TDM::ZeroMeanUnitLength(meanTexture); cvCopy(meanTexture, constmeanTexture); // do a number of alignment iterations until convergence double diff, diff_max = 1e-6; const int max_iter = 15; for(int iter = 0; iter < max_iter; iter++) { cvCopy(meanTexture, lastMeanEstimate); //align all textures to the mean texture estimate for(int i = 0; i < nsamples; i++) { cvGetRow(AllTextures, &ti, i); AAM_TDM::NormalizeTexture(meanTexture, &ti); } //estimate new mean texture AAM_TDM::CalcMeanTexture(AllTextures, meanTexture); AAM_TDM::NormalizeTexture(constmeanTexture, meanTexture); // test if the mean estimate has converged diff = cvNorm(meanTexture, lastMeanEstimate); LOGD("\tAlignment iteration #%i, mean texture est. diff. = %g\n", iter, diff ); if(diff <= diff_max) break; } cvReleaseMat(&meanTexture); cvReleaseMat(&lastMeanEstimate); cvReleaseMat(&constmeanTexture); }
//============================================================================ void AAM_TDM::Train(const file_lists& pts_files, const file_lists& img_files, const AAM_PAW& m_warp, double texture_percentage /* = 0.975 */, bool registration /* = true */) { int nPoints = m_warp.nPoints(); int nPixels = m_warp.nPix()*3; int nSamples = pts_files.size(); CvMat *AllTextures = cvCreateMat(nSamples, nPixels, CV_64FC1); CvMat * matshape = cvCreateMat(1, nPoints*2, CV_64FC1); for(int i = 0; i < nSamples; i++) { IplImage* image = cvLoadImage(img_files[i].c_str(), -1); AAM_Shape trueshape; if(!trueshape.ReadAnnotations(pts_files[i])) trueshape.ScaleXY(image->width, image->height); trueshape.Point2Mat(matshape); AAM_Common::CheckShape(matshape, image->width, image->height); CvMat t; cvGetRow(AllTextures, &t, i); m_warp.CalcWarpTexture(matshape, image, &t); cvReleaseImage(&image); } cvReleaseMat(&matshape); // align texture so as to minimize the lighting variation AAM_TDM::AlignTextures(AllTextures); //now do pca DoPCA(AllTextures, texture_percentage); if(registration) SaveSeriesTemplate(AllTextures, m_warp); cvReleaseMat(&AllTextures); }
void FacePredict::CalcMeanShapeParams(const std::vector<AAM_Shape> &GroupShapes, int group) { int nSamples = GroupShapes.size(); CvMat mParams; cvGetRow(__ShapeParamGroups, &mParams, group); CvMat* p = cvCreateMat(1, __nShapeModes, CV_64FC1); CvMat* pq = cvCreateMat(1, 4+__nShapeModes, CV_64FC1); for (int i = 0; i < nSamples; i++) { __shape.CalcParams(GroupShapes[i], pq); cvGetCols(pq, p, 4, 4+__nShapeModes); cvAdd(&mParams, p, &mParams); } CvMat * size = cvCreateMat(1, __nShapeModes, CV_64FC1); for (int i = 0; i < __nShapeModes; i++) cvmSet(size, 0, i, nSamples); cvDiv(&mParams, size, &mParams); cvReleaseMat(&p); cvReleaseMat(&pq); cvReleaseMat(&size); }
/// Computes the dominant global motion direction, in degrees [0..360),
/// over the masked region of an orientation image, using the motion
/// history image (MHI) values as recency weights.
///
/// @param orientation         per-pixel motion orientation (CV_32FC1, degrees)
/// @param maskimg             8-bit mask selecting valid motion pixels
/// @param mhiimg              motion history image (CV_32FC1)
/// @param curr_mhi_timestamp  nominal current timestamp (overridden below by
///                            the actual max value found in the MHI)
/// @param mhi_duration        maximal MHI track duration; must be positive
/// @return the weighted dominant orientation in degrees
CV_IMPL double
cvCalcGlobalOrientation( const void* orientation, const void* maskimg, const void* mhiimg,
                         double curr_mhi_timestamp, double mhi_duration )
{
    int hist_size = 12;   // 12 bins of 30 degrees over [0, 360)
    cv::Ptr<CvHistogram> hist;

    // Accept IplImage or CvMat inputs uniformly.
    CvMat  mhistub, *mhi = cvGetMat(mhiimg, &mhistub);
    CvMat  maskstub, *mask = cvGetMat(maskimg, &maskstub);
    CvMat  orientstub, *orient = cvGetMat(orientation, &orientstub);
    void*  _orient;
    float _ranges[] = { 0, 360 };
    float* ranges = _ranges;
    int base_orient;
    float shift_orient = 0, shift_weight = 0;
    float a, b, fbase_orient;
    float delbound;
    CvMat mhi_row, mask_row, orient_row;
    int x, y, mhi_rows, mhi_cols;

    // --- argument validation -------------------------------------------
    if( !CV_IS_MASK_ARR( mask ))
        CV_Error( CV_StsBadMask, "" );

    if( CV_MAT_TYPE( mhi->type ) != CV_32FC1 || CV_MAT_TYPE( orient->type ) != CV_32FC1 )
        CV_Error( CV_StsUnsupportedFormat, "MHI and orientation must be single-channel floating-point images" );

    if( !CV_ARE_SIZES_EQ( mhi, mask ) || !CV_ARE_SIZES_EQ( orient, mhi ))
        CV_Error( CV_StsUnmatchedSizes, "" );

    if( mhi_duration <= 0 )
        CV_Error( CV_StsOutOfRange, "MHI duration must be positive" );

    if( orient->data.ptr == mhi->data.ptr )
        CV_Error( CV_StsInplaceNotSupported, "orientation image must be different from MHI" );

    // calculate histogram of different orientation values
    hist = cvCreateHist( 1, &hist_size, CV_HIST_ARRAY, &ranges );
    _orient = orient;
    cvCalcArrHist( &_orient, hist, 0, mask );

    // find the maximum index (the dominant orientation)
    cvGetMinMaxHistValue( hist, 0, 0, 0, &base_orient );
    fbase_orient = base_orient*360.f/hist_size;   // bin index -> degrees

    // override timestamp with the maximum value in MHI
    cvMinMaxLoc( mhi, 0, &curr_mhi_timestamp, 0, 0, mask );

    // find the shift relative to the dominant orientation as weighted sum
    // of relative angles; weight = a*mhi + b maps MHI values in
    // (t - duration, t] linearly onto (1/255, 1] (recent motion weighs more)
    a = (float)(254. / 255. / mhi_duration);
    b = (float)(1. - curr_mhi_timestamp * a);
    delbound = (float)(curr_mhi_timestamp - mhi_duration);
    mhi_rows = mhi->rows;
    mhi_cols = mhi->cols;

    // Collapse to a single logical row when all three arrays are continuous.
    if( CV_IS_MAT_CONT( mhi->type & mask->type & orient->type ))
    {
        mhi_cols *= mhi_rows;
        mhi_rows = 1;
    }

    cvGetRow( mhi, &mhi_row, 0 );
    cvGetRow( mask, &mask_row, 0 );
    cvGetRow( orient, &orient_row, 0 );

    /*
       a = 254/(255*dt)
       b = 1 - t*a = 1 - 254*t/(255*dur) =
       (255*dt - 254*t)/(255*dt) =
       (dt - (t - dt)*254)/(255*dt);
       --------------------------------------------------------
       ax + b = 254*x/(255*dt) + (dt - (t - dt)*254)/(255*dt) =
       (254*x + dt - (t - dt)*254)/(255*dt) =
       ((x - (t - dt))*254 + dt)/(255*dt) =
       (((x - (t - dt))/dt)*254 + 1)/255 = (((x - low_time)/dt)*254 + 1)/255
    */
    for( y = 0; y < mhi_rows; y++ )
    {
        // Re-point the row headers at row y of each array.
        mhi_row.data.ptr = mhi->data.ptr + mhi->step*y;
        mask_row.data.ptr = mask->data.ptr + mask->step*y;
        orient_row.data.ptr = orient->data.ptr + orient->step*y;

        for( x = 0; x < mhi_cols; x++ )
            if( mask_row.data.ptr[x] != 0 && mhi_row.data.fl[x] > delbound )
            {
                /*
                   orient in 0..360, base_orient in 0..360
                   -> (rel_angle = orient - base_orient) in -360..360.
                   rel_angle is translated to -180..180
                */
                float weight = mhi_row.data.fl[x] * a + b;
                float rel_angle = orient_row.data.fl[x] - fbase_orient;

                rel_angle += (rel_angle < -180 ? 360 : 0);
                rel_angle += (rel_angle > 180 ? -360 : 0);

                // Only pixels within 45 degrees of the dominant orientation
                // contribute to the refinement shift.
                if( fabs(rel_angle) < 45 )
                {
                    shift_orient += weight * rel_angle;
                    shift_weight += weight;
                }
            }
    }

    // add the dominant orientation and the relative shift
    if( shift_weight == 0 )
        shift_weight = 0.01f;   // avoid division by zero when nothing contributed

    fbase_orient += shift_orient / shift_weight;
    // Wrap the result back into [0, 360).
    fbase_orient -= (fbase_orient < 360 ? 0 : 360);
    fbase_orient += (fbase_orient >= 0 ? 0 : 360);

    return fbase_orient;
}
/// Computes per-pixel motion-gradient orientation from an MHI and writes a
/// validity mask: orientation is the angle of the MHI's spatial gradient;
/// mask is zeroed where the gradient is negligible or where the local MHI
/// value spread (max - min over the aperture neighborhood) falls outside
/// [delta1, delta2].
///
/// @param mhiimg        motion history image (CV_32FC1)
/// @param maskimg       output 8-bit validity mask
/// @param orientation   output orientation image (CV_32FC1, degrees)
/// @param delta1        one bound on the accepted local MHI value spread
/// @param delta2        the other bound (swapped with delta1 if larger)
/// @param aperture_size Sobel aperture; must be 3, 5 or 7
CV_IMPL void
cvCalcMotionGradient( const CvArr* mhiimg, CvArr* maskimg,
                      CvArr* orientation,
                      double delta1, double delta2,
                      int aperture_size )
{
    // NOTE: these two buffers are reused — first as Sobel dX/dY outputs,
    // later as the erode (local min) / dilate (local max) results.
    cv::Ptr<CvMat> dX_min, dY_max;

    // Accept IplImage or CvMat inputs uniformly.
    CvMat  mhistub, *mhi = cvGetMat(mhiimg, &mhistub);
    CvMat  maskstub, *mask = cvGetMat(maskimg, &maskstub);
    CvMat  orientstub, *orient = cvGetMat(orientation, &orientstub);
    CvMat  dX_min_row, dY_max_row, orient_row, mask_row;
    CvSize size;
    int x, y;

    // Gradient magnitude threshold scaled with the aperture area.
    float  gradient_epsilon = 1e-4f * aperture_size * aperture_size;
    float  min_delta, max_delta;

    // --- argument validation -------------------------------------------
    if( !CV_IS_MASK_ARR( mask ))
        CV_Error( CV_StsBadMask, "" );

    if( aperture_size < 3 || aperture_size > 7 || (aperture_size & 1) == 0 )
        CV_Error( CV_StsOutOfRange, "aperture_size must be 3, 5 or 7" );

    if( delta1 <= 0 || delta2 <= 0 )
        CV_Error( CV_StsOutOfRange, "both delta's must be positive" );

    if( CV_MAT_TYPE( mhi->type ) != CV_32FC1 || CV_MAT_TYPE( orient->type ) != CV_32FC1 )
        CV_Error( CV_StsUnsupportedFormat,
        "MHI and orientation must be single-channel floating-point images" );

    if( !CV_ARE_SIZES_EQ( mhi, mask ) || !CV_ARE_SIZES_EQ( orient, mhi ))
        CV_Error( CV_StsUnmatchedSizes, "" );

    if( orient->data.ptr == mhi->data.ptr )
        CV_Error( CV_StsInplaceNotSupported, "orientation image must be different from MHI" );

    // Normalize so that delta1 <= delta2.
    if( delta1 > delta2 )
    {
        double t;
        CV_SWAP( delta1, delta2, t );
    }

    size = cvGetMatSize( mhi );
    min_delta = (float)delta1;
    max_delta = (float)delta2;
    dX_min = cvCreateMat( mhi->rows, mhi->cols, CV_32F );
    dY_max = cvCreateMat( mhi->rows, mhi->cols, CV_32F );

    // calc Dx and Dy
    cvSobel( mhi, dX_min, 1, 0, aperture_size );
    cvSobel( mhi, dY_max, 0, 1, aperture_size );

    // Row headers re-pointed per iteration below.
    cvGetRow( dX_min, &dX_min_row, 0 );
    cvGetRow( dY_max, &dY_max_row, 0 );
    cvGetRow( orient, &orient_row, 0 );
    cvGetRow( mask, &mask_row, 0 );

    // calc gradient
    for( y = 0; y < size.height; y++ )
    {
        dX_min_row.data.ptr = dX_min->data.ptr + y*dX_min->step;
        dY_max_row.data.ptr = dY_max->data.ptr + y*dY_max->step;
        orient_row.data.ptr = orient->data.ptr + y*orient->step;
        mask_row.data.ptr = mask->data.ptr + y*mask->step;
        // Gradient angle in degrees (last arg = 1).
        cvCartToPolar( &dX_min_row, &dY_max_row, 0, &orient_row, 1 );

        // make orientation zero where the gradient is very small
        for( x = 0; x < size.width; x++ )
        {
            float dY = dY_max_row.data.fl[x];
            float dX = dX_min_row.data.fl[x];

            if( fabs(dX) < gradient_epsilon && fabs(dY) < gradient_epsilon )
            {
                mask_row.data.ptr[x] = 0;
                orient_row.data.i[x] = 0;   // write integer 0 == float +0.0 bits
            }
            else
                mask_row.data.ptr[x] = 1;
        }
    }

    // Local MHI min (erode) and max (dilate) over the aperture neighborhood,
    // reusing the Sobel buffers.
    cvErode( mhi, dX_min, 0, (aperture_size-1)/2);
    cvDilate( mhi, dY_max, 0, (aperture_size-1)/2);

    // mask off pixels which have little motion difference in their neighborhood
    for( y = 0; y < size.height; y++ )
    {
        dX_min_row.data.ptr = dX_min->data.ptr + y*dX_min->step;
        dY_max_row.data.ptr = dY_max->data.ptr + y*dY_max->step;
        mask_row.data.ptr = mask->data.ptr + y*mask->step;
        orient_row.data.ptr = orient->data.ptr + y*orient->step;

        for( x = 0; x < size.width; x++ )
        {
            // d0 = local max - local min of the MHI around this pixel.
            float d0 = dY_max_row.data.fl[x] - dX_min_row.data.fl[x];

            if( mask_row.data.ptr[x] == 0 || d0 < min_delta || max_delta < d0 )
            {
                mask_row.data.ptr[x] = 0;
                orient_row.data.i[x] = 0;
            }
        }
    }
}