// TODO ARCADI CONTINUE // Create copy of these two functions, and modify so that they use histograms double GazeTracker::imagedistance(const IplImage *im1, const IplImage *im2) { //cout << "im1 size: " << im1->width << ", " << im1->height << ", " << im1->depth << ", " << im1->nChannels << endl; //cout << "im2 size: " << im2->width << ", " << im2->height << ", " << im2->depth << ", " << im2->nChannels << endl; double norm = cvNorm(im1, im2, CV_L2); return norm*norm; }
// Compares `image` against the reference picture stored in `file`.
// Returns 1 when the file cannot be loaded; otherwise the count of
// differing pixels (L1 norm of the 0/1 difference mask divided by the
// 3 channels).
static int CheckImage(IplImage* image, char* file, char* /*funcname*/)
{
    IplImage* reference = cvLoadImage( file, 1 );
    if( !reference )
    {
        trsWrite( ATS_CON | ATS_LST, "can't read image\n" );
        return 1;
    }

    // Reduce per-pixel absolute differences to a 0/1 mask, then count
    // the set channels.
    cvAbsDiff( image, reference, reference );
    cvThreshold( reference, reference, 0, 1, CV_THRESH_BINARY );
    int differing = cvRound( cvNorm( reference, 0, CV_L1 ) ) / 3;

    cvReleaseImage( &reference );
    return differing;
}
int CV_CannyTest::validate_test_results( int test_case_idx ) { int code = CvTS::OK, nz0; prepare_to_validation(test_case_idx); double err = cvNorm(&test_mat[OUTPUT][0], &test_mat[REF_OUTPUT][0], CV_L1); if( err == 0 ) goto _exit_; if( err != cvRound(err) || cvRound(err)%255 != 0 ) { ts->printf( CvTS::LOG, "Some of the pixels, produced by Canny, are not 0's or 255's; the difference is %g\n", err ); code = CvTS::FAIL_INVALID_OUTPUT; goto _exit_; } nz0 = cvCountNonZero(&test_mat[REF_OUTPUT][0]); err = (err/255/MAX(nz0,100))*100; if( err > 1 ) { ts->printf( CvTS::LOG, "Too high percentage of non-matching edge pixels = %g%%\n", err); code = CvTS::FAIL_BAD_ACCURACY; goto _exit_; } _exit_: if( code < 0 ) ts->set_failed_test_info( code ); return code; }
//============================================================================ void AAM_TDM::ZeroMeanUnitLength(CvMat* Texture) { CvScalar mean = cvAvg(Texture); cvSubS(Texture, mean, Texture); double norm = cvNorm(Texture); cvConvertScale(Texture, Texture, 1.0/norm); }
// Returns a newly allocated unit-norm copy of `vector` (caller must
// release it with cvReleaseMat).
// BUG FIX: a zero vector previously caused a division by zero; it is now
// returned unscaled (still all zeros).
CvMat *normalize(const CvMat* vector)
{
    CvMat *unit = cvCloneMat(vector);
    double normVal = cvNorm(vector);
    if (normVal > 0)
        cvScale(vector, unit, 1 / normVal);
    return unit;
}
// Returns the Euclidean distance between two 2D points.
double CalculateScore::GetDistance(CvPoint2D32f pt1, CvPoint2D32f pt2)
{
    float data[] = { pt1.x - pt2.x, pt1.y - pt2.y };
    // BUG FIX: pass the element pointer `data`, not `&data`
    // (a float(*)[2]) — same address, wrong type.
    CvMat mat = cvMat(2, 1, CV_32FC1, data);
    return cvNorm(&mat, NULL, CV_L2);
}
// Finds the stored pose sample closest (L2) to `patch`.
// Outputs: pose_idx = index of the best-matching sample (-1 if none of a
// compatible size exists), distance = its L2 distance from the
// intensity-normalized patch.
void CvOneWayDescriptor::EstimatePose(IplImage* patch, int& pose_idx, float& distance) const
{
    distance = 1e10;
    pose_idx = -1;
    CvRect roi = cvGetImageROI(patch);
    IplImage* patch_32f = cvCreateImage(cvSize(roi.width, roi.height), IPL_DEPTH_32F, patch->nChannels);
    float sum = (float)cvSum(patch).val[0];
    // BUG FIX: an all-black patch has sum == 0 and previously produced a
    // division by zero; leave such a patch unscaled.
    cvConvertScale(patch, patch_32f, sum != 0 ? 1/sum : 1);
    for(int i = 0; i < m_pose_count; i++)
    {
        // skip samples whose size does not match the patch
        if(m_samples[i]->width != patch_32f->width || m_samples[i]->height != patch_32f->height)
        {
            continue;
        }
        float dist = (float)cvNorm(m_samples[i], patch_32f);
        if(dist < distance)
        {
            distance = dist;
            pose_idx = i;
        }
    }
    cvReleaseImage(&patch_32f);
}
// Returns the L2 norm of (obs - res), i.e. the residual magnitude between
// an observation vector and a model result vector (both column vectors).
double Err_Func2(CvMat *obs, CvMat *res)
{
    CvMat *tmp = cvCreateMat(obs->rows, 1, CV_64F);
    cvSub(obs, res, tmp);
    double e = cvNorm(tmp);
    cvReleaseMat(&tmp);   // BUG FIX: tmp was leaked on every call
    return e;
}
int main() { IplImage* image=cvLoadImage("C:\\test0.jpg",CV_LOAD_IMAGE_GRAYSCALE); IplImage* image2=cvLoadImage("C:\\test2.jpg",CV_LOAD_IMAGE_GRAYSCALE); std::cout<<"1-norm is : "<<cvNorm(image,image2,CV_L2); std::cout<<"\n End of doge_"; return 0; }
// auxiliary functions // 1. nbayes void nbayes_check_data( CvMLData* _data ) { if( _data->get_missing() ) CV_Error( CV_StsBadArg, "missing values are not supported" ); const CvMat* var_types = _data->get_var_types(); bool is_classifier = var_types->data.ptr[var_types->cols-1] == CV_VAR_CATEGORICAL; if( ( fabs( cvNorm( var_types, 0, CV_L1 ) - (var_types->rows + var_types->cols - 2)*CV_VAR_ORDERED - CV_VAR_CATEGORICAL ) > FLT_EPSILON ) || !is_classifier ) CV_Error( CV_StsBadArg, "incorrect types of predictors or responses" ); }
// Feeds one preprocessed eye frame into the blink-state machine.
// Maintains a running-average eye image and classifies the frame by how
// far (L2) it is from that average, relative to the accumulated distance.
void BlinkDetector::update(const boost::scoped_ptr<IplImage> &eyeFloat)
{
    if (!_initialized)
    {
        // first frame seeds the average eye image
        cvCopy(eyeFloat.get(), _averageEye.get());
        _initialized = true;
    }

    const double distance = cvNorm(eyeFloat.get(), _averageEye.get(), CV_L2);
    _accumulator.update(distance);
    _states.updateState(distance / _accumulator.getValue());

    // slowly blend the current frame into the long-term average
    cvRunningAvg(eyeFloat.get(), _averageEye.get(), 0.05);
}
//It tries to find out the class //that frame belongs to. //It does it using the L2 norm. It sorts the results //and select the nearest core. If cores are empty //it returns -1 int successiveKMeans::findClass(IplImage *frame) { int index=-1; double temp_cost1,temp_cost2; for(int i=0;i<this->size;i++) { if(i==0) { temp_cost1=cvNorm(frame,this->cores[i],CV_L2,NULL); index=0; } else { temp_cost2=cvNorm(frame,this->cores[i],CV_L2,NULL); if(temp_cost2 < temp_cost1) { index=i; temp_cost1=temp_cost2; } } } return index; }
/*! \fn CvGabor::normalize( const CvArr* src, CvArr* dst, double a, double b, int norm_type, const CvArr* mask ) */ void CvGabor::normalize( const CvArr* src, CvArr* dst, double a, double b, int norm_type, const CvArr* mask ) { CvMat* tmp = 0; __BEGIN__; double scale, shift; if( norm_type == CV_MINMAX ) { double smin = 0, smax = 0; double dmin = MIN( a, b ), dmax = MAX( a, b ); cvMinMaxLoc( src, &smin, &smax, 0, 0, mask ); scale = (dmax - dmin)*(smax - smin > DBL_EPSILON ? 1./(smax - smin) : 0); shift = dmin - smin*scale; } else if( norm_type == CV_L2 || norm_type == CV_L1 || norm_type == CV_C ) { CvMat *s = (CvMat*)src, *d = (CvMat*)dst; scale = cvNorm( src, 0, norm_type, mask ); scale = scale > DBL_EPSILON ? 1./scale : 0.; shift = 0; } else {} if( !mask ) cvConvertScale( src, dst, scale, shift ); else { CvMat stub, *dmat; cvConvertScale( src, tmp, scale, shift ); cvCopy( tmp, dst, mask ); } __END__; if( tmp ) cvReleaseMat( &tmp ); }
// find the farthest node in the "list" from "node" static inline CvSpillTreeNode* icvFarthestNode( CvSpillTreeNode* node, CvSpillTreeNode* list, int total ) { double farthest = -1.; CvSpillTreeNode* result = NULL; for ( int i = 0; i < total; i++ ) { double norm = cvNorm( node->center, list->center ); if ( norm > farthest ) { farthest = norm; result = list; } list = list->rc; } return result; }
//============================================================================ void AAM_TDM::AlignTextures(CvMat* AllTextures) { LOGD("Align textures to minimize the lighting variation ...\n"); int nsamples = AllTextures->rows; int npixels = AllTextures->cols; CvMat* meanTexture = cvCreateMat(1, npixels, CV_64FC1); CvMat* lastMeanEstimate = cvCreateMat(1, npixels, CV_64FC1); CvMat* constmeanTexture = cvCreateMat(1, npixels, CV_64FC1); CvMat ti; // calculate the mean texture AAM_TDM::CalcMeanTexture(AllTextures, meanTexture); AAM_TDM::ZeroMeanUnitLength(meanTexture); cvCopy(meanTexture, constmeanTexture); // do a number of alignment iterations until convergence double diff, diff_max = 1e-6; const int max_iter = 15; for(int iter = 0; iter < max_iter; iter++) { cvCopy(meanTexture, lastMeanEstimate); //align all textures to the mean texture estimate for(int i = 0; i < nsamples; i++) { cvGetRow(AllTextures, &ti, i); AAM_TDM::NormalizeTexture(meanTexture, &ti); } //estimate new mean texture AAM_TDM::CalcMeanTexture(AllTextures, meanTexture); AAM_TDM::NormalizeTexture(constmeanTexture, meanTexture); // test if the mean estimate has converged diff = cvNorm(meanTexture, lastMeanEstimate); LOGD("\tAlignment iteration #%i, mean texture est. diff. = %g\n", iter, diff ); if(diff <= diff_max) break; } cvReleaseMat(&meanTexture); cvReleaseMat(&lastMeanEstimate); cvReleaseMat(&constmeanTexture); }
// Computes the total reprojection error over all calibration views: each
// view's object points are projected with its extrinsics (rot/trans row i)
// and the shared intrinsics, then compared (L1) against the measured image
// points. Returns the average error per point; optionally fills per-view
// average errors into per_view_errors.
double compute_reprojection_error( const CvMat* object_points,
        const CvMat* rot_vects, const CvMat* trans_vects,
        const CvMat* camera_matrix, const CvMat* dist_coeffs,
        const CvMat* image_points, const CvMat* point_counts,
        CvMat* per_view_errors )
{
    CvMat* image_points2 = cvCreateMat( image_points->rows,
        image_points->cols, image_points->type );
    int i, image_count = rot_vects->rows, points_so_far = 0;
    double total_err = 0, err;

    for( i = 0; i < image_count; i++ )
    {
        CvMat object_points_i, image_points_i, image_points2_i;
        int point_count = point_counts->data.i[i];
        CvMat rot_vect, trans_vect;

        // slice out this view's points (views are stored contiguously)
        cvGetCols( object_points, &object_points_i,
            points_so_far, points_so_far + point_count );
        cvGetCols( image_points, &image_points_i,
            points_so_far, points_so_far + point_count );
        cvGetCols( image_points2, &image_points2_i,
            points_so_far, points_so_far + point_count );
        points_so_far += point_count;

        cvGetRow( rot_vects, &rot_vect, i );
        cvGetRow( trans_vects, &trans_vect, i );

        // project the model points into this view
        cvProjectPoints2( &object_points_i, &rot_vect, &trans_vect,
                          camera_matrix, dist_coeffs, &image_points2_i,
                          0, 0, 0, 0, 0 );
        err = cvNorm( &image_points_i, &image_points2_i, CV_L1 );
        if( per_view_errors )
            per_view_errors->data.db[i] = err/point_count;
        total_err += err;
    }

    cvReleaseMat( &image_points2 );
    return total_err/points_so_far;
}
int main(int argc, _TCHAR* argv[]) { cvNamedWindow( "Background Averaging", CV_WINDOW_AUTOSIZE ); CvCapture* capture = cvCreateFileCapture( "tree.avi" ); IplImage *frame, *mask1, *mask3; int frameCount = 0; while(1) { frameCount++; frame = cvQueryFrame( capture ); if( !frame ) break; CvSize sz = cvGetSize( frame ); mask1 = cvCreateImage( sz, IPL_DEPTH_8U, 1 ); mask3 = cvCreateImage( sz, IPL_DEPTH_8U, 3 ); if(frameCount == 1) AllocateImages( frame ); if( frameCount < 30 ){ accumulateBackground( frame ); }else if( frameCount == 30 ){ createModelsfromStats(); }else{ backgroundDiff( frame, mask1 ); cvCvtColor(mask1,mask3,CV_GRAY2BGR); cvNorm( mask3, mask3, CV_C, 0); cvThreshold(mask3, mask3, 100, 1, CV_THRESH_BINARY); cvMul( frame, mask3, frame, 1.0 ); cvShowImage( "Background Averaging", frame ); } char c = cvWaitKey(33); if( c == 27 ) break; } cvReleaseCapture( &capture ); cvDestroyWindow( "Background Averaging" ); DeallocateImages(); }
//============================================================================ double AAM_Basic::EstResidual(const IplImage* image, const CvMat* c, CvMat* est_s, CvMat* diff) { // generate model texture __cam.CalcTexture(__t_m, c); // generate model shape __cam.CalcShape(est_s, c, __current_q); //calculate warped texture if(!AAM_Basic::IsShapeWithinImage(est_s, image->width, image->height)) return -1; __cam.__paw.FasterGetWarpTextureFromMatShape(est_s, image, __t_s, true); __cam.__texture.AlignTextureToRef(__cam.__MeanG, __t_s); //calc pixel difference: g_s - g_m cvSub(__t_s, __t_m, diff); return cvNorm(diff); }
// Returns a 1xN CV_32SC1 matrix with the indices of the active variables
// (those with a nonzero entry in var_idx_mask), or 0 when all variables
// are active. The returned matrix is owned by this object and reused
// between calls.
const CvMat* CvMLData::get_var_idx()
{
    CV_FUNCNAME( "CvMLData::get_var_idx" );
    __BEGIN__;

    int avcount = 0;

    if ( !values )
        CV_ERROR( CV_StsInternal, "data is empty" );

    assert( var_idx_mask );

    // L1 norm of the 0/1 mask == number of active variables
    avcount = cvFloor( cvNorm( var_idx_mask, 0, CV_L1 ) );
    int* vidx;

    if ( avcount == values->cols )   // every variable active -> nothing to report
        return 0;

    // (re)allocate the output only when the active count changed
    if ( !var_idx_out || ( var_idx_out && var_idx_out->cols != avcount ) )
    {
        cvReleaseMat( &var_idx_out );
        var_idx_out = cvCreateMat( 1, avcount, CV_32SC1);
        // the response column is never a predictor; drop it from the mask
        if ( response_idx >=0 )
            var_idx_mask->data.ptr[response_idx] = 0;
    }

    // collect the indices of the still-active variables
    vidx = var_idx_out->data.i;
    for(int i = 0; i < var_idx_mask->cols; i++)
        if ( var_idx_mask->data.ptr[i] )
        {
            *vidx = i;
            vidx++;
        }

    __END__;

    return var_idx_out;
}
// Returns a 1xM CV_8UC1 matrix with the types of the active variables
// followed by the response type (when a response column is set). When all
// variables are active the full var_types matrix is returned unchanged.
// The filtered matrix is owned by this object and reused between calls.
const CvMat* CvMLData::get_var_types()
{
    CV_FUNCNAME( "CvMLData::get_var_types" );
    __BEGIN__;

    uchar *var_types_out_ptr = 0;
    int avcount, vt_size;

    if ( !values )
        CV_ERROR( CV_StsInternal, "data is empty" );

    assert( var_idx_mask );

    // L1 norm of the 0/1 mask == number of active variables
    avcount = cvFloor( cvNorm( var_idx_mask, 0, CV_L1 ) );
    vt_size = avcount + (response_idx >= 0);

    // all variables active (optionally minus a trailing response) ->
    // the unfiltered matrix already has the right layout
    if ( avcount == values->cols || (avcount == values->cols-1 && response_idx == values->cols-1) )
        return var_types;

    // (re)allocate the output only when the size changed
    if ( !var_types_out || ( var_types_out && var_types_out->cols != vt_size ) )
    {
        cvReleaseMat( &var_types_out );
        var_types_out = cvCreateMat( 1, vt_size, CV_8UC1 );
    }

    var_types_out_ptr = var_types_out->data.ptr;
    for( int i = 0; i < var_types->cols; i++)
    {
        // skip the response column and inactive variables
        if (i == response_idx || !var_idx_mask->data.ptr[i])
            continue;
        *var_types_out_ptr = var_types->data.ptr[i];
        var_types_out_ptr++;
    }
    // response type always goes last
    if ( response_idx >= 0 )
        *var_types_out_ptr = var_types->data.ptr[response_idx];

    __END__;

    return var_types_out;
}
// ***************************CHECKPOINT 5 Methods: Calculating Norms of Homographies******* // Calculate similarity of homographies void calculateNorms(double* confidences, CvMat** matches, int image_count, int match_count) { int i, j; CvMat* identity = create3DIdentity(); for (i = 0; i < match_count; i++) { if (!matches[i]) continue; confidences[i] = cvNorm(matches[i], identity, CV_L2, 0); // printf("\nIndex is: %d\n", i); // printMatrix(matches[i]); //printf("\n"); } // if (DEBUG) { printf("\n"); for (i = 0; i < image_count; i++) { for (j = 0; j < image_count; j++) { //printf("%.2f\t", confidences[i*image_count + j]); } //printf("\n"); } //} }
//============================================================================ double AAM_Basic::EstResidual(const IplImage* image, const CvMat* c_q, CvMat* s, CvMat* t_m, CvMat* t_s, CvMat* deltat) { CvMat c, q; cvGetCols(c_q, &q, 0, 4); cvGetCols(c_q, &c, 4, 4+__cam.nModes()); // generate model texture __cam.CalcTexture(t_m, &c); // generate model shape __cam.CalcShape(s, c_q); // generate warped texture AAM_Common::CheckShape(s, image->width, image->height); __cam.__paw.CalcWarpTexture(s, image, t_s); __cam.__texture.NormalizeTexture(__cam.__MeanG, t_s); // calculate pixel difference cvSub(t_m, t_s, deltat); return cvNorm(deltat); }
// Updates the motion-history image (MHI) with a new frame and draws, into
// dst, a "clock" (circle + direction arrow) for every detected motion
// component. State is kept in file-scope globals (buf, last, mhi, orient,
// segmask, mask, storage); images are (re)allocated when the frame size
// changes.
void update_mhi( IplImage* img, IplImage* dst, int diff_threshold )
{
    double timestamp = (double)clock()/CLOCKS_PER_SEC;  // current time in seconds
    CvSize size = cvSize(img->width,img->height);       // current frame size
    int i, idx1 = last, idx2;
    IplImage* silh;
    CvSeq* seq;
    CvRect comp_rect;
    double count;
    double angle;
    CvPoint center;
    double magnitude;
    CvScalar color;

    // allocate or reallocate all state images on first call / size change
    if( !mhi || mhi->width != size.width || mhi->height != size.height )
    {
        if( buf == 0 )
        {
            buf = (IplImage**)malloc(N*sizeof(buf[0]));
            memset( buf, 0, N*sizeof(buf[0]));
        }

        for( i = 0; i < N; i++ )
        {
            cvReleaseImage( &buf[i] );
            buf[i] = cvCreateImage( size, IPL_DEPTH_8U, 1 );
            cvZero( buf[i] );
        }
        cvReleaseImage( &mhi );
        cvReleaseImage( &orient );
        cvReleaseImage( &segmask );
        cvReleaseImage( &mask );

        mhi = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        cvZero( mhi );  // clear MHI at the beginning
        orient = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        segmask = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        mask = cvCreateImage( size, IPL_DEPTH_8U, 1 );
    }

    cvCvtColor( img, buf[last], CV_BGR2GRAY );  // convert frame to grayscale

    idx2 = (last + 1) % N;  // index of the oldest frame in the ring buffer
    last = idx2;

    // binary silhouette = thresholded difference between the two frames
    silh = buf[idx2];
    cvAbsDiff( buf[idx1], buf[idx2], silh );
    cvThreshold( silh, silh, diff_threshold, 1, CV_THRESH_BINARY );
    cvUpdateMotionHistory( silh, mhi, timestamp, MHI_DURATION );

    // convert MHI to a 0..255 image (newest motion brightest) and show it
    // in the blue plane of dst
    cvCvtScale( mhi, mask, 255./MHI_DURATION,
                (MHI_DURATION - timestamp)*255./MHI_DURATION );
    cvZero( dst );
    cvCvtPlaneToPix( mask, 0, 0, 0, dst );

    // motion gradient orientation plus a validity mask
    cvCalcMotionGradient( mhi, mask, orient, MAX_TIME_DELTA, MIN_TIME_DELTA, 3 );

    if( !storage )
        storage = cvCreateMemStorage(0);
    else
        cvClearMemStorage(storage);

    // segment the MHI into independent motion components
    seq = cvSegmentMotion( mhi, segmask, storage, timestamp, MAX_TIME_DELTA );

    // iterate components; the extra i == -1 pass handles the whole image
    // (global motion)
    for( i = -1; i < seq->total; i++ )
    {
        if( i < 0 ) {  // case of the whole image
            comp_rect = cvRect( 0, 0, size.width, size.height );
            color = CV_RGB(255,255,255);
            magnitude = 100;
        }
        else {  // i-th motion component
            comp_rect = ((CvConnectedComp*)cvGetSeqElem( seq, i ))->rect;
            if( comp_rect.width + comp_rect.height < 100 )  // reject very small components
                continue;
            color = CV_RGB(255,0,0);
            magnitude = 30;
        }

        // restrict all computations to the component ROI
        cvSetImageROI( silh, comp_rect );
        cvSetImageROI( mhi, comp_rect );
        cvSetImageROI( orient, comp_rect );
        cvSetImageROI( mask, comp_rect );

        // overall motion direction inside the ROI
        angle = cvCalcGlobalOrientation( orient, mask, mhi, timestamp, MHI_DURATION);
        angle = 360.0 - angle;  // adjust for images with top-left origin

        count = cvNorm( silh, 0, CV_L1, 0 );  // number of silhouette points in the ROI

        cvResetImageROI( mhi );
        cvResetImageROI( orient );
        cvResetImageROI( mask );
        cvResetImageROI( silh );

        // skip components with too little motion
        if( count < comp_rect.width*comp_rect.height * 0.05 )
            continue;

        // draw a clock with an arrow indicating the direction
        center = cvPoint( (comp_rect.x + comp_rect.width/2),
                          (comp_rect.y + comp_rect.height/2) );

        cvCircle( dst, center, cvRound(magnitude*1.2), color, 3, CV_AA, 0 );
        cvLine( dst, center, cvPoint( cvRound( center.x + magnitude*cos(angle*CV_PI/180)),
                cvRound( center.y - magnitude*sin(angle*CV_PI/180))), color, 3, CV_AA, 0 );
    }
}
// parameters:
//  img - input video frame
//  dst - resultant motion picture
//  args - optional parameters
// Maintains a motion-history image (MHI) across calls via file-scope
// globals (buf, last, mhi, orient, segmask, mask, storage) and draws a
// "clock" per detected motion component into dst.
void update_mhi( IplImage* img, IplImage* dst, int diff_threshold )
{
    double timestamp = (double)clock()/CLOCKS_PER_SEC; // get current time in seconds
    CvSize size = cvSize(img->width,img->height); // get current frame size
    int i, idx1 = last, idx2;
    IplImage* silh;
    CvSeq* seq;
    CvRect comp_rect;
    double count;
    double angle;
    CvPoint center;
    double magnitude;
    CvScalar color;

    // allocate images at the beginning or
    // reallocate them if the frame size is changed
    if( !mhi || mhi->width != size.width || mhi->height != size.height )
    {
        if( buf == 0 )
        {
            buf = (IplImage**)malloc(N*sizeof(buf[0]));
            memset( buf, 0, N*sizeof(buf[0]));
        }

        for( i = 0; i < N; i++ )
        {
            cvReleaseImage( &buf[i] );
            buf[i] = cvCreateImage( size, IPL_DEPTH_8U, 1 );
            cvZero( buf[i] );
        }
        cvReleaseImage( &mhi );
        cvReleaseImage( &orient );
        cvReleaseImage( &segmask );
        cvReleaseImage( &mask );

        mhi = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        cvZero( mhi ); // clear MHI at the beginning
        orient = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        segmask = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        mask = cvCreateImage( size, IPL_DEPTH_8U, 1 );
    }

    cvCvtColor( img, buf[last], CV_BGR2GRAY ); // convert frame to grayscale

    idx2 = (last + 1) % N; // index of (last - (N-1))th frame
    last = idx2;

    silh = buf[idx2];
    cvAbsDiff( buf[idx1], buf[idx2], silh ); // get difference between frames

    cvThreshold( silh, silh, diff_threshold, 1, CV_THRESH_BINARY ); // and threshold it
    cvUpdateMotionHistory( silh, mhi, timestamp, MHI_DURATION ); // update MHI

    // convert MHI to blue 8u image
    cvCvtScale( mhi, mask, 255./MHI_DURATION,
                (MHI_DURATION - timestamp)*255./MHI_DURATION );
    cvZero( dst );
    cvMerge( mask, 0, 0, 0, dst );

    // calculate motion gradient orientation and valid orientation mask
    cvCalcMotionGradient( mhi, mask, orient, MAX_TIME_DELTA, MIN_TIME_DELTA, 3 );

    if( !storage )
        storage = cvCreateMemStorage(0);
    else
        cvClearMemStorage(storage);

    // segment motion: get sequence of motion components
    // segmask is marked motion components map. It is not used further
    seq = cvSegmentMotion( mhi, segmask, storage, timestamp, MAX_TIME_DELTA );

    // iterate through the motion components,
    // One more iteration (i == -1) corresponds to the whole image (global motion)
    for( i = -1; i < seq->total; i++ )
    {
        if( i < 0 ) { // case of the whole image
            comp_rect = cvRect( 0, 0, size.width, size.height );
            color = CV_RGB(255,255,255);
            magnitude = 100;
        }
        else { // i-th motion component
            comp_rect = ((CvConnectedComp*)cvGetSeqElem( seq, i ))->rect;
            if( comp_rect.width + comp_rect.height < 100 ) // reject very small components
                continue;
            color = CV_RGB(255,0,0);
            magnitude = 30;
        }

        // select component ROI
        cvSetImageROI( silh, comp_rect );
        cvSetImageROI( mhi, comp_rect );
        cvSetImageROI( orient, comp_rect );
        cvSetImageROI( mask, comp_rect );

        // calculate orientation
        angle = cvCalcGlobalOrientation( orient, mask, mhi, timestamp, MHI_DURATION);
        angle = 360.0 - angle; // adjust for images with top-left origin

        count = cvNorm( silh, 0, CV_L1, 0 ); // calculate number of points within silhouette ROI

        cvResetImageROI( mhi );
        cvResetImageROI( orient );
        cvResetImageROI( mask );
        cvResetImageROI( silh );

        // check for the case of little motion
        if( count < comp_rect.width*comp_rect.height * 0.05 )
            continue;

        // draw a clock with arrow indicating the direction
        center = cvPoint( (comp_rect.x + comp_rect.width/2),
                          (comp_rect.y + comp_rect.height/2) );

        cvCircle( dst, center, cvRound(magnitude*1.2), color, 3, CV_AA, 0 );
        cvLine( dst, center, cvPoint( cvRound( center.x + magnitude*cos(angle*CV_PI/180)),
                cvRound( center.y - magnitude*sin(angle*CV_PI/180))), color, 3, CV_AA, 0 );
    }
}
// Motion-history variant with target tracking: in addition to the standard
// MHI "clock" drawing (here drawn onto `img`), it selects a tracking target
// (tx, ty, catchflag globals) according to the current zoom mode (Zmode).
// State lives in file-scope globals (mbuf, last, mhi, orient, segmask,
// mask, mstorage).
static void update_mhi( IplImage* img, IplImage* dst, int diff_threshold )
{
    double timestamp = (double)clock()/CLOCKS_PER_SEC; // get current time in seconds
    CvSize size = cvSize(img->width,img->height); // get current frame size
    int i, idx1 = last, idx2;
    IplImage* silh;
    CvSeq* seq;
    CvRect comp_rect;
    double count;
    double angle;
    CvPoint center;
    double magnitude;
    CvScalar color;
    unsigned int tmpx=0,tmpy=0;
    int tmppcount=0;

    // allocate images at the beginning or
    // reallocate them if the frame size is changed
    if( !mhi || mhi->width != size.width || mhi->height != size.height )
    {
        if( mbuf == 0 )
        {
            mbuf = (IplImage**)malloc(N*sizeof(mbuf[0]));
            memset( mbuf, 0, N*sizeof(mbuf[0]));
        }

        for( i = 0; i < N; i++ )
        {
            cvReleaseImage( &mbuf[i] );
            mbuf[i] = cvCreateImage( size, IPL_DEPTH_8U, 1 );
            cvZero( mbuf[i] );
        }
        cvReleaseImage( &mhi );
        cvReleaseImage( &orient );
        cvReleaseImage( &segmask );
        cvReleaseImage( &mask );

        mhi = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        cvZero( mhi ); // clear MHI at the beginning
        orient = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        segmask = cvCreateImage( size, IPL_DEPTH_32F, 1 );
        mask = cvCreateImage( size, IPL_DEPTH_8U, 1 );
    }

    cvCvtColor( img, mbuf[last], CV_BGR2GRAY ); // convert frame to grayscale

    idx2 = (last + 1) % N; // index of (last - (N-1))th frame
    last = idx2;

    silh=mbuf[idx2];
    cvAbsDiff( mbuf[idx1], mbuf[idx2], silh ); // difference between frames
    // binarize the silhouette (values are only 0 or 1)
    cvThreshold( silh, silh, diff_threshold, 1, CV_THRESH_BINARY );
    // update the time-stamped motion history image
    cvUpdateMotionHistory( silh, mhi, timestamp, MHI_DURATION );

    // convert the MHI to a visible 8-bit image and merge it into the output
    cvCvtScale( mhi, mask, 255./MHI_DURATION,
                (MHI_DURATION - timestamp)*255./MHI_DURATION );
    cvZero( dst );
    cvMerge( mask,0,0,0,dst );

    // calculate motion gradient orientation and valid orientation mask;
    // `orient` holds the direction angle at each moving pixel
    cvCalcMotionGradient( mhi, mask, orient, MAX_TIME_DELTA, MIN_TIME_DELTA, 3 );

    if( !mstorage )
        mstorage = cvCreateMemStorage(0);
    else
        cvClearMemStorage(mstorage);

    // segment motion: get sequence of motion components
    // segmask is marked motion components map. It is not used further
    seq = cvSegmentMotion( mhi, segmask, mstorage, timestamp, MAX_TIME_DELTA );

    // iterate through the motion components,
    // One more iteration (i == -1) corresponds to the whole image (global motion)
    for( i = -1; i < seq->total; i++ )
    {
        if( i < 0 ) { // case of the whole image
            comp_rect = cvRect( 0, 0, size.width, size.height );
            color = CV_RGB(255,255,255);
            magnitude = 100;
        }
        else { // i-th motion component
            comp_rect = ((CvConnectedComp*)cvGetSeqElem( seq, i ))->rect;
            if( comp_rect.width + comp_rect.height < 30 ) // reject very small components
                continue;
            color = CV_RGB(255,0,0);
            magnitude = 30;
        }

        // select component ROI
        cvSetImageROI( silh, comp_rect );
        cvSetImageROI( mhi, comp_rect );
        cvSetImageROI( orient, comp_rect );
        cvSetImageROI( mask, comp_rect );

        // overall motion direction of the units inside the ROI
        angle = cvCalcGlobalOrientation( orient, mask, mhi, timestamp, MHI_DURATION);
        angle = 360.0 - angle; // adjust for images with top-left origin

        count = cvNorm( silh, 0, CV_L1, 0 ); // number of points within silhouette ROI

        cvResetImageROI( mhi );
        cvResetImageROI( orient );
        cvResetImageROI( mask );
        cvResetImageROI( silh );

        // check for the case of little motion
        if( count < comp_rect.width*comp_rect.height * 0.05*0.1 )
            continue;

        // draw a clock with arrow indicating the direction
        center = cvPoint( (comp_rect.x + comp_rect.width/2),
                          (comp_rect.y + comp_rect.height/2) );

        if(Zmode == ZMODE_Z && i !=-1)
        {
            // stabilization rule in zoom-in mode: keep the candidate
            // closest to the anchor point (ax, ay)
            tmpx = comp_rect.x+comp_rect.width/2; // one candidate target coordinate
            tmpy = comp_rect.y+comp_rect.height/2;
            // NOTE(review): the distance is computed from the previous target
            // (tx, ty) rather than the candidate (tmpx, tmpy) — confirm intended.
            tmpal1 = sqrt(abs(tx-ax))+sqrt(abs(ty-ay));
            if(tmpal2 > tmpal1 || tmppcount == 0) // closer to the center
            {
                tmpal2 = tmpal1;
                tx = tmpx;
                ty = tmpy;
            }
            catchflag = 1;
        }
        if(Zmode == ZMOOE_S)
        {
            // target-selection rule in zoom-out mode: take the first
            // non-global component seen this frame
            if(tmppcount == 0 && i != -1)
            {
                tx = comp_rect.x+comp_rect.width/2;
                ty = comp_rect.y+comp_rect.height/2;
                catchflag = 1;
            }
        }
        tmppcount++;
        printf("The %dth rect:(%d,%d)\n",tmppcount,comp_rect.x+comp_rect.width/2,comp_rect.y+comp_rect.height/2);
        cvCircle( img, center, cvRound(magnitude*1.2), color, 3, CV_AA, 0 );
        cvLine( img, center, cvPoint( cvRound( center.x + magnitude*cos(angle*CV_PI/180)),
                cvRound( center.y - magnitude*sin(angle*CV_PI/180))), color, 3, CV_AA, 0 );
    }
}
// Render more than one image per frame -- mosaic void mosaic(int index, IplImage** images, CvMat** matches, int* bestMatchedIndex, IplImage* viewport, CvMat* initHomography) { int image_count = myScene->max_image; int baseIndex = myScene->currentIndex; int history[image_count]; int j; CvMat *ident = create3DIdentity(); for (j = 0; j < image_count; j++) { history[j] = -1; } CvMat* topHomography = copyMatrix(matches[index*image_count + baseIndex]); double topConfidence = topConfidence = cvNorm(topHomography, ident, CV_L2, 0); CvMat* currHomography = copyMatrix(matches[index*image_count + bestMatchedIndex[index]]);; int currIndex = bestMatchedIndex[index]; while (currIndex != baseIndex) { if (!matches[currIndex*image_count + baseIndex]) { //printf("FIX ME!\n"); currIndex = -1; break; } CvMat* tempHomography = cvCreateMat(3, 3, CV_64F); cvMatMul(matches[currIndex*image_count + baseIndex], currHomography, tempHomography); double currConfidence = cvNorm(tempHomography, ident, CV_L2, 0); if (currConfidence > topConfidence) { break; } else { cvMatMulAdd(matches[currIndex*image_count + bestMatchedIndex[currIndex]], currHomography, 0, currHomography); currIndex = bestMatchedIndex[currIndex]; if (history[currIndex] == 1) { currIndex = -1; break; } else history[currIndex] = 1; } } if (currIndex == baseIndex) { if (DEBUG) { printf("Index to Match: bestBaseImageIndex = %d\n", currIndex); printf("Chosen homography: "); printMatrix(currHomography); } // multiply initial homography cvMatMulAdd(initHomography, currHomography, 0, currHomography); cvWarpPerspective(images[index], viewport, currHomography, CV_INTER_LINEAR, cvScalarAll(0)); } else { //if (DEBUG) { printf("Index to Match: currIndex = %d\n", currIndex); printf("Chosen homography: "); printMatrix(topHomography); //} // multiply initial homography cvMatMulAdd(initHomography, topHomography, 0, topHomography); cvWarpPerspective(images[index], viewport, topHomography, CV_INTER_LINEAR, cvScalarAll(0)); } if (DEBUG){ 
cvShowImage("Scene", viewport); cvWaitKey(0); } cvReleaseMat(&topHomography); cvReleaseMat(&currHomography); cvReleaseMat(&ident); }
// Computes the 3x3 homography (arm plane -> image plane) from the arm's
// direction line, its 3D center, and the camera intrinsics. All arrays are
// row-major float buffers supplied by the caller.
CV_IMPL void
cvCalcImageHomography( float* line, CvPoint3D32f* _center, float* _intrinsic, float* _homography )
{
    double norm_xy, norm_xz, xy_sina, xy_cosa, xz_sina, xz_cosa, nx1, plane_dist;
    float _ry[3], _rz[3], _r_trans[9];

    // BUG FIX: validate the pointers before using them (the NULL check
    // previously ran after the matrix headers were already built on them).
    if( !line || !_center || !_intrinsic || !_homography )
        CV_Error( CV_StsNullPtr, "" );

    CvMat rx = cvMat( 1, 3, CV_32F, line );
    CvMat ry = cvMat( 1, 3, CV_32F, _ry );
    CvMat rz = cvMat( 1, 3, CV_32F, _rz );
    CvMat r_trans = cvMat( 3, 3, CV_32F, _r_trans );
    CvMat center = cvMat( 3, 1, CV_32F, _center );

    float _sub[9];
    CvMat sub = cvMat( 3, 3, CV_32F, _sub );
    float _t_trans[3];
    CvMat t_trans = cvMat( 3, 1, CV_32F, _t_trans );

    CvMat intrinsic = cvMat( 3, 3, CV_32F, _intrinsic );
    CvMat homography = cvMat( 3, 3, CV_32F, _homography );

    // direction cosines of the line in the xy and xz planes
    norm_xy = cvSqrt( line[0] * line[0] + line[1] * line[1] );
    xy_cosa = line[0] / norm_xy;
    xy_sina = line[1] / norm_xy;

    norm_xz = cvSqrt( line[0] * line[0] + line[2] * line[2] );
    xz_cosa = line[0] / norm_xz;
    xz_sina = line[2] / norm_xz;

    nx1 = -xz_sina;

    // new z axis, normalized
    _rz[0] = (float)(xy_cosa * nx1);
    _rz[1] = (float)(xy_sina * nx1);
    _rz[2] = (float)xz_cosa;
    cvScale( &rz, &rz, 1./cvNorm(&rz,0,CV_L2) );

    /* new axe y */
    cvCrossProduct( &rz, &rx, &ry );
    cvScale( &ry, &ry, 1./cvNorm( &ry, 0, CV_L2 ) );

    /* transpone rotation matrix */
    memcpy( &_r_trans[0], line, 3*sizeof(float));
    memcpy( &_r_trans[3], _ry, 3*sizeof(float));
    memcpy( &_r_trans[6], _rz, 3*sizeof(float));

    /* calculate center distanse from arm plane */
    // BUG FIX: restored "&center" — the source was garbled to "¢er"
    // (HTML entity &cent;) here and in the cvMatMul call below.
    plane_dist = cvDotProduct( &center, &rz );

    /* calculate (I - r_trans)*center */
    cvSetIdentity( &sub );
    cvSub( &sub, &r_trans, &sub );
    cvMatMul( &sub, &center, &t_trans );

    cvMatMul( &t_trans, &rz, &sub );   // outer product: 3x1 * 1x3
    cvScaleAdd( &sub, cvRealScalar(1./plane_dist), &r_trans, &sub );

    cvMatMul( &intrinsic, &sub, &r_trans );
    cvInvert( &intrinsic, &sub, CV_SVD );
    cvMatMul( &r_trans, &sub, &homography );
}
//============================================================================ void AAM_IC::Fit(const IplImage* image, AAM_Shape& Shape, int max_iter /* = 30 */, bool showprocess /* = false */) { //initialize some stuff double t = gettime; const CvMat* A0 = __texture.GetMean(); CvMat p; cvGetCols(__search_pq, &p, 4, 4+__shape.nModes()); Shape.Point2Mat(__current_s); SetAllParamsZero(); __shape.CalcParams(__current_s, __search_pq); IplImage* Drawimg = 0; for(int iter = 0; iter < max_iter; iter++) { if(showprocess) { if(Drawimg == 0) Drawimg = cvCloneImage(image); else cvCopy(image, Drawimg); Shape.Mat2Point(__current_s); Draw(Drawimg, Shape, 2); mkdir("result"); char filename[100]; sprintf(filename, "result/Iter-%02d.jpg", iter); cvSaveImage(filename, Drawimg); } //check the current shape AAM_Common::CheckShape(__current_s, image->width, image->height); //warp image to mesh shape mesh __paw.CalcWarpTexture(__current_s, image, __warp_t); AAM_TDM::NormalizeTexture(A0, __warp_t); cvSub(__warp_t, A0, __error_t); //calculate updates (and scale to account for linear lighting gain) cvGEMM(__error_t, __G, 1, NULL, 1, __delta_pq, CV_GEMM_B_T); //check for parameter convergence if(cvNorm(__delta_pq) < 1e-6) break; //apply inverse compositional algorithm to update parameters InverseCompose(__delta_pq, __current_s, __update_s); //smooth shape cvAddWeighted(__current_s, 0.4, __update_s, 0.6, 0, __update_s); //update parameters __shape.CalcParams(__update_s, __search_pq); //calculate constrained new shape __shape.CalcShape(__search_pq, __update_s); //check for shape convergence if(cvNorm(__current_s, __update_s, CV_L2) < 0.001) break; else cvCopy(__update_s, __current_s); } Shape.Mat2Point(__current_s); t = gettime-t; printf("AAM IC Fitting time cost %.3f millisec\n", t); cvReleaseImage(&Drawimg); }
//============================================================================
// Fits the basic (regression-driven) AAM to `image`, starting from the
// landmarks in `Shape` and writing the converged landmarks back into `Shape`.
//
// Parameters:
//   image       - input image the model is fitted to (not modified).
//   Shape       - in: initial landmark positions; out: fitted positions.
//   max_iter    - maximum number of fitting iterations (default 30).
//   showprocess - when true, each iteration is drawn and saved to
//                 "result/terN.bmp" for debugging.
//
// Strategy: the regression matrix __G predicts a parameter update from the
// texture residual; a step-halving line search (k_values) then picks the
// largest step that does not increase the residual error.
void AAM_Basic::Fit(const IplImage* image, AAM_Shape& Shape,
					int max_iter /* = 30 */,bool showprocess /* = false */)
{
	//initialize some stuff
	double t = gettime;
	double e1, e2;                 // residual error: current best / candidate
	const int np = 5;
	double k_values[np] = {1, 0.5, 0.25, 0.125, 0.0625};  // step sizes tried
	int k;
	IplImage* Drawimg = 0;
	Shape.Point2Mat(__s);
	InitParams(image);
	// __current_c_q layout: columns [0,4) = pose q, [4,4+nModes) = appearance c
	CvMat subcq;
	cvGetCols(__current_c_q, &subcq, 0, 4);	cvCopy(__q, &subcq);
	cvGetCols(__current_c_q, &subcq, 4, 4+__cam.nModes()); cvCopy(__c, &subcq);

	//calculate initial texture residual error
	e1 = EstResidual(image, __current_c_q, __s, __t_m, __t_s, __delta_t);

	//do a number of iterations until convergence
	for(int iter = 0; iter <max_iter; iter++)
	{
		if(showprocess)
		{
			// lazily allocate the debug canvas on the first iteration
			if(Drawimg == 0) Drawimg = cvCloneImage(image);
			else cvCopy(image, Drawimg);
			__cam.CalcShape(__s, __current_c_q);
			Shape.Mat2Point(__s);
			Draw(Drawimg, Shape, 2);
#ifdef TARGET_WIN32
			mkdir("result");
#else
			mkdir("result", S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH);
#endif
			char filename[100];
			sprintf(filename, "result/ter%d.bmp", iter);
			cvSaveImage(filename, Drawimg);
		}

		// predict parameter update from the residual: delta = __delta_t * G^T
		cvGEMM(__delta_t, __G, 1, NULL, 0, __delta_c_q, CV_GEMM_B_T);

		//force first iteration: take the full predicted step unconditionally
		if(iter == 0)
		{
			cvAdd(__current_c_q, __delta_c_q, __current_c_q);
			CvMat c; cvGetCols(__current_c_q, &c, 4, 4+__cam.nModes());
			//constrain appearance parameters to valid model range
			__cam.Clamp(&c);
			e1 = EstResidual(image, __current_c_q, __s, __t_m, __t_s, __delta_t);
		}
		//find largest step size which reduces the texture residual
		else
		{
			for(k = 0; k < np; k++)
			{
				// candidate = current + k_values[k] * delta
				cvScaleAdd(__delta_c_q, cvScalar(k_values[k]), __current_c_q, __update_c_q);

				//constrain parameters
				CvMat c; cvGetCols(__update_c_q, &c, 4, 4+__cam.nModes());
				__cam.Clamp(&c);

				e2 = EstResidual(image, __update_c_q, __s, __t_m, __t_s, __delta_t);
				if(e2 <= e1) break;   // first non-increasing step wins
			}
		}

		//check for convergence (only meaningful after the forced first step)
		if(iter > 0)
		{
			// NOTE(review): k == np means no step size reduced the error,
			// yet the last (smallest-step) candidate is still accepted here;
			// presumably intentional to keep the search moving — confirm.
			if(k == np)
			{
				e1 = e2;
				cvCopy(__update_c_q, __current_c_q);
			}
			else if(fabs(e2-e1)<0.001*e1) break;          // error change negligible
			else if (cvNorm(__delta_c_q)<0.001) break;    // predicted update negligible
			else
			{
				// accept the improving candidate and continue
				cvCopy(__update_c_q, __current_c_q);
				e1 = e2;
			}
		}
	}
	cvReleaseImage(&Drawimg);
	// write the final shape for the converged parameters back to the caller
	__cam.CalcShape(__s, __current_c_q);
	Shape.Mat2Point(__s);
	t = gettime - t;
	printf("AAM-Basic Fitting time cost: %.3f millisec\n", t);
}
/* Estimates camera extrinsics (rotation vector r_vec, translation t_vec) from
 * 3D object points, their 2D image projections, the intrinsic matrix A, and
 * optional 4-element distortion coefficients.
 *
 * Pipeline:
 *   1. normalize image points (undo intrinsics, iteratively undo distortion);
 *   2. choose an initial pose: homography decomposition for (near-)planar
 *      point sets, otherwise a DLT solve;
 *   3. sanity-check the new pose against any previous pose in r_vec/t_vec
 *      (camera-center jump > 300 units rejects the new estimate);
 *   4. if accepted, refine by Gauss-Newton style iteration on the
 *      reprojection error.
 *
 * Returns true when the new pose was accepted (and refined), false when the
 * previous r_vec/t_vec were kept.  r_vec/t_vec are both input (previous pose,
 * all-zero means "none") and output.
 */
bool cvFindExtrinsicCameraParams3( const CvMat* obj_points,
                  const CvMat* img_points, const CvMat* A,
                  const CvMat* dist_coeffs, CvMat* r_vec, CvMat* t_vec )
{
    bool fGood = true;

    const int max_iter = 20;
    CvMat *_M = 0, *_Mxy = 0, *_m = 0, *_mn = 0, *_L = 0, *_J = 0;

    CV_FUNCNAME( "cvFindExtrinsicCameraParams3" );

    __BEGIN__;

    int i, j, count;
    double a[9], k[4] = { 0, 0, 0, 0 }, R[9], ifx, ify, cx, cy;
    double Mc[3] = {0, 0, 0}, MM[9], U[9], V[9], W[3];
    double JtJ[6*6], JtErr[6], JtJW[6], JtJV[6*6], delta[6], param[6];
    CvPoint3D64f* M = 0;
    CvPoint2D64f *m = 0, *mn = 0;
    /* stack-backed CvMat headers; note _r and _t alias halves of param[6],
       so updating _param updates both the rotation and translation views */
    CvMat _a = cvMat( 3, 3, CV_64F, a );
    CvMat _R = cvMat( 3, 3, CV_64F, R );
    CvMat _r = cvMat( 3, 1, CV_64F, param );
    CvMat _t = cvMat( 3, 1, CV_64F, param + 3 );
    CvMat _Mc = cvMat( 1, 3, CV_64F, Mc );
    CvMat _MM = cvMat( 3, 3, CV_64F, MM );
    CvMat _U = cvMat( 3, 3, CV_64F, U );
    CvMat _V = cvMat( 3, 3, CV_64F, V );
    CvMat _W = cvMat( 3, 1, CV_64F, W );
    CvMat _JtJ = cvMat( 6, 6, CV_64F, JtJ );
    CvMat _JtErr = cvMat( 6, 1, CV_64F, JtErr );
    CvMat _JtJW = cvMat( 6, 1, CV_64F, JtJW );
    CvMat _JtJV = cvMat( 6, 6, CV_64F, JtJV );
    CvMat _delta = cvMat( 6, 1, CV_64F, delta );
    CvMat _param = cvMat( 6, 1, CV_64F, param );
    CvMat _dpdr, _dpdt;

    if( !CV_IS_MAT(obj_points) || !CV_IS_MAT(img_points) ||
        !CV_IS_MAT(A) || !CV_IS_MAT(r_vec) || !CV_IS_MAT(t_vec) )
        CV_ERROR( CV_StsBadArg, "One of required arguments is not a valid matrix" );

    /* point count: inputs may be 1xN or Nx1 */
    count = MAX(obj_points->cols, obj_points->rows);
    CV_CALL( _M = cvCreateMat( 1, count, CV_64FC3 ));
    CV_CALL( _Mxy = cvCreateMat( 1, count, CV_64FC2 ));
    CV_CALL( _m = cvCreateMat( 1, count, CV_64FC2 ));
    CV_CALL( _mn = cvCreateMat( 1, count, CV_64FC2 ));
    M = (CvPoint3D64f*)_M->data.db;
    m = (CvPoint2D64f*)_m->data.db;
    mn = (CvPoint2D64f*)_mn->data.db;

    CV_CALL( cvConvertPointsHomogenious( obj_points, _M ));
    CV_CALL( cvConvertPointsHomogenious( img_points, _m ));
    CV_CALL( cvConvert( A, &_a ));

    if( dist_coeffs )
    {
        CvMat _k;
        /* accept 1x4 or 4x1, 32F or 64F (note: relies on && binding tighter
           than || — conditions are intentionally unparenthesized) */
        if( !CV_IS_MAT(dist_coeffs) ||
            CV_MAT_DEPTH(dist_coeffs->type) != CV_64F &&
            CV_MAT_DEPTH(dist_coeffs->type) != CV_32F ||
            dist_coeffs->rows != 1 && dist_coeffs->cols != 1 ||
            dist_coeffs->rows*dist_coeffs->cols*CV_MAT_CN(dist_coeffs->type) != 4 )
            CV_ERROR( CV_StsBadArg,
                "Distortion coefficients must be 1x4 or 4x1 floating-point vector" );

        _k = cvMat( dist_coeffs->rows, dist_coeffs->cols,
                    CV_MAKETYPE(CV_64F,CV_MAT_CN(dist_coeffs->type)), k );
        CV_CALL( cvConvert( dist_coeffs, &_k ));
    }

    if( CV_MAT_DEPTH(r_vec->type) != CV_64F && CV_MAT_DEPTH(r_vec->type) != CV_32F ||
        r_vec->rows != 1 && r_vec->cols != 1 ||
        r_vec->rows*r_vec->cols*CV_MAT_CN(r_vec->type) != 3 )
        CV_ERROR( CV_StsBadArg, "Rotation vector must be 1x3 or 3x1 floating-point vector" );

    if( CV_MAT_DEPTH(t_vec->type) != CV_64F && CV_MAT_DEPTH(t_vec->type) != CV_32F ||
        t_vec->rows != 1 && t_vec->cols != 1 ||
        t_vec->rows*t_vec->cols*CV_MAT_CN(t_vec->type) != 3 )
        CV_ERROR( CV_StsBadArg,
            "Translation vector must be 1x3 or 3x1 floating-point vector" );

    ifx = 1./a[0]; ify = 1./a[4];   /* inverse focal lengths */
    cx = a[2]; cy = a[5];           /* principal point */

    // normalize image points
    // (unapply the intrinsic matrix transformation and distortion)
    for( i = 0; i < count; i++ )
    {
        double x = (m[i].x - cx)*ifx, y = (m[i].y - cy)*ify, x0 = x, y0 = y;

        // compensate distortion iteratively (fixed 5 fixed-point iterations)
        if( dist_coeffs )
            for( j = 0; j < 5; j++ )
            {
                double r2 = x*x + y*y;
                double icdist = 1./(1 + k[0]*r2 + k[1]*r2*r2);
                double delta_x = 2*k[2]*x*y + k[3]*(r2 + 2*x*x);
                double delta_y = k[2]*(r2 + 2*y*y) + 2*k[3]*x*y;
                x = (x0 - delta_x)*icdist;
                y = (y0 - delta_y)*icdist;
            }
        mn[i].x = x; mn[i].y = y;

        // accumulate mean(M) of the object points
        Mc[0] += M[i].x;
        Mc[1] += M[i].y;
        Mc[2] += M[i].z;
    }

    Mc[0] /= count;
    Mc[1] /= count;
    Mc[2] /= count;

    /* scatter matrix of the centered object points; its singular values tell
       whether the points are (nearly) coplanar */
    cvReshape( _M, _M, 1, count );
    cvMulTransposed( _M, &_MM, 1, &_Mc );
    cvSVD( &_MM, &_W, 0, &_V, CV_SVD_MODIFY_A + CV_SVD_V_T );

    // initialize extrinsic parameters
    if( W[2]/W[1] < 1e-3 || count < 4 )
    {
        // a planar structure case (all M's lie in the same plane)
        double tt[3], h[9], h1_norm, h2_norm;
        CvMat* R_transform = &_V;   /* reuse V^T as the plane-aligning rotation */
        CvMat T_transform = cvMat( 3, 1, CV_64F, tt );
        CvMat _H = cvMat( 3, 3, CV_64F, h );
        CvMat _h1, _h2, _h3;

        /* degenerate plane normal -> fall back to identity alignment */
        if( V[2]*V[2] + V[5]*V[5] < 1e-10 )
            cvSetIdentity( R_transform );

        /* keep a proper (det > 0) rotation */
        if( cvDet(R_transform) < 0 )
            cvScale( R_transform, R_transform, -1 );

        cvGEMM( R_transform, &_Mc, -1, 0, 0, &T_transform, CV_GEMM_B_T );

        /* map object points into the plane's 2D coordinates */
        for( i = 0; i < count; i++ )
        {
            const double* Rp = R_transform->data.db;
            const double* Tp = T_transform.data.db;
            const double* src = _M->data.db + i*3;
            double* dst = _Mxy->data.db + i*2;

            dst[0] = Rp[0]*src[0] + Rp[1]*src[1] + Rp[2]*src[2] + Tp[0];
            dst[1] = Rp[3]*src[0] + Rp[4]*src[1] + Rp[5]*src[2] + Tp[1];
        }

        cvFindHomography( _Mxy, _mn, &_H );

        /* h1/h2/h3 are column views into H (shifted data pointers) */
        cvGetCol( &_H, &_h1, 0 );
        _h2 = _h1; _h2.data.db++;
        _h3 = _h2; _h3.data.db++;
        h1_norm = sqrt(h[0]*h[0] + h[3]*h[3] + h[6]*h[6]);
        h2_norm = sqrt(h[1]*h[1] + h[4]*h[4] + h[7]*h[7]);

        /* decompose H into rotation columns + translation: normalize the two
           rotation columns, scale t by the average inverse column norm, and
           rebuild the third column as h1 x h2 */
        cvScale( &_h1, &_h1, 1./h1_norm );
        cvScale( &_h2, &_h2, 1./h2_norm );
        cvScale( &_h3, &_t, 2./(h1_norm + h2_norm));
        cvCrossProduct( &_h1, &_h2, &_h3 );

        /* round-trip through Rodrigues to project H onto a valid rotation */
        cvRodrigues2( &_H, &_r );
        cvRodrigues2( &_r, &_H );
        /* compose with the plane-alignment transform to get the final pose */
        cvMatMulAdd( &_H, &T_transform, &_t, &_t );
        cvMatMul( &_H, R_transform, &_R );
        cvRodrigues2( &_R, &_r );
    }
    else
    {
        // non-planar structure: use the DLT method
        double* L;
        double LL[12*12], LW[12], LV[12*12], sc;
        CvMat _LL = cvMat( 12, 12, CV_64F, LL );
        CvMat _LW = cvMat( 12, 1, CV_64F, LW );
        CvMat _LV = cvMat( 12, 12, CV_64F, LV );
        CvMat _RRt, _RR, _tt;

        CV_CALL( _L = cvCreateMat( 2*count, 12, CV_64F ));
        L = _L->data.db;

        /* two DLT constraint rows per point (x-row then y-row) */
        for( i = 0; i < count; i++, L += 24 )
        {
            double x = -mn[i].x, y = -mn[i].y;
            L[0] = L[16] = M[i].x;
            L[1] = L[17] = M[i].y;
            L[2] = L[18] = M[i].z;
            L[3] = L[19] = 1.;
            L[4] = L[5] = L[6] = L[7] = 0.;
            L[12] = L[13] = L[14] = L[15] = 0.;
            L[8] = x*M[i].x;
            L[9] = x*M[i].y;
            L[10] = x*M[i].z;
            L[11] = x;
            L[20] = y*M[i].x;
            L[21] = y*M[i].y;
            L[22] = y*M[i].z;
            L[23] = y;
        }

        /* solution = right singular vector for the smallest singular value */
        cvMulTransposed( _L, &_LL, 1 );
        cvSVD( &_LL, &_LW, 0, &_LV, CV_SVD_MODIFY_A + CV_SVD_V_T );
        _RRt = cvMat( 3, 4, CV_64F, LV + 11*12 );
        cvGetCols( &_RRt, &_RR, 0, 3 );
        cvGetCol( &_RRt, &_tt, 3 );
        if( cvDet(&_RR) < 0 )
            cvScale( &_RRt, &_RRt, -1 );
        sc = cvNorm(&_RR);
        /* project the 3x3 part onto the nearest rotation (R = U*V^T) and
           rescale t accordingly */
        cvSVD( &_RR, &_W, &_U, &_V, CV_SVD_MODIFY_A + CV_SVD_U_T + CV_SVD_V_T );
        cvGEMM( &_U, &_V, 1, 0, 0, &_R, CV_GEMM_A_T );
        cvScale( &_tt, &_t, cvNorm(&_R)/sc );
        cvRodrigues2( &_R, &_r );
        cvReleaseMat( &_L );
    }

    //
    // Check if new r and t are good
    //
    if ( cvGetReal1D( r_vec, 0 ) ||
         cvGetReal1D( r_vec, 1 ) ||
         cvGetReal1D( r_vec, 2 ) ||
         cvGetReal1D( t_vec, 0 ) ||
         cvGetReal1D( t_vec, 1 ) ||
         cvGetReal1D( t_vec, 2 ) )
    {
        //
        // perform this only when the old r and t exist
        // (an all-zero r_vec/t_vec is treated as "no previous pose")
        //
        CvMat * R_inv = cvCreateMat( 3, 3, CV_64FC1 );
        CvMat * r_old = cvCreateMat( 3, 1, CV_64FC1 );
        CvMat * R_old = cvCreateMat( 3, 3, CV_64FC1 );
        CvMat * t_old = cvCreateMat( 3, 1, CV_64FC1 );
        // camera center for the new pose: C = -R^{-1} * t
        cvInvert( &_R, R_inv );
        double new_center[3];
        CvMat newCenter = cvMat( 3, 1, CV_64FC1, new_center );
        cvMatMul( R_inv, &_t, &newCenter );
        cvScale( &newCenter, &newCenter, -1 );
        // camera center for the previous pose
        cvConvert( r_vec, r_old );
        cvConvert( t_vec, t_old );
        cvRodrigues2( r_old, R_old );
        cvInvert( R_old, R_inv );
        double old_center[3];
        CvMat oldCenter = cvMat( 3, 1, CV_64FC1, old_center );
        cvMatMul( R_inv, t_old, &oldCenter );
        cvScale( &oldCenter, &oldCenter, -1 );
        // Euclidean distance between the two camera centers
        double diff_center = 0;
        for ( int i = 0; i < 3 ; i ++ )
            diff_center += pow( new_center[i] - old_center[i], 2 );
        diff_center = sqrt( diff_center );

        /* reject implausibly large jumps and keep the previous pose
           (threshold 300 is in scene units — presumably tuned for this
           application; confirm before reuse) */
        if ( diff_center > 300 )
        {
            printf("diff_center = %.2f --> set initial r and t as same as the previous\n", diff_center);
            cvConvert(r_vec, &_r);
            cvConvert(t_vec, &_t);
            fGood = false;
        }
        // else printf("diff_center = %.2f\n", diff_center );

        cvReleaseMat( &R_inv );
        cvReleaseMat( &r_old );
        cvReleaseMat( &R_old );
        cvReleaseMat( &t_old );
    }

    if ( fGood )
    {
        CV_CALL( _J = cvCreateMat( 2*count, 6, CV_64FC1 ));
        cvGetCols( _J, &_dpdr, 0, 3 );
        cvGetCols( _J, &_dpdt, 3, 6 );

        // refine extrinsic parameters using iterative algorithm
        // (normal equations JtJ * delta = JtErr solved via SVD each step)
        for( i = 0; i < max_iter; i++ )
        {
            double n1, n2;
            cvReshape( _mn, _mn, 2, 1 );
            cvProjectPoints2( _M, &_r, &_t, &_a, dist_coeffs,
                              _mn, &_dpdr, &_dpdt, 0, 0, 0 );
            /* _mn now holds the reprojection residual m - proj(M) */
            cvSub( _m, _mn, _mn );
            cvReshape( _mn, _mn, 1, 2*count );

            cvMulTransposed( _J, &_JtJ, 1 );
            cvGEMM( _J, _mn, 1, 0, 0, &_JtErr, CV_GEMM_A_T );
            cvSVD( &_JtJ, &_JtJW, 0, &_JtJV, CV_SVD_MODIFY_A + CV_SVD_V_T );
            /* stop when JtJ is numerically rank-deficient */
            if( JtJW[5]/JtJW[0] < 1e-12 )
                break;
            cvSVBkSb( &_JtJW, &_JtJV, &_JtJV, &_JtErr,
                      &_delta, CV_SVD_U_T + CV_SVD_V_T );
            /* param aliases _r and _t, so this updates the pose in place */
            cvAdd( &_delta, &_param, &_param );
            n1 = cvNorm( &_delta );
            n2 = cvNorm( &_param );
            /* stop when the relative update is negligible */
            if( n1/n2 < 1e-10 )
                break;
        }

        /* write the refined pose back in the caller's layout and depth */
        _r = cvMat( r_vec->rows, r_vec->cols,
            CV_MAKETYPE(CV_64F,CV_MAT_CN(r_vec->type)), param );
        _t = cvMat( t_vec->rows, t_vec->cols,
            CV_MAKETYPE(CV_64F,CV_MAT_CN(t_vec->type)), param + 3 );

        cvConvert( &_r, r_vec );
        cvConvert( &_t, t_vec );
    }

    __END__;

    cvReleaseMat( &_M );
    cvReleaseMat( &_Mxy );
    cvReleaseMat( &_m );
    cvReleaseMat( &_mn );
    cvReleaseMat( &_L );
    cvReleaseMat( &_J );

    return fGood;
}