/*F///////////////////////////////////////////////////////////////////////////
// Name:    cvCalcOpticalFlowLK
// Purpose: Lucas-Kanade optical flow between two 8-bit single-channel frames.
// Parameters:
//   srcarrA, srcarrB - source frames (8uC1, equal size and step)
//   winSize          - averaging window size
//   velarrx, velarry - output flow components (32fC1, same size as sources)
//F*/
CV_IMPL void cvCalcOpticalFlowLK( const void* srcarrA, const void* srcarrB,
                                  CvSize winSize, void* velarrx, void* velarry )
{
    CvMat stubA, stubB, stubx, stuby;
    CvMat* srcA = cvGetMat( srcarrA, &stubA );
    CvMat* srcB = cvGetMat( srcarrB, &stubB );
    CvMat* velx = cvGetMat( velarrx, &stubx );
    CvMat* vely = cvGetMat( velarry, &stuby );

    /* the two frames must agree, the two flow planes must agree,
       and frames must match the flow planes in size */
    if( !CV_ARE_TYPES_EQ( srcA, srcB ))
        CV_Error( CV_StsUnmatchedFormats, "Source images have different formats" );

    if( !CV_ARE_TYPES_EQ( velx, vely ))
        CV_Error( CV_StsUnmatchedFormats, "Destination images have different formats" );

    if( !CV_ARE_SIZES_EQ( srcA, srcB ) ||
        !CV_ARE_SIZES_EQ( velx, vely ) ||
        !CV_ARE_SIZES_EQ( srcA, velx ))
        CV_Error( CV_StsUnmatchedSizes, "" );

    if( CV_MAT_TYPE( srcA->type ) != CV_8UC1 ||
        CV_MAT_TYPE( velx->type ) != CV_32FC1 )
        CV_Error( CV_StsUnsupportedFormat, "Source images must have 8uC1 type and "
                                           "destination images must have 32fC1 type" );

    if( srcA->step != srcB->step || velx->step != vely->step )
        CV_Error( CV_BadStep, "source and destination images have different step" );

    IPPI_CALL( icvCalcOpticalFlowLK_8u32fR( (uchar*)srcA->data.ptr,
                                            (uchar*)srcB->data.ptr,
                                            srcA->step, cvGetMatSize( srcA ),
                                            winSize,
                                            velx->data.fl, vely->data.fl,
                                            velx->step ));
}
/* Copies the source array into the interior of the destination at "offset"
 * (left/top border widths) and fills the surrounding border according to
 * "bordertype": IPL_BORDER_REPLICATE, IPL_BORDER_REFLECT_101 or
 * IPL_BORDER_CONSTANT (the latter uses "value" for the border pixels). */
CV_IMPL void
cvCopyMakeBorder( const CvArr* srcarr, CvArr* dstarr, CvPoint offset,
                  int bordertype, CvScalar value )
{
    CvMat srcstub, *src = (CvMat*)srcarr;
    CvMat dststub, *dst = (CvMat*)dstarr;
    CvSize srcsize, dstsize;
    int srcstep, dststep;
    int pix_size, type;

    if( !CV_IS_MAT(src) )
        src = cvGetMat( src, &srcstub );
    if( !CV_IS_MAT(dst) )
        dst = cvGetMat( dst, &dststub );

    if( offset.x < 0 || offset.y < 0 )
        CV_Error( CV_StsOutOfRange, "Offset (left/top border width) is negative" );

    if( src->rows + offset.y > dst->rows || src->cols + offset.x > dst->cols )
        CV_Error( CV_StsBadSize, "Source array is too big or destination array is too small" );

    if( !CV_ARE_TYPES_EQ( src, dst ))
        CV_Error( CV_StsUnmatchedFormats, "" );

    type = CV_MAT_TYPE(src->type);
    pix_size = CV_ELEM_SIZE(type);
    srcsize = cvGetMatSize(src);
    dstsize = cvGetMatSize(dst);

    /* a zero step marks a single-row matrix; substitute the stub step */
    srcstep = src->step;
    dststep = dst->step;
    if( srcstep == 0 )
        srcstep = CV_STUB_STEP;
    if( dststep == 0 )
        dststep = CV_STUB_STEP;

    switch( bordertype & 15 )
    {
    case IPL_BORDER_REPLICATE:
        icvCopyReplicateBorder_8u( src->data.ptr, srcstep, srcsize,
                                   dst->data.ptr, dststep, dstsize,
                                   offset.y, offset.x, pix_size );
        break;
    case IPL_BORDER_REFLECT_101:
        icvCopyReflect101Border_8u( src->data.ptr, srcstep, srcsize,
                                    dst->data.ptr, dststep, dstsize,
                                    offset.y, offset.x, pix_size );
        break;
    case IPL_BORDER_CONSTANT:
        {
            double buf[4];
            cvScalarToRawData( &value, buf, src->type, 0 );
            icvCopyConstBorder_8u( src->data.ptr, srcstep, srcsize,
                                   dst->data.ptr, dststep, dstsize,
                                   offset.y, offset.x, pix_size, (uchar*)buf );
        }
        break;
    default:
        CV_Error( CV_StsBadFlag, "Unknown/unsupported border type" );
    }
}
// Rearrange the quadrants of Fourier image so that the origin is at // the image center // src & dst arrays of equal size & type void cvShiftDFT(CvArr *src_arr, CvArr *dst_arr ) { CvMat *tmp = NULL; CvMat q1stub, q2stub; CvMat q3stub, q4stub; CvMat d1stub, d2stub; CvMat d3stub, d4stub; CvMat *q1, *q2, *q3, *q4; CvMat *d1, *d2, *d3, *d4; CvSize size = cvGetSize(src_arr); CvSize dst_size = cvGetSize(dst_arr); int cx, cy; if(dst_size.width != size.width || dst_size.height != size.height){ cvError( CV_StsUnmatchedSizes, "cvShiftDFT", "Source and Destination arrays must have equal sizes", __FILE__, __LINE__ ); } if(src_arr==dst_arr){ tmp = rb_cvCreateMat(size.height/2, size.width/2, cvGetElemType(src_arr)); } cx = size.width/2; cy = size.height/2; // image center q1 = cvGetSubRect( src_arr, &q1stub, cvRect(0,0,cx, cy) ); q2 = cvGetSubRect( src_arr, &q2stub, cvRect(cx,0,cx,cy) ); q3 = cvGetSubRect( src_arr, &q3stub, cvRect(cx,cy,cx,cy) ); q4 = cvGetSubRect( src_arr, &q4stub, cvRect(0,cy,cx,cy) ); d1 = cvGetSubRect( src_arr, &d1stub, cvRect(0,0,cx,cy) ); d2 = cvGetSubRect( src_arr, &d2stub, cvRect(cx,0,cx,cy) ); d3 = cvGetSubRect( src_arr, &d3stub, cvRect(cx,cy,cx,cy) ); d4 = cvGetSubRect( src_arr, &d4stub, cvRect(0,cy,cx,cy) ); if(src_arr!=dst_arr){ if( !CV_ARE_TYPES_EQ( q1, d1 )){ cvError( CV_StsUnmatchedFormats, "cvShiftDFT", "Source and Destination arrays must have the same format", __FILE__, __LINE__ ); } cvCopy(q3, d1, 0); cvCopy(q4, d2, 0); cvCopy(q1, d3, 0); cvCopy(q2, d4, 0); } else{ cvCopy(q3, tmp, 0); cvCopy(q1, q3, 0); cvCopy(tmp, q1, 0); cvCopy(q4, tmp, 0); cvCopy(q2, q4, 0); cvCopy(tmp, q2, 0); } if (tmp != NULL) { cvReleaseMat(&tmp); } }
/*F///////////////////////////////////////////////////////////////////////////
// Name:    cvCalcOpticalFlowHS
// Purpose: Horn-Schunck dense optical flow between two 8uC1 frames.
// Parameters:
//   srcarrA, srcarrB - source frames (8uC1, equal size and step)
//   usePrevious      - non-zero if velx/vely hold an initial approximation
//   velarrx, velarry - output flow components (32fC1, same size as sources)
//   lambda           - smoothness (regularization) weight
//   criteria         - iteration termination rule
//F*/
CV_IMPL void cvCalcOpticalFlowHS( const void* srcarrA, const void* srcarrB,
                                  int usePrevious, void* velarrx, void* velarry,
                                  double lambda, CvTermCriteria criteria )
{
    CV_FUNCNAME( "cvCalcOpticalFlowHS" );

    __BEGIN__;

    CvMat stubA, *srcA = (CvMat*)srcarrA;
    CvMat stubB, *srcB = (CvMat*)srcarrB;
    CvMat stubx, *velx = (CvMat*)velarrx;
    CvMat stuby, *vely = (CvMat*)velarry;

    CV_CALL( srcA = cvGetMat( srcA, &stubA ));
    CV_CALL( srcB = cvGetMat( srcB, &stubB ));
    CV_CALL( velx = cvGetMat( velx, &stubx ));
    CV_CALL( vely = cvGetMat( vely, &stuby ));

    /* the two frames must agree, the two flow planes must agree,
       and frames must match the flow planes in size */
    if( !CV_ARE_TYPES_EQ( srcA, srcB ))
        CV_ERROR( CV_StsUnmatchedFormats, "Source images have different formats" );

    if( !CV_ARE_TYPES_EQ( velx, vely ))
        CV_ERROR( CV_StsUnmatchedFormats, "Destination images have different formats" );

    if( !CV_ARE_SIZES_EQ( srcA, srcB ) ||
        !CV_ARE_SIZES_EQ( velx, vely ) ||
        !CV_ARE_SIZES_EQ( srcA, velx ))
        CV_ERROR( CV_StsUnmatchedSizes, "" );

    if( CV_MAT_TYPE( srcA->type ) != CV_8UC1 ||
        CV_MAT_TYPE( velx->type ) != CV_32FC1 )
        CV_ERROR( CV_StsUnsupportedFormat, "Source images must have 8uC1 type and "
                                           "destination images must have 32fC1 type" );

    if( srcA->step != srcB->step || velx->step != vely->step )
        CV_ERROR( CV_BadStep, "source and destination images have different step" );

    IPPI_CALL( icvCalcOpticalFlowHS_8u32fR( (uchar*)srcA->data.ptr,
                                            (uchar*)srcB->data.ptr,
                                            srcA->step, cvGetMatSize( srcA ),
                                            usePrevious,
                                            velx->data.fl, vely->data.fl,
                                            velx->step, (float)lambda, criteria ));

    __END__;
}
/*F///////////////////////////////////////////////////////////////////////////
// Name:    cvConDensInitSampleSet
// Purpose: Initializes the sample set (particles) of a CONDENSATION tracker.
// Parameters:
//   conDens    - CvConDensation structure to initialize (DP state dims,
//                SamplesNum particles)
//   lowerBound - DP x 1 32FC1 vector of per-state lower bounds
//   upperBound - DP x 1 32FC1 vector of per-state upper bounds
//F*/
CV_IMPL void
cvConDensInitSampleSet( CvConDensation * conDens, CvMat * lowerBound, CvMat * upperBound )
{
    int i, j;
    float *LBound;
    float *UBound;
    float Prob;

    CV_FUNCNAME( "cvConDensInitSampleSet" );
    __BEGIN__;

    if( !conDens || !lowerBound || !upperBound )
        CV_ERROR( CV_StsNullPtr, "" );

    /* BUGFIX: 1/SamplesNum was previously computed in the declaration,
       dereferencing conDens before the NULL check above. */
    Prob = 1.f / conDens->SamplesNum;

    if( CV_MAT_TYPE(lowerBound->type) != CV_32FC1 ||
        !CV_ARE_TYPES_EQ(lowerBound,upperBound) )
        CV_ERROR( CV_StsBadArg, "source has not appropriate format" );

    if( (lowerBound->cols != 1) || (upperBound->cols != 1) )
        CV_ERROR( CV_StsBadArg, "source has not appropriate size" );

    if( (lowerBound->rows != conDens->DP) || (upperBound->rows != conDens->DP) )
        CV_ERROR( CV_StsBadArg, "source has not appropriate size" );

    LBound = lowerBound->data.fl;
    UBound = upperBound->data.fl;

    /* Initializing the structures to create initial Sample set:
       give each state dimension its own RNG spanning the supplied
       dynamic range [LBound[i], UBound[i]] */
    for( i = 0; i < conDens->DP; i++ )
    {
        cvRandInit( &(conDens->RandS[i]), LBound[i], UBound[i], i );
    }

    /* Generating the samples: draw an initial value for every state of
       every particle, and give every particle the same confidence 1/N */
    for( j = 0; j < conDens->SamplesNum; j++ )
    {
        for( i = 0; i < conDens->DP; i++ )
        {
            cvbRand( conDens->RandS + i, conDens->flSamples[j] + i, 1 );
        }
        conDens->flConfidence[j] = Prob;
    }

    /* Reinitializes the structures to update samples randomly: the RNGs
       used by the subsequent diffusion step span -1/5..+1/5 of the
       original dynamic range */
    for( i = 0; i < conDens->DP; i++ )
    {
        cvRandInit( &(conDens->RandS[i]),
                    (LBound[i] - UBound[i]) / 5,
                    (UBound[i] - LBound[i]) / 5,
                    i);
    }

    __END__;
}
/* Splits an interlaced frame into its even and odd fields.
 *   framearr:  interlaced input (2N rows)
 *   fieldEven: receives rows 0,2,4,... of the frame (N rows)
 *   fieldOdd:  receives rows 1,3,5,... of the frame (N rows)
 * All three arrays must have the same type; the frame must be exactly
 * twice as tall as each field and equally wide. */
CV_IMPL void
cvDeInterlace( const CvArr* framearr, CvArr* fieldEven, CvArr* fieldOdd )
{
    CV_FUNCNAME("cvDeInterlace");

    __BEGIN__;

    CvMat frame_stub, *frame = (CvMat*)framearr;
    CvMat even_stub, *even = (CvMat*)fieldEven;
    CvMat odd_stub, *odd = (CvMat*)fieldOdd;
    CvSize size;
    int y;

    CV_CALL( frame = cvGetMat( frame, &frame_stub ));
    CV_CALL( even = cvGetMat( even, &even_stub ));
    CV_CALL( odd = cvGetMat( odd, &odd_stub ));

    if( !CV_ARE_TYPES_EQ( frame, even ) || !CV_ARE_TYPES_EQ( frame, odd ))
        CV_ERROR( CV_StsUnmatchedFormats, "All the input images must have the same type" );

    if( frame->cols != even->cols || frame->cols != odd->cols ||
        frame->rows != even->rows*2 || odd->rows != even->rows )
        CV_ERROR( CV_StsUnmatchedSizes, "Uncorrelated sizes of the input image and output fields" );

    size = cvGetMatSize( even );
    size.width *= CV_ELEM_SIZE( even->type );  // row length in bytes

    for( y = 0; y < size.height; y++ )
    {
        memcpy( even->data.ptr + even->step*y,
                frame->data.ptr + frame->step*y*2, size.width );
        /* BUGFIX: address the odd field with its own step (the original used
           even->step here, which corrupts output whenever the two fields
           have different row strides) */
        memcpy( odd->data.ptr + odd->step*y,
                frame->data.ptr + frame->step*(y*2+1), size.width );
    }

    __END__;
}
/* Initializes the CONDENSATION sample set:
 * one RNG per state dimension spanning [lowerBound, upperBound], initial
 * sample values drawn from those ranges, uniform confidences 1/SamplesNum,
 * then the RNGs are re-seeded for the +/- range/5 diffusion step.
 *   conDens    - CvConDensation structure (DP dims, SamplesNum particles)
 *   lowerBound - DP x 1 32FC1 vector of per-state lower bounds
 *   upperBound - DP x 1 32FC1 vector of per-state upper bounds */
CV_IMPL void
cvConDensInitSampleSet( CvConDensation * conDens, CvMat * lowerBound, CvMat * upperBound )
{
    int i, j;
    float *LBound;
    float *UBound;
    float Prob;

    if( !conDens || !lowerBound || !upperBound )
        CV_Error( CV_StsNullPtr, "" );

    /* BUGFIX: was computed in the declaration, dereferencing conDens
       before the NULL check above. */
    Prob = 1.f / conDens->SamplesNum;

    if( CV_MAT_TYPE(lowerBound->type) != CV_32FC1 ||
        !CV_ARE_TYPES_EQ(lowerBound,upperBound) )
        CV_Error( CV_StsBadArg, "source has not appropriate format" );

    if( (lowerBound->cols != 1) || (upperBound->cols != 1) )
        CV_Error( CV_StsBadArg, "source has not appropriate size" );

    if( (lowerBound->rows != conDens->DP) || (upperBound->rows != conDens->DP) )
        CV_Error( CV_StsBadArg, "source has not appropriate size" );

    LBound = lowerBound->data.fl;
    UBound = upperBound->data.fl;

    /* Initializing the structures to create initial Sample set */
    for( i = 0; i < conDens->DP; i++ )
    {
        cvRandInit( &(conDens->RandS[i]), LBound[i], UBound[i], i );
    }

    /* Generating the samples */
    for( j = 0; j < conDens->SamplesNum; j++ )
    {
        for( i = 0; i < conDens->DP; i++ )
        {
            cvbRand( conDens->RandS + i, conDens->flSamples[j] + i, 1 );
        }
        conDens->flConfidence[j] = Prob;
    }

    /* Reinitializes the structures to update samples randomly */
    for( i = 0; i < conDens->DP; i++ )
    {
        cvRandInit( &(conDens->RandS[i]),
                    (LBound[i] - UBound[i]) / 5,
                    (UBound[i] - LBound[i]) / 5,
                    i);
    }
}
/*F///////////////////////////////////////////////////////////////////////////
// Name:    cvAdaptiveThreshold
// Purpose: Adaptive thresholding of the source image.
// Parameters:
//   srcIm      - source image (8uC1)
//   dstIm      - result image (same type and size as srcIm)
//   maxValue   - maximum value assigned to thresholded pixels
//   method     - CV_STDDEV_ADAPTIVE_THRESH (the only supported method)
//   type       - thresholding type:
//                  CV_THRESH_BINARY     - val = (val > Thresh ? MAX : 0)
//                  CV_THRESH_BINARY_INV - val = (val > Thresh ? 0 : MAX)
//                  CV_THRESH_TOZERO     - val = (val > Thresh ? val : 0)
//                  CV_THRESH_TOZERO_INV - val = (val > Thresh ? 0 : val)
//   parameters - method parameters; for CV_STDDEV_ADAPTIVE_THRESH,
//                parameters[0] is the neighborhood size code
//                (1=(3x3), 2=(5x5), 3=(7x7)) and parameters[1] the
//                minimum variance. Must not be NULL.
//F*/
CV_IMPL void
cvAdaptiveThreshold( const void *srcIm, void *dstIm, double maxValue,
                     CvAdaptiveThreshMethod method, CvThreshType type,
                     double *parameters )
{
    CvMat src_stub, dst_stub;
    CvMat *src = 0, *dst = 0;

    CV_FUNCNAME( "cvAdaptiveThreshold" );

    __BEGIN__;

    CV_CALL( src = cvGetMat( srcIm, &src_stub ));
    CV_CALL( dst = cvGetMat( dstIm, &dst_stub ));

    if( !CV_ARE_TYPES_EQ( src, dst ))
        CV_ERROR( CV_StsUnmatchedFormats, "" );

    if( CV_ARR_TYPE(src->type) != CV_8UC1 )
        CV_ERROR( CV_StsUnsupportedFormat, "" );

    if( !CV_ARE_SIZES_EQ( src, dst ) )
        CV_ERROR( CV_StsUnmatchedSizes, "" );

    switch (method)
    {
    case CV_STDDEV_ADAPTIVE_THRESH:
        /* BUGFIX: guard the two dereferences below against a NULL
           parameters pointer (previously crashed). */
        if( !parameters )
            CV_ERROR( CV_StsNullPtr, "parameters must not be NULL for CV_STDDEV_ADAPTIVE_THRESH" );
        icvAdaptiveThreshold_StdDev( src, dst, cvRound(maxValue), type,
                                     cvRound( parameters[0] ),
                                     cvRound( parameters[1] ));
        break;
    default:
        CV_ERROR_FROM_STATUS( CV_BADCOEF_ERR );
    }

    __END__;
}
/* Compares a template against every overlapped image region and writes the
   comparison map to "result" (32FC1, (W-w+1) x (H-h+1)).  Supports the six
   CV_TM_* methods: plain cross-correlation is computed directly by
   icvCrossCorr(); the other five methods are derived from it using integral
   images (sum/sqsum), which give the window sums and squared sums in O(1)
   per output pixel. */
CV_IMPL void
cvMatchTemplate( const CvArr* _img, const CvArr* _templ, CvArr* _result, int method )
{
    cv::Ptr<CvMat> sum, sqsum;      // integral image and squared-integral image
    int coi1 = 0, coi2 = 0;
    int depth, cn;
    int i, j, k;
    CvMat stub, *img = (CvMat*)_img;
    CvMat tstub, *templ = (CvMat*)_templ;
    CvMat rstub, *result = (CvMat*)_result;
    CvScalar templ_mean = cvScalarAll(0);
    double templ_norm = 0, templ_sum2 = 0;
    int idx = 0, idx2 = 0;
    double *p0, *p1, *p2, *p3;      // window corner pointers into sum
    double *q0, *q1, *q2, *q3;      // window corner pointers into sqsum
    double inv_area;
    int sum_step, sqsum_step;
    // 0 = CCORR family, 1 = CCOEFF family, 2 = SQDIFF family
    int num_type = method == CV_TM_CCORR || method == CV_TM_CCORR_NORMED ? 0 :
        method == CV_TM_CCOEFF || method == CV_TM_CCOEFF_NORMED ? 1 : 2;
    int is_normed = method == CV_TM_CCORR_NORMED ||
        method == CV_TM_SQDIFF_NORMED ||
        method == CV_TM_CCOEFF_NORMED;

    img = cvGetMat( img, &stub, &coi1 );
    templ = cvGetMat( templ, &tstub, &coi2 );
    result = cvGetMat( result, &rstub );

    if( CV_MAT_DEPTH( img->type ) != CV_8U &&
        CV_MAT_DEPTH( img->type ) != CV_32F )
        CV_Error( CV_StsUnsupportedFormat,
                  "The function supports only 8u and 32f data types" );

    if( !CV_ARE_TYPES_EQ( img, templ ))
        CV_Error( CV_StsUnmatchedSizes, "image and template should have the same type" );

    if( CV_MAT_TYPE( result->type ) != CV_32FC1 )
        CV_Error( CV_StsUnsupportedFormat, "output image should have 32f type" );

    // if the "template" is larger than the image, swap the two roles
    if( img->rows < templ->rows || img->cols < templ->cols )
    {
        CvMat* t;
        CV_SWAP( img, templ, t );
    }

    if( result->rows != img->rows - templ->rows + 1 ||
        result->cols != img->cols - templ->cols + 1 )
        CV_Error( CV_StsUnmatchedSizes, "output image should be (W - w + 1)x(H - h + 1)" );

    if( method < CV_TM_SQDIFF || method > CV_TM_CCOEFF_NORMED )
        CV_Error( CV_StsBadArg, "unknown comparison method" );

    depth = CV_MAT_DEPTH(img->type);
    cn = CV_MAT_CN(img->type);

    // base cross-correlation; all remaining methods post-process this result
    icvCrossCorr( img, templ, result );

    if( method == CV_TM_CCORR )
        return;

    inv_area = 1./((double)templ->rows * templ->cols);

    sum = cvCreateMat( img->rows + 1, img->cols + 1, CV_MAKETYPE( CV_64F, cn ));
    if( method == CV_TM_CCOEFF )
    {
        cvIntegral( img, sum, 0, 0 );
        templ_mean = cvAvg( templ );
        q0 = q1 = q2 = q3 = 0;
    }
    else
    {
        CvScalar _templ_sdv = cvScalarAll(0);
        sqsum = cvCreateMat( img->rows + 1, img->cols + 1, CV_MAKETYPE( CV_64F, cn ));
        cvIntegral( img, sum, sqsum, 0 );
        cvAvgSdv( templ, &templ_mean, &_templ_sdv );

        templ_norm = CV_SQR(_templ_sdv.val[0]) + CV_SQR(_templ_sdv.val[1]) +
            CV_SQR(_templ_sdv.val[2]) + CV_SQR(_templ_sdv.val[3]);

        // a constant template correlates equally well with everything
        if( templ_norm < DBL_EPSILON && method == CV_TM_CCOEFF_NORMED )
        {
            cvSet( result, cvScalarAll(1.) );
            return;
        }

        templ_sum2 = templ_norm + CV_SQR(templ_mean.val[0]) + CV_SQR(templ_mean.val[1]) +
            CV_SQR(templ_mean.val[2]) + CV_SQR(templ_mean.val[3]);

        if( num_type != 1 )  // not CCOEFF: use raw (non-centered) moments
        {
            templ_mean = cvScalarAll(0);
            templ_norm = templ_sum2;
        }

        templ_sum2 /= inv_area;
        templ_norm = sqrt(templ_norm);
        templ_norm /= sqrt(inv_area); // care of accuracy here

        q0 = (double*)sqsum->data.ptr;
        q1 = q0 + templ->cols*cn;
        q2 = (double*)(sqsum->data.ptr + templ->rows*sqsum->step);
        q3 = q2 + templ->cols*cn;
    }

    p0 = (double*)sum->data.ptr;
    p1 = p0 + templ->cols*cn;
    p2 = (double*)(sum->data.ptr + templ->rows*sum->step);
    p3 = p2 + templ->cols*cn;

    sum_step = sum ? sum->step / sizeof(double) : 0;
    sqsum_step = sqsum ? sqsum->step / sizeof(double) : 0;

    for( i = 0; i < result->rows; i++ )
    {
        float* rrow = (float*)(result->data.ptr + i*result->step);
        idx = i * sum_step;
        idx2 = i * sqsum_step;

        for( j = 0; j < result->cols; j++, idx += cn, idx2 += cn )
        {
            double num = rrow[j], t;
            double wnd_mean2 = 0, wnd_sum2 = 0;

            if( num_type == 1 )  // CCOEFF: subtract window-sum * template-mean
            {
                for( k = 0; k < cn; k++ )
                {
                    // window sum via 4-corner integral-image lookup
                    t = p0[idx+k] - p1[idx+k] - p2[idx+k] + p3[idx+k];
                    wnd_mean2 += CV_SQR(t);
                    num -= t*templ_mean.val[k];
                }
                wnd_mean2 *= inv_area;
            }

            if( is_normed || num_type == 2 )
            {
                for( k = 0; k < cn; k++ )
                {
                    t = q0[idx2+k] - q1[idx2+k] - q2[idx2+k] + q3[idx2+k];
                    wnd_sum2 += t;
                }

                if( num_type == 2 )  // SQDIFF derived from CCORR
                    num = wnd_sum2 - 2*num + templ_sum2;
            }

            if( is_normed )
            {
                t = sqrt(MAX(wnd_sum2 - wnd_mean2,0))*templ_norm;
                if( fabs(num) < t )
                    num /= t;
                else if( fabs(num) < t*1.125 )  // tolerate small round-off overshoot
                    num = num > 0 ? 1 : -1;
                else  // degenerate window: worst score for this method
                    num = method != CV_TM_SQDIFF_NORMED ? 0 : 1;
            }

            rrow[j] = (float)num;
        }
    }
}
/* Kolmogorov-Zabih graph-cut stereo correspondence.  Repeatedly applies
   alpha-expansion moves over a randomly shuffled set of disparity labels
   until the energy E stops improving (or maxIters passes complete), then
   writes the left/right disparity maps.  When useDisparityGuess is non-zero
   the supplied dispLeft/dispRight seed the labeling; otherwise every pixel
   starts as OCCLUDED. */
CV_IMPL void
cvFindStereoCorrespondenceGC( const CvArr* _left, const CvArr* _right,
                              CvArr* _dispLeft, CvArr* _dispRight,
                              CvStereoGCState* state, int useDisparityGuess )
{
    CvStereoGCState2 state2;
    state2.orphans = 0;
    state2.maxOrphans = 0;

    CvMat lstub, *left = cvGetMat(_left, &lstub);
    CvMat rstub, *right = cvGetMat(_right, &rstub);
    CvMat dlstub, *dispLeft = cvGetMat(_dispLeft, &dlstub);
    CvMat drstub, *dispRight = cvGetMat(_dispRight, &drstub);
    CvSize size;
    int iter, i, nZeroExpansions = 0;
    CvRNG rng = cvRNG(-1);
    int* disp;
    CvMat _disp;
    int64 E;

    CV_Assert(state != 0);
    CV_Assert(CV_ARE_SIZES_EQ(left, right) && CV_ARE_TYPES_EQ(left, right) &&
              CV_MAT_TYPE(left->type) == CV_8UC1);
    CV_Assert(!dispLeft ||
              (CV_ARE_SIZES_EQ(dispLeft, left) && CV_MAT_CN(dispLeft->type) == 1));
    CV_Assert(!dispRight ||
              (CV_ARE_SIZES_EQ(dispRight, left) && CV_MAT_CN(dispRight->type) == 1));

    size = cvGetSize(left);

    // (re)allocate per-state work buffers when the frame size changed
    if (!state->left || state->left->width != size.width ||
        state->left->height != size.height)
    {
        // record sizes of graph node/edge structures measured in ints
        int pcn = (int)(sizeof(GCVtx*) / sizeof(int));
        int vcn = (int)(sizeof(GCVtx) / sizeof(int));
        int ecn = (int)(sizeof(GCEdge) / sizeof(int));

        cvReleaseMat(&state->left);
        cvReleaseMat(&state->right);
        cvReleaseMat(&state->ptrLeft);
        cvReleaseMat(&state->ptrRight);
        cvReleaseMat(&state->dispLeft);
        cvReleaseMat(&state->dispRight);

        state->left = cvCreateMat(size.height, size.width, CV_8UC3);
        state->right = cvCreateMat(size.height, size.width, CV_8UC3);
        state->dispLeft = cvCreateMat(size.height, size.width, CV_16SC1);
        state->dispRight = cvCreateMat(size.height, size.width, CV_16SC1);
        state->ptrLeft = cvCreateMat(size.height, size.width, CV_32SC(pcn));
        state->ptrRight = cvCreateMat(size.height, size.width, CV_32SC(pcn));
        state->vtxBuf = cvCreateMat(1, size.height * size.width * 2, CV_32SC(vcn));
        state->edgeBuf = cvCreateMat(1, size.height * size.width * 12 + 16, CV_32SC(ecn));
    }

    if (!useDisparityGuess)
    {
        cvSet(state->dispLeft, cvScalarAll(OCCLUDED));
        cvSet(state->dispRight, cvScalarAll(OCCLUDED));
    }
    else
    {
        CV_Assert(dispLeft && dispRight);
        cvConvert(dispLeft, state->dispLeft);
        cvConvert(dispRight, state->dispRight);
    }

    // convert floating-point energy weights to fixed point (x DENOMINATOR)
    state2.Ithreshold = state->Ithreshold;
    state2.interactionRadius = state->interactionRadius;
    state2.lambda = cvRound(state->lambda * DENOMINATOR);
    state2.lambda1 = cvRound(state->lambda1 * DENOMINATOR);
    state2.lambda2 = cvRound(state->lambda2 * DENOMINATOR);
    state2.K = cvRound(state->K * DENOMINATOR);

    icvInitStereoConstTabs();
    icvInitGraySubpix(left, right, state->left, state->right);

    // build the disparity label list and shuffle the visiting order
    disp = (int*)cvStackAlloc(state->numberOfDisparities * sizeof(disp[0]));
    _disp = cvMat(1, state->numberOfDisparities, CV_32S, disp);
    cvRange(&_disp, state->minDisparity,
            state->minDisparity + state->numberOfDisparities);
    cvRandShuffle(&_disp, &rng);

    // derive any unset parameters: K/lambda1/lambda2 default to multiples of lambda
    if (state2.lambda < 0 && (state2.K < 0 || state2.lambda1 < 0 || state2.lambda2 < 0))
    {
        float L = icvComputeK(state) * 0.2f;
        state2.lambda = cvRound(L * DENOMINATOR);
    }
    if (state2.K < 0)
    {
        state2.K = state2.lambda * 5;
    }
    if (state2.lambda1 < 0)
    {
        state2.lambda1 = state2.lambda * 3;
    }
    if (state2.lambda2 < 0)
    {
        state2.lambda2 = state2.lambda;
    }

    icvInitStereoTabs(&state2);

    E = icvComputeEnergy(state, &state2, !useDisparityGuess);
    for (iter = 0; iter < state->maxIters; iter++)
    {
        for (i = 0; i < state->numberOfDisparities; i++)
        {
            int alpha = disp[i];
            int64 Enew = icvAlphaExpand(E, -alpha, state, &state2);
            if (Enew < E)
            {
                nZeroExpansions = 0;
                E = Enew;
            }
            else if (++nZeroExpansions >= state->numberOfDisparities)
            {
                // no label improved the energy for a full cycle: converged
                break;
            }
        }
    }

    if (dispLeft)
    {
        cvConvert(state->dispLeft, dispLeft);
    }
    if (dispRight)
    {
        cvConvert(state->dispRight, dispRight);
    }

    cvFree(&state2.orphans);
}
/* Precomputes the per-pixel remap tables (for cvRemap) that undo lens
   distortion.  A is the 3x3 intrinsic camera matrix; dist_coeffs is a
   4-element vector (k1, k2, p1, p2) of radial and tangential distortion
   coefficients.  mapx/mapy receive, for each destination pixel, the
   source coordinates in the distorted image (both 32FC1). */
CV_IMPL void
cvInitUndistortMap( const CvMat* A, const CvMat* dist_coeffs,
                    CvArr* mapxarr, CvArr* mapyarr )
{
    uchar* buffer = 0;

    CV_FUNCNAME( "cvInitUndistortMap" );

    __BEGIN__;

    float a[9], k[4];
    int coi1 = 0, coi2 = 0;
    CvMat mapxstub, *_mapx = (CvMat*)mapxarr;
    CvMat mapystub, *_mapy = (CvMat*)mapyarr;
    float *mapx, *mapy;
    CvMat _a = cvMat( 3, 3, CV_32F, a ), _k;
    int mapxstep, mapystep;
    int u, v;
    float u0, v0, fx, fy, _fx, _fy, k1, k2, p1, p2;
    CvSize size;

    CV_CALL( _mapx = cvGetMat( _mapx, &mapxstub, &coi1 ));
    CV_CALL( _mapy = cvGetMat( _mapy, &mapystub, &coi2 ));

    if( coi1 != 0 || coi2 != 0 )
        CV_ERROR( CV_BadCOI, "The function does not support COI" );

    if( CV_MAT_TYPE(_mapx->type) != CV_32FC1 )
        CV_ERROR( CV_StsUnsupportedFormat, "Both maps must have 32fC1 type" );

    if( !CV_ARE_TYPES_EQ( _mapx, _mapy ))
        CV_ERROR( CV_StsUnmatchedFormats, "" );

    if( !CV_ARE_SIZES_EQ( _mapx, _mapy ))
        CV_ERROR( CV_StsUnmatchedSizes, "" );

    if( !CV_IS_MAT(A) || A->rows != 3 || A->cols != 3 ||
        CV_MAT_TYPE(A->type) != CV_32FC1 && CV_MAT_TYPE(A->type) != CV_64FC1 )
        CV_ERROR( CV_StsBadArg, "Intrinsic matrix must be a valid 3x3 floating-point matrix" );

    if( !CV_IS_MAT(dist_coeffs) || dist_coeffs->rows != 1 && dist_coeffs->cols != 1 ||
        dist_coeffs->rows*dist_coeffs->cols*CV_MAT_CN(dist_coeffs->type) != 4 ||
        CV_MAT_DEPTH(dist_coeffs->type) != CV_64F &&
        CV_MAT_DEPTH(dist_coeffs->type) != CV_32F )
        CV_ERROR( CV_StsBadArg,
            "Distortion coefficients must be 1x4 or 4x1 floating-point vector" );

    // copy intrinsics and distortion coefficients into local float arrays
    cvConvert( A, &_a );
    _k = cvMat( dist_coeffs->rows, dist_coeffs->cols,
                CV_MAKETYPE(CV_32F, CV_MAT_CN(dist_coeffs->type)), k );
    cvConvert( dist_coeffs, &_k );

    u0 = a[2]; v0 = a[5];        // principal point
    fx = a[0]; fy = a[4];        // focal lengths
    _fx = 1.f/fx; _fy = 1.f/fy;
    k1 = k[0]; k2 = k[1];        // radial distortion
    p1 = k[2]; p2 = k[3];        // tangential distortion

    mapxstep = _mapx->step ? _mapx->step : CV_STUB_STEP;
    mapystep = _mapy->step ? _mapy->step : CV_STUB_STEP;
    mapx = _mapx->data.fl;
    mapy = _mapy->data.fl;

    size = cvGetMatSize(_mapx);

    /*if( icvUndistortGetSize_p && icvCreateMapCameraUndistort_32f_C1R_p )
    {
        int buf_size = 0;
        if( icvUndistortGetSize_p( size, &buf_size ) && buf_size > 0 )
        {
            CV_CALL( buffer = (uchar*)cvAlloc( buf_size ));
            if( icvCreateMapCameraUndistort_32f_C1R_p(
                mapx, mapxstep, mapy, mapystep, size,
                a[0], a[4], a[2], a[5], k[0], k[1], k[2], k[3], buffer ) >= 0 )
                EXIT;
        }
    }*/

    // convert byte steps to float-element steps
    mapxstep /= sizeof(mapx[0]);
    mapystep /= sizeof(mapy[0]);

    for( v = 0; v < size.height; v++, mapx += mapxstep, mapy += mapystep )
    {
        // normalized camera coordinates of the current row
        float y = (v - v0)*_fy;
        float y2 = y*y;
        // row-invariant pieces of the tangential distortion terms
        float _2p1y = 2*p1*y;
        float _3p1y2 = 3*p1*y2;
        float p2y2 = p2*y2;

        for( u = 0; u < size.width; u++ )
        {
            float x = (u - u0)*_fx;
            float x2 = x*x;
            float r2 = x2 + y2;
            float d = 1 + (k1 + k2*r2)*r2;   // radial distortion factor
            // distorted pixel coordinates (radial + tangential model)
            float _u = fx*(x*(d + _2p1y) + p2y2 + (3*p2)*x2) + u0;
            float _v = fy*(y*(d + (2*p2)*x) + _3p1y2 + p1*x2) + v0;
            mapx[u] = _u;
            mapy[u] = _v;
        }
    }

    __END__;

    cvFree( &buffer );
}
/* Applies a separable linear filter (kernelX along rows, kernelY along
   columns) using the IPP row/column convolution primitives.  The image is
   processed in horizontal stripes of at most max_dy rows so the intermediate
   buffer stays within a fixed memory budget; replicated borders are
   synthesized on the fly.  Handles in-place filtering (src == dst) through a
   staging buffer.  Returns 1 on success, 0 when IPP is unavailable or the
   element type is unsupported. */
int
icvIPPSepFilter( const CvMat* src, CvMat* dst, const CvMat* kernelX,
                 const CvMat* kernelY, CvPoint anchor )
{
    int result = 0;

    CvMat* top_bottom = 0;   // border-replicated stripe near the top/bottom edge
    CvMat* vout_hin = 0;     // vertical-pass output == horizontal-pass input
    CvMat* dst_buf = 0;      // staging buffer for in-place operation

    CV_FUNCNAME( "icvIPPSepFilter" );

    __BEGIN__;

    CvSize ksize;
    CvPoint el_anchor;
    CvSize size;
    int type, depth, pix_size;
    int i, x, y, dy = 0, prev_dy = 0, max_dy;
    CvMat vout;
    CvCopyNonConstBorderFunc copy_border_func;
    CvIPPSepFilterFunc x_func = 0, y_func = 0;
    int src_step, top_bottom_step;
    float *kx, *ky;
    int align, stripe_size;

    if( !icvFilterRow_8u_C1R_p )
        EXIT;   // IPP not loaded

    if( !CV_ARE_TYPES_EQ( src, dst ) || !CV_ARE_SIZES_EQ( src, dst ) ||
        !CV_IS_MAT_CONT(kernelX->type & kernelY->type) ||
        CV_MAT_TYPE(kernelX->type) != CV_32FC1 ||
        CV_MAT_TYPE(kernelY->type) != CV_32FC1 ||
        kernelX->cols != 1 && kernelX->rows != 1 ||
        kernelY->cols != 1 && kernelY->rows != 1 ||
        (unsigned)anchor.x >= (unsigned)(kernelX->cols + kernelX->rows - 1) ||
        (unsigned)anchor.y >= (unsigned)(kernelY->cols + kernelY->rows - 1) )
        CV_ERROR( CV_StsError, "Internal Error: incorrect parameters" );

    // kernels may be row or column vectors; cols+rows-1 is their length
    ksize.width = kernelX->cols + kernelX->rows - 1;
    ksize.height = kernelY->cols + kernelY->rows - 1;

    /*if( ksize.width <= 5 && ksize.height <= 5 )
    {
        float* ker = (float*)cvStackAlloc( ksize.width*ksize.height*sizeof(ker[0]));
        CvMat kernel = cvMat( ksize.height, ksize.width, CV_32F, ker );
        for( y = 0, i = 0; y < ksize.height; y++ )
            for( x = 0; x < ksize.width; x++, i++ )
                ker[i] = kernelY->data.fl[y]*kernelX->data.fl[x];
        CV_CALL( cvFilter2D( src, dst, &kernel, anchor ));
        EXIT;
    }*/

    type = CV_MAT_TYPE(src->type);
    depth = CV_MAT_DEPTH(type);
    pix_size = CV_ELEM_SIZE(type);

    // select the IPP row/column primitives matching the element type
    if( type == CV_8UC1 )
        x_func = icvFilterRow_8u_C1R_p, y_func = icvFilterColumn_8u_C1R_p;
    else if( type == CV_8UC3 )
        x_func = icvFilterRow_8u_C3R_p, y_func = icvFilterColumn_8u_C3R_p;
    else if( type == CV_8UC4 )
        x_func = icvFilterRow_8u_C4R_p, y_func = icvFilterColumn_8u_C4R_p;
    else if( type == CV_16SC1 )
        x_func = icvFilterRow_16s_C1R_p, y_func = icvFilterColumn_16s_C1R_p;
    else if( type == CV_16SC3 )
        x_func = icvFilterRow_16s_C3R_p, y_func = icvFilterColumn_16s_C3R_p;
    else if( type == CV_16SC4 )
        x_func = icvFilterRow_16s_C4R_p, y_func = icvFilterColumn_16s_C4R_p;
    else if( type == CV_32FC1 )
        x_func = icvFilterRow_32f_C1R_p, y_func = icvFilterColumn_32f_C1R_p;
    else if( type == CV_32FC3 )
        x_func = icvFilterRow_32f_C3R_p, y_func = icvFilterColumn_32f_C3R_p;
    else if( type == CV_32FC4 )
        x_func = icvFilterRow_32f_C4R_p, y_func = icvFilterColumn_32f_C4R_p;
    else
        EXIT;

    size = cvGetMatSize(src);
    // use a smaller stripe budget when filtering in place
    stripe_size = src->data.ptr == dst->data.ptr ? 1 << 15 : 1 << 16;
    max_dy = MAX( ksize.height - 1, stripe_size/(size.width + ksize.width - 1));
    max_dy = MIN( max_dy, size.height + ksize.height - 1 );

    align = 8/CV_ELEM_SIZE(depth);

    CV_CALL( top_bottom = cvCreateMat( ksize.height*2, cvAlign(size.width,align), type ));

    CV_CALL( vout_hin = cvCreateMat( max_dy + ksize.height,
        cvAlign(size.width + ksize.width - 1, align), type ));

    if( src->data.ptr == dst->data.ptr && size.height )
        CV_CALL( dst_buf = cvCreateMat( max_dy + ksize.height,
            cvAlign(size.width, align), type ));

    kx = (float*)cvStackAlloc( ksize.width*sizeof(kx[0]) );
    ky = (float*)cvStackAlloc( ksize.height*sizeof(ky[0]) );

    // mirror the kernels
    for( i = 0; i < ksize.width; i++ )
        kx[i] = kernelX->data.fl[ksize.width - i - 1];

    for( i = 0; i < ksize.height; i++ )
        ky[i] = kernelY->data.fl[ksize.height - i - 1];

    // anchor is mirrored with the kernels
    el_anchor = cvPoint( ksize.width - anchor.x - 1, ksize.height - anchor.y - 1 );

    cvGetCols( vout_hin, &vout, anchor.x, anchor.x + size.width );

    copy_border_func = icvGetCopyNonConstBorderFunc( pix_size, IPL_BORDER_REPLICATE );

    // zero steps mark single-row matrices; substitute the stub step
    src_step = src->step ? src->step : CV_STUB_STEP;
    top_bottom_step = top_bottom->step ? top_bottom->step : CV_STUB_STEP;
    vout.step = vout.step ? vout.step : CV_STUB_STEP;

    for( y = 0; y < size.height; y += dy )
    {
        const CvMat *vin = src, *hout = dst;
        int src_y = y, dst_y = y;
        dy = MIN( max_dy, size.height - (ksize.height - anchor.y - 1) - y );

        if( y < anchor.y || dy < anchor.y )
        {
            // stripe touches the top or bottom image edge: synthesize a
            // replicated border into the top_bottom buffer first
            int ay = anchor.y;
            CvSize src_stripe_size = size;

            if( y < anchor.y )
            {
                src_y = 0;
                dy = MIN( anchor.y, size.height );
                src_stripe_size.height = MIN( dy + ksize.height - anchor.y - 1, size.height );
            }
            else
            {
                src_y = MAX( y - anchor.y, 0 );
                dy = size.height - y;
                src_stripe_size.height = MIN( dy + anchor.y, size.height );
                ay = MAX( anchor.y - y, 0 );
            }

            copy_border_func( src->data.ptr + src_y*src_step, src_step, src_stripe_size,
                              top_bottom->data.ptr, top_bottom_step,
                              cvSize(size.width, dy + ksize.height - 1), ay, 0 );
            vin = top_bottom;
            src_y = anchor.y;
        }

        // do vertical convolution
        IPPI_CALL( y_func( vin->data.ptr + src_y*vin->step,
                           vin->step ? vin->step : CV_STUB_STEP,
                           vout.data.ptr, vout.step, cvSize(size.width, dy),
                           ky, ksize.height, el_anchor.y ));

        // now it's time to copy the previously processed stripe to the input/output image
        if( src->data.ptr == dst->data.ptr )
        {
            for( i = 0; i < prev_dy; i++ )
                memcpy( dst->data.ptr + (y - prev_dy + i)*dst->step,
                        dst_buf->data.ptr + i*dst_buf->step, size.width*pix_size );
            if( y + dy < size.height )
            {
                hout = dst_buf;
                dst_y = 0;
            }
        }

        // create a border for every line by replicating the left-most/right-most elements
        for( i = 0; i < dy; i++ )
        {
            uchar* ptr = vout.data.ptr + i*vout.step;
            for( x = -1; x >= -anchor.x*pix_size; x-- )
                ptr[x] = ptr[x + pix_size];
            for( x = size.width*pix_size; x < (size.width+ksize.width-anchor.x-1)*pix_size; x++ )
                ptr[x] = ptr[x - pix_size];
        }

        // do horizontal convolution
        IPPI_CALL( x_func( vout.data.ptr, vout.step,
                           hout->data.ptr + dst_y*hout->step,
                           hout->step ? hout->step : CV_STUB_STEP,
                           cvSize(size.width, dy), kx, ksize.width, el_anchor.x ));
        prev_dy = dy;
    }

    result = 1;

    __END__;

    cvReleaseMat( &vout_hin );
    cvReleaseMat( &dst_buf );
    cvReleaseMat( &top_bottom );

    return result;
}
/* Computes the Moore-Penrose pseudo-inverse of srcarr via SVD and stores it
   in dstarr (which must be the transposed size of srcarr).  Returns the
   condition number derived from the singular values.  With CV_SVD_MODIFY_A
   the source matrix may be overwritten; otherwise a working copy is made
   inside the scratch buffer. */
CV_IMPL double
cvPseudoInv( CvArr* srcarr, CvArr* dstarr, int flags )
{
    uchar* buffer = 0;
    int local_alloc = 0;           // non-zero when buffer came from alloca()
    double condition_number = 0;

    CV_FUNCNAME( "cvPseudoInv" );

    __BEGIN__;

    CvMat astub, *a = (CvMat*)srcarr;
    CvMat bstub, *b = (CvMat*)dstarr;
    CvMat ustub, *u = &ustub;
    CvMat vstub, *v = &vstub;
    CvMat tmat;
    uchar* tw = 0;                 // singular values within the scratch buffer
    int type, n, m, nm, mn;
    int buf_size, pix_size;

    if( !CV_IS_ARR( a ))
        CV_CALL( a = cvGetMat( a, &astub ));

    if( !CV_IS_ARR( b ))
        CV_CALL( b = cvGetMat( b, &bstub ));

    if( !CV_ARE_TYPES_EQ( a, b ))
        CV_ERROR( CV_StsUnmatchedSizes, "" );

    n = a->width;
    m = a->height;

    nm = MIN( n, m );
    mn = MAX( n, m );

    // the destination must have transposed dimensions
    if( n != b->height || m != b->width )
        CV_ERROR( CV_StsUnmatchedSizes, "" );

    type = CV_ARR_TYPE( a->type );
    pix_size = icvPixSize[type];

    // scratch layout: singular values + work area + U + V (+ optional copy of A)
    buf_size = nm*2 + mn + m*mn + n*n;

    if( !(flags & CV_SVD_MODIFY_A) )
        buf_size += m*n;

    buf_size *= pix_size;

    if( buf_size <= CV_MAX_LOCAL_SIZE )
    {
        buffer = (uchar*)alloca( buf_size );
        local_alloc = 1;
    }
    else
    {
        CV_CALL( buffer = (uchar*)cvAlloc( buf_size ));
    }

    if( !(flags & CV_SVD_MODIFY_A) )
    {
        // work on a copy stored at the tail of the scratch buffer
        cvInitMatHeader( &tmat, a->height, a->width, type,
                         buffer + buf_size - n*m*pix_size );
        cvCopy( a, &tmat );
        a = &tmat;
    }

    tw = buffer + (nm + mn)*pix_size;

    cvInitMatHeader( u, m, m, type, tw + nm*pix_size );
    cvInitMatHeader( v, n, n, type, u->data.ptr + m*mn*pix_size );

    if( type == CV_32FC1 )
    {
        IPPI_CALL( icvSVD_32f( a->data.fl, a->step/sizeof(float), (float*)tw,
                               u->data.fl, u->step/sizeof(float),
                               v->data.fl, v->step/sizeof(float),
                               icvGetMatSize(a), (float*)buffer ));
    }
    else if( type == CV_64FC1 )
    {
        IPPI_CALL( icvSVD_64f( a->data.db, a->step/sizeof(double), (double*)tw,
                               u->data.db, u->step/sizeof(double),
                               v->data.db, v->step/sizeof(double),
                               icvGetMatSize(a), (double*)buffer ));
    }
    else
    {
        CV_ERROR( CV_StsUnsupportedFormat, "" );
    }

    cvT( v, v );
    cvGetRow( u, &tmat, 0 );

    // scale each row of U by the reciprocal singular value (0 when tiny)
    // NOTE(review): the loop index runs to nm-1 = MIN(m,n)-1 but the
    // condition-number branch tests i == mn-1 = MAX(m,n)-1, so for
    // non-square inputs condition_number may never be assigned — verify intent.
    if( type == CV_32FC1 )
    {
        for( int i = 0; i < nm; i++ )
        {
            double t = ((float*)tw)[i];
            tmat.data.ptr = u->data.ptr + i*u->step;
            t = t > FLT_EPSILON ? 1./t : 0;

            if( i == mn - 1 )
                condition_number = t != 0 ? ((float*)tw)[0]*t : DBL_MAX;

            cvScale( &tmat, &tmat, t );
        }
    }
    else
    {
        for( int i = 0; i < nm; i++ )
        {
            double t = ((double*)tw)[i];
            tmat.data.ptr = u->data.ptr + i*u->step;
            t = t > DBL_EPSILON ? 1./t : 0;

            if( i == mn - 1 )
                condition_number = t != 0 ? ((double*)tw)[0]*t : DBL_MAX;

            cvScale( &tmat, &tmat, t );
        }
    }

    u->height = n;
    if( n > m )
    {
        // zero-fill the rows beyond the rank of A
        cvGetSubArr( u, &tmat, cvRect( 0, m, m, n - m ));
        cvSetZero( &tmat );
    }

    cvMatMulAdd( v, u, 0, b );

    CV_CHECK_NANS( b );

    __END__;

    if( buffer && !local_alloc )
        cvFree( (void**)&buffer );

    return condition_number;
}
/* Computes dst(i,j) = abs(src(i,j) - scalar) for every element/channel.
 * src and dst must have identical type and size; COI is not supported.
 * Dispatches to a per-depth implementation through a lazily initialized
 * function table. */
CV_IMPL void
cvAbsDiffS( const void* srcarr, void* dstarr, CvScalar scalar )
{
    static CvFuncTable adiffs_tab;   // per-depth implementation table
    static int inittab = 0;

    CV_FUNCNAME( "cvAbsDiffS" );

    __BEGIN__;

    int coi1 = 0, coi2 = 0;
    int type, sctype;
    CvMat srcstub, *src = (CvMat*)srcarr;
    CvMat dststub, *dst = (CvMat*)dstarr;
    /* BUGFIX: src_step/dst_step were previously initialized from src->step
       and dst->step at declaration time, i.e. before cvGetMat() normalized
       the headers; an IplImage* argument was then read through the wrong
       structure layout.  They are now assigned only after cvGetMat(). */
    int src_step, dst_step;
    double buf[12];
    CvSize size;

    if( !inittab )
    {
        icvInitAbsDiffCTable( &adiffs_tab );
        inittab = 1;
    }

    CV_CALL( src = cvGetMat( src, &srcstub, &coi1 ));
    CV_CALL( dst = cvGetMat( dst, &dststub, &coi2 ));

    if( coi1 != 0 || coi2 != 0 )
        CV_ERROR( CV_BadCOI, "" );

    if( !CV_ARE_TYPES_EQ(src, dst) )
        CV_ERROR_FROM_CODE( CV_StsUnmatchedFormats );

    if( !CV_ARE_SIZES_EQ(src, dst) )
        CV_ERROR_FROM_CODE( CV_StsUnmatchedSizes );

    sctype = type = CV_MAT_TYPE( src->type );
    if( CV_MAT_DEPTH(type) < CV_32S )
        sctype = (type & CV_MAT_CN_MASK) | CV_32SC1;  // widen scalar for small depths

    size = icvGetMatSize( src );
    size.width *= CV_MAT_CN( type );
    src_step = src->step;
    dst_step = dst->step;

    if( CV_IS_MAT_CONT( src->type & dst->type ))
    {
        // both continuous: process as one long row
        size.width *= size.height;
        size.height = 1;
        src_step = dst_step = CV_STUB_STEP;
    }

    CV_CALL( cvScalarToRawData( &scalar, buf, sctype, 1 ));

    {
        CvFunc2D_2A1P func = (CvFunc2D_2A1P)
            (adiffs_tab.fn_2d[CV_MAT_DEPTH(type)]);

        if( !func )
            CV_ERROR( CV_StsUnsupportedFormat, "" );

        IPPI_CALL( func( src->data.ptr, src_step, dst->data.ptr,
                         dst_step, size, buf ));
    }

    __END__;
}
/* SVD back-substitution: solves/reconstructs x from the decomposition
   A = U W V^T produced by cvSVD.  When b is NULL, x receives the
   pseudo-inverse-times-identity result (b is treated as an empty matrix).
   flags CV_SVD_U_T / CV_SVD_V_T indicate that u / v are already transposed;
   otherwise temporary transposed copies are built in the scratch buffer. */
CV_IMPL void
cvSVBkSb( const CvArr* warr, const CvArr* uarr,
          const CvArr* varr, const CvArr* barr,
          CvArr* xarr, int flags )
{
    uchar* buffer = 0;
    int local_alloc = 0;

    CV_FUNCNAME( "cvSVBkSb" );

    __BEGIN__;

    CvMat wstub, *w = (CvMat*)warr;
    CvMat bstub, *b = (CvMat*)barr;
    CvMat xstub, *x = (CvMat*)xarr;
    CvMat ustub, ustub2, *u = (CvMat*)uarr;
    CvMat vstub, vstub2, *v = (CvMat*)varr;
    uchar* tw = 0;                       /* packed singular values */
    int type;
    int temp_u = 0, temp_v = 0;          /* need transposed scratch copies? */
    /* byte offsets of the sub-regions inside the single scratch buffer */
    int u_buf_offset = 0, v_buf_offset = 0, w_buf_offset = 0, t_buf_offset = 0;
    int buf_size = 0, pix_size;
    int m, n, nm;
    int u_rows, u_cols;
    int v_rows, v_cols;

    if( !CV_IS_MAT( w )) CV_CALL( w = cvGetMat( w, &wstub ));
    if( !CV_IS_MAT( u )) CV_CALL( u = cvGetMat( u, &ustub ));
    if( !CV_IS_MAT( v )) CV_CALL( v = cvGetMat( v, &vstub ));
    if( !CV_IS_MAT( x )) CV_CALL( x = cvGetMat( x, &xstub ));

    if( !CV_ARE_TYPES_EQ( w, u ) || !CV_ARE_TYPES_EQ( w, v ) || !CV_ARE_TYPES_EQ( w, x ))
        CV_ERROR( CV_StsUnmatchedFormats, "All matrices must have the same type" );

    type = CV_MAT_TYPE( w->type );
    pix_size = CV_ELEM_SIZE(type);

    /* u_rows/u_cols describe the logical (transposed) orientation used below */
    if( !(flags & CV_SVD_U_T) )
    {
        temp_u = 1;
        u_buf_offset = buf_size;
        buf_size += u->cols*u->rows*pix_size;
        u_rows = u->rows;
        u_cols = u->cols;
    }
    else
    {
        u_rows = u->cols;
        u_cols = u->rows;
    }

    if( !(flags & CV_SVD_V_T) )
    {
        temp_v = 1;
        v_buf_offset = buf_size;
        buf_size += v->cols*v->rows*pix_size;
        v_rows = v->rows;
        v_cols = v->cols;
    }
    else
    {
        v_rows = v->cols;
        v_cols = v->rows;
    }

    m = u_rows;
    n = v_rows;
    nm = MIN(n,m);

    if( (u_rows != u_cols && v_rows != v_cols) || x->rows != v_rows )
        CV_ERROR( CV_StsBadSize, "V or U matrix must be square" );

    /* w may be either a 1d vector of MIN(m,n) singular values, or the full
       diagonal matrix; in the vector+continuous case it is used in place */
    if( (w->rows == 1 || w->cols == 1) && w->rows + w->cols - 1 == nm )
    {
        if( CV_IS_MAT_CONT(w->type) )
            tw = w->data.ptr;
        else
        {
            w_buf_offset = buf_size;
            buf_size += nm*pix_size;
        }
    }
    else
    {
        if( w->cols != v_cols || w->rows != u_cols )
            CV_ERROR( CV_StsBadSize,
            "W must be 1d array of MIN(m,n) elements or "
            "matrix which size matches to U and V" );
        w_buf_offset = buf_size;
        buf_size += nm*pix_size;
    }

    if( b )
    {
        if( !CV_IS_MAT( b )) CV_CALL( b = cvGetMat( b, &bstub ));
        if( !CV_ARE_TYPES_EQ( w, b ))
            CV_ERROR( CV_StsUnmatchedFormats, "All matrices must have the same type" );
        if( b->cols != x->cols || b->rows != m )
            CV_ERROR( CV_StsUnmatchedSizes, "b matrix must have (m x x->cols) size" );
    }
    else
    {
        /* no right-hand side: pass a zeroed header (cols == 0) downstream */
        b = &bstub;
        memset( b, 0, sizeof(*b));
    }

    /* per-row temporary used by the back-substitution kernel */
    t_buf_offset = buf_size;
    buf_size += (MAX(m,n) + b->cols)*pix_size;

    if( buf_size <= CV_MAX_LOCAL_SIZE )
    {
        buffer = (uchar*)cvStackAlloc( buf_size );
        local_alloc = 1;
    }
    else
        CV_CALL( buffer = (uchar*)cvAlloc( buf_size ));

    if( temp_u )
    {
        cvInitMatHeader( &ustub2, u_cols, u_rows, type, buffer + u_buf_offset );
        cvT( u, &ustub2 );
        u = &ustub2;
    }

    if( temp_v )
    {
        cvInitMatHeader( &vstub2, v_cols, v_rows, type, buffer + v_buf_offset );
        cvT( v, &vstub2 );
        v = &vstub2;
    }

    if( !tw )
    {
        /* gather diagonal (or strided vector) of w into a packed array;
           'shift' walks one extra element per row when w is a full matrix */
        int i, shift = w->cols > 1 ? pix_size : 0;
        tw = buffer + w_buf_offset;
        for( i = 0; i < nm; i++ )
            memcpy( tw + i*pix_size, w->data.ptr + i*(w->step + shift), pix_size );
    }

    if( type == CV_32FC1 )
    {
        icvSVBkSb_32f( m, n, (float*)tw, u->data.fl, u->step/sizeof(float),
                       v->data.fl, v->step/sizeof(float),
                       b->data.fl, b->step/sizeof(float), b->cols,
                       x->data.fl, x->step/sizeof(float),
                       (float*)(buffer + t_buf_offset) );
    }
    else if( type == CV_64FC1 )
    {
        icvSVBkSb_64f( m, n, (double*)tw, u->data.db, u->step/sizeof(double),
                       v->data.db, v->step/sizeof(double),
                       b->data.db, b->step/sizeof(double), b->cols,
                       x->data.db, x->step/sizeof(double),
                       (double*)(buffer + t_buf_offset) );
    }
    else
    {
        CV_ERROR( CV_StsUnsupportedFormat, "" );
    }

    __END__;

    if( buffer && !local_alloc )
        cvFree( &buffer );
}
/* Normalizes src into dst.
   - CV_MINMAX: affine-maps the value range of src onto [min(a,b), max(a,b)].
   - CV_L1/CV_L2/CV_C: scales src so the selected norm of dst equals 1
     (parameters a/b are not used on this path); small continuous single-channel
     float/double arrays take an inline fast path, everything else goes through
     cvNorm + cvConvertScale.
   When a mask is given, the result is computed into a temporary and copied
   through the mask. */
CV_IMPL void
cvNormalize( const CvArr* src, CvArr* dst,
             double a, double b, int norm_type, const CvArr* mask )
{
    CvMat* tmp = 0;

    CV_FUNCNAME( "cvNormalize" );

    __BEGIN__;

    double scale, shift;

    if( norm_type == CV_MINMAX )
    {
        double smin = 0, smax = 0;
        double dmin = MIN( a, b ), dmax = MAX( a, b );
        cvMinMaxLoc( src, &smin, &smax, 0, 0, mask );
        /* degenerate (constant) source maps everything to dmin */
        scale = (dmax - dmin)*(smax - smin > DBL_EPSILON ? 1./(smax - smin) : 0);
        shift = dmin - smin*scale;
    }
    else if( norm_type == CV_L2 || norm_type == CV_L1 || norm_type == CV_C )
    {
        CvMat *s = (CvMat*)src, *d = (CvMat*)dst;

        /* inline fast path: small, continuous, unmasked, matching 1-channel
           float/double arrays — computes norm and scales in one pass */
        if( CV_IS_MAT(s) && CV_IS_MAT(d) && CV_IS_MAT_CONT(s->type & d->type) &&
            CV_ARE_TYPES_EQ(s,d) && CV_ARE_SIZES_EQ(s,d) && !mask &&
            s->cols*s->rows <= CV_MAX_INLINE_MAT_OP_SIZE*CV_MAX_INLINE_MAT_OP_SIZE )
        {
            int i, len = s->cols*s->rows;
            double norm = 0, v;

            if( CV_MAT_TYPE(s->type) == CV_32FC1 )
            {
                const float* sptr = s->data.fl;
                float* dptr = d->data.fl;

                if( norm_type == CV_L2 )
                {
                    for( i = 0; i < len; i++ )
                    {
                        v = sptr[i];
                        norm += v*v;
                    }
                    norm = sqrt(norm);
                }
                else if( norm_type == CV_L1 )
                    for( i = 0; i < len; i++ )
                    {
                        v = fabs((double)sptr[i]);
                        norm += v;
                    }
                else  /* CV_C: max-abs */
                    for( i = 0; i < len; i++ )
                    {
                        v = fabs((double)sptr[i]);
                        norm = MAX(norm,v);
                    }

                norm = norm > DBL_EPSILON ? 1./norm : 0.;
                for( i = 0; i < len; i++ )
                    dptr[i] = (float)(sptr[i]*norm);
                EXIT;   /* skip the generic path below */
            }

            if( CV_MAT_TYPE(s->type) == CV_64FC1 )
            {
                const double* sptr = s->data.db;
                double* dptr = d->data.db;

                if( norm_type == CV_L2 )
                {
                    for( i = 0; i < len; i++ )
                    {
                        v = sptr[i];
                        norm += v*v;
                    }
                    norm = sqrt(norm);
                }
                else if( norm_type == CV_L1 )
                    for( i = 0; i < len; i++ )
                    {
                        v = fabs(sptr[i]);
                        norm += v;
                    }
                else
                    for( i = 0; i < len; i++ )
                    {
                        v = fabs(sptr[i]);
                        norm = MAX(norm,v);
                    }

                norm = norm > DBL_EPSILON ? 1./norm : 0.;
                for( i = 0; i < len; i++ )
                    dptr[i] = sptr[i]*norm;
                EXIT;
            }
        }

        /* generic path: compute the norm, then scale */
        scale = cvNorm( src, 0, norm_type, mask );
        scale = scale > DBL_EPSILON ? 1./scale : 0.;
        shift = 0;
    }
    else
        CV_ERROR( CV_StsBadArg, "Unknown/unsupported norm type" );

    if( !mask )
        cvConvertScale( src, dst, scale, shift );
    else
    {
        /* masked write: convert into a temporary, then copy through mask */
        CvMat stub, *dmat;
        CV_CALL( dmat = cvGetMat(dst, &stub));
        CV_CALL( tmp = cvCreateMat(dmat->rows, dmat->cols, dmat->type) );
        cvConvertScale( src, tmp, scale, shift );
        cvCopy( tmp, dst, mask );
    }

    __END__;

    if( tmp )
        cvReleaseMat( &tmp );
}
/* Computes an Inverse Perspective Mapping (bird's-eye view) of inImage into
   outImage: projects the image ROI (clipped against the vanishing point) onto
   the ground plane, builds a regular x-y grid there, maps the grid back into
   image coordinates and samples the input via interpolation<>.
   ipmInfo is both input (ROI/vpPortion) and output (limits, scales, size).
   NOTE(review): the outPoints parameter is never used in this function —
   confirm whether it should receive the out-of-image grid points. */
void mcvGetIPM(const CvMat* inImage, CvMat* outImage,
               IPMInfo *ipmInfo, const CameraInfo *cameraInfo,
               list<CvPoint> *outPoints)
{
  //check input images types: both must share the same type, which must be
  //the project's float or int matrix type; otherwise report and abort
  //CvMat inMat, outMat;
  //cvGetMat(inImage, &inMat);
  //cvGetMat(outImage, &outMat);
  //cout << CV_MAT_TYPE(inImage->type) << " " << CV_MAT_TYPE(FLOAT_MAT_TYPE) << " " << CV_MAT_TYPE(INT_MAT_TYPE)<<"\n";
  if (!(CV_ARE_TYPES_EQ(inImage, outImage) &&
      (CV_MAT_TYPE(inImage->type)==CV_MAT_TYPE(FLOAT_MAT_TYPE) ||
      (CV_MAT_TYPE(inImage->type)==CV_MAT_TYPE(INT_MAT_TYPE)))))
  {
    if(CV_ARE_TYPES_EQ(inImage, outImage)){
      cerr << "Types are equal" << CV_MAT_TYPE(inImage->type);
    }else{
      cerr << "Types are NOT equal" << CV_MAT_TYPE(inImage->type);
    }
    cerr << "Unsupported image types in mcvGetIPM";
    exit(1);
  }

  //get size of input image
  FLOAT u, v;
  v = inImage->height;
  u = inImage->width;

  //get the vanishing point (clamped to the top of the image)
  FLOAT_POINT2D vp;
  vp = mcvGetVanishingPoint(cameraInfo);
  vp.y = MAX(0, vp.y);
  //vp.y = 30;

  //get extent of the image in the xfyf plane: clip the requested ROI to the
  //image and keep it strictly below the vanishing point by a margin 'eps'
  FLOAT_MAT_ELEM_TYPE eps = ipmInfo->vpPortion * v;//VP_PORTION*v;
  ipmInfo->ipmLeft = MAX(0, ipmInfo->ipmLeft);
  ipmInfo->ipmRight = MIN(u-1, ipmInfo->ipmRight);
  ipmInfo->ipmTop = MAX(vp.y+eps, ipmInfo->ipmTop);
  ipmInfo->ipmBottom = MIN(v-1, ipmInfo->ipmBottom);
  //four boundary points (u-row then v-row) delimiting the trapezoid to map
  FLOAT_MAT_ELEM_TYPE uvLimitsp[] = {vp.x, ipmInfo->ipmRight, ipmInfo->ipmLeft, vp.x,
                                     ipmInfo->ipmTop, ipmInfo->ipmTop, ipmInfo->ipmTop,
                                     ipmInfo->ipmBottom};
  //{vp.x, u, 0, vp.x,
  //vp.y+eps, vp.y+eps, vp.y+eps, v};
  CvMat uvLimits = cvMat(2, 4, FLOAT_MAT_TYPE, uvLimitsp);
  //get these points on the ground plane
  CvMat * xyLimitsp = cvCreateMat(2, 4, FLOAT_MAT_TYPE);
  CvMat xyLimits = *xyLimitsp;
  mcvTransformImage2Ground(&uvLimits, &xyLimits,cameraInfo);
  //SHOW_MAT(xyLimitsp, "xyLImits");

  //get extent on the ground plane (min/max of x and y rows)
  CvMat row1, row2;
  cvGetRow(&xyLimits, &row1, 0);
  cvGetRow(&xyLimits, &row2, 1);
  double xfMax, xfMin, yfMax, yfMin;
  cvMinMaxLoc(&row1, (double*)&xfMin, (double*)&xfMax, 0, 0, 0);
  cvMinMaxLoc(&row2, (double*)&yfMin, (double*)&yfMax, 0, 0, 0);

  INT outRow = outImage->height;
  INT outCol = outImage->width;

  //ground-plane sampling step per output pixel
  FLOAT_MAT_ELEM_TYPE stepRow = (yfMax-yfMin)/outRow;
  FLOAT_MAT_ELEM_TYPE stepCol = (xfMax-xfMin)/outCol;

  //construct the grid to sample
  CvMat *xyGrid = cvCreateMat(2, outRow*outCol, FLOAT_MAT_TYPE);
  INT i, j;
  FLOAT_MAT_ELEM_TYPE x, y;

  //fill it with x-y values on the ground plane in world frame; y decreases
  //down the output image (top output row = farthest ground y)
  for (i=0, y=yfMax-.5*stepRow; i<outRow; i++, y-=stepRow)
    for (j=0, x=xfMin+.5*stepCol; j<outCol; j++, x+=stepCol)
    {
      CV_MAT_ELEM(*xyGrid, FLOAT_MAT_ELEM_TYPE, 0, i*outCol+j) = x;
      CV_MAT_ELEM(*xyGrid, FLOAT_MAT_ELEM_TYPE, 1, i*outCol+j) = y;
    }
  //get their pixel values in image frame
  CvMat *uvGrid = cvCreateMat(2, outRow*outCol, FLOAT_MAT_TYPE);
  mcvTransformGround2Image(xyGrid, uvGrid, cameraInfo);
  //now loop and find the nearest pixel value for each position
  //that's inside the image, otherwise put it zero
  //generic loop to work for both float and int matrix types
  if (CV_MAT_TYPE(inImage->type)==FLOAT_MAT_TYPE)
  {
    //test<int>();
    //MCV_GET_IPM(FLOAT_MAT_ELEM_TYPE)
    interpolation<FLOAT_MAT_ELEM_TYPE>(inImage, outImage, uvGrid,outCol,
                                       outRow, ipmInfo);
  }
  else
  {
    //test<double>();
    //MCV_GET_IPM(INT_MAT_ELEM_TYPE)
    interpolation<INT_MAT_ELEM_TYPE>(inImage, outImage, uvGrid,outCol,
                                     outRow, ipmInfo);
  }

  //return the ipm info: ground-plane extents corresponding to the output
  //corners (yLimits intentionally reversed: y decreases down the image)
  ipmInfo->xLimits[0] = CV_MAT_ELEM(*xyGrid, FLOAT_MAT_ELEM_TYPE, 0, 0);
  ipmInfo->xLimits[1] =
    CV_MAT_ELEM(*xyGrid, FLOAT_MAT_ELEM_TYPE, 0, (outRow-1)*outCol+outCol-1);
  ipmInfo->yLimits[1] = CV_MAT_ELEM(*xyGrid, FLOAT_MAT_ELEM_TYPE, 1, 0);
  ipmInfo->yLimits[0] =
    CV_MAT_ELEM(*xyGrid, FLOAT_MAT_ELEM_TYPE, 1, (outRow-1)*outCol+outCol-1);
  ipmInfo->xScale = 1/stepCol;
  ipmInfo->yScale = 1/stepRow;
  ipmInfo->width = outCol;
  ipmInfo->height = outRow;

  //clean
  cvReleaseMat(&xyLimitsp);
  cvReleaseMat(&xyGrid);
  cvReleaseMat(&uvGrid);
}
/* Compares templ against every (W-w+1)x(H-h+1) window of img using the
   requested method (CCORR/CCOEFF/SQDIFF, plain or normalized) and writes a
   32f similarity map into result.  Normalized single-channel cases may be
   dispatched to IPP; otherwise a cross-correlation is computed and then
   corrected per-window using integral images (sum/sqsum). */
CV_IMPL void
cvMatchTemplate( const CvArr* _img, const CvArr* _templ, CvArr* _result,
                 int method )
{
    CvMat* sum = 0;
    CvMat* sqsum = 0;

    CV_FUNCNAME( "cvMatchTemplate" );

    __BEGIN__;

    int coi1 = 0, coi2 = 0;
    int depth, cn;
    int i, j, k;
    CvMat stub, *img = (CvMat*)_img;
    CvMat tstub, *templ = (CvMat*)_templ;
    CvMat rstub, *result = (CvMat*)_result;
    CvScalar templ_mean = cvScalarAll(0);
    double templ_norm = 0, templ_sum2 = 0;

    int idx = 0, idx2 = 0;
    double *p0, *p1, *p2, *p3;   /* corners of the window in the sum image */
    double *q0, *q1, *q2, *q3;   /* corners of the window in the sqsum image */
    double inv_area;
    int sum_step, sqsum_step;

    /* num_type: 0 - CCORR, 1 - CCOEFF (mean-corrected), 2 - SQDIFF */
    int num_type = method == CV_TM_CCORR || method == CV_TM_CCORR_NORMED ? 0 :
                   method == CV_TM_CCOEFF || method == CV_TM_CCOEFF_NORMED ? 1 : 2;
    int is_normed = method == CV_TM_CCORR_NORMED ||
                    method == CV_TM_SQDIFF_NORMED ||
                    method == CV_TM_CCOEFF_NORMED;

    CV_CALL( img = cvGetMat( img, &stub, &coi1 ));
    CV_CALL( templ = cvGetMat( templ, &tstub, &coi2 ));
    CV_CALL( result = cvGetMat( result, &rstub ));

    if( CV_MAT_DEPTH( img->type ) != CV_8U &&
        CV_MAT_DEPTH( img->type ) != CV_32F )
        CV_ERROR( CV_StsUnsupportedFormat,
        "The function supports only 8u and 32f data types" );

    /* BUGFIX: this is a type check — report CV_StsUnmatchedFormats
       (the original used CV_StsUnmatchedSizes with a "same type" message) */
    if( !CV_ARE_TYPES_EQ( img, templ ))
        CV_ERROR( CV_StsUnmatchedFormats,
        "image and template should have the same type" );

    if( CV_MAT_TYPE( result->type ) != CV_32FC1 )
        CV_ERROR( CV_StsUnsupportedFormat, "output image should have 32f type" );

    /* allow the arguments in either order: the smaller array is the template */
    if( img->rows < templ->rows || img->cols < templ->cols )
    {
        CvMat* t;
        CV_SWAP( img, templ, t );
    }

    if( result->rows != img->rows - templ->rows + 1 ||
        result->cols != img->cols - templ->cols + 1 )
        CV_ERROR( CV_StsUnmatchedSizes,
        "output image should be (W - w + 1)x(H - h + 1)" );

    if( method < CV_TM_SQDIFF || method > CV_TM_CCOEFF_NORMED )
        CV_ERROR( CV_StsBadArg, "unknown comparison method" );

    depth = CV_MAT_DEPTH(img->type);
    cn = CV_MAT_CN(img->type);

    /* NOTE(review): 'img->rows > templ->cols' below looks like a typo for
       'img->rows > templ->rows'; kept as-is since it only gates the IPP
       fast path — confirm against the IPP function requirements. */
    if( is_normed && cn == 1 && templ->rows > 8 && templ->cols > 8 &&
        img->rows > templ->cols && img->cols > templ->cols )
    {
        CvTemplMatchIPPFunc ipp_func =
            depth == CV_8U ?
            (method == CV_TM_SQDIFF_NORMED ?
                (CvTemplMatchIPPFunc)icvSqrDistanceValid_Norm_8u32f_C1R_p :
             method == CV_TM_CCORR_NORMED ?
                (CvTemplMatchIPPFunc)icvCrossCorrValid_Norm_8u32f_C1R_p :
                (CvTemplMatchIPPFunc)icvCrossCorrValid_NormLevel_8u32f_C1R_p) :
            (method == CV_TM_SQDIFF_NORMED ?
                (CvTemplMatchIPPFunc)icvSqrDistanceValid_Norm_32f_C1R_p :
             method == CV_TM_CCORR_NORMED ?
                (CvTemplMatchIPPFunc)icvCrossCorrValid_Norm_32f_C1R_p :
                (CvTemplMatchIPPFunc)icvCrossCorrValid_NormLevel_32f_C1R_p);

        if( ipp_func )
        {
            CvSize img_size = cvGetMatSize(img), templ_size = cvGetMatSize(templ);

            IPPI_CALL( ipp_func( img->data.ptr,
                           img->step ? img->step : CV_STUB_STEP,
                           img_size, templ->data.ptr,
                           templ->step ? templ->step : CV_STUB_STEP,
                           templ_size, result->data.ptr,
                           result->step ? result->step : CV_STUB_STEP ));

            /* clamp rounding overshoot of the normalized result to [-1,1] */
            for( i = 0; i < result->rows; i++ )
            {
                float* rrow = (float*)(result->data.ptr + i*result->step);
                for( j = 0; j < result->cols; j++ )
                {
                    if( fabs(rrow[j]) > 1. )
                        rrow[j] = rrow[j] < 0 ? -1.f : 1.f;
                }
            }
            EXIT;
        }
    }

    /* generic path: start from plain cross-correlation */
    CV_CALL( icvCrossCorr( img, templ, result ));

    if( method == CV_TM_CCORR )
        EXIT;

    inv_area = 1./((double)templ->rows * templ->cols);

    CV_CALL( sum = cvCreateMat( img->rows + 1, img->cols + 1,
                                CV_MAKETYPE( CV_64F, cn )));
    if( method == CV_TM_CCOEFF )
    {
        CV_CALL( cvIntegral( img, sum, 0, 0 ));
        CV_CALL( templ_mean = cvAvg( templ ));
        q0 = q1 = q2 = q3 = 0;
    }
    else
    {
        CvScalar _templ_sdv = cvScalarAll(0);
        CV_CALL( sqsum = cvCreateMat( img->rows + 1, img->cols + 1,
                                      CV_MAKETYPE( CV_64F, cn )));
        CV_CALL( cvIntegral( img, sum, sqsum, 0 ));
        CV_CALL( cvAvgSdv( templ, &templ_mean, &_templ_sdv ));

        templ_norm = CV_SQR(_templ_sdv.val[0]) + CV_SQR(_templ_sdv.val[1]) +
                     CV_SQR(_templ_sdv.val[2]) + CV_SQR(_templ_sdv.val[3]);

        /* constant template under CCOEFF_NORMED: define the result as 1 */
        if( templ_norm < DBL_EPSILON && method == CV_TM_CCOEFF_NORMED )
        {
            cvSet( result, cvScalarAll(1.) );
            EXIT;
        }

        templ_sum2 = templ_norm +
                     CV_SQR(templ_mean.val[0]) + CV_SQR(templ_mean.val[1]) +
                     CV_SQR(templ_mean.val[2]) + CV_SQR(templ_mean.val[3]);

        if( num_type != 1 )
        {
            templ_mean = cvScalarAll(0);
            templ_norm = templ_sum2;
        }

        templ_sum2 /= inv_area;
        templ_norm = sqrt(templ_norm);
        templ_norm /= sqrt(inv_area); // care of accuracy here

        q0 = (double*)sqsum->data.ptr;
        q1 = q0 + templ->cols*cn;
        q2 = (double*)(sqsum->data.ptr + templ->rows*sqsum->step);
        q3 = q2 + templ->cols*cn;
    }

    p0 = (double*)sum->data.ptr;
    p1 = p0 + templ->cols*cn;
    p2 = (double*)(sum->data.ptr + templ->rows*sum->step);
    p3 = p2 + templ->cols*cn;

    sum_step = sum ? sum->step / sizeof(double) : 0;
    sqsum_step = sqsum ? sqsum->step / sizeof(double) : 0;

    /* per-window correction of the raw cross-correlation using the
       four-corner trick on the integral images */
    for( i = 0; i < result->rows; i++ )
    {
        float* rrow = (float*)(result->data.ptr + i*result->step);
        idx = i * sum_step;
        idx2 = i * sqsum_step;

        for( j = 0; j < result->cols; j++, idx += cn, idx2 += cn )
        {
            double num = rrow[j], t;
            double wnd_mean2 = 0, wnd_sum2 = 0;

            if( num_type == 1 )  /* CCOEFF: subtract window-mean term */
            {
                for( k = 0; k < cn; k++ )
                {
                    t = p0[idx+k] - p1[idx+k] - p2[idx+k] + p3[idx+k];
                    wnd_mean2 += CV_SQR(t);
                    num -= t*templ_mean.val[k];
                }
                wnd_mean2 *= inv_area;
            }

            if( is_normed || num_type == 2 )
            {
                for( k = 0; k < cn; k++ )
                {
                    t = q0[idx2+k] - q1[idx2+k] - q2[idx2+k] + q3[idx2+k];
                    wnd_sum2 += t;
                }

                if( num_type == 2 )  /* SQDIFF from CCORR via expansion */
                    num = wnd_sum2 - 2*num + templ_sum2;
            }

            if( is_normed )
            {
                t = sqrt(MAX(wnd_sum2 - wnd_mean2,0))*templ_norm;
                if( t > DBL_EPSILON )
                {
                    num /= t;
                    if( fabs(num) > 1. )
                        num = num > 0 ? 1 : -1;
                }
                else
                    num = method != CV_TM_SQDIFF_NORMED || num < DBL_EPSILON ? 0 : 1;
            }

            rrow[j] = (float)num;
        }
    }

    __END__;

    cvReleaseMat( &sum );
    cvReleaseMat( &sqsum );
}
void OptFlowEMD::calculate_flow(IplImage* imageT, IplImage* imageTMinus1, IplImage* velx, IplImage* vely, IplImage* abs){ #ifdef __CV_BEGIN__ __CV_BEGIN__ #else __BEGIN__ #endif CV_FUNCNAME( "OptFlowGenGrad::calculate_flow" ); CvMat stubA, *srcA = (CvMat*)imageT; // stubA takes the new header data for the matrix according to ROI CvMat stubB, *srcB = (CvMat*)imageTMinus1; CvMat stubx, *vel_x = (CvMat*)velx; CvMat stuby, *vel_y = (CvMat*)vely; CvMat stubAbs, *abs_ = NULL; if (abs != NULL) abs_ = (CvMat*)abs; // see GetMat function doc: This returns a matrix header with the current image ROI! // this gives basically a view on the ROI, stubA takes the header data of the matrix // srcA is pointed to this new 'augmented' data-header CV_CALL( srcA = cvGetMat( srcA, &stubA )); CV_CALL( srcB = cvGetMat( srcB, &stubB )); CV_CALL( vel_x = cvGetMat( vel_x, &stubx )); CV_CALL( vel_y = cvGetMat( vel_y, &stuby )); if (abs_ != NULL) CV_CALL( abs_ = cvGetMat ( abs_, &stubAbs )); if( !CV_ARE_TYPES_EQ( srcA, srcB )) CV_ERROR( CV_StsUnmatchedFormats, "Source images have different formats" ); if( !CV_ARE_TYPES_EQ( vel_x, vel_y )) CV_ERROR( CV_StsUnmatchedFormats, "Destination images have different formats" ); if (abs_ != NULL) if (!CV_ARE_TYPES_EQ( vel_x, abs_)) CV_ERROR( CV_StsUnmatchedFormats, "Destination images have different formats" ); if( !CV_ARE_SIZES_EQ( srcA, srcB ) || !CV_ARE_SIZES_EQ( vel_x, vel_y ) || !CV_ARE_SIZES_EQ( srcA, vel_x )) CV_ERROR( CV_StsUnmatchedSizes, "Some images have different sizes" ); if(abs_ != NULL) if (!CV_ARE_SIZES_EQ( srcA, abs_)) CV_ERROR( CV_StsUnmatchedSizes, "Some images have different sizes" ); if( CV_MAT_TYPE( srcA->type ) != CV_8UC1) CV_ERROR( CV_StsUnsupportedFormat, "Source images must have 8uC1 type"); if( CV_MAT_TYPE( vel_x->type ) != CV_32FC1 ) CV_ERROR( CV_StsUnsupportedFormat, "Destination images must have 32fC1 type" ); if( srcA->step != srcB->step || vel_x->step != vel_y->step) CV_ERROR( CV_BadStep, "source and destination images have 
different step" ); if (abs_ != NULL) if (vel_x->step != abs_->step) CV_ERROR( CV_BadStep, "source and destination images have different step" ); if (abs_ != NULL){ IPPI_CALL( calcOptFlowEMD( (uchar*)srcA->data.ptr, (uchar*)srcB->data.ptr, srcA->step, cvGetMatSize( srcA ), vel_x->data.fl, vel_y->data.fl, vel_x->step, abs_->data.fl)); } else{ IPPI_CALL( calcOptFlowEMD( (uchar*)srcA->data.ptr, (uchar*)srcB->data.ptr, srcA->step, cvGetMatSize( srcA ), vel_x->data.fl, vel_y->data.fl, vel_x->step, NULL)); } #ifdef __CV_END__ __CV_END__ #else __END__ #endif }
/* Computes eigenvalues and eigenvectors of a symmetric matrix using the
   Jacobi method.  srcarr must be square, continuous, 32fC1 or 64fC1;
   evectsarr is the same size as srcarr; evalsarr is a 1 x n or n x 1 vector.
   All three arrays must share the same element type.  eps is the Jacobi
   termination accuracy. */
CV_IMPL void
cvEigenVV( CvArr* srcarr, CvArr* evectsarr, CvArr* evalsarr, double eps )
{
    CV_FUNCNAME( "cvEigenVV" );

    __BEGIN__;

    CvMat src_hdr, *src = (CvMat*)srcarr;
    CvMat vec_hdr, *evects = (CvMat*)evectsarr;
    CvMat val_hdr, *evals = (CvMat*)evalsarr;

    /* promote whatever array kinds were passed to CvMat headers */
    if( !CV_IS_MAT( src ))
        CV_CALL( src = cvGetMat( src, &src_hdr ));
    if( !CV_IS_MAT( evects ))
        CV_CALL( evects = cvGetMat( evects, &vec_hdr ));
    if( !CV_IS_MAT( evals ))
        CV_CALL( evals = cvGetMat( evals, &val_hdr ));

    /* shape validation: square source, matching eigenvector matrix,
       eigenvalues as a row or column vector of length n */
    if( src->cols != src->rows )
        CV_ERROR( CV_StsUnmatchedSizes, "source is not quadratic matrix" );

    if( !CV_ARE_SIZES_EQ( src, evects) )
        CV_ERROR( CV_StsUnmatchedSizes, "eigenvectors matrix has inappropriate size" );

    if( (evals->rows != src->rows || evals->cols != 1) &&
        (evals->cols != src->rows || evals->rows != 1))
        CV_ERROR( CV_StsBadSize, "eigenvalues vector has inappropriate size" );

    if( !CV_ARE_TYPES_EQ( src, evects ) || !CV_ARE_TYPES_EQ( src, evals ))
        CV_ERROR( CV_StsUnmatchedFormats,
        "input matrix, eigenvalues and eigenvectors must have the same type" );

    /* the low-level kernels index the data as dense arrays */
    if( !CV_IS_MAT_CONT( src->type & evals->type & evects->type ))
        CV_ERROR( CV_BadStep, "all the matrices must be continuous" );

    /* dispatch on element type */
    switch( CV_MAT_TYPE(src->type) )
    {
    case CV_32FC1:
        IPPI_CALL( icvJacobiEigens_32f( src->data.fl, evects->data.fl,
                                        evals->data.fl, src->cols, (float)eps ));
        break;
    case CV_64FC1:
        IPPI_CALL( icvJacobiEigens_64d( src->data.db, evects->data.db,
                                        evals->data.db, src->cols, eps ));
        break;
    default:
        CV_ERROR( CV_StsUnsupportedFormat,
        "Only 32fC1 and 64fC1 types are supported" );
    }

    CV_CHECK_NANS( evects );
    CV_CHECK_NANS( evals );

    __END__;
}
/* dst(i) = abs(src1(i) - src2(i)) for 8u and 32s arrays.
   DSP-oriented port: on _TMS320C6X it uses packed SIMD intrinsics
   (_subabs4 / _ssub + _abs), otherwise scalar loops.
   NOTE(review): the loop walks step*height bytes of src1, which assumes all
   three arrays are continuous and share the same step (padding bytes are
   processed too); no explicit step/continuity check is performed — confirm
   callers guarantee this.  The C6X 8u loop also assumes the byte count is a
   multiple of 4 and 4-byte alignment. */
CV_IMPL void cvAbsDiff( const void* srcarr1, const void* srcarr2, void* dstarr )
{
    CV_FUNCNAME( "cvAbsDiff" );

    __BEGIN__;

    int coi1 = 0, coi2 = 0, coi3 = 0;
    CvMat srcstub1, *src1 = (CvMat*)srcarr1;
    CvMat srcstub2, *src2 = (CvMat*)srcarr2;
    CvMat dststub, *dst = (CvMat*)dstarr;
    CvSize size;
    int type, depth, pixel_size;

    CV_CALL( src1 = cvGetMat( src1, &srcstub1, &coi1 ));
    CV_CALL( src2 = cvGetMat( src2, &srcstub2, &coi2 ));
    CV_CALL( dst = cvGetMat( dst, &dststub, &coi3 ));

    if( coi1 != 0 || coi2 != 0 || coi3 != 0 )
        CV_ERROR( CV_BadCOI, "" );

    if( !CV_ARE_SIZES_EQ( src1, src2 ) )
        CV_ERROR_FROM_CODE( CV_StsUnmatchedSizes );

    type = CV_MAT_TYPE(src1->type);
    depth = CV_MAT_DEPTH(type);

    if( !CV_ARE_SIZES_EQ( src1, dst ))
        CV_ERROR_FROM_CODE( CV_StsUnmatchedSizes );

    if( !CV_ARE_TYPES_EQ( src1, src2 ))
        CV_ERROR_FROM_CODE( CV_StsUnmatchedFormats );

    if( !CV_ARE_TYPES_EQ( src1, dst ))
        CV_ERROR_FROM_CODE( CV_StsUnmatchedFormats );

    /* treat the whole array as one row of step*height bytes (see NOTE above) */
    size.width = src1->step * src1->height;
    size.height = 1;
    pixel_size = CV_DEPTH_BYTES[depth];

    if(depth == CV_8U)
    {
        int idx;
        unsigned char * p1;
        unsigned char * p2;
        unsigned char * pdst;
        p1 = src1->data.ptr ;
        p2 = src2->data.ptr;
        pdst = dst->data.ptr;
#ifdef _TMS320C6X
        /* packed 4-byte saturated abs-diff per iteration */
        for (idx = 0; idx < size.width/pixel_size; idx+=4)
        {
            _amem4(pdst) = _subabs4(_amem4_const(p1), _amem4_const(p2) );
            p1 += 4;
            p2 += 4;
            pdst += 4;
        }
#else
        for (idx = 0; idx < size.width/pixel_size; idx+=1)
        {
            (*pdst) = abs((*p1)-(*p2));
            pdst++;
            p1++;
            p2++;
        }
#endif
    }
    else if(depth == CV_32S)
    {
        int idx;
        int * p1;
        int * p2;
        int * pdst;
        p1 = src1->data.i;
        p2 = src2->data.i;
        pdst = dst->data.i;
        for (idx = 0; idx < size.width/pixel_size; idx++)
        {
#ifdef _TMS320C6X
            *pdst = _abs(_ssub(*p1, *p2));  /* saturated subtract, then abs */
#else
            *pdst = abs((*p1)-(*p2));
#endif
            p1 += 1;
            p2 += 1;
            pdst += 1;
        }
    }
    else
    {
        CV_ERROR( CV_StsUnsupportedFormat, "unsupported matrix type." );
    }

    __END__;
}
/* Convolves src with an arbitrary single-channel float/double kernel and
   writes the result to dst (same size and type).  anchor = (-1,-1) means the
   kernel center.  Uses the IPP stripe-filter path when available and the
   image is large enough; otherwise falls back to the generic filter engine.
   Note: the IPP path mirrors the (local copy of the) kernel in place because
   IPP implements correlation rather than convolution. */
CV_IMPL void
cvFilter2D( const CvArr* _src, CvArr* _dst, const CvMat* _kernel, CvPoint anchor )
{
    // below that approximate size OpenCV is faster
    const int ipp_lower_limit = 20;

    static CvFuncTable filter_tab;
    static int inittab = 0;
    CvFilterState *state = 0;
    float* kernel_data = 0;
    int local_alloc = 1;
    CvMat* temp = 0;

    CV_FUNCNAME( "cvFilter2D" );

    __BEGIN__;

    CvFilterFunc func = 0;
    int coi1 = 0, coi2 = 0;
    CvMat srcstub, *src = (CvMat*)_src;
    CvMat dststub, *dst = (CvMat*)_dst;
    CvSize size;
    int type, depth;
    int src_step, dst_step;
    CvMat kernel_hdr;
    const CvMat* kernel = _kernel;

    if( !inittab )
    {
        icvInitFilterTab( &filter_tab );
        inittab = 1;
    }

    CV_CALL( src = cvGetMat( src, &srcstub, &coi1 ));
    CV_CALL( dst = cvGetMat( dst, &dststub, &coi2 ));

    if( coi1 != 0 || coi2 != 0 )
        CV_ERROR( CV_BadCOI, "" );

    type = CV_MAT_TYPE( src->type );

    if( !CV_ARE_SIZES_EQ( src, dst ))
        CV_ERROR( CV_StsUnmatchedSizes, "" );

    if( !CV_ARE_TYPES_EQ( src, dst ))
        CV_ERROR( CV_StsUnmatchedFormats, "" );

    if( !CV_IS_MAT(kernel) ||
        (CV_MAT_TYPE(kernel->type) != CV_32F &&
         CV_MAT_TYPE(kernel->type) != CV_64F ))
        CV_ERROR( CV_StsBadArg, "kernel must be single-channel floating-point matrix" );

    if( anchor.x == -1 && anchor.y == -1 )
        anchor = cvPoint(kernel->cols/2,kernel->rows/2);

    if( (unsigned)anchor.x >= (unsigned)kernel->cols ||
        (unsigned)anchor.y >= (unsigned)kernel->rows )
        CV_ERROR( CV_StsOutOfRange, "anchor point is out of kernel" );

    /* make a 32f working copy of the kernel whenever it is not already a
       continuous 32f matrix, or when the IPP path (which mutates the kernel
       by mirroring it) may be taken */
    if( CV_MAT_TYPE(kernel->type) != CV_32FC1 ||
        !CV_IS_MAT_CONT(kernel->type) || icvFilter_8u_C1R_p )
    {
        int sz = kernel->rows*kernel->cols*sizeof(kernel_data[0]);
        if( sz < CV_MAX_LOCAL_SIZE )
            kernel_data = (float*)cvStackAlloc( sz );
        else
        {
            CV_CALL( kernel_data = (float*)cvAlloc( sz ));
            local_alloc = 0;
        }
        kernel_hdr = cvMat( kernel->rows, kernel->cols, CV_32F, kernel_data );
        if( CV_MAT_TYPE(kernel->type) == CV_32FC1 )
            cvCopy( kernel, &kernel_hdr );
        else
            cvConvertScale( kernel, &kernel_hdr, 1, 0 );
        kernel = &kernel_hdr;
    }

    size = cvGetMatSize( src );
    depth = CV_MAT_DEPTH(type);
    src_step = src->step;
    dst_step = dst->step ? dst->step : CV_STUB_STEP;

    /* IPP fast path for sufficiently large images */
    if( icvFilter_8u_C1R_p && (src->rows >= ipp_lower_limit || src->cols >= ipp_lower_limit) )
    {
        CvFilterIPPFunc ipp_func =
            type == CV_8UC1 ? (CvFilterIPPFunc)icvFilter_8u_C1R_p :
            type == CV_8UC3 ? (CvFilterIPPFunc)icvFilter_8u_C3R_p :
            type == CV_8UC4 ? (CvFilterIPPFunc)icvFilter_8u_C4R_p :
            type == CV_16SC1 ? (CvFilterIPPFunc)icvFilter_16s_C1R_p :
            type == CV_16SC3 ? (CvFilterIPPFunc)icvFilter_16s_C3R_p :
            type == CV_16SC4 ? (CvFilterIPPFunc)icvFilter_16s_C4R_p :
            type == CV_32FC1 ? (CvFilterIPPFunc)icvFilter_32f_C1R_p :
            type == CV_32FC3 ? (CvFilterIPPFunc)icvFilter_32f_C3R_p :
            type == CV_32FC4 ? (CvFilterIPPFunc)icvFilter_32f_C4R_p : 0;

        if( ipp_func )
        {
            CvSize el_size = { kernel->cols, kernel->rows };
            /* anchor mirrored to match the mirrored kernel below */
            CvPoint el_anchor = { el_size.width - anchor.x - 1,
                                  el_size.height - anchor.y - 1 };
            int stripe_size = 1 << 16; // the optimal value may depend on CPU cache,
                                       // overhead of current IPP code etc.
            const uchar* shifted_ptr;
            int i, j, y, dy = 0;
            int temp_step;

            // mirror the kernel around the center (correlation -> convolution)
            for( i = 0; i < (el_size.height+1)/2; i++ )
            {
                float* top_row = kernel->data.fl + el_size.width*i;
                float* bottom_row = kernel->data.fl +
                                    el_size.width*(el_size.height - i - 1);

                for( j = 0; j < (el_size.width+1)/2; j++ )
                {
                    float a = top_row[j], b = top_row[el_size.width - j - 1];
                    float c = bottom_row[j], d = bottom_row[el_size.width - j - 1];
                    top_row[j] = d;
                    top_row[el_size.width - j - 1] = c;
                    bottom_row[j] = b;
                    bottom_row[el_size.width - j - 1] = a;
                }
            }

            /* process the image in horizontal stripes through a bordered
               temporary buffer */
            CV_CALL( temp = icvIPPFilterInit( src, stripe_size, el_size ));

            shifted_ptr = temp->data.ptr +
                anchor.y*temp->step + anchor.x*CV_ELEM_SIZE(type);
            temp_step = temp->step ? temp->step : CV_STUB_STEP;

            for( y = 0; y < src->rows; y += dy )
            {
                dy = icvIPPFilterNextStripe( src, temp, y, el_size, anchor );
                IPPI_CALL( ipp_func( shifted_ptr, temp_step,
                    dst->data.ptr + y*dst_step, dst_step,
                    cvSize(src->cols, dy),
                    kernel->data.fl, el_size, el_anchor ));
            }
            EXIT;
        }
    }

    /* generic fallback: per-depth filter function from the dispatch table */
    CV_CALL( state = icvFilterInitAlloc( src->cols, cv32f, CV_MAT_CN(type),
                                         cvSize(kernel->cols, kernel->rows), anchor,
                                         kernel->data.ptr, ICV_GENERIC_KERNEL ));
    if( CV_MAT_CN(type) == 2 )
        CV_ERROR( CV_BadNumChannels, "Unsupported number of channels" );

    func = (CvFilterFunc)(filter_tab.fn_2d[depth]);

    if( !func )
        CV_ERROR( CV_StsUnsupportedFormat, "" );

    if( size.height == 1 )
        src_step = dst_step = CV_STUB_STEP;

    IPPI_CALL( func( src->data.ptr, src_step, dst->data.ptr,
                     dst_step, &size, state, 0 ));

    __END__;

    cvReleaseMat( &temp );
    icvFilterFree( &state );
    if( !local_alloc )
        cvFree( (void**)&kernel_data );
}
/* Mean-shift segmentation/filtering of an 8uC3 image over a Gaussian pyramid.
   sp0: spatial window radius (halved per pyramid level); sr: color window
   radius; max_level: pyramid depth (0..8); termcrit: per-pixel iteration
   limit / convergence epsilon.  The pyramid is processed top-down; at each
   lower level only pixels near color discontinuities of the upsampled result
   (selected via the mask) are re-processed. */
CV_IMPL void
cvPyrMeanShiftFiltering( const CvArr* srcarr, CvArr* dstarr,
                         double sp0, double sr, int max_level,
                         CvTermCriteria termcrit )
{
    const int cn = 3;           /* fixed: 3-channel input */
    const int MAX_LEVELS = 8;
    CvMat* src_pyramid[MAX_LEVELS+1];
    CvMat* dst_pyramid[MAX_LEVELS+1];
    CvMat* mask0 = 0;
    int i, j, level;
    //uchar* submask = 0;

/* true when the color distance between (c0,c1,c2) and the pixel at byte
   offset ofs0 from dptr is at least isr22 (i.e. a color discontinuity) */
#define cdiff(ofs0) (tab[c0-dptr[ofs0]+255] + \
    tab[c1-dptr[(ofs0)+1]+255] + tab[c2-dptr[(ofs0)+2]+255] >= isr22)

    memset( src_pyramid, 0, sizeof(src_pyramid) );
    memset( dst_pyramid, 0, sizeof(dst_pyramid) );

    CV_FUNCNAME( "cvPyrMeanShiftFiltering" );

    __BEGIN__;

    double sr2 = sr * sr;
    int isr2 = cvRound(sr2), isr22 = MAX(isr2,16);
    int tab[768];               /* squared-difference lookup for d in [-255,512) */
    CvMat sstub0, *src0;
    CvMat dstub0, *dst0;

    CV_CALL( src0 = cvGetMat( srcarr, &sstub0 ));
    CV_CALL( dst0 = cvGetMat( dstarr, &dstub0 ));

    if( CV_MAT_TYPE(src0->type) != CV_8UC3 )
        CV_ERROR( CV_StsUnsupportedFormat, "Only 8-bit, 3-channel images are supported" );

    if( !CV_ARE_TYPES_EQ( src0, dst0 ))
        CV_ERROR( CV_StsUnmatchedFormats, "The input and output images must have the same type" );

    if( !CV_ARE_SIZES_EQ( src0, dst0 ))
        CV_ERROR( CV_StsUnmatchedSizes, "The input and output images must have the same size" );

    if( (unsigned)max_level > (unsigned)MAX_LEVELS )
        CV_ERROR( CV_StsOutOfRange, "The number of pyramid levels is too large or negative" );

    /* normalize the termination criteria to sane defaults/bounds */
    if( !(termcrit.type & CV_TERMCRIT_ITER) )
        termcrit.max_iter = 5;
    termcrit.max_iter = MAX(termcrit.max_iter,1);
    termcrit.max_iter = MIN(termcrit.max_iter,100);
    if( !(termcrit.type & CV_TERMCRIT_EPS) )
        termcrit.epsilon = 1.f;
    termcrit.epsilon = MAX(termcrit.epsilon, 0.f);

    /* tab[d+255] = d*d */
    for( i = 0; i < 768; i++ )
        tab[i] = (i - 255)*(i - 255);

    // 1. construct pyramid
    src_pyramid[0] = src0;
    dst_pyramid[0] = dst0;
    for( level = 1; level <= max_level; level++ )
    {
        CV_CALL( src_pyramid[level] = cvCreateMat( (src_pyramid[level-1]->rows+1)/2,
                        (src_pyramid[level-1]->cols+1)/2, src_pyramid[level-1]->type ));
        CV_CALL( dst_pyramid[level] = cvCreateMat( src_pyramid[level]->rows,
                        src_pyramid[level]->cols, src_pyramid[level]->type ));
        CV_CALL( cvPyrDown( src_pyramid[level-1], src_pyramid[level] ));
        //CV_CALL( cvResize( src_pyramid[level-1], src_pyramid[level], CV_INTER_AREA ));
    }

    CV_CALL( mask0 = cvCreateMat( src0->rows, src0->cols, CV_8UC1 ));
    //CV_CALL( submask = (uchar*)cvAlloc( (sp+2)*(sp+2) ));

    // 2. apply meanshift, starting from the pyramid top (i.e. the smallest layer)
    for( level = max_level; level >= 0; level-- )
    {
        CvMat* src = src_pyramid[level];
        CvSize size = cvGetMatSize(src);
        uchar* sptr = src->data.ptr;
        int sstep = src->step;
        uchar* mask = 0;
        int mstep = 0;
        uchar* dptr;
        int dstep;
        float sp = (float)(sp0 / (1 << level));  /* spatial radius at this level */
        sp = MAX( sp, 1 );

        if( level < max_level )
        {
            /* build a mask selecting pixels near color edges of the coarser
               result; only those are recomputed at this finer level */
            CvSize size1 = cvGetMatSize(dst_pyramid[level+1]);
            CvMat m = cvMat( size.height, size.width, CV_8UC1, mask0->data.ptr );
            dstep = dst_pyramid[level+1]->step;
            dptr = dst_pyramid[level+1]->data.ptr + dstep + cn;
            mstep = m.step;
            mask = m.data.ptr + mstep;
            //cvResize( dst_pyramid[level+1], dst_pyramid[level], CV_INTER_CUBIC );
            cvPyrUp( dst_pyramid[level+1], dst_pyramid[level] );
            cvZero( &m );

            /* scan interior of the coarser image; each coarse pixel marks
               its 2x-upscaled position in the mask */
            for( i = 1; i < size1.height-1; i++, dptr += dstep - (size1.width-2)*3, mask += mstep*2 )
            {
                for( j = 1; j < size1.width-1; j++, dptr += cn )
                {
                    int c0 = dptr[0], c1 = dptr[1], c2 = dptr[2];
                    mask[j*2 - 1] = cdiff(-3) || cdiff(3) || cdiff(-dstep-3) || cdiff(-dstep) ||
                        cdiff(-dstep+3) || cdiff(dstep-3) || cdiff(dstep) || cdiff(dstep+3);
                }
            }

            cvDilate( &m, &m, 0, 1 );
            mask = m.data.ptr;
        }

        dptr = dst_pyramid[level]->data.ptr;
        dstep = dst_pyramid[level]->step;

        for( i = 0; i < size.height; i++, sptr += sstep - size.width*3,
                                          dptr += dstep - size.width*3,
                                          mask += mstep )
        {
            for( j = 0; j < size.width; j++, sptr += 3, dptr += 3 )
            {
                int x0 = j, y0 = i, x1, y1, iter;
                int c0, c1, c2;

                /* skip pixels not flagged by the coarser-level mask */
                if( mask && !mask[j] )
                    continue;

                c0 = sptr[0], c1 = sptr[1], c2 = sptr[2];

                // iterate meanshift procedure
                for( iter = 0; iter < termcrit.max_iter; iter++ )
                {
                    uchar* ptr;
                    int x, y, count = 0;
                    int minx, miny, maxx, maxy;
                    int s0 = 0, s1 = 0, s2 = 0, sx = 0, sy = 0;
                    double icount;
                    int stop_flag;

                    //mean shift: process pixels in window (p-sigmaSp)x(p+sigmaSp)
                    minx = cvRound(x0 - sp); minx = MAX(minx, 0);
                    miny = cvRound(y0 - sp); miny = MAX(miny, 0);
                    maxx = cvRound(x0 + sp); maxx = MIN(maxx, size.width-1);
                    maxy = cvRound(y0 + sp); maxy = MIN(maxy, size.height-1);
                    ptr = sptr + (miny - i)*sstep + (minx - j)*3;

                    for( y = miny; y <= maxy; y++, ptr += sstep - (maxx-minx+1)*3 )
                    {
                        int row_count = 0;
                        x = minx;
                        /* 4x-unrolled inner loop: accumulate pixels whose
                           color distance to (c0,c1,c2) is within isr2 */
                        for( ; x + 3 <= maxx; x += 4, ptr += 12 )
                        {
                            int t0 = ptr[0], t1 = ptr[1], t2 = ptr[2];
                            if( tab[t0-c0+255] + tab[t1-c1+255] + tab[t2-c2+255] <= isr2 )
                            {
                                s0 += t0; s1 += t1; s2 += t2;
                                sx += x; row_count++;
                            }
                            t0 = ptr[3], t1 = ptr[4], t2 = ptr[5];
                            if( tab[t0-c0+255] + tab[t1-c1+255] + tab[t2-c2+255] <= isr2 )
                            {
                                s0 += t0; s1 += t1; s2 += t2;
                                sx += x+1; row_count++;
                            }
                            t0 = ptr[6], t1 = ptr[7], t2 = ptr[8];
                            if( tab[t0-c0+255] + tab[t1-c1+255] + tab[t2-c2+255] <= isr2 )
                            {
                                s0 += t0; s1 += t1; s2 += t2;
                                sx += x+2; row_count++;
                            }
                            t0 = ptr[9], t1 = ptr[10], t2 = ptr[11];
                            if( tab[t0-c0+255] + tab[t1-c1+255] + tab[t2-c2+255] <= isr2 )
                            {
                                s0 += t0; s1 += t1; s2 += t2;
                                sx += x+3; row_count++;
                            }
                        }
                        /* remainder of the row */
                        for( ; x <= maxx; x++, ptr += 3 )
                        {
                            int t0 = ptr[0], t1 = ptr[1], t2 = ptr[2];
                            if( tab[t0-c0+255] + tab[t1-c1+255] + tab[t2-c2+255] <= isr2 )
                            {
                                s0 += t0; s1 += t1; s2 += t2;
                                sx += x; row_count++;
                            }
                        }
                        count += row_count;
                        sy += y*row_count;
                    }

                    if( count == 0 )
                        break;

                    /* shift to the window's spatial/color centroid */
                    icount = 1./count;
                    x1 = cvRound(sx*icount);
                    y1 = cvRound(sy*icount);
                    s0 = cvRound(s0*icount);
                    s1 = cvRound(s1*icount);
                    s2 = cvRound(s2*icount);

                    /* converged when position is fixed or total movement in
                       space+color drops below epsilon */
                    stop_flag = (x0 == x1 && y0 == y1) || abs(x1-x0) + abs(y1-y0) +
                        tab[s0 - c0 + 255] + tab[s1 - c1 + 255] +
                        tab[s2 - c2 + 255] <= termcrit.epsilon;

                    x0 = x1; y0 = y1;
                    c0 = s0; c1 = s1; c2 = s2;

                    if( stop_flag )
                        break;
                }

                dptr[0] = (uchar)c0;
                dptr[1] = (uchar)c1;
                dptr[2] = (uchar)c2;
            }
        }
    }

    __END__;

    for( i = 1; i <= MAX_LEVELS; i++ )
    {
        cvReleaseMat( &src_pyramid[i] );
        cvReleaseMat( &dst_pyramid[i] );
    }

    cvReleaseMat( &mask0 );
}
/* Converts points between Euclidean and homogeneous representations.
   src, dst: point sets stored either as an Nxdims single-channel matrix, a
   dims-channel row/column vector, or the transposed layouts (auto-detected
   from which dimension is > 1). Supported dimensionalities are 2..4 and the
   input/output dims may differ by at most 1:
     - d_dims == s_dims + 1: append a coordinate equal to 1 (no division);
     - d_dims == s_dims:     plain copy/convert (with transpose if layouts differ);
     - d_dims == s_dims - 1: perspective divide by the last input coordinate
                             (zero denominators are replaced by 1).
   Both matrices must be 32f or 64f. In-place operation (same data pointer)
   is a no-op.
   NOTE(review): CV_FUNCNAME says "cvConvertPointsHomogenious" while the
   function is the icv-prefixed internal — presumably the public wrapper's
   name was kept for error reporting. */
void icvConvertPointsHomogenious( const CvMat* src, CvMat* dst )
{
    CvMat* temp = 0;    /* temporary copy of src when a type conversion is needed */
    CvMat* denom = 0;   /* per-point reciprocal of the homogeneous coordinate */

    CV_FUNCNAME( "cvConvertPointsHomogenious" );

    __BEGIN__;

    int i, s_count, s_dims, d_count, d_dims;
    CvMat _src, _dst, _ones;
    CvMat* ones = 0;    /* view over the column/row of dst that gets filled with 1s */

    if( !CV_IS_MAT(src) )
        CV_ERROR( !src ? CV_StsNullPtr : CV_StsBadArg, "The input parameter is not a valid matrix" );

    if( !CV_IS_MAT(dst) )
        CV_ERROR( !dst ? CV_StsNullPtr : CV_StsBadArg, "The output parameter is not a valid matrix" );

    /* in-place call: nothing to do, but headers must agree */
    if( src == dst || src->data.ptr == dst->data.ptr )
    {
        if( src != dst && (!CV_ARE_TYPES_EQ(src, dst) || !CV_ARE_SIZES_EQ(src,dst)) )
            CV_ERROR( CV_StsBadArg, "Invalid inplace operation" );
        EXIT;
    }

    /* deduce point count and dimensionality from the taller axis of src;
       exactly one of {columns, channels} must carry the coordinates */
    if( src->rows > src->cols )
    {
        if( !((src->cols > 1) ^ (CV_MAT_CN(src->type) > 1)) )
            CV_ERROR( CV_StsBadSize, "Either the number of channels or columns or " "rows must be =1" );
        s_dims = CV_MAT_CN(src->type)*src->cols;
        s_count = src->rows;
    }
    else
    {
        if( !((src->rows > 1) ^ (CV_MAT_CN(src->type) > 1)) )
            CV_ERROR( CV_StsBadSize, "Either the number of channels or columns or " "rows must be =1" );
        s_dims = CV_MAT_CN(src->type)*src->rows;
        s_count = src->cols;
    }

    /* normalize vectors to a single-channel s_count x s_dims matrix view */
    if( src->rows == 1 || src->cols == 1 )
        src = cvReshape( src, &_src, 1, s_count );

    /* same deduction for dst
       (NOTE(review): error text says "input matrix" here although this
       branch validates the output matrix — message kept byte-identical) */
    if( dst->rows > dst->cols )
    {
        if( !((dst->cols > 1) ^ (CV_MAT_CN(dst->type) > 1)) )
            CV_ERROR( CV_StsBadSize, "Either the number of channels or columns or " "rows in the input matrix must be =1" );
        d_dims = CV_MAT_CN(dst->type)*dst->cols;
        d_count = dst->rows;
    }
    else
    {
        if( !((dst->rows > 1) ^ (CV_MAT_CN(dst->type) > 1)) )
            CV_ERROR( CV_StsBadSize, "Either the number of channels or columns or " "rows in the output matrix must be =1" );
        d_dims = CV_MAT_CN(dst->type)*dst->rows;
        d_count = dst->cols;
    }

    if( dst->rows == 1 || dst->cols == 1 )
        dst = cvReshape( dst, &_dst, 1, d_count );

    if( s_count != d_count )
        CV_ERROR( CV_StsUnmatchedSizes, "Both matrices must have the " "same number of points" );

    if( CV_MAT_DEPTH(src->type) < CV_32F || CV_MAT_DEPTH(dst->type) < CV_32F )
        CV_ERROR( CV_StsUnsupportedFormat, "Both matrices must be floating-point " "(single or double precision)" );

    if( s_dims < 2 || s_dims > 4 || d_dims < 2 || d_dims > 4 )
        CV_ERROR( CV_StsOutOfRange, "Both input and output point dimensionality " "must be 2, 3 or 4" );

    if( s_dims < d_dims - 1 || s_dims > d_dims + 1 )
        CV_ERROR( CV_StsUnmatchedSizes, "The dimensionalities of input and output " "point sets differ too much" );

    /* appending a homogeneous coordinate: split dst into the copied part and
       the strip of 1s ("ones"), oriented to match dst's point layout */
    if( s_dims == d_dims - 1 )
    {
        if( d_count == dst->rows )
        {
            ones = cvGetSubRect( dst, &_ones, cvRect( s_dims, 0, 1, d_count ));
            dst = cvGetSubRect( dst, &_dst, cvRect( 0, 0, s_dims, d_count ));
        }
        else
        {
            ones = cvGetSubRect( dst, &_ones, cvRect( 0, s_dims, d_count, 1 ));
            dst = cvGetSubRect( dst, &_dst, cvRect( 0, 0, d_count, s_dims ));
        }
    }

    if( s_dims <= d_dims )
    {
        /* no perspective divide: copy (or convert/transpose) coordinates,
           then write the trailing 1s if we are adding a dimension */
        if( src->rows == dst->rows && src->cols == dst->cols )
        {
            if( CV_ARE_TYPES_EQ( src, dst ) )
                cvCopy( src, dst );
            else
                cvConvert( src, dst );
        }
        else
        {
            if( !CV_ARE_TYPES_EQ( src, dst ))
            {
                CV_CALL( temp = cvCreateMat( src->rows, src->cols, dst->type ));
                cvConvert( src, temp );
                src = temp;
            }
            cvTranspose( src, dst );
        }

        if( ones )
            cvSet( ones, cvRealScalar(1.) );
    }
    else
    {
        /* perspective divide: dst(i) = src(i) / src.last(i) */
        int s_plane_stride, s_stride, d_plane_stride, d_stride, elem_size;

        if( !CV_ARE_TYPES_EQ( src, dst ))
        {
            CV_CALL( temp = cvCreateMat( src->rows, src->cols, dst->type ));
            cvConvert( src, temp );
            src = temp;
        }

        elem_size = CV_ELEM_SIZE(src->type);

        /* plane stride = distance between successive coordinates of one point;
           stride = distance between successive points (both in elements) */
        if( s_count == src->cols )
            s_plane_stride = src->step / elem_size, s_stride = 1;
        else
            s_stride = src->step / elem_size, s_plane_stride = 1;

        if( d_count == dst->cols )
            d_plane_stride = dst->step / elem_size, d_stride = 1;
        else
            d_stride = dst->step / elem_size, d_plane_stride = 1;

        CV_CALL( denom = cvCreateMat( 1, d_count, dst->type ));

        if( CV_MAT_DEPTH(dst->type) == CV_32F )
        {
            const float* xs = src->data.fl;
            const float* ys = xs + s_plane_stride;
            const float* zs = 0;
            const float* ws = xs + (s_dims - 1)*s_plane_stride;  /* homogeneous coord */
            float* iw = denom->data.fl;
            float* xd = dst->data.fl;
            float* yd = xd + d_plane_stride;
            float* zd = 0;

            if( d_dims == 3 )
            {
                zs = ys + s_plane_stride;
                zd = yd + d_plane_stride;
            }

            /* gather denominators, guarding against division by zero */
            for( i = 0; i < d_count; i++, ws += s_stride )
            {
                float t = *ws;
                iw[i] = t ? t : 1.f;
            }

            /* denom <- 1/denom (cvDiv with NULL numerator computes reciprocal) */
            cvDiv( 0, denom, denom );

            if( d_dims == 3 )
                for( i = 0; i < d_count; i++ )
                {
                    float w = iw[i];
                    float x = *xs * w, y = *ys * w, z = *zs * w;
                    xs += s_stride; ys += s_stride; zs += s_stride;
                    *xd = x; *yd = y; *zd = z;
                    xd += d_stride; yd += d_stride; zd += d_stride;
                }
            else
                for( i = 0; i < d_count; i++ )
                {
                    float w = iw[i];
                    float x = *xs * w, y = *ys * w;
                    xs += s_stride; ys += s_stride;
                    *xd = x; *yd = y;
                    xd += d_stride; yd += d_stride;
                }
        }
        else
        {
            /* double-precision twin of the 32f branch above */
            const double* xs = src->data.db;
            const double* ys = xs + s_plane_stride;
            const double* zs = 0;
            const double* ws = xs + (s_dims - 1)*s_plane_stride;
            double* iw = denom->data.db;
            double* xd = dst->data.db;
            double* yd = xd + d_plane_stride;
            double* zd = 0;

            if( d_dims == 3 )
            {
                zs = ys + s_plane_stride;
                zd = yd + d_plane_stride;
            }

            for( i = 0; i < d_count; i++, ws += s_stride )
            {
                double t = *ws;
                iw[i] = t ? t : 1.;
            }

            cvDiv( 0, denom, denom );

            if( d_dims == 3 )
                for( i = 0; i < d_count; i++ )
                {
                    double w = iw[i];
                    double x = *xs * w, y = *ys * w, z = *zs * w;
                    xs += s_stride; ys += s_stride; zs += s_stride;
                    *xd = x; *yd = y; *zd = z;
                    xd += d_stride; yd += d_stride; zd += d_stride;
                }
            else
                for( i = 0; i < d_count; i++ )
                {
                    double w = iw[i];
                    double x = *xs * w, y = *ys * w;
                    xs += s_stride; ys += s_stride;
                    *xd = x; *yd = y;
                    xd += d_stride; yd += d_stride;
                }
        }
    }

    __END__;

    /* cleanup runs on both success and error paths */
    cvReleaseMat( &denom );
    cvReleaseMat( &temp );
}
/* Computes per-element absolute difference: dst(i) = |src1(i) - src2(i)|.
   All three arrays must have identical sizes and types; COI is not supported.
   The actual per-depth kernel is looked up in a lazily-initialized table.
   NOTE(review): the lazy init of the static table is not guarded — presumably
   callers are single-threaded at first use; confirm before using from
   multiple threads. */
CV_IMPL void
cvAbsDiff( const void* srcarr1, const void* srcarr2, void* dstarr )
{
    /* per-depth implementation table, built on first call only */
    static CvFuncTable adiff_tab;
    static int inittab = 0;

    CV_FUNCNAME( "cvAbsDiff" );

    __BEGIN__;

    int coi_a = 0, coi_b = 0, coi_c = 0;
    CvMat stub_a, *mat_a = (CvMat*)srcarr1;
    CvMat stub_b, *mat_b = (CvMat*)srcarr2;
    CvMat stub_c, *mat_c = (CvMat*)dstarr;
    int step_a, step_b, step_c;
    CvSize roi;
    int type;

    if( !inittab )
    {
        icvInitAbsDiffTable( &adiff_tab );
        inittab = 1;
    }

    CV_CALL( mat_a = cvGetMat( mat_a, &stub_a, &coi_a ));
    CV_CALL( mat_b = cvGetMat( mat_b, &stub_b, &coi_b ));
    CV_CALL( mat_c = cvGetMat( mat_c, &stub_c, &coi_c ));

    /* COI is not supported by this function */
    if( coi_a != 0 || coi_b != 0 || coi_c != 0 )
        CV_ERROR( CV_BadCOI, "" );

    if( !CV_ARE_SIZES_EQ( mat_a, mat_b ) )
        CV_ERROR_FROM_CODE( CV_StsUnmatchedSizes );

    roi = icvGetMatSize( mat_a );
    type = CV_MAT_TYPE(mat_a->type);

    if( !CV_ARE_SIZES_EQ( mat_a, mat_c ))
        CV_ERROR_FROM_CODE( CV_StsUnmatchedSizes );

    if( !CV_ARE_TYPES_EQ( mat_a, mat_b ))
        CV_ERROR_FROM_CODE( CV_StsUnmatchedFormats );

    if( !CV_ARE_TYPES_EQ( mat_a, mat_c ))
        CV_ERROR_FROM_CODE( CV_StsUnmatchedFormats );

    /* treat multi-channel data as a wider single-channel row */
    roi.width *= CV_MAT_CN( type );

    step_a = mat_a->step;
    step_b = mat_b->step;
    step_c = mat_c->step;

    /* when all three arrays are continuous, flatten to one long row */
    if( CV_IS_MAT_CONT( mat_a->type & mat_b->type & mat_c->type ))
    {
        roi.width *= roi.height;
        roi.height = 1;
        step_a = step_b = step_c = CV_STUB_STEP;
    }

    {
        CvFunc2D_3A func = (CvFunc2D_3A)(adiff_tab.fn_2d[CV_MAT_DEPTH(type)]);

        if( !func )
            CV_ERROR( CV_StsUnsupportedFormat, "" );

        IPPI_CALL( func( mat_a->data.ptr, step_a,
                         mat_b->data.ptr, step_b,
                         mat_c->data.ptr, step_c, roi ));
    }

    __END__;
}
/* The main function: computes the Earth Mover's Distance between two signatures.
   signature_arr1/2 : size1 x (dims+1) and size2 x (dims+1) 32fC1 matrices;
                      column 0 is the weight, the remaining columns the coords.
   dist_type        : CV_DIST_L1/L2/C selects a built-in metric; a negative
                      value means a user-defined distance (dist_func or
                      cost_matrix, but not both).
   cost_matrix      : optional size1 x size2 32fC1 precomputed ground distances.
   flow_matrix      : optional size1 x size2 32fC1 output; receives the flow.
   lower_bound      : in/out; if the quick lower bound already separates the
                      signatures, it is returned without running the solver.
   Returns the EMD value (total cost / total weight). */
CV_IMPL float cvCalcEMD2( const CvArr* signature_arr1,
            const CvArr* signature_arr2,
            int dist_type,
            CvDistanceFunction dist_func,
            const CvArr* cost_matrix,
            CvArr* flow_matrix,
            float *lower_bound,
            void *user_param )
{
    cv::AutoBuffer<char> local_buf;   /* scratch storage owned by icvInitEMD; freed automatically */
    CvEMDState state;
    float emd = 0;

    memset( &state, 0, sizeof(state));

    double total_cost = 0;
    int result = 0;
    float eps, min_delta;
    CvNode2D *xp = 0;
    CvMat sign_stub1, *signature1 = (CvMat*)signature_arr1;
    CvMat sign_stub2, *signature2 = (CvMat*)signature_arr2;
    CvMat cost_stub, *cost = &cost_stub;
    CvMat flow_stub, *flow = (CvMat*)flow_matrix;
    int dims, size1, size2;

    signature1 = cvGetMat( signature1, &sign_stub1 );
    signature2 = cvGetMat( signature2, &sign_stub2 );

    if( signature1->cols != signature2->cols )
        CV_Error( CV_StsUnmatchedSizes, "The arrays must have equal number of columns (which is number of dimensions but 1)" );

    /* first column holds the weight, hence dims = cols - 1 */
    dims = signature1->cols - 1;
    size1 = signature1->rows;
    size2 = signature2->rows;

    if( !CV_ARE_TYPES_EQ( signature1, signature2 ))
        CV_Error( CV_StsUnmatchedFormats, "The array must have equal types" );

    if( CV_MAT_TYPE( signature1->type ) != CV_32FC1 )
        CV_Error( CV_StsUnsupportedFormat, "The signatures must be 32fC1" );

    if( flow )
    {
        flow = cvGetMat( flow, &flow_stub );

        if( flow->rows != size1 || flow->cols != size2 )
            CV_Error( CV_StsUnmatchedSizes, "The flow matrix size does not match to the signatures' sizes" );

        if( CV_MAT_TYPE( flow->type ) != CV_32FC1 )
            CV_Error( CV_StsUnsupportedFormat, "The flow matrix must be 32fC1" );
    }

    cost->data.fl = 0;
    cost->step = 0;

    if( dist_type < 0 )
    {
        /* user-defined distance: either an explicit cost matrix or a callback */
        if( cost_matrix )
        {
            if( dist_func )
                CV_Error( CV_StsBadArg, "Only one of cost matrix or distance function should be non-NULL in case of user-defined distance" );

            if( lower_bound )
                CV_Error( CV_StsBadArg, "The lower boundary can not be calculated if the cost matrix is used" );

            cost = cvGetMat( cost_matrix, &cost_stub );
            if( cost->rows != size1 || cost->cols != size2 )
                CV_Error( CV_StsUnmatchedSizes, "The cost matrix size does not match to the signatures' sizes" );

            if( CV_MAT_TYPE( cost->type ) != CV_32FC1 )
                CV_Error( CV_StsUnsupportedFormat, "The cost matrix must be 32fC1" );
        }
        else if( !dist_func )
            CV_Error( CV_StsNullPtr, "In case of user-defined distance Distance function is undefined" );
    }
    else
    {
        /* built-in metric: dims is smuggled to the callback via user_param */
        if( dims == 0 )
            CV_Error( CV_StsBadSize, "Number of dimensions can be 0 only if a user-defined metric is used" );

        user_param = (void *) (size_t)dims;
        switch (dist_type)
        {
        case CV_DIST_L1:
            dist_func = icvDistL1;
            break;
        case CV_DIST_L2:
            dist_func = icvDistL2;
            break;
        case CV_DIST_C:
            dist_func = icvDistC;
            break;
        default:
            CV_Error( CV_StsBadFlag, "Bad or unsupported metric type" );
        }
    }

    result = icvInitEMD( signature1->data.fl, size1,
                         signature2->data.fl, size2,
                         dims, dist_func, user_param,
                         cost->data.fl, cost->step,
                         &state, lower_bound, local_buf );

    /* icvInitEMD returning > 0 with a lower_bound request: presumably the
       lower bound alone was decisive, so skip the solver entirely */
    if( result > 0 && lower_bound )
    {
        emd = *lower_bound;
        return emd;
    }

    eps = CV_EMD_EPS * state.max_cost;

    /* if ssize = 1 or dsize = 1 then we are done, else ... */
    if( state.ssize > 1 && state.dsize > 1 )
    {
        int itr;

        /* transportation-simplex iterations, capped at MAX_ITERATIONS */
        for( itr = 1; itr < MAX_ITERATIONS; itr++ )
        {
            /* find basic variables */
            result = icvFindBasicVariables( state.cost, state.is_x,
                                            state.u, state.v, state.ssize, state.dsize );
            if( result < 0 )
                break;

            /* check for optimality */
            min_delta = icvIsOptimal( state.cost, state.is_x,
                                      state.u, state.v,
                                      state.ssize, state.dsize, state.enter_x );

            if( min_delta == CV_EMD_INF )
                CV_Error( CV_StsNoConv, "" );

            /* if no negative deltamin, we found the optimal solution */
            if( min_delta >= -eps )
                break;

            /* improve solution */
            if(!icvNewSolution( &state ))
                CV_Error( CV_StsNoConv, "" );
        }
    }

    /* compute the total flow */
    for( xp = state._x; xp < state.end_x; xp++ )
    {
        float val = xp->val;
        int i = xp->i;
        int j = xp->j;

        /* the entering variable is not part of the basis */
        if( xp == state.enter_x )
          continue;

        int ci = state.idx1[i];
        int cj = state.idx2[j];

        /* negative indices mark dummy rows/cols; skip them */
        if( ci >= 0 && cj >= 0 )
        {
            total_cost += (double)val * state.cost[i][j];
            if( flow )
                ((float*)(flow->data.ptr + flow->step*ci))[cj] = val;
        }
    }

    /* normalize by the total transported weight */
    emd = (float) (total_cost / state.weight);

    return emd;
}
/* Singular value decomposition: A = U * W * V^T.
   aarr : m x n input matrix (32fC1 or 64fC1).
   warr : receives singular values; either a continuous vector of length
          min(m,n) (written directly) or a matrix (diagonal filled, rest zeroed).
   uarr, varr : optional output factors; may be NULL.
   flags : combination of CV_SVD_MODIFY_A (A may be clobbered),
           CV_SVD_U_T / CV_SVD_V_T (store the transposed factor).
   If A has fewer rows than columns the roles of U and V are swapped
   internally (t_svd) and A is processed transposed. */
CV_IMPL void
cvSVD( CvArr* aarr, CvArr* warr, CvArr* uarr, CvArr* varr, int flags )
{
    uchar* buffer = 0;
    int local_alloc = 0;    /* set when the scratch buffer lives on the stack */

    CV_FUNCNAME( "cvSVD" );

    __BEGIN__;

    CvMat astub, *a = (CvMat*)aarr;
    CvMat wstub, *w = (CvMat*)warr;
    CvMat ustub, *u;
    CvMat vstub, *v;
    CvMat tmat;
    uchar* tw = 0;          /* where singular values are written (w's data or scratch) */
    int type;
    int a_buf_offset = 0, u_buf_offset = 0, buf_size, pix_size;
    int temp_u = 0, /* temporary storage for U is needed */
        t_svd; /* special case: a->rows < a->cols */
    int m, n;
    int w_rows, w_cols;
    int u_rows = 0, u_cols = 0;
    int w_is_mat = 0;       /* w is a full matrix rather than a vector */

    if( !CV_IS_MAT( a ))
        CV_CALL( a = cvGetMat( a, &astub ));

    if( !CV_IS_MAT( w ))
        CV_CALL( w = cvGetMat( w, &wstub ));

    if( !CV_ARE_TYPES_EQ( a, w ))
        CV_ERROR( CV_StsUnmatchedFormats, "" );

    if( a->rows >= a->cols )
    {
        m = a->rows;
        n = a->cols;
        w_rows = w->rows;
        w_cols = w->cols;
        t_svd = 0;
    }
    else
    {
        /* wide matrix: decompose A^T instead, swapping U<->V and the
           corresponding transpose flags */
        CvArr* t;
        CV_SWAP( uarr, varr, t );

        flags = (flags & CV_SVD_U_T ? CV_SVD_V_T : 0)|
                (flags & CV_SVD_V_T ? CV_SVD_U_T : 0);
        m = a->cols;
        n = a->rows;
        w_rows = w->cols;
        w_cols = w->rows;
        t_svd = 1;
    }

    u = (CvMat*)uarr;
    v = (CvMat*)varr;

    w_is_mat = w_cols > 1 && w_rows > 1;

    /* continuous vector of exactly n elements: write singular values in place */
    if( !w_is_mat && CV_IS_MAT_CONT(w->type) && w_cols + w_rows - 1 == n )
        tw = w->data.ptr;

    if( u )
    {
        if( !CV_IS_MAT( u ))
            CV_CALL( u = cvGetMat( u, &ustub ));

        if( !(flags & CV_SVD_U_T) )
        {
            u_rows = u->rows;
            u_cols = u->cols;
        }
        else
        {
            u_rows = u->cols;
            u_cols = u->rows;
        }

        if( !CV_ARE_TYPES_EQ( a, u ))
            CV_ERROR( CV_StsUnmatchedFormats, "" );

        if( u_rows != m || (u_cols != m && u_cols != n))
            CV_ERROR( CV_StsUnmatchedSizes, !t_svd ? "U matrix has unappropriate size" : "V matrix has unappropriate size" );

        /* U must be computed into scratch memory when it is non-square without
           the transpose flag, or when it aliases A */
        temp_u = (u_rows != u_cols && !(flags & CV_SVD_U_T)) || u->data.ptr==a->data.ptr;

        if( w_is_mat && u_cols != w_rows )
            CV_ERROR( CV_StsUnmatchedSizes, !t_svd ? "U and W have incompatible sizes" : "V and W have incompatible sizes" );
    }
    else
    {
        u = &ustub;
        u->data.ptr = 0;
        u->step = 0;
    }

    if( v )
    {
        int v_rows, v_cols;

        if( !CV_IS_MAT( v ))
            CV_CALL( v = cvGetMat( v, &vstub ));

        if( !(flags & CV_SVD_V_T) )
        {
            v_rows = v->rows;
            v_cols = v->cols;
        }
        else
        {
            v_rows = v->cols;
            v_cols = v->rows;
        }

        if( !CV_ARE_TYPES_EQ( a, v ))
            CV_ERROR( CV_StsUnmatchedFormats, "" );

        if( v_rows != n || v_cols != n )
            CV_ERROR( CV_StsUnmatchedSizes, t_svd ? "U matrix has unappropriate size" : "V matrix has unappropriate size" );

        if( w_is_mat && w_cols != v_cols )
            CV_ERROR( CV_StsUnmatchedSizes, t_svd ? "U and W have incompatible sizes" : "V and W have incompatible sizes" );
    }
    else
    {
        v = &vstub;
        v->data.ptr = 0;
        v->step = 0;
    }

    type = CV_MAT_TYPE( a->type );
    pix_size = CV_ELEM_SIZE(type);

    /* scratch layout: [2n+m work area][copy of A, optional][temp U, optional] */
    buf_size = n*2 + m;

    if( !(flags & CV_SVD_MODIFY_A) )
    {
        a_buf_offset = buf_size;
        buf_size += a->rows*a->cols;
    }

    if( temp_u )
    {
        u_buf_offset = buf_size;
        buf_size += u->rows*u->cols;
    }

    buf_size *= pix_size;

    if( buf_size <= CV_MAX_LOCAL_SIZE )
    {
        buffer = (uchar*)cvStackAlloc( buf_size );
        local_alloc = 1;
    }
    else
    {
        CV_CALL( buffer = (uchar*)cvAlloc( buf_size ));
    }

    if( !(flags & CV_SVD_MODIFY_A) )
    {
        /* work on a private copy so the caller's A is preserved */
        cvInitMatHeader( &tmat, m, n, type,
                         buffer + a_buf_offset*pix_size );
        if( !t_svd )
            cvCopy( a, &tmat );
        else
            cvT( a, &tmat );
        a = &tmat;
    }

    if( temp_u )
    {
        cvInitMatHeader( &ustub, u_cols, u_rows, type, buffer + u_buf_offset*pix_size );
        u = &ustub;
    }

    if( !tw )
        tw = buffer + (n + m)*pix_size;

    if( type == CV_32FC1 )
    {
        icvSVD_32f( a->data.fl, a->step/sizeof(float), a->rows, a->cols,
                    (float*)tw, u->data.fl, u->step/sizeof(float), u_cols,
                    v->data.fl, v->step/sizeof(float), (float*)buffer );
    }
    else if( type == CV_64FC1 )
    {
        icvSVD_64f( a->data.db, a->step/sizeof(double), a->rows, a->cols,
                    (double*)tw, u->data.db, u->step/sizeof(double), u_cols,
                    v->data.db, v->step/sizeof(double), (double*)buffer );
    }
    else
    {
        CV_ERROR( CV_StsUnsupportedFormat, "" );
    }

    /* singular values went into scratch: scatter them onto W's diagonal
       (shift selects column stepping for a matrix W vs. a column vector) */
    if( tw != w->data.ptr )
    {
        int shift = w->cols != 1;
        cvSetZero( w );
        if( type == CV_32FC1 )
            for( int i = 0; i < n; i++ )
                ((float*)(w->data.ptr + i*w->step))[i*shift] = ((float*)tw)[i];
        else
            for( int i = 0; i < n; i++ )
                ((double*)(w->data.ptr + i*w->step))[i*shift] = ((double*)tw)[i];
    }

    if( uarr )
    {
        if( !(flags & CV_SVD_U_T))
            cvT( u, uarr );
        else if( temp_u )
            cvCopy( u, uarr );
        /*CV_CHECK_NANS( uarr );*/
    }

    if( varr )
    {
        if( !(flags & CV_SVD_V_T))
            cvT( v, varr );
        /*CV_CHECK_NANS( varr );*/
    }

    CV_CHECK_NANS( w );

    __END__;

    if( buffer && !local_alloc )
        cvFree( &buffer );
}
/* Singular value decomposition (older variant): A = U * W * V^T.
   aarr : input matrix (32fC1 or 64fC1).
   warr : receives singular values; either a continuous vector of length
          min(width,height), or a matrix of the same size as A (diagonal set,
          rest zeroed).
   uarr : optional square matrix, side = A->height; must not alias A.
   varr : optional square matrix, side = A->width; must not alias A or U.
   flags : CV_SVD_MODIFY_A (A may be clobbered), CV_SVD_U_T / CV_SVD_V_T
           (leave the factor transposed).
   NOTE(review): this duplicates the other cvSVD definition in this file —
   presumably two versions of the same translation unit were concatenated;
   only one can be compiled. */
CV_IMPL void
cvSVD( CvArr* aarr, CvArr* warr, CvArr* uarr, CvArr* varr, int flags )
{
    uchar* buffer = 0;
    int local_alloc = 0;    /* set when the scratch buffer is on the stack */

    CV_FUNCNAME( "cvSVD" );

    __BEGIN__;

    CvMat astub, *a = (CvMat*)aarr;
    CvMat wstub, *w = (CvMat*)warr;
    CvMat ustub, *u = (CvMat*)uarr;
    CvMat vstub, *v = (CvMat*)varr;
    CvMat tmat;
    uchar* tw = 0;          /* destination for singular values (w's data or scratch) */
    int type, nm, mn;
    int buf_size, pix_size;
    int t_svd = 0; // special case: a->rows < a->cols

    if( !CV_IS_ARR( a ))
        CV_CALL( a = cvGetMat( a, &astub ));

    if( !CV_IS_ARR( w ))
        CV_CALL( w = cvGetMat( w, &wstub ));

    if( !CV_ARE_TYPES_EQ( a, w ))
        CV_ERROR( CV_StsUnmatchedFormats, "" );

    nm = MIN( a->width, a->height );
    mn = MAX( a->width, a->height );

    /* continuous vector of exactly min(m,n) elements: write values in place */
    if( (w->width == 1 || w->height == 1) && CV_IS_ARR_CONT( w->type ) &&
        w->width*w->height == nm )
    {
        tw = w->data.ptr;
    }
    else if( !CV_ARE_SIZES_EQ( w, a ))
    {
        CV_ERROR( CV_StsBadSize, "W must be either continuous vector of " "size MIN(A->width,A->height) or matrix of " "the same size as A" );
    }

    if( u )
    {
        if( !CV_IS_ARR( u ))
            CV_CALL( u = cvGetMat( u, &ustub ));

        if( !CV_ARE_TYPES_EQ( a, u ))
            CV_ERROR( CV_StsUnmatchedFormats, "" );

        if( u->width != u->height || u->height != a->height )
            CV_ERROR( CV_StsUnmatchedSizes, "U matrix must be square and have the same " "linear size as number of rows in A" );

        if( u->data.ptr == a->data.ptr )
            CV_ERROR( CV_StsBadArg, "U can not be equal A" );
    }
    else
    {
        u = &ustub;
        u->data.ptr = 0;
        u->step = 0;
    }

    if( v )
    {
        if( !CV_IS_ARR( v ))
            CV_CALL( v = cvGetMat( v, &vstub ));

        if( !CV_ARE_TYPES_EQ( a, v ))
            CV_ERROR( CV_StsUnmatchedFormats, "" );

        if( v->width != v->height || v->width != a->width )
            CV_ERROR( CV_StsUnmatchedSizes, "V matrix must be square and have the same " "linear size as number of columns in A" );

        if( v->data.ptr == a->data.ptr || v->data.ptr == u->data.ptr )
            CV_ERROR( CV_StsBadArg, "V can not be equal U or A" );
    }
    else
    {
        v = &vstub;
        v->data.ptr = 0;
        v->step = 0;
    }

    type = CV_ARR_TYPE( a->type );
    pix_size = icvPixSize[type];

    /* scratch layout: [2*nm + mn work area][copy of A, optional] */
    buf_size = nm*2 + mn;

    if( a->rows < a->cols )
    {
        /* wide matrix: decompose A^T, swapping U<->V and the transpose flags */
        CvMat* t;
        CV_SWAP( u, v, t );

        flags = (flags & CV_SVD_U_T ? CV_SVD_V_T : 0)|
                (flags & CV_SVD_V_T ? CV_SVD_U_T : 0);
        t_svd = 1;
    }

    if( !(flags & CV_SVD_MODIFY_A) )
        buf_size += a->width*a->height;

    buf_size *= pix_size;

    if( buf_size <= CV_MAX_LOCAL_SIZE )
    {
        /* NOTE(review): alloca'd scratch — buf_size is bounded by
           CV_MAX_LOCAL_SIZE here, so stack use stays limited */
        buffer = (uchar*)alloca( buf_size );
        local_alloc = 1;
    }
    else
    {
        CV_CALL( buffer = (uchar*)cvAlloc( buf_size ));
    }

    if( !(flags & CV_SVD_MODIFY_A) )
    {
        /* work on a private (possibly transposed) copy of A */
        if( !t_svd )
        {
            cvInitMatHeader( &tmat, a->height, a->width, type,
                             buffer + (nm*2 + mn)*pix_size );
            cvCopy( a, &tmat );
        }
        else
        {
            cvInitMatHeader( &tmat, a->width, a->height, type,
                             buffer + (nm*2 + mn)*pix_size );
            cvT( a, &tmat );
        }
        a = &tmat;
    }

    if( !tw )
        tw = buffer + (nm + mn)*pix_size;

    if( type == CV_32FC1 )
    {
        IPPI_CALL( icvSVD_32f( a->data.fl, a->step/sizeof(float), (float*)tw,
                               u->data.fl, u->step/sizeof(float),
                               v->data.fl, v->step/sizeof(float),
                               icvGetMatSize(a), (float*)buffer ));
    }
    else if( type == CV_64FC1 )
    {
        IPPI_CALL( icvSVD_64f( a->data.db, a->step/sizeof(double), (double*)tw,
                               u->data.db, u->step/sizeof(double),
                               v->data.db, v->step/sizeof(double),
                               icvGetMatSize(a), (double*)buffer ));
    }
    else
    {
        CV_ERROR( CV_StsUnsupportedFormat, "" );
    }

    /* singular values went into scratch: scatter onto W's diagonal */
    if( tw != w->data.ptr )
    {
        cvSetZero( w );
        if( type == CV_32FC1 )
            for( int i = 0; i < nm; i++ )
                ((float*)(w->data.ptr + i*w->step))[i] = ((float*)tw)[i];
        else
            for( int i = 0; i < nm; i++ )
                ((double*)(w->data.ptr + i*w->step))[i] = ((double*)tw)[i];
    }

    if( u->data.ptr )
    {
        /* the kernel produces U^T; transpose in place unless caller asked for U^T */
        if( !(flags & CV_SVD_U_T))
            cvT( u, u );
        CV_CHECK_NANS( u );
    }

    if( v->data.ptr)
    {
        if( !(flags & CV_SVD_V_T))
            cvT( v, v );
        CV_CHECK_NANS( v );
    }

    CV_CHECK_NANS( w );

    __END__;

    if( buffer && !local_alloc )
        cvFree( (void**)&buffer );
}
bool CvCalibFilter::Undistort( CvMat** srcarr, CvMat** dstarr ) { int i; if( !srcarr || !dstarr ) { assert(0); return false; } if( isCalibrated ) { for( i = 0; i < cameraCount; i++ ) { if( srcarr[i] && dstarr[i] ) { CvMat src_stub, *src; CvMat dst_stub, *dst; src = cvGetMat( srcarr[i], &src_stub ); dst = cvGetMat( dstarr[i], &dst_stub ); if( src->data.ptr == dst->data.ptr ) { if( !undistImg || undistImg->width != src->width || undistImg->height != src->height || CV_ARE_TYPES_EQ( undistImg, src )) { cvReleaseMat( &undistImg ); undistImg = cvCreateMat( src->height, src->width, src->type ); } cvCopy( src, undistImg ); src = undistImg; } #if 1 { CvMat A = cvMat( 3, 3, CV_32FC1, cameraParams[i].matrix ); CvMat k = cvMat( 1, 4, CV_32FC1, cameraParams[i].distortion ); if( !undistMap[i][0] || undistMap[i][0]->width != src->width || undistMap[i][0]->height != src->height ) { cvReleaseMat( &undistMap[i][0] ); cvReleaseMat( &undistMap[i][1] ); undistMap[i][0] = cvCreateMat( src->height, src->width, CV_32FC1 ); undistMap[i][1] = cvCreateMat( src->height, src->width, CV_32FC1 ); cvInitUndistortMap( &A, &k, undistMap[i][0], undistMap[i][1] ); } cvRemap( src, dst, undistMap[i][0], undistMap[i][1] ); #else cvUndistort2( src, dst, &A, &k ); #endif } } } } else { for( i = 0; i < cameraCount; i++ ) { if( srcarr[i] != dstarr[i] ) cvCopy( srcarr[i], dstarr[i] ); } } return true; }
/* Computes integral images of an 8uC1 source.
   image          : 8uC1 source, W x H.
   sumImage       : (W+1) x (H+1) 32s sum image (same channel count as source).
   sumSqImage     : optional (W+1) x (H+1) 64s squared-sum image.
   tiltedSumImage : optional (W+1) x (H+1) 45-degree tilted sum image; requires
                    sumSqImage and a single-channel layout.
   COI is not supported on any argument. */
CV_IMPL void
cvIntegral( const CvArr* image, CvArr* sumImage,
            CvArr* sumSqImage, CvArr* tiltedSumImage )
{
    CV_FUNCNAME( "cvIntegralImage" );

    __BEGIN__;

    CvMat src_stub, *src = (CvMat*)image;
    CvMat sum_stub, *sum = (CvMat*)sumImage;
    CvMat sqsum_stub, *sqsum = (CvMat*)sumSqImage;
    CvMat tilted_stub, *tilted = (CvMat*)tiltedSumImage;
    int coi0 = 0, coi1 = 0, coi2 = 0, coi3 = 0;
    //int depth;
    int cn;
    int src_step, sum_step, sqsum_step, tilted_step;
    CvSize size;

    CV_CALL( src = cvGetMat( src, &src_stub, &coi0 ));
    CV_CALL( sum = cvGetMat( sum, &sum_stub, &coi1 ));

    /* sum images carry one extra row and column of leading zeros */
    if( sum->width != src->width + 1 ||
        sum->height != src->height + 1 )
        CV_ERROR( CV_StsUnmatchedSizes, "" );

    if(CV_MAT_DEPTH(src->type)!=CV_8U || CV_MAT_CN(src->type)!=1)
        CV_ERROR( CV_StsUnsupportedFormat, "the source array must be 8UC1");

    if( CV_MAT_DEPTH( sum->type ) != CV_32S ||
        !CV_ARE_CNS_EQ( src, sum ))
        CV_ERROR( CV_StsUnsupportedFormat, "Sum array must have 32s type in case of 8u source array" "and the same number of channels as the source array" );

    if( sqsum )
    {
        CV_CALL( sqsum = cvGetMat( sqsum, &sqsum_stub, &coi2 ));
        if( !CV_ARE_SIZES_EQ( sum, sqsum ) )
            CV_ERROR( CV_StsUnmatchedSizes, "" );
        if( CV_MAT_DEPTH( sqsum->type ) != CV_64S ||
            !CV_ARE_CNS_EQ( src, sqsum ))
            CV_ERROR( CV_StsUnsupportedFormat, "Squares sum array must be 64s " "and the same number of channels as the source array" );
    }

    if( tilted )
    {
        /* the low-level kernel computes tilted sums only alongside sqsum */
        if( !sqsum )
            CV_ERROR( CV_StsNullPtr, "Squared sum array must be passed if tilted sum array is passed" );

        CV_CALL( tilted = cvGetMat( tilted, &tilted_stub, &coi3 ));
        if( !CV_ARE_SIZES_EQ( sum, tilted ) )
            CV_ERROR( CV_StsUnmatchedSizes, "" );
        if( !CV_ARE_TYPES_EQ( sum, tilted ) )
            CV_ERROR( CV_StsUnmatchedFormats, "Sum and tilted sum must have the same types" );
        if( CV_MAT_CN(tilted->type) != 1 )
            CV_ERROR( CV_StsNotImplemented, "Tilted sum can not be computed for multi-channel arrays" );
    }

    if( coi0 || coi1 || coi2 || coi3 )
        CV_ERROR( CV_BadCOI, "COI is not supported by the function" );

    //depth = CV_MAT_DEPTH(src->type);
    cn = CV_MAT_CN(src->type);
    size = cvGetMatSize(src);

    /* zero step means a single-row view; substitute the stub step */
    src_step = src->step ? src->step : CV_STUB_STEP;
    sum_step = sum->step ? sum->step : CV_STUB_STEP;
    sqsum_step = !sqsum ? 0 : sqsum->step ? sqsum->step : CV_STUB_STEP;
    tilted_step = !tilted ? 0 : tilted->step ? tilted->step : CV_STUB_STEP;

    /* NOTE(review): validation above forces cn == 1, so this branch is
       always taken; multi-channel support was presumably planned */
    if( cn == 1 )
    {
        cvIntegralImage_8u32s64s_C1R( src->data.ptr, src_step,
                                      (int*)(sum->data.ptr), sum_step,
                                      sqsum ? (int64*)(sqsum->data.ptr) : 0, sqsum_step,
                                      tilted ? (int*)(tilted->data.ptr) : 0, tilted_step,
                                      size );
    }

    __END__;
}