/* Computes DCT-based observation vectors for embedded HMMs.
   Accepts a single-channel 8u or 32f image and dispatches to the
   corresponding low-level implementation. */
CV_IMPL void
cvImgToObs_DCT( const void* arr, float* obs, CvSize dctSize,
                CvSize obsSize, CvSize delta )
{
    CV_FUNCNAME( "cvImgToObs_DCT" );

    __BEGIN__;

    CvMat stub, *mat = (CvMat*)arr;

    CV_CALL( mat = cvGetMat( arr, &stub ));

    /* dispatch on pixel type: only 8uC1 and 32fC1 are supported */
    if( CV_MAT_TYPE( mat->type ) == CV_8UC1 )
    {
        IPPI_CALL( icvImgToObs_DCT_8u32f_C1R( mat->data.ptr, mat->step,
                                              cvGetMatSize(mat), obs,
                                              dctSize, obsSize, delta ));
    }
    else if( CV_MAT_TYPE( mat->type ) == CV_32FC1 )
    {
        IPPI_CALL( icvImgToObs_DCT_32f_C1R( mat->data.fl, mat->step,
                                            cvGetMatSize(mat), obs,
                                            dctSize, obsSize, delta ));
    }
    else
        CV_ERROR( CV_StsUnsupportedFormat, "" );

    __END__;
}
/* Copies the source array into a larger destination, filling the border
   (whose left/top width is given by 'offset') according to 'bordertype':
   replicate, reflect-101, or a constant color 'value'. */
CV_IMPL void
cvCopyMakeBorder( const CvArr* srcarr, CvArr* dstarr, CvPoint offset,
                  int bordertype, CvScalar value )
{
    CvMat srcstub, *src = (CvMat*)srcarr;
    CvMat dststub, *dst = (CvMat*)dstarr;
    CvSize srcsize, dstsize;
    int srcstep, dststep;
    int pix_size, type;

    if( !CV_IS_MAT(src) )
        src = cvGetMat( src, &srcstub );
    if( !CV_IS_MAT(dst) )
        dst = cvGetMat( dst, &dststub );

    if( offset.x < 0 || offset.y < 0 )
        CV_Error( CV_StsOutOfRange, "Offset (left/top border width) is negative" );

    if( src->rows + offset.y > dst->rows || src->cols + offset.x > dst->cols )
        CV_Error( CV_StsBadSize, "Source array is too big or destination array is too small" );

    if( !CV_ARE_TYPES_EQ( src, dst ))
        CV_Error( CV_StsUnmatchedFormats, "" );

    type = CV_MAT_TYPE(src->type);
    pix_size = CV_ELEM_SIZE(type);
    srcsize = cvGetMatSize(src);
    dstsize = cvGetMatSize(dst);
    srcstep = src->step;
    dststep = dst->step;
    if( srcstep == 0 )
        srcstep = CV_STUB_STEP;
    if( dststep == 0 )
        dststep = CV_STUB_STEP;

    /* low 4 bits select the border mode; the low-level helpers work on raw
       bytes, so the element size is passed explicitly */
    switch( bordertype & 15 )
    {
    case IPL_BORDER_REPLICATE:
        icvCopyReplicateBorder_8u( src->data.ptr, srcstep, srcsize,
                                   dst->data.ptr, dststep, dstsize,
                                   offset.y, offset.x, pix_size );
        break;
    case IPL_BORDER_REFLECT_101:
        icvCopyReflect101Border_8u( src->data.ptr, srcstep, srcsize,
                                    dst->data.ptr, dststep, dstsize,
                                    offset.y, offset.x, pix_size );
        break;
    case IPL_BORDER_CONSTANT:
        {
            double buf[4];
            cvScalarToRawData( &value, buf, src->type, 0 );
            icvCopyConstBorder_8u( src->data.ptr, srcstep, srcsize,
                                   dst->data.ptr, dststep, dstsize,
                                   offset.y, offset.x, pix_size, (uchar*)buf );
        }
        break;
    default:
        CV_Error( CV_StsBadFlag, "Unknown/unsupported border type" );
    }
}
/* Retrieves a rectangle (sized as dst) from src with sub-pixel accuracy,
   centered at 'center'.  Supports 1- and 3-channel images; depths must
   match, except for the 8u source / 32f destination combination. */
CV_IMPL void
cvGetRectSubPix( const void* srcarr, void* dstarr, CvPoint2D32f center )
{
    static CvFuncTable gr_tab[2];
    static int inittab = 0;
    CvMat srcstub, *src = (CvMat*)srcarr;
    CvMat dststub, *dst = (CvMat*)dstarr;
    CvSize src_size, dst_size;
    CvGetRectSubPixFunc func;
    int cn, src_step, dst_step;

    /* lazily initialize the per-depth dispatch tables (C1 and C3 variants) */
    if( !inittab )
    {
        icvInitGetRectSubPixC1RTable( gr_tab + 0 );
        icvInitGetRectSubPixC3RTable( gr_tab + 1 );
        inittab = 1;
    }

    if( !CV_IS_MAT(src))
        src = cvGetMat( src, &srcstub );

    if( !CV_IS_MAT(dst))
        dst = cvGetMat( dst, &dststub );

    cn = CV_MAT_CN( src->type );

    if( (cn != 1 && cn != 3) || !CV_ARE_CNS_EQ( src, dst ))
        CV_Error( CV_StsUnsupportedFormat, "" );

    src_size = cvGetMatSize( src );
    dst_size = cvGetMatSize( dst );
    src_step = src->step ? src->step : CV_STUB_STEP;
    dst_step = dst->step ? dst->step : CV_STUB_STEP;

    //if( dst_size.width > src_size.width || dst_size.height > src_size.height )
    //    CV_ERROR( CV_StsBadSize, "destination ROI must be smaller than source ROI" );

    if( !CV_ARE_DEPTHS_EQ( src, dst ))
    {
        /* the only supported mixed-depth combination is 8u src -> 32f dst */
        if( CV_MAT_DEPTH( src->type ) != CV_8U || CV_MAT_DEPTH( dst->type ) != CV_32F )
            CV_Error( CV_StsUnsupportedFormat, "" );

        func = (CvGetRectSubPixFunc)(gr_tab[cn != 1].fn_2d[1]);
    }
    else
        func = (CvGetRectSubPixFunc)(gr_tab[cn != 1].fn_2d[CV_MAT_DEPTH(src->type)]);

    if( !func )
        CV_Error( CV_StsUnsupportedFormat, "" );

    IPPI_CALL( func( src->data.ptr, src_step, src_size,
                     dst->data.ptr, dst_step, dst_size, center ));
}
/*F///////////////////////////////////////////////////////////////////////////////////////
//    Name:    cvCalcOpticalFlowLK
//    Purpose: Lucas-Kanade optical flow for a pair of 8uC1 images
//    Parameters:
//      srcarrA, srcarrB - first and second source images (8uC1, same size/step)
//      winSize          - averaging window size
//      velarrx, velarry - output horizontal/vertical velocity fields (32fC1)
//F*/
CV_IMPL void
cvCalcOpticalFlowLK( const void* srcarrA, const void* srcarrB, CvSize winSize,
                     void* velarrx, void* velarry )
{
    CvMat stubA, *srcA = cvGetMat( srcarrA, &stubA );
    CvMat stubB, *srcB = cvGetMat( srcarrB, &stubB );
    CvMat stubx, *velx = cvGetMat( velarrx, &stubx );
    CvMat stuby, *vely = cvGetMat( velarry, &stuby );

    /* both sources must match each other, ditto for both destinations */
    if( !CV_ARE_TYPES_EQ( srcA, srcB ))
        CV_Error( CV_StsUnmatchedFormats, "Source images have different formats" );

    if( !CV_ARE_TYPES_EQ( velx, vely ))
        CV_Error( CV_StsUnmatchedFormats, "Destination images have different formats" );

    if( !CV_ARE_SIZES_EQ( srcA, srcB ) ||
        !CV_ARE_SIZES_EQ( velx, vely ) ||
        !CV_ARE_SIZES_EQ( srcA, velx ))
        CV_Error( CV_StsUnmatchedSizes, "" );

    if( CV_MAT_TYPE( srcA->type ) != CV_8UC1 ||
        CV_MAT_TYPE( velx->type ) != CV_32FC1 )
        CV_Error( CV_StsUnsupportedFormat, "Source images must have 8uC1 type and "
                                           "destination images must have 32fC1 type" );

    if( srcA->step != srcB->step || velx->step != vely->step )
        CV_Error( CV_BadStep, "source and destination images have different step" );

    IPPI_CALL( icvCalcOpticalFlowLK_8u32fR( (uchar*)srcA->data.ptr,
                                            (uchar*)srcB->data.ptr,
                                            srcA->step, cvGetMatSize(srcA), winSize,
                                            velx->data.fl, vely->data.fl, velx->step ));
}
/* Computes the gradient magnitude of a 32fC1 image:
   dst = sqrt((Sx/4)^2 + (Sy/4)^2) + SMALLNUM, where Sx/Sy are 3x1 Sobel
   responses.  The SMALLNUM offset keeps later divisions by dst safe. */
CV_IMPL void
cvCalS( const CvArr* srcarr, CvArr* dstarr )
{
    CV_FUNCNAME("cvCalS");

    __BEGIN__;

    CvMat sstub, *src;
    CvMat dstub, *dst;
    CvMat *src_dx = 0, *src_dy = 0;
    CvSize size;
    int i, j;
    int iStep;
    float* fPtr;

    CV_CALL( src = cvGetMat(srcarr, &sstub ));
    CV_CALL( dst = cvGetMat(dstarr, &dstub ));

    if( CV_MAT_TYPE(src->type) != CV_32FC1)
        CV_ERROR( CV_StsUnsupportedFormat, "Only-32bit, 1-channel input images are supported" );

    if( CV_MAT_TYPE(dst->type) != CV_32FC1)
        CV_ERROR( CV_StsUnsupportedFormat, "Only-32bit, 1-channel input images are supported" );

    if( !CV_ARE_SIZES_EQ( src, dst ))
        CV_ERROR( CV_StsUnmatchedSizes, "The input images must have the same size" );

    size = cvGetMatSize( src );

    src_dx = cvCreateMat( size.height, size.width, CV_32FC1 );
    src_dy = cvCreateMat( size.height, size.width, CV_32FC1 );
    cvSetZero( src_dx );
    cvSetZero( src_dy );

    fPtr  = dst->data.fl;
    iStep = dst->step / sizeof(fPtr[0]);

    cvSobel( src, src_dx, 1, 0, 1 );
    cvSobel( src, src_dy, 0, 1, 1 );
    cvMul( src_dx, src_dx, src_dx, 0.25f*0.25f );   //rescale gradient
    cvMul( src_dy, src_dy, src_dy, 0.25f*0.25f );   //rescale gradient
    cvAdd( src_dx, src_dy, dst );                   // dst = dx^2 + dy^2

    /* element-wise: dst = sqrt(dst) + SMALLNUM */
    for( j = 0; j < size.height; j++ )
    {
        float* row = fPtr + iStep*j;
        for( i = 0; i < size.width; i++ )
            row[i] = sqrt(row[i]) + SMALLNUM;
    }

    cvReleaseMat( &src_dx );
    cvReleaseMat( &src_dy );

    __END__;
}
/* Computes the divergence of a 32fC1 vector field (srcarr_x, srcarr_y):
   dst = d(src_x)/dx + d(src_y)/dy, using 3x1 Sobel derivatives rescaled
   by 0.25.  Used as the curvature term of a level-set evolution. */
CV_IMPL void
cvCurvature( const CvArr* srcarr_x, const CvArr* srcarr_y, CvArr* dstarr )
{
    CV_FUNCNAME("cvCurvature");

    __BEGIN__;

    CvMat sstub_x, sstub_y, *src_x, *src_y;
    CvMat dstub, *dst;
    CvSize size;
    CvMat *Nxx = 0, *Nyy = 0, *ones = 0;

    CV_CALL( src_x = cvGetMat(srcarr_x, &sstub_x ));
    CV_CALL( src_y = cvGetMat(srcarr_y, &sstub_y ));
    CV_CALL( dst   = cvGetMat(dstarr, &dstub ));

    if( CV_MAT_TYPE(src_x->type) != CV_32FC1)
        CV_ERROR( CV_StsUnsupportedFormat, "Only-32bit, 1-channel input images are supported" );

    if( CV_MAT_TYPE(src_y->type) != CV_32FC1)
        CV_ERROR( CV_StsUnsupportedFormat, "Only-32bit, 1-channel input images are supported" );

    if( CV_MAT_TYPE(dst->type) != CV_32FC1)
        CV_ERROR( CV_StsUnsupportedFormat, "Only-32bit, 1-channel input images are supported" );

    if( !CV_ARE_SIZES_EQ( src_x, src_y ))
        CV_ERROR( CV_StsUnmatchedSizes, "The input images must have the same size" );

    size = cvGetMatSize( src_x );

    Nxx  = cvCreateMat( size.height, size.width, CV_32FC1 );
    Nyy  = cvCreateMat( size.height, size.width, CV_32FC1 );
    ones = cvCreateMat( size.height, size.width, CV_32FC1 );
    cvSetZero( Nxx );
    cvSetZero( Nyy );
    cvSet( ones, cvScalar(1.0f) );

    cvSobel( src_x, Nxx, 1, 0, 1 );
    cvSobel( src_y, Nyy, 0, 1, 1 );
    cvMul( Nxx, ones, Nxx, 0.25f );   // rescale Sobel response
    cvMul( Nyy, ones, Nyy, 0.25f );   // rescale Sobel response
    cvAdd( Nxx, Nyy, dst );

    cvReleaseMat( &Nxx );
    cvReleaseMat( &Nyy );
    cvReleaseMat( &ones );

    __END__;
}
/* Computes a smoothed Dirac delta of a 32fC1 image:
   dst = (1/(2*sigma))*(1+cos(PI*x/sigma)) where |x| <= sigma, 0 elsewhere. */
CV_IMPL void
cvDirac( const CvArr* srcarr, CvArr* dstarr, double sigma )
{
    CV_FUNCNAME("cvDirac");

    __BEGIN__;

    CvMat sstub, *src;
    CvMat dstub, *dst;
    CvSize size;
    int i, j, iStep_src, iStep_dst;
    float *fPtr_src, *fPtr_dst, flag = 0.0f;
    float temp1 = 0.0f, temp2 = 0.0f;

    CV_CALL( src = cvGetMat(srcarr, &sstub ));
    CV_CALL( dst = cvGetMat(dstarr, &dstub ));

    if( CV_MAT_TYPE(src->type) != CV_32FC1)
        CV_ERROR( CV_StsUnsupportedFormat, "Only-32bit, 1-channel input images are supported" );

    if( CV_MAT_TYPE(dst->type) != CV_32FC1)
        CV_ERROR( CV_StsUnsupportedFormat, "Only-32bit, 1-channel output images are supported" );

    if( !CV_ARE_SIZES_EQ( src, dst ))
        CV_ERROR( CV_StsUnmatchedSizes, "The input images must have the same size" );

    size = cvGetMatSize( src );

    fPtr_src = src->data.fl;
    iStep_src = src->step / sizeof(fPtr_src[0]);
    fPtr_dst = dst->data.fl;
    iStep_dst = dst->step / sizeof(fPtr_dst[0]);

    for( j = 0; j < size.height; j++ )
    {
        for( i = 0; i < size.width; i++ )
        {
            temp1 = fPtr_src[i+iStep_src*j];
            temp2 = (1.0f/2.0f/sigma)*(1.0f+cos(PI*temp1/sigma));
            /* fixed-point comparison at 4 decimals decides the support |x| <= sigma */
            flag = (int(temp1*10000) <= int(sigma*10000) &&
                    int(temp1*10000) >= int(-sigma*10000)) ? 1.0f : 0.0f;
            fPtr_dst[i+iStep_dst*j] = temp2*flag;
        }
    }

    __END__;
}
/*F///////////////////////////////////////////////////////////////////////////////////////
//    Name:    cvCalcOpticalFlowHS
//    Purpose: Horn-Schunck optical flow for a pair of 8uC1 images
//    Parameters:
//      srcarrA, srcarrB - first and second source images (8uC1, same size/step)
//      usePrevious      - if nonzero, velx/vely hold the initial approximation
//      velarrx, velarry - output horizontal/vertical velocity fields (32fC1)
//      lambda           - smoothness weight
//      criteria         - iteration termination criteria
//F*/
CV_IMPL void
cvCalcOpticalFlowHS( const void* srcarrA, const void* srcarrB, int usePrevious,
                     void* velarrx, void* velarry,
                     double lambda, CvTermCriteria criteria )
{
    CV_FUNCNAME( "cvCalcOpticalFlowHS" );

    __BEGIN__;

    CvMat stubA, *srcA = (CvMat*)srcarrA;
    CvMat stubB, *srcB = (CvMat*)srcarrB;
    CvMat stubx, *velx = (CvMat*)velarrx;
    CvMat stuby, *vely = (CvMat*)velarry;

    CV_CALL( srcA = cvGetMat( srcA, &stubA ));
    CV_CALL( srcB = cvGetMat( srcB, &stubB ));
    CV_CALL( velx = cvGetMat( velx, &stubx ));
    CV_CALL( vely = cvGetMat( vely, &stuby ));

    if( !CV_ARE_TYPES_EQ( srcA, srcB ))
        CV_ERROR( CV_StsUnmatchedFormats, "Source images have different formats" );

    if( !CV_ARE_TYPES_EQ( velx, vely ))
        CV_ERROR( CV_StsUnmatchedFormats, "Destination images have different formats" );

    if( !CV_ARE_SIZES_EQ( srcA, srcB ) ||
        !CV_ARE_SIZES_EQ( velx, vely ) ||
        !CV_ARE_SIZES_EQ( srcA, velx ))
        CV_ERROR( CV_StsUnmatchedSizes, "" );

    if( CV_MAT_TYPE( srcA->type ) != CV_8UC1 ||
        CV_MAT_TYPE( velx->type ) != CV_32FC1 )
        CV_ERROR( CV_StsUnsupportedFormat, "Source images must have 8uC1 type and "
                                           "destination images must have 32fC1 type" );

    if( srcA->step != srcB->step || velx->step != vely->step )
        CV_ERROR( CV_BadStep, "source and destination images have different step" );

    IPPI_CALL( icvCalcOpticalFlowHS_8u32fR( (uchar*)srcA->data.ptr,
                                            (uchar*)srcB->data.ptr,
                                            srcA->step, cvGetMatSize( srcA ), usePrevious,
                                            velx->data.fl, vely->data.fl, velx->step,
                                            (float)lambda, criteria ));
    __END__;
}
/* motion templates */
/* Updates a 32f motion-history image from an 8-bit silhouette mask:
   pixels set in the silhouette get the current timestamp; stale history
   values (older than timestamp - mhi_duration) are cleared. */
CV_IMPL void
cvUpdateMotionHistory( const void* silhouette, void* mhimg,
                       double timestamp, double mhi_duration )
{
    CvSize size;
    CvMat silhstub, *silh = (CvMat*)silhouette;
    CvMat mhistub, *mhi = (CvMat*)mhimg;
    int mhi_step, silh_step;

    CV_FUNCNAME( "cvUpdateMHIByTime" );

    __BEGIN__;

    CV_CALL( silh = cvGetMat( silh, &silhstub ));
    CV_CALL( mhi = cvGetMat( mhi, &mhistub ));

    if( !CV_IS_MASK_ARR( silh ))
        CV_ERROR( CV_StsBadMask, "" );

    if( CV_MAT_CN( mhi->type ) > 1 )
        CV_ERROR( CV_BadNumChannels, "" );

    if( CV_MAT_DEPTH( mhi->type ) != CV_32F )
        CV_ERROR( CV_BadDepth, "" );

    if( !CV_ARE_SIZES_EQ( mhi, silh ))
        CV_ERROR( CV_StsUnmatchedSizes, "" );

    size = cvGetMatSize( mhi );
    mhi_step = mhi->step;
    silh_step = silh->step;

    /* when both arrays are continuous, process them as one long row */
    if( CV_IS_MAT_CONT( mhi->type & silh->type ))
    {
        size.width *= size.height;
        mhi_step = silh_step = CV_STUB_STEP;
        size.height = 1;
    }

    IPPI_CALL( icvUpdateMotionHistory_8u32f_C1IR( (const uchar*)(silh->data.ptr),
                                                  silh_step, mhi->data.fl, mhi_step,
                                                  size, (float)timestamp,
                                                  (float)mhi_duration ));
    __END__;
}
/* Splits an interlaced frame into its even and odd fields:
   even rows (0,2,4,...) of 'frame' go to 'fieldEven', odd rows to 'fieldOdd'.
   All three arrays must have the same type; the fields must be half the
   frame's height and the same width. */
CV_IMPL void
cvDeInterlace( const CvArr* framearr, CvArr* fieldEven, CvArr* fieldOdd )
{
    CV_FUNCNAME("cvDeInterlace");

    __BEGIN__;

    CvMat frame_stub, *frame = (CvMat*)framearr;
    CvMat even_stub, *even = (CvMat*)fieldEven;
    CvMat odd_stub, *odd = (CvMat*)fieldOdd;
    CvSize size;
    int y;

    CV_CALL( frame = cvGetMat( frame, &frame_stub ));
    CV_CALL( even = cvGetMat( even, &even_stub ));
    CV_CALL( odd = cvGetMat( odd, &odd_stub ));

    if( !CV_ARE_TYPES_EQ( frame, even ) || !CV_ARE_TYPES_EQ( frame, odd ))
        CV_ERROR( CV_StsUnmatchedFormats, "All the input images must have the same type" );

    if( frame->cols != even->cols || frame->cols != odd->cols ||
        frame->rows != even->rows*2 || odd->rows != even->rows )
        CV_ERROR( CV_StsUnmatchedSizes, "Uncorrelated sizes of the input image and output fields" );

    size = cvGetMatSize( even );
    size.width *= CV_ELEM_SIZE( even->type );   /* row length in bytes */

    for( y = 0; y < size.height; y++ )
    {
        memcpy( even->data.ptr + even->step*y,
                frame->data.ptr + frame->step*y*2, size.width );
        /* BUGFIX: the odd field must be addressed with its own step
           (previously even->step was used, which writes to wrong rows
           whenever the two field matrices have different strides) */
        memcpy( odd->data.ptr + odd->step*y,
                frame->data.ptr + frame->step*(y*2+1), size.width );
    }

    __END__;
}
/* Marker-based watershed segmentation (Meyer's flooding algorithm).
   src:  8uC3 color image.
   dst:  32sC1 marker image; on input, positive labels mark the seeds,
         non-positive pixels are unknown.  On output every pixel carries a
         basin label or WSHED (-1) on watershed boundaries.
   Pixels are flooded in order of increasing color difference to their
   labeled neighbors, via 256 bucket queues of CvWSNode's. */
CV_IMPL void
cvWatershed( const CvArr* srcarr, CvArr* dstarr )
{
    const int IN_QUEUE = -2;    /* pixel already placed in some queue */
    const int WSHED = -1;       /* pixel on a basin boundary */
    const int NQ = 256;         /* one bucket queue per possible color diff */
    CvMemStorage* storage = 0;

    CV_FUNCNAME( "cvWatershed" );

    __BEGIN__;

    CvMat sstub, *src;
    CvMat dstub, *dst;
    CvSize size;
    CvWSNode* free_node = 0, *node;
    CvWSQueue q[NQ];
    int active_queue;
    int i, j;
    int db, dg, dr;
    int* mask;
    uchar* img;
    int mstep, istep;
    int subs_tab[513];

    // MAX(a,b) = b + MAX(a-b,0)
    #define ws_max(a,b) ((b) + subs_tab[(a)-(b)+NQ])
    // MIN(a,b) = a - MAX(a-b,0)
    #define ws_min(a,b) ((a) - subs_tab[(a)-(b)+NQ])

    /* append a (mask offset, image offset) node to bucket 'idx' */
    #define ws_push(idx,mofs,iofs)  \
    {                               \
        if( !free_node )            \
            CV_CALL( free_node = icvAllocWSNodes( storage ));\
        node = free_node;           \
        free_node = free_node->next;\
        node->next = 0;             \
        node->mask_ofs = mofs;      \
        node->img_ofs = iofs;       \
        if( q[idx].last )           \
            q[idx].last->next=node; \
        else                        \
            q[idx].first = node;    \
        q[idx].last = node;         \
    }

    /* pop the head of bucket 'idx' and recycle the node */
    #define ws_pop(idx,mofs,iofs)   \
    {                               \
        node = q[idx].first;        \
        q[idx].first = node->next;  \
        if( !node->next )           \
            q[idx].last = 0;        \
        node->next = free_node;     \
        free_node = node;           \
        mofs = node->mask_ofs;      \
        iofs = node->img_ofs;       \
    }

    /* L-infinity color distance between two BGR pixels */
    #define c_diff(ptr1,ptr2,diff)      \
    {                                   \
        db = abs((ptr1)[0] - (ptr2)[0]);\
        dg = abs((ptr1)[1] - (ptr2)[1]);\
        dr = abs((ptr1)[2] - (ptr2)[2]);\
        diff = ws_max(db,dg);           \
        diff = ws_max(diff,dr);         \
        assert( 0 <= diff && diff <= 255 ); \
    }

    CV_CALL( src = cvGetMat( srcarr, &sstub ));
    CV_CALL( dst = cvGetMat( dstarr, &dstub ));

    if( CV_MAT_TYPE(src->type) != CV_8UC3 )
        CV_ERROR( CV_StsUnsupportedFormat, "Only 8-bit, 3-channel input images are supported" );

    if( CV_MAT_TYPE(dst->type) != CV_32SC1 )
        CV_ERROR( CV_StsUnsupportedFormat, "Only 32-bit, 1-channel output images are supported" );

    if( !CV_ARE_SIZES_EQ( src, dst ))
        CV_ERROR( CV_StsUnmatchedSizes, "The input and output images must have the same size" );

    size = cvGetMatSize(src);

    CV_CALL( storage = cvCreateMemStorage() );

    istep = src->step;
    img = src->data.ptr;
    mstep = dst->step / sizeof(mask[0]);
    mask = dst->data.i;

    memset( q, 0, NQ*sizeof(q[0]) );

    for( i = 0; i < 256; i++ )
        subs_tab[i] = 0;
    for( i = 256; i <= 512; i++ )
        subs_tab[i] = i - 256;

    // draw a pixel-wide border of dummy "watershed" (i.e. boundary) pixels
    for( j = 0; j < size.width; j++ )
        mask[j] = mask[j + mstep*(size.height-1)] = WSHED;

    // initial phase: put all the neighbor pixels of each marker to the ordered queue -
    // determine the initial boundaries of the basins
    for( i = 1; i < size.height-1; i++ )
    {
        img += istep; mask += mstep;
        mask[0] = mask[size.width-1] = WSHED;

        for( j = 1; j < size.width-1; j++ )
        {
            int* m = mask + j;
            if( m[0] < 0 ) m[0] = 0;
            if( m[0] == 0 && (m[-1] > 0 || m[1] > 0 || m[-mstep] > 0 || m[mstep] > 0) )
            {
                uchar* ptr = img + j*3;
                int idx = 256, t;
                if( m[-1] > 0 )
                    c_diff( ptr, ptr - 3, idx );
                if( m[1] > 0 )
                {
                    c_diff( ptr, ptr + 3, t );
                    idx = ws_min( idx, t );
                }
                if( m[-mstep] > 0 )
                {
                    c_diff( ptr, ptr - istep, t );
                    idx = ws_min( idx, t );
                }
                if( m[mstep] > 0 )
                {
                    c_diff( ptr, ptr + istep, t );
                    idx = ws_min( idx, t );
                }
                assert( 0 <= idx && idx <= 255 );
                ws_push( idx, i*mstep + j, i*istep + j*3 );
                m[0] = IN_QUEUE;
            }
        }
    }

    // find the first non-empty queue
    for( i = 0; i < NQ; i++ )
        if( q[i].first )
            break;

    // if there is no markers, exit immediately
    if( i == NQ )
        EXIT;

    active_queue = i;
    img = src->data.ptr;
    mask = dst->data.i;

    // recursively fill the basins
    for(;;)
    {
        int mofs, iofs;
        int lab = 0, t;
        int* m;
        uchar* ptr;

        if( q[active_queue].first == 0 )
        {
            for( i = active_queue+1; i < NQ; i++ )
                if( q[i].first )
                    break;
            if( i == NQ )
                break;
            active_queue = i;
        }

        ws_pop( active_queue, mofs, iofs );

        m = mask + mofs;
        ptr = img + iofs;

        /* collect labels of the 4 neighbors; conflicting labels => WSHED */
        t = m[-1];
        if( t > 0 ) lab = t;
        t = m[1];
        if( t > 0 )
        {
            if( lab == 0 ) lab = t;
            else if( t != lab ) lab = WSHED;
        }
        t = m[-mstep];
        if( t > 0 )
        {
            if( lab == 0 ) lab = t;
            else if( t != lab ) lab = WSHED;
        }
        t = m[mstep];
        if( t > 0 )
        {
            if( lab == 0 ) lab = t;
            else if( t != lab ) lab = WSHED;
        }
        assert( lab != 0 );
        m[0] = lab;
        if( lab == WSHED )
            continue;

        /* enqueue unlabeled neighbors, keyed by their color distance */
        if( m[-1] == 0 )
        {
            c_diff( ptr, ptr - 3, t );
            ws_push( t, mofs - 1, iofs - 3 );
            active_queue = ws_min( active_queue, t );
            m[-1] = IN_QUEUE;
        }
        if( m[1] == 0 )
        {
            c_diff( ptr, ptr + 3, t );
            ws_push( t, mofs + 1, iofs + 3 );
            active_queue = ws_min( active_queue, t );
            m[1] = IN_QUEUE;
        }
        if( m[-mstep] == 0 )
        {
            c_diff( ptr, ptr - istep, t );
            ws_push( t, mofs - mstep, iofs - istep );
            active_queue = ws_min( active_queue, t );
            m[-mstep] = IN_QUEUE;
        }
        if( m[mstep] == 0 )
        {
            /* BUGFIX: the lower neighbor sits one row below (ptr + istep);
               the code previously measured the distance to the RIGHT
               neighbor (ptr + 3) while pushing the lower one, producing a
               wrong priority for every downward flood step */
            c_diff( ptr, ptr + istep, t );
            ws_push( t, mofs + mstep, iofs + istep );
            active_queue = ws_min( active_queue, t );
            m[mstep] = IN_QUEUE;
        }
    }

    __END__;

    cvReleaseMemStorage( &storage );
}
/* Performs mean-shift filtering of an 8uC3 color image using a Gaussian
   pyramid: a pyramid of 'max_level'+1 levels is built, mean-shift is run on
   the coarsest level first, and the result is propagated down, refining only
   the pixels whose color differs noticeably from the upsampled coarse result.
   sp0 - spatial window radius at level 0 (halved at each coarser level);
   sr  - color window radius; termcrit - per-pixel iteration limits. */
CV_IMPL void
cvPyrMeanShiftFiltering( const CvArr* srcarr, CvArr* dstarr,
                         double sp0, double sr, int max_level,
                         CvTermCriteria termcrit )
{
    const int cn = 3;
    const int MAX_LEVELS = 8;
    CvMat* src_pyramid[MAX_LEVELS+1];
    CvMat* dst_pyramid[MAX_LEVELS+1];
    CvMat* mask0 = 0;
    int i, j, level;
    //uchar* submask = 0;

    /* true when the color at dptr+ofs0 differs from (c0,c1,c2) by more than
       the (squared) color radius — marks a pixel that needs re-processing */
    #define cdiff(ofs0) (tab[c0-dptr[ofs0]+255] + \
        tab[c1-dptr[(ofs0)+1]+255] + tab[c2-dptr[(ofs0)+2]+255] >= isr22)

    memset( src_pyramid, 0, sizeof(src_pyramid) );
    memset( dst_pyramid, 0, sizeof(dst_pyramid) );

    CV_FUNCNAME( "cvPyrMeanShiftFiltering" );

    __BEGIN__;

    double sr2 = sr * sr;
    int isr2 = cvRound(sr2), isr22 = MAX(isr2,16);
    int tab[768];   /* squared-difference lookup: tab[d+255] = d*d */
    CvMat sstub0, *src0;
    CvMat dstub0, *dst0;

    CV_CALL( src0 = cvGetMat( srcarr, &sstub0 ));
    CV_CALL( dst0 = cvGetMat( dstarr, &dstub0 ));

    if( CV_MAT_TYPE(src0->type) != CV_8UC3 )
        CV_ERROR( CV_StsUnsupportedFormat, "Only 8-bit, 3-channel images are supported" );

    if( !CV_ARE_TYPES_EQ( src0, dst0 ))
        CV_ERROR( CV_StsUnmatchedFormats, "The input and output images must have the same type" );

    if( !CV_ARE_SIZES_EQ( src0, dst0 ))
        CV_ERROR( CV_StsUnmatchedSizes, "The input and output images must have the same size" );

    if( (unsigned)max_level > (unsigned)MAX_LEVELS )
        CV_ERROR( CV_StsOutOfRange, "The number of pyramid levels is too large or negative" );

    /* clamp/supply termination criteria defaults */
    if( !(termcrit.type & CV_TERMCRIT_ITER) )
        termcrit.max_iter = 5;
    termcrit.max_iter = MAX(termcrit.max_iter,1);
    termcrit.max_iter = MIN(termcrit.max_iter,100);
    if( !(termcrit.type & CV_TERMCRIT_EPS) )
        termcrit.epsilon = 1.f;
    termcrit.epsilon = MAX(termcrit.epsilon, 0.f);

    for( i = 0; i < 768; i++ )
        tab[i] = (i - 255)*(i - 255);

    // 1. construct pyramid
    src_pyramid[0] = src0;
    dst_pyramid[0] = dst0;
    for( level = 1; level <= max_level; level++ )
    {
        CV_CALL( src_pyramid[level] = cvCreateMat( (src_pyramid[level-1]->rows+1)/2,
                        (src_pyramid[level-1]->cols+1)/2, src_pyramid[level-1]->type ));
        CV_CALL( dst_pyramid[level] = cvCreateMat( src_pyramid[level]->rows,
                        src_pyramid[level]->cols, src_pyramid[level]->type ));
        CV_CALL( cvPyrDown( src_pyramid[level-1], src_pyramid[level] ));
        //CV_CALL( cvResize( src_pyramid[level-1], src_pyramid[level], CV_INTER_AREA ));
    }

    CV_CALL( mask0 = cvCreateMat( src0->rows, src0->cols, CV_8UC1 ));
    //CV_CALL( submask = (uchar*)cvAlloc( (sp+2)*(sp+2) ));

    // 2. apply meanshift, starting from the pyramid top (i.e. the smallest layer)
    for( level = max_level; level >= 0; level-- )
    {
        CvMat* src = src_pyramid[level];
        CvSize size = cvGetMatSize(src);
        uchar* sptr = src->data.ptr;
        int sstep = src->step;
        uchar* mask = 0;
        int mstep = 0;
        uchar* dptr;
        int dstep;
        float sp = (float)(sp0 / (1 << level));  /* spatial radius at this level */
        sp = MAX( sp, 1 );

        if( level < max_level )
        {
            /* build the processing mask: mark pixels where the upsampled
               coarse result deviates from its neighbors in color */
            CvSize size1 = cvGetMatSize(dst_pyramid[level+1]);
            CvMat m = cvMat( size.height, size.width, CV_8UC1, mask0->data.ptr );
            dstep = dst_pyramid[level+1]->step;
            dptr = dst_pyramid[level+1]->data.ptr + dstep + cn;
            mstep = m.step;
            mask = m.data.ptr + mstep;
            //cvResize( dst_pyramid[level+1], dst_pyramid[level], CV_INTER_CUBIC );
            cvPyrUp( dst_pyramid[level+1], dst_pyramid[level] );
            cvZero( &m );

            for( i = 1; i < size1.height-1; i++, dptr += dstep - (size1.width-2)*3, mask += mstep*2 )
            {
                for( j = 1; j < size1.width-1; j++, dptr += cn )
                {
                    int c0 = dptr[0], c1 = dptr[1], c2 = dptr[2];
                    mask[j*2 - 1] = cdiff(-3) || cdiff(3) || cdiff(-dstep-3) ||
                        cdiff(-dstep) || cdiff(-dstep+3) || cdiff(dstep-3) ||
                        cdiff(dstep) || cdiff(dstep+3);
                }
            }

            cvDilate( &m, &m, 0, 1 );
            mask = m.data.ptr;
        }

        dptr = dst_pyramid[level]->data.ptr;
        dstep = dst_pyramid[level]->step;

        for( i = 0; i < size.height; i++, sptr += sstep - size.width*3,
                                          dptr += dstep - size.width*3,
                                          mask += mstep )
        {
            for( j = 0; j < size.width; j++, sptr += 3, dptr += 3 )
            {
                int x0 = j, y0 = i, x1, y1, iter;
                int c0, c1, c2;

                if( mask && !mask[j] )
                    continue;

                c0 = sptr[0], c1 = sptr[1], c2 = sptr[2];

                // iterate meanshift procedure
                for( iter = 0; iter < termcrit.max_iter; iter++ )
                {
                    uchar* ptr;
                    int x, y, count = 0;
                    int minx, miny, maxx, maxy;
                    int s0 = 0, s1 = 0, s2 = 0, sx = 0, sy = 0;
                    double icount;
                    int stop_flag;

                    //mean shift: process pixels in window (p-sigmaSp)x(p+sigmaSp)
                    minx = cvRound(x0 - sp); minx = MAX(minx, 0);
                    miny = cvRound(y0 - sp); miny = MAX(miny, 0);
                    maxx = cvRound(x0 + sp); maxx = MIN(maxx, size.width-1);
                    maxy = cvRound(y0 + sp); maxy = MIN(maxy, size.height-1);
                    ptr = sptr + (miny - i)*sstep + (minx - j)*3;

                    for( y = miny; y <= maxy; y++, ptr += sstep - (maxx-minx+1)*3 )
                    {
                        int row_count = 0;
                        x = minx;
                        /* 4-pixel unrolled inner loop over the window row */
                        for( ; x + 3 <= maxx; x += 4, ptr += 12 )
                        {
                            int t0 = ptr[0], t1 = ptr[1], t2 = ptr[2];
                            if( tab[t0-c0+255] + tab[t1-c1+255] + tab[t2-c2+255] <= isr2 )
                            {
                                s0 += t0; s1 += t1; s2 += t2;
                                sx += x; row_count++;
                            }
                            t0 = ptr[3], t1 = ptr[4], t2 = ptr[5];
                            if( tab[t0-c0+255] + tab[t1-c1+255] + tab[t2-c2+255] <= isr2 )
                            {
                                s0 += t0; s1 += t1; s2 += t2;
                                sx += x+1; row_count++;
                            }
                            t0 = ptr[6], t1 = ptr[7], t2 = ptr[8];
                            if( tab[t0-c0+255] + tab[t1-c1+255] + tab[t2-c2+255] <= isr2 )
                            {
                                s0 += t0; s1 += t1; s2 += t2;
                                sx += x+2; row_count++;
                            }
                            t0 = ptr[9], t1 = ptr[10], t2 = ptr[11];
                            if( tab[t0-c0+255] + tab[t1-c1+255] + tab[t2-c2+255] <= isr2 )
                            {
                                s0 += t0; s1 += t1; s2 += t2;
                                sx += x+3; row_count++;
                            }
                        }
                        for( ; x <= maxx; x++, ptr += 3 )
                        {
                            int t0 = ptr[0], t1 = ptr[1], t2 = ptr[2];
                            if( tab[t0-c0+255] + tab[t1-c1+255] + tab[t2-c2+255] <= isr2 )
                            {
                                s0 += t0; s1 += t1; s2 += t2;
                                sx += x; row_count++;
                            }
                        }
                        count += row_count;
                        sy += y*row_count;
                    }

                    if( count == 0 )
                        break;

                    /* new window center = centroid of accepted pixels */
                    icount = 1./count;
                    x1 = cvRound(sx*icount);
                    y1 = cvRound(sy*icount);
                    s0 = cvRound(s0*icount);
                    s1 = cvRound(s1*icount);
                    s2 = cvRound(s2*icount);

                    stop_flag = (x0 == x1 && y0 == y1) || abs(x1-x0) + abs(y1-y0) +
                        tab[s0 - c0 + 255] + tab[s1 - c1 + 255] +
                        tab[s2 - c2 + 255] <= termcrit.epsilon;

                    x0 = x1; y0 = y1;
                    c0 = s0; c1 = s1; c2 = s2;

                    if( stop_flag )
                        break;
                }

                dptr[0] = (uchar)c0;
                dptr[1] = (uchar)c1;
                dptr[2] = (uchar)c2;
            }
        }
    }

    __END__;

    for( i = 1; i <= MAX_LEVELS; i++ )
    {
        cvReleaseMat( &src_pyramid[i] );
        cvReleaseMat( &dst_pyramid[i] );
    }

    cvReleaseMat( &mask0 );
}
/* Calculates the corner-likelihood measure
       Dx^2*Dyy + Dy^2*Dxx - 2*Dx*Dy*Dxy
   for every pixel of an 8uC1 or 32fC1 source, writing a 32fC1 result.
   The image is processed in horizontal stripes; derivatives are computed
   either with IPP fixed-kernel Sobel filters (when available and
   aperture_size <= 5) or with generic CvSepFilter's. */
CV_IMPL void
cvPreCornerDetect( const void* srcarr, void* dstarr, int aperture_size )
{
    CvSepFilter dx_filter, dy_filter, d2x_filter, d2y_filter, dxy_filter;
    CvMat *Dx = 0, *Dy = 0, *D2x = 0, *D2y = 0, *Dxy = 0;
    CvMat *tempsrc = 0;
    int buf_size = 1 << 12;   /* work-buffer budget that bounds stripe height */

    CV_FUNCNAME( "cvPreCornerDetect" );

    __BEGIN__;

    int i, j, y, dst_y = 0, max_dy, delta = 0;
    int temp_step = 0, d_step;
    uchar* shifted_ptr = 0;
    int depth, d_depth;
    int stage = CV_START;
    CvSobelFixedIPPFunc ipp_sobel_vert = 0, ipp_sobel_horiz = 0,
        ipp_sobel_vert_second = 0, ipp_sobel_horiz_second = 0, ipp_sobel_cross = 0;
    CvSize el_size, size, stripe_size;
    int aligned_width;
    CvPoint el_anchor;
    double factor;
    CvMat stub, *src = (CvMat*)srcarr;
    CvMat dststub, *dst = (CvMat*)dstarr;
    bool use_ipp = false;

    CV_CALL( src = cvGetMat( srcarr, &stub ));
    CV_CALL( dst = cvGetMat( dst, &dststub ));

    if( CV_MAT_TYPE(src->type) != CV_8UC1 && CV_MAT_TYPE(src->type) != CV_32FC1 ||
        CV_MAT_TYPE(dst->type) != CV_32FC1 )
        CV_ERROR( CV_StsUnsupportedFormat, "Input must be 8uC1 or 32fC1, output must be 32fC1" );

    if( !CV_ARE_SIZES_EQ( src, dst ))
        CV_ERROR( CV_StsUnmatchedSizes, "" );

    if( aperture_size == CV_SCHARR )
        CV_ERROR( CV_StsOutOfRange, "CV_SCHARR is not supported by this function" );

    if( aperture_size < 3 || aperture_size > 7 || !(aperture_size & 1) )
        CV_ERROR( CV_StsOutOfRange, "Derivative filter aperture size must be 3, 5 or 7" );

    depth = CV_MAT_DEPTH(src->type);
    /* 8u input produces 16s derivatives; 32f input produces 32f derivatives */
    d_depth = depth == CV_8U ? CV_16S : CV_32F;

    size = cvGetMatSize(src);
    aligned_width = cvAlign(size.width, 4);

    el_size = cvSize( aperture_size, aperture_size );
    el_anchor = cvPoint( aperture_size/2, aperture_size/2 );

    /* pick the IPP fixed-kernel derivative functions when they are loaded */
    if( aperture_size <= 5 && icvFilterSobelVert_8u16s_C1R_p )
    {
        if( depth == CV_8U )
        {
            ipp_sobel_vert = icvFilterSobelVert_8u16s_C1R_p;
            ipp_sobel_horiz = icvFilterSobelHoriz_8u16s_C1R_p;
            ipp_sobel_vert_second = icvFilterSobelVertSecond_8u16s_C1R_p;
            ipp_sobel_horiz_second = icvFilterSobelHorizSecond_8u16s_C1R_p;
            ipp_sobel_cross = icvFilterSobelCross_8u16s_C1R_p;
        }
        else if( depth == CV_32F )
        {
            ipp_sobel_vert = icvFilterSobelVert_32f_C1R_p;
            ipp_sobel_horiz = icvFilterSobelHoriz_32f_C1R_p;
            ipp_sobel_vert_second = icvFilterSobelVertSecond_32f_C1R_p;
            ipp_sobel_horiz_second = icvFilterSobelHorizSecond_32f_C1R_p;
            ipp_sobel_cross = icvFilterSobelCross_32f_C1R_p;
        }
    }

    if( ipp_sobel_vert && ipp_sobel_horiz && ipp_sobel_vert_second &&
        ipp_sobel_horiz_second && ipp_sobel_cross )
    {
        CV_CALL( tempsrc = icvIPPFilterInit( src, buf_size, el_size ));
        shifted_ptr = tempsrc->data.ptr + el_anchor.y*tempsrc->step +
                      el_anchor.x*CV_ELEM_SIZE(depth);
        temp_step = tempsrc->step ? tempsrc->step : CV_STUB_STEP;
        max_dy = tempsrc->rows - aperture_size + 1;
        use_ipp = true;
    }
    else
    {
        /* generic separable-filter fallback */
        ipp_sobel_vert = ipp_sobel_horiz = 0;
        ipp_sobel_vert_second = ipp_sobel_horiz_second = ipp_sobel_cross = 0;

        dx_filter.init_deriv( size.width, depth, d_depth, 1, 0, aperture_size );
        dy_filter.init_deriv( size.width, depth, d_depth, 0, 1, aperture_size );
        d2x_filter.init_deriv( size.width, depth, d_depth, 2, 0, aperture_size );
        d2y_filter.init_deriv( size.width, depth, d_depth, 0, 2, aperture_size );
        dxy_filter.init_deriv( size.width, depth, d_depth, 1, 1, aperture_size );

        max_dy = buf_size / src->cols;
        max_dy = MAX( max_dy, aperture_size );
    }

    CV_CALL( Dx = cvCreateMat( max_dy, aligned_width, d_depth ));
    CV_CALL( Dy = cvCreateMat( max_dy, aligned_width, d_depth ));
    CV_CALL( D2x = cvCreateMat( max_dy, aligned_width, d_depth ));
    CV_CALL( D2y = cvCreateMat( max_dy, aligned_width, d_depth ));
    CV_CALL( Dxy = cvCreateMat( max_dy, aligned_width, d_depth ));
    Dx->cols = Dy->cols = D2x->cols = D2y->cols = Dxy->cols = size.width;

    if( !use_ipp )
        max_dy -= aperture_size - 1;
    d_step = Dx->step ? Dx->step : CV_STUB_STEP;

    stripe_size = size;

    /* normalization so that the measure is scale-invariant w.r.t. the kernel;
       for 8u input the 0..255 range is also divided out */
    factor = 1 << (aperture_size - 1);
    if( depth == CV_8U )
        factor *= 255;
    factor = 1./(factor * factor * factor);

    /* re-encode the aperture as the IPP mask-size value (3 -> 33, 5 -> 55);
       only used by the ipp_sobel_* calls below */
    aperture_size = aperture_size * 10 + aperture_size;

    /* process the image stripe by stripe */
    for( y = 0; y < size.height; y += delta )
    {
        if( !use_ipp )
        {
            delta = MIN( size.height - y, max_dy );
            CvRect roi = cvRect(0,y,size.width,delta);
            CvPoint origin=cvPoint(0,0);

            if( y + delta == size.height )
                stage = stage & CV_START ? CV_START + CV_END : CV_END;

            dx_filter.process(src,Dx,roi,origin,stage);
            dy_filter.process(src,Dy,roi,origin,stage);
            d2x_filter.process(src,D2x,roi,origin,stage);
            d2y_filter.process(src,D2y,roi,origin,stage);
            stripe_size.height = dxy_filter.process(src,Dxy,roi,origin,stage);
        }
        else
        {
            delta = icvIPPFilterNextStripe( src, tempsrc, y, el_size, el_anchor );
            stripe_size.height = delta;

            IPPI_CALL( ipp_sobel_vert( shifted_ptr, temp_step,
                    Dx->data.ptr, d_step, stripe_size, aperture_size ));
            IPPI_CALL( ipp_sobel_horiz( shifted_ptr, temp_step,
                    Dy->data.ptr, d_step, stripe_size, aperture_size ));
            IPPI_CALL( ipp_sobel_vert_second( shifted_ptr, temp_step,
                    D2x->data.ptr, d_step, stripe_size, aperture_size ));
            IPPI_CALL( ipp_sobel_horiz_second( shifted_ptr, temp_step,
                    D2y->data.ptr, d_step, stripe_size, aperture_size ));
            IPPI_CALL( ipp_sobel_cross( shifted_ptr, temp_step,
                    Dxy->data.ptr, d_step, stripe_size, aperture_size ));
        }

        /* combine the five derivative planes into the corner measure */
        for( i = 0; i < stripe_size.height; i++, dst_y++ )
        {
            float* dstdata = (float*)(dst->data.ptr + dst_y*dst->step);

            if( d_depth == CV_16S )
            {
                const short* dxdata = (const short*)(Dx->data.ptr + i*Dx->step);
                const short* dydata = (const short*)(Dy->data.ptr + i*Dy->step);
                const short* d2xdata = (const short*)(D2x->data.ptr + i*D2x->step);
                const short* d2ydata = (const short*)(D2y->data.ptr + i*D2y->step);
                const short* dxydata = (const short*)(Dxy->data.ptr + i*Dxy->step);

                for( j = 0; j < stripe_size.width; j++ )
                {
                    double dx = dxdata[j];
                    double dx2 = dx * dx;
                    double dy = dydata[j];
                    double dy2 = dy * dy;

                    dstdata[j] = (float)(factor*(dx2*d2ydata[j] + dy2*d2xdata[j] -
                                                 2*dx*dy*dxydata[j]));
                }
            }
            else
            {
                const float* dxdata = (const float*)(Dx->data.ptr + i*Dx->step);
                const float* dydata = (const float*)(Dy->data.ptr + i*Dy->step);
                const float* d2xdata = (const float*)(D2x->data.ptr + i*D2x->step);
                const float* d2ydata = (const float*)(D2y->data.ptr + i*D2y->step);
                const float* dxydata = (const float*)(Dxy->data.ptr + i*Dxy->step);

                for( j = 0; j < stripe_size.width; j++ )
                {
                    double dx = dxdata[j];
                    double dy = dydata[j];
                    dstdata[j] = (float)(factor*(dx*dx*d2ydata[j] + dy*dy*d2xdata[j] -
                                                 2*dx*dy*dxydata[j]));
                }
            }
        }

        stage = CV_MIDDLE;
    }

    __END__;

    cvReleaseMat( &Dx );
    cvReleaseMat( &Dy );
    cvReleaseMat( &D2x );
    cvReleaseMat( &D2y );
    cvReleaseMat( &Dxy );
    cvReleaseMat( &tempsrc );
}
/* Computes, for every pixel of "src", the 2x2 gradient covariation matrix
   averaged over a block_size x block_size neighborhood, then derives from it
   either the minimal eigenvalue (ICV_MINEIGENVAL), the Harris response
   (ICV_HARRIS, using coefficient "k"), or full eigenvalues+eigenvectors
   (ICV_EIGENVALSVECS), written into "eigenv".
   The image is processed in horizontal stripes of at most "max_dy" rows to
   bound temporary-buffer memory (buf_size bytes). Derivatives come either
   from IPP fixed-kernel Sobel/Scharr filters (when available and the
   aperture is small enough) or from the generic CvSepFilter machinery. */
static void
icvCornerEigenValsVecs( const CvMat* src, CvMat* eigenv, int block_size,
                        int aperture_size, int op_type, double k=0. )
{
    CvSepFilter dx_filter, dy_filter;
    CvBoxFilter blur_filter;
    CvMat *tempsrc = 0;
    CvMat *Dx = 0, *Dy = 0, *cov = 0;
    CvMat *sqrt_buf = 0;
    int buf_size = 1 << 12;  /* 4KB budget for the per-stripe source buffer */

    CV_FUNCNAME( "icvCornerEigenValsVecs" );

    __BEGIN__;

    int i, j, y, dst_y = 0, max_dy, delta = 0;
    int aperture_size0 = aperture_size;  /* keep original value (may be CV_SCHARR) */
    int temp_step = 0, d_step;
    uchar* shifted_ptr = 0;
    int depth, d_depth;
    int stage = CV_START;  /* filter pipeline stage: START/MIDDLE/END */
    CvSobelFixedIPPFunc ipp_sobel_vert = 0, ipp_sobel_horiz = 0;
    CvFilterFixedIPPFunc ipp_scharr_vert = 0, ipp_scharr_horiz = 0;
    CvSize el_size, size, stripe_size;
    int aligned_width;
    CvPoint el_anchor;
    double factorx, factory;
    bool use_ipp = false;

    if( block_size < 3 || !(block_size & 1) )
        CV_ERROR( CV_StsOutOfRange, "averaging window size must be an odd number >= 3" );

    /* NOTE(review): && binds tighter than ||, so CV_SCHARR (negative) passes:
       (size<3 && size!=CV_SCHARR) is false for CV_SCHARR and the parity test
       !(aperture_size & 1) is also false for it — appears intentional. */
    if( aperture_size < 3 && aperture_size != CV_SCHARR || !(aperture_size & 1) )
        CV_ERROR( CV_StsOutOfRange, "Derivative filter aperture size must be a positive odd number >=3 or CV_SCHARR" );

    depth = CV_MAT_DEPTH(src->type);
    /* 8u sources produce 16s derivatives; everything else uses 32f */
    d_depth = depth == CV_8U ? CV_16S : CV_32F;

    size = cvGetMatSize(src);
    aligned_width = cvAlign(size.width, 4);

    /* CV_SCHARR uses a fixed 3x3 kernel */
    aperture_size = aperture_size == CV_SCHARR ? 3 : aperture_size;
    el_size = cvSize( aperture_size, aperture_size );
    el_anchor = cvPoint( aperture_size/2, aperture_size/2 );

    /* pick IPP fixed-kernel derivative filters when available for this
       depth/aperture combination */
    if( aperture_size <= 5 && icvFilterSobelVert_8u16s_C1R_p )
    {
        if( depth == CV_8U && aperture_size0 == CV_SCHARR )
        {
            ipp_scharr_vert = icvFilterScharrVert_8u16s_C1R_p;
            ipp_scharr_horiz = icvFilterScharrHoriz_8u16s_C1R_p;
        }
        else if( depth == CV_32F && aperture_size0 == CV_SCHARR )
        {
            ipp_scharr_vert = icvFilterScharrVert_32f_C1R_p;
            ipp_scharr_horiz = icvFilterScharrHoriz_32f_C1R_p;
        }
        else if( depth == CV_8U )
        {
            ipp_sobel_vert = icvFilterSobelVert_8u16s_C1R_p;
            ipp_sobel_horiz = icvFilterSobelHoriz_8u16s_C1R_p;
        }
        else if( depth == CV_32F )
        {
            ipp_sobel_vert = icvFilterSobelVert_32f_C1R_p;
            ipp_sobel_horiz = icvFilterSobelHoriz_32f_C1R_p;
        }
    }

    if( ipp_sobel_vert && ipp_sobel_horiz || ipp_scharr_vert && ipp_scharr_horiz )
    {
        /* IPP path: stage the source through a bordered temporary buffer */
        CV_CALL( tempsrc = icvIPPFilterInit( src, buf_size,
                            cvSize(el_size.width,el_size.height + block_size)));
        /* skip the border so the filter anchor lands on real pixels */
        shifted_ptr = tempsrc->data.ptr + el_anchor.y*tempsrc->step +
                      el_anchor.x*CV_ELEM_SIZE(depth);
        temp_step = tempsrc->step ? tempsrc->step : CV_STUB_STEP;
        max_dy = tempsrc->rows - aperture_size + 1;
        use_ipp = true;
    }
    else
    {
        /* generic path: separable derivative filters */
        ipp_sobel_vert = ipp_sobel_horiz = 0;
        ipp_scharr_vert = ipp_scharr_horiz = 0;

        CV_CALL( dx_filter.init_deriv( size.width, depth, d_depth, 1, 0, aperture_size0 ));
        CV_CALL( dy_filter.init_deriv( size.width, depth, d_depth, 0, 1, aperture_size0 ));
        max_dy = buf_size / src->cols;
        max_dy = MAX( max_dy, aperture_size + block_size );
    }

    /* per-stripe derivative buffers and 3-channel covariation buffer
       (cov holds (dx*dx, dx*dy, dy*dy) per pixel) */
    CV_CALL( Dx = cvCreateMat( max_dy, aligned_width, d_depth ));
    CV_CALL( Dy = cvCreateMat( max_dy, aligned_width, d_depth ));
    CV_CALL( cov = cvCreateMat( max_dy + block_size + 1, size.width, CV_32FC3 ));
    CV_CALL( sqrt_buf = cvCreateMat( 2, size.width, CV_32F ));
    Dx->cols = Dy->cols = size.width;  /* logical width; rows stay padded */

    if( !use_ipp )
        max_dy -= aperture_size - 1;  /* account for filter warm-up rows */
    d_step = Dx->step ? Dx->step : CV_STUB_STEP;

    CV_CALL(blur_filter.init(size.width, CV_32FC3, CV_32FC3, 0, cvSize(block_size,block_size)));

    stripe_size = size;

    /* normalization: undo the kernel gain (2^(aperture-1) * block_size,
       doubled for Scharr) and the 8u value range */
    factorx = (double)(1 << (aperture_size - 1)) * block_size;
    if( aperture_size0 == CV_SCHARR )
        factorx *= 2;
    if( depth == CV_8U )
        factorx *= 255.;
    factory = factorx = 1./factorx;
    if( ipp_sobel_vert )
        factory = -factory;  /* IPP's horizontal Sobel has opposite sign */

    /* process the image stripe by stripe */
    for( y = 0; y < size.height; y += delta )
    {
        if( !use_ipp )
        {
            delta = MIN( size.height - y, max_dy );
            if( y + delta == size.height )
                stage = stage & CV_START ? CV_START + CV_END : CV_END;
            dx_filter.process( src, Dx, cvRect(0,y,-1,delta), cvPoint(0,0), stage );
            stripe_size.height = dy_filter.process( src, Dy, cvRect(0,y,-1,delta),
                                                    cvPoint(0,0), stage );
        }
        else
        {
            delta = icvIPPFilterNextStripe( src, tempsrc, y, el_size, el_anchor );
            stripe_size.height = delta;

            if( ipp_sobel_vert )
            {
                /* aperture_size*10 + aperture_size encodes the IPP mask id */
                IPPI_CALL( ipp_sobel_vert( shifted_ptr, temp_step,
                        Dx->data.ptr, d_step, stripe_size,
                        aperture_size*10 + aperture_size ));
                IPPI_CALL( ipp_sobel_horiz( shifted_ptr, temp_step,
                        Dy->data.ptr, d_step, stripe_size,
                        aperture_size*10 + aperture_size ));
            }
            else /*if( ipp_scharr_vert )*/
            {
                IPPI_CALL( ipp_scharr_vert( shifted_ptr, temp_step,
                        Dx->data.ptr, d_step, stripe_size ));
                IPPI_CALL( ipp_scharr_horiz( shifted_ptr, temp_step,
                        Dy->data.ptr, d_step, stripe_size ));
            }
        }

        /* fill cov with per-pixel products of the normalized derivatives */
        for( i = 0; i < stripe_size.height; i++ )
        {
            float* cov_data = (float*)(cov->data.ptr + i*cov->step);
            if( d_depth == CV_16S )
            {
                const short* dxdata = (const short*)(Dx->data.ptr + i*Dx->step);
                const short* dydata = (const short*)(Dy->data.ptr + i*Dy->step);
                for( j = 0; j < size.width; j++ )
                {
                    double dx = dxdata[j]*factorx;
                    double dy = dydata[j]*factory;
                    cov_data[j*3] = (float)(dx*dx);
                    cov_data[j*3+1] = (float)(dx*dy);
                    cov_data[j*3+2] = (float)(dy*dy);
                }
            }
            else
            {
                const float* dxdata = (const float*)(Dx->data.ptr + i*Dx->step);
                const float* dydata = (const float*)(Dy->data.ptr + i*Dy->step);
                for( j = 0; j < size.width; j++ )
                {
                    double dx = dxdata[j]*factorx;
                    double dy = dydata[j]*factory;
                    cov_data[j*3] = (float)(dx*dx);
                    cov_data[j*3+1] = (float)(dx*dy);
                    cov_data[j*3+2] = (float)(dy*dy);
                }
            }
        }

        if( y + stripe_size.height >= size.height )
            stage = stage & CV_START ? CV_START + CV_END : CV_END;
        /* box-average the covariation over the block, in place */
        stripe_size.height = blur_filter.process(cov,cov,
            cvRect(0,0,-1,stripe_size.height),cvPoint(0,0),stage+CV_ISOLATED_ROI);

        /* derive the requested response from the averaged covariation */
        if( op_type == ICV_MINEIGENVAL )
            icvCalcMinEigenVal( cov->data.fl, cov->step,
                (float*)(eigenv->data.ptr + dst_y*eigenv->step), eigenv->step,
                stripe_size, sqrt_buf );
        else if( op_type == ICV_HARRIS )
            icvCalcHarris( cov->data.fl, cov->step,
                (float*)(eigenv->data.ptr + dst_y*eigenv->step), eigenv->step,
                stripe_size, sqrt_buf, k );
        else if( op_type == ICV_EIGENVALSVECS )
            icvCalcEigenValsVecs( cov->data.fl, cov->step,
                (float*)(eigenv->data.ptr + dst_y*eigenv->step), eigenv->step,
                stripe_size, sqrt_buf );

        dst_y += stripe_size.height;
        stage = CV_MIDDLE;
    }

    __END__;

    /* cleanup runs on both success and error paths (exit label is above) */
    cvReleaseMat( &Dx );
    cvReleaseMat( &Dy );
    cvReleaseMat( &cov );
    cvReleaseMat( &sqrt_buf );
    cvReleaseMat( &tempsrc );
}
/* Counts non-zero elements of a single-channel array (or of the selected
   COI of a multi-channel array). Supports CvMat, IplImage and CvMatND
   inputs; the actual counting is dispatched through per-depth function
   tables initialized on first call. Returns the count (0 on error, since
   the error macros jump past the assignment). */
CV_IMPL int
cvCountNonZero( const CvArr* arr )
{
    static CvFuncTable nz_tab;      /* per-depth C1 counters */
    static CvFuncTable nzcoi_tab;   /* per-depth COI counters */
    static int inittab = 0;
    int count = 0;

    CV_FUNCNAME("cvCountNonZero");

    __BEGIN__;

    int type, coi = 0;
    int mat_step;
    CvSize size;
    CvMat stub, *mat = (CvMat*)arr;

    if( !inittab )
    {
        /* one-time initialization of the dispatch tables */
        icvInitCountNonZeroC1RTable( &nz_tab );
        icvInitCountNonZeroCnCRTable( &nzcoi_tab );
        inittab = 1;
    }

    if( !CV_IS_MAT(mat) )
    {
        if( CV_IS_MATND(mat) )
        {
            /* N-dimensional array: iterate over 2D slices and accumulate */
            void* matnd = (void*)arr;
            CvMatND nstub;
            CvNArrayIterator iterator;
            CvFunc2D_1A1P func;

            CV_CALL( cvInitNArrayIterator( 1, &matnd, 0, &nstub, &iterator ));

            type = CV_MAT_TYPE(iterator.hdr[0]->type);

            if( CV_MAT_CN(type) != 1 )
                CV_ERROR( CV_BadNumChannels,
                "Only single-channel array are supported here" );

            func = (CvFunc2D_1A1P)(nz_tab.fn_2d[CV_MAT_DEPTH(type)]);
            if( !func )
                CV_ERROR( CV_StsUnsupportedFormat, "" );

            do
            {
                int temp;
                IPPI_CALL( func( iterator.ptr[0], CV_STUB_STEP,
                                 iterator.size, &temp ));
                count += temp;
            }
            while( cvNextNArraySlice( &iterator ));
            EXIT;  /* jump to the exit label; returns accumulated count */
        }
        else
            CV_CALL( mat = cvGetMat( mat, &stub, &coi ));
    }

    type = CV_MAT_TYPE(mat->type);
    size = cvGetMatSize( mat );

    mat_step = mat->step;

    if( CV_IS_MAT_CONT( mat->type ))
    {
        /* continuous data: treat as a single long row */
        size.width *= size.height;
        size.height = 1;
        mat_step = CV_STUB_STEP;
    }

    if( CV_MAT_CN(type) == 1 || coi == 0 )
    {
        CvFunc2D_1A1P func = (CvFunc2D_1A1P)(nz_tab.fn_2d[CV_MAT_DEPTH(type)]);

        if( CV_MAT_CN(type) != 1 )
            CV_ERROR( CV_BadNumChannels,
            "The function can handle only a single channel at a time (use COI)");

        if( !func )
            CV_ERROR( CV_StsBadArg, cvUnsupportedFormat );

        IPPI_CALL( func( mat->data.ptr, mat_step, size, &count ));
    }
    else
    {
        /* multi-channel input with a COI selected */
        CvFunc2DnC_1A1P func = (CvFunc2DnC_1A1P)(nzcoi_tab.fn_2d[CV_MAT_DEPTH(type)]);

        if( !func )
            CV_ERROR( CV_StsBadArg, cvUnsupportedFormat );

        IPPI_CALL( func( mat->data.ptr, mat_step, size, CV_MAT_CN(type), coi, &count ));
    }

    __END__;

    return count;
}
/* Distance-regularization term for level-set evolution: given a 32FC1
   level-set function "srcarr", computes the regularization update into
   "dstarr" (same size, 32FC1). Uses the double-well potential: for the
   gradient magnitude s, d_p(s) = p'(s)/s with p'(s) = sin(2*pi*s)/(2*pi)
   for s in [0,1] and (s-1) for s > 1, then takes
   div(d_p(s)*grad(phi) - grad(phi)) + 0.2*laplace(phi).
   FIXES vs. previous revision:
   - internal error no longer printf()s and exit(0)s the whole process;
     it raises CV_ERROR like the rest of the library;
   - temporaries are released AFTER __END__, so error paths (which jump
     to the exit label) no longer leak the eight temporary matrices;
   - the zero-denominator guard is checked BEFORE the division. */
CV_IMPL void
cvDistReg( const CvArr* srcarr, CvArr* dstarr )
{
    /* declared outside __BEGIN__/__END__ so the cleanup below the exit
       label can see them; all start NULL so cvReleaseMat is always safe */
    CvMat* src_dx = 0, *src_dy = 0, *s = 0, *ps = 0;
    CvMat* dps_x = 0, *dps_y = 0, *del = 0, *ones = 0;

    CV_FUNCNAME("cvDistReg");

    __BEGIN__;

    CvMat sstub, *src;
    CvMat dstub, *dst;
    CvSize size;
    int i, j, iStep_s, iStep_ps;
    float* fPtr_s, *fPtr_ps;
    float temp_s = 0.0f, temp_ps = 0.0f;
    float flag_s1 = 0.0f, flag_s2 = 0.0f, flag_ps1 = 0.0f, flag_ps2 = 0.0f;

    CV_CALL( src = cvGetMat(srcarr, &sstub ));
    CV_CALL( dst = cvGetMat(dstarr, &dstub ));

    if( CV_MAT_TYPE(src->type) != CV_32FC1 )
        CV_ERROR( CV_StsUnsupportedFormat,
        "Only-32bit, 1-channel input images are supported" );

    if( CV_MAT_TYPE(dst->type) != CV_32FC1 )
        CV_ERROR( CV_StsUnsupportedFormat,
        "Only-32bit, 1-channel input images are supported" );

    if( !CV_ARE_SIZES_EQ( src, dst ))
        CV_ERROR( CV_StsUnmatchedSizes, "The input images must have the same size" );

    size = cvGetMatSize( src );

    src_dx = cvCreateMat(size.height, size.width, CV_32FC1 );
    src_dy = cvCreateMat(size.height, size.width, CV_32FC1 );
    s      = cvCreateMat(size.height, size.width, CV_32FC1 );
    ps     = cvCreateMat(size.height, size.width, CV_32FC1 );
    dps_x  = cvCreateMat(size.height, size.width, CV_32FC1 );
    dps_y  = cvCreateMat(size.height, size.width, CV_32FC1 );
    del    = cvCreateMat(size.height, size.width, CV_32FC1 );
    ones   = cvCreateMat(size.height, size.width, CV_32FC1 );
    cvSetZero(src_dx);
    cvSetZero(src_dy);
    cvSetZero(s);
    cvSetZero(ps);
    cvSetZero(dps_x);
    cvSetZero(dps_y);
    cvSetZero(del);
    cvSet(ones, cvScalar(1.0f));

    iStep_s = s->step / sizeof(fPtr_s[0]);    /* row stride in floats */
    fPtr_s = s->data.fl;
    iStep_ps = ps->step / sizeof(fPtr_ps[0]);
    fPtr_ps = ps->data.fl;

    /* central-difference gradient of the level-set function
       (Sobel with aperture 1, scaled by 0.25 via element-wise multiply) */
    cvSobel(src, src_dx, 1, 0, 1);
    cvSobel(src, src_dy, 0, 1, 1);
    cvMul(src_dx, ones, src_dx, 0.25f);
    cvMul(src_dy, ones, src_dy, 0.25f);
    cvCalS(src, s);  /* gradient magnitude |grad(phi)| */

    /* evaluate d_p(s) = p'(s)/s per pixel; the flag_* variables implement
       branchless selection of the two potential-well regimes */
    for( j = 0; j < size.height; j++ )
    {
        for( i = 0; i < size.width; i++ )
        {
            temp_s = fPtr_s[i+iStep_s*j];
            /* regime selectors: s in [0,1] vs. s > 1 (scaled to avoid
               float equality issues) */
            flag_s1 = (int(temp_s*10000) >= 0 && int(temp_s*10000) <= 10000) ? 1.0f : 0.0f;
            flag_s2 = (int(temp_s*10000) > 10000) ? 1.0f : 0.0f;
            temp_ps = flag_s1*sin(2*PI*temp_s)/2/PI + flag_s2*(temp_s-1.0f);
            /* when numerator/denominator round to zero, fall back to 1
               (limit of p'(s)/s as s -> 0) */
            flag_ps1 = (int(temp_ps*10000) == 0) ? 0.0f : 1.0f;
            flag_ps2 = (int(temp_s*10000) == 0) ? 0.0f : 1.0f;

            /* defensive: cannot happen by construction (flag_ps2 == 1
               implies temp_s != 0), but fail loudly rather than divide */
            if( (flag_ps2*temp_s + 1.0f - flag_ps2) == 0 )
                CV_ERROR( CV_StsDivByZero,
                "zero denominator while evaluating d_p(s)" );

            fPtr_ps[i+iStep_ps*j] =
                (flag_ps1*temp_ps + 1.0f - flag_ps1) /
                (flag_ps2*temp_s + 1.0f - flag_ps2);
        }
    }

    /* div( (d_p(s)-1) * grad(phi) ) + 0.2 * laplace(phi) */
    cvMul(ps, src_dx, dps_x);
    cvMul(ps, src_dy, dps_y);
    cvSub(dps_x, src_dx, dps_x);
    cvSub(dps_y, src_dy, dps_y);
    cvCurvature(dps_x, dps_y, dst);
    cvLaplace(src, del, 1);
    cvMul(del, ones, del, 0.2f);
    cvAdd(dst, del, dst);

    __END__;

    /* runs on both success and error paths; pointers are NULL-safe */
    cvReleaseMat(&src_dx);
    cvReleaseMat(&src_dy);
    cvReleaseMat(&s);
    cvReleaseMat(&ps);
    cvReleaseMat(&dps_x);
    cvReleaseMat(&dps_y);
    cvReleaseMat(&del);
    cvReleaseMat(&ones);
}
/* motion templates */

/* Updates a motion-history image (MHI): for every pixel where the silhouette
   mask is non-zero the MHI is set to the current timestamp; where the
   silhouette is zero and the stored value is older than
   (timestamp - mhi_duration) the MHI is reset to 0; otherwise it is kept.
   silhouette: 8u mask; mhimg: 32FC1, same size. */
CV_IMPL void
cvUpdateMotionHistory( const void* silhouette, void* mhimg,
                       double timestamp, double mhi_duration )
{
    CvMat silhstub, *silh = cvGetMat(silhouette, &silhstub);
    CvMat mhistub, *mhi = cvGetMat(mhimg, &mhistub);

    if( !CV_IS_MASK_ARR( silh ))
        CV_Error( CV_StsBadMask, "" );

    if( CV_MAT_TYPE( mhi->type ) != CV_32FC1 )
        CV_Error( CV_StsUnsupportedFormat, "" );

    if( !CV_ARE_SIZES_EQ( mhi, silh ))
        CV_Error( CV_StsUnmatchedSizes, "" );

    CvSize size = cvGetMatSize( mhi );

    /* NOTE(review): mhi_step/silh_step are computed but the row loops below
       index through mhi->step/silh->step directly; harmless because after
       flattening size.height == 1 (y stays 0), yet effectively dead — verify. */
    int mhi_step = mhi->step;
    int silh_step = silh->step;

    if( CV_IS_MAT_CONT( mhi->type & silh->type ))
    {
        /* both arrays continuous: process as one long row */
        size.width *= size.height;
        mhi_step = silh_step = CV_STUB_STEP;
        size.height = 1;
    }

    float ts = (float)timestamp;
    float delbound = (float)(timestamp - mhi_duration);  /* expiry threshold */
    int x, y;
#if CV_SSE2
    volatile bool useSIMD = cv::checkHardwareSupport(CV_CPU_SSE2);
#endif

    for( y = 0; y < size.height; y++ )
    {
        const uchar* silhData = silh->data.ptr + silh->step*y;
        float* mhiData = (float*)(mhi->data.ptr + mhi->step*y);
        x = 0;

#if CV_SSE2
        /* vectorized form of the scalar tail loop below, 8 pixels per
           iteration: zero out expired entries, then xor-select ts where
           the silhouette is non-zero */
        if( useSIMD )
        {
            __m128 ts4 = _mm_set1_ps(ts), db4 = _mm_set1_ps(delbound);
            for( ; x <= size.width - 8; x += 8 )
            {
                __m128i z = _mm_setzero_si128();
                /* widen 8 silhouette bytes to two float quads */
                __m128i s = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i*)(silhData + x)), z);
                __m128 s0 = _mm_cvtepi32_ps(_mm_unpacklo_epi16(s, z)),
                       s1 = _mm_cvtepi32_ps(_mm_unpackhi_epi16(s, z));
                __m128 v0 = _mm_loadu_ps(mhiData + x), v1 = _mm_loadu_ps(mhiData + x + 4);
                __m128 fz = _mm_setzero_ps();

                /* keep v only where v >= delbound (else 0) */
                v0 = _mm_and_ps(v0, _mm_cmpge_ps(v0, db4));
                v1 = _mm_and_ps(v1, _mm_cmpge_ps(v1, db4));

                /* where silhouette != 0, flip v to ts via xor mask */
                __m128 m0 = _mm_and_ps(_mm_xor_ps(v0, ts4), _mm_cmpneq_ps(s0, fz));
                __m128 m1 = _mm_and_ps(_mm_xor_ps(v1, ts4), _mm_cmpneq_ps(s1, fz));

                v0 = _mm_xor_ps(v0, m0);
                v1 = _mm_xor_ps(v1, m1);

                _mm_storeu_ps(mhiData + x, v0);
                _mm_storeu_ps(mhiData + x + 4, v1);
            }
        }
#endif

        /* scalar tail (and full row when SIMD unavailable) */
        for( ; x < size.width; x++ )
        {
            float val = mhiData[x];
            val = silhData[x] ? ts : val < delbound ? 0 : val;
            mhiData[x] = val;
        }
    }
}
/* Emits one CSV row of timing results for a test case:
   function-name[(Mask)],in-type[,out-type],Cn[,Cn-out],WxH,<params>,cpe,usec
   where cpe = CPU clocks per element. in/out types and the size are taken
   from the timing params or inferred from the first INPUT/OUTPUT/
   INPUT_OUTPUT test matrix. */
void CvArrTest::print_time( int test_case_idx, double time_clocks, double time_cpu_clocks )
{
    int in_type = -1, out_type = -1;
    CvSize size = { -1, -1 };     /* -1 width marks "not known yet" */
    const CvFileNode* size_node = find_timing_param( "size" );
    char str[1024], *ptr = str;   /* CSV row is assembled into str via ptr */
    int len;
    bool have_mask;
    double cpe;

    if( size_node )
    {
        if( !CV_NODE_IS_SEQ(size_node->tag) )
        {
            /* scalar size parameter: treat as a 1-row array */
            size.width = cvReadInt(size_node,-1);
            size.height = 1;
        }
        else
        {
            size.width = cvReadInt((const CvFileNode*)cvGetSeqElem(size_node->data.seq,0),-1);
            size.height = cvReadInt((const CvFileNode*)cvGetSeqElem(size_node->data.seq,1),-1);
        }
    }

    /* infer type/size from the first available test matrix */
    if( test_array[INPUT].size() )
    {
        in_type = CV_MAT_TYPE(test_mat[INPUT][0].type);

        if( size.width == -1 )
            size = cvGetMatSize(&test_mat[INPUT][0]);
    }

    if( test_array[OUTPUT].size() )
    {
        out_type = CV_MAT_TYPE(test_mat[OUTPUT][0].type);

        if( in_type < 0 )
            in_type = out_type;

        if( size.width == -1 )
            size = cvGetMatSize(&test_mat[OUTPUT][0]);
    }

    if( out_type < 0 && test_array[INPUT_OUTPUT].size() )
    {
        out_type = CV_MAT_TYPE(test_mat[INPUT_OUTPUT][0].type);

        if( in_type < 0 )
            in_type = out_type;

        if( size.width == -1 )
            size = cvGetMatSize(&test_mat[INPUT_OUTPUT][0]);
    }

    have_mask = test_array[MASK].size() > 0 && test_array[MASK][0] != 0;

    if( in_type < 0 && out_type < 0 )
        return;  /* nothing to report */

    if( out_type < 0 )
        out_type = in_type;

    /* copy only the first comma-separated function name from the list */
    ptr = strchr( (char*)tested_functions, ',' );
    if( ptr )
    {
        len = (int)(ptr - tested_functions);
        strncpy( str, tested_functions, len );
    }
    else
    {
        len = (int)strlen( tested_functions );
        strcpy( str, tested_functions );
    }
    ptr = str + len;
    *ptr = '\0';  /* strncpy above does not terminate; do it here */
    if( have_mask )
    {
        sprintf( ptr, "(Mask)" );
        ptr += strlen(ptr);
    }
    *ptr++ = ',';
    sprintf( ptr, "%s", cvTsGetTypeName(in_type) );
    ptr += strlen(ptr);

    /* output depth is printed only when it differs from the input depth */
    if( CV_MAT_DEPTH(out_type) != CV_MAT_DEPTH(in_type) )
    {
        sprintf( ptr, "%s", cvTsGetTypeName(out_type) );
        ptr += strlen(ptr);
    }
    *ptr++ = ',';
    sprintf( ptr, "C%d", CV_MAT_CN(in_type) );
    ptr += strlen(ptr);

    /* same for channel count */
    if( CV_MAT_CN(out_type) != CV_MAT_CN(in_type) )
    {
        sprintf( ptr, "C%d", CV_MAT_CN(out_type) );
        ptr += strlen(ptr);
    }
    *ptr++ = ',';
    sprintf( ptr, "%dx%d,", size.width, size.height );
    ptr += strlen(ptr);
    print_timing_params( test_case_idx, ptr );
    ptr += strlen(ptr);
    /* clocks per element */
    cpe = time_cpu_clocks / ((double)size.width * size.height);
    if( cpe >= 100 )
        sprintf( ptr, "%.0f,", cpe );
    else
        sprintf( ptr, "%.1f,", cpe );
    ptr += strlen(ptr);
    /* wall time in microseconds */
    sprintf( ptr, "%g", time_clocks*1e6/cv::getTickFrequency() );
    ts->printf( CvTS::CSV, "%s\n", str );
}
/* Finds the global minimum and maximum of a single-channel array (or of the
   selected COI of a multi-channel array) together with their locations,
   optionally restricted by an 8-bit mask. All output pointers are optional.
   The work is dispatched through per-depth function tables (plain/COI x
   unmasked/masked) initialized on first use. */
CV_IMPL void
cvMinMaxLoc( const void* img, double* _minVal, double* _maxVal,
             CvPoint* _minLoc, CvPoint* _maxLoc, const void* mask )
{
    static CvFuncTable minmax_tab, minmaxcoi_tab;          /* unmasked */
    static CvFuncTable minmaxmask_tab, minmaxmaskcoi_tab;  /* masked */
    static int inittab = 0;

    CV_FUNCNAME("cvMinMaxLoc");

    __BEGIN__;

    int type, depth, cn, coi = 0;
    int mat_step, mask_step = 0;
    CvSize size;
    CvMat stub, maskstub, *mat = (CvMat*)img, *matmask = (CvMat*)mask;
    CvPoint minLoc, maxLoc;
    double minVal = 0, maxVal = 0;

    if( !inittab )
    {
        /* one-time initialization of the four dispatch tables */
        icvInitMinMaxIndxC1RTable( &minmax_tab );
        icvInitMinMaxIndxCnCRTable( &minmaxcoi_tab );
        icvInitMinMaxIndxC1MRTable( &minmaxmask_tab );
        icvInitMinMaxIndxCnCMRTable( &minmaxmaskcoi_tab );
        inittab = 1;
    }

    CV_CALL( mat = cvGetMat( mat, &stub, &coi ));

    type = CV_MAT_TYPE( mat->type );
    depth = CV_MAT_DEPTH( type );
    cn = CV_MAT_CN( type );
    size = cvGetMatSize( mat );

    /* multi-channel input requires a COI to pick the channel */
    if( cn > 1 && coi == 0 )
        CV_ERROR( CV_StsBadArg, "" );

    mat_step = mat->step;

    if( !mask )
    {
        if( size.height == 1 )
            mat_step = CV_STUB_STEP;

        if( CV_MAT_CN(type) == 1 || coi == 0 )
        {
            CvFunc2D_1A4P func = (CvFunc2D_1A4P)(minmax_tab.fn_2d[depth]);

            if( !func )
                CV_ERROR( CV_StsBadArg, cvUnsupportedFormat );

            IPPI_CALL( func( mat->data.ptr, mat_step, size,
                             &minVal, &maxVal, &minLoc, &maxLoc ));
        }
        else
        {
            CvFunc2DnC_1A4P func = (CvFunc2DnC_1A4P)(minmaxcoi_tab.fn_2d[depth]);

            if( !func )
                CV_ERROR( CV_StsBadArg, cvUnsupportedFormat );

            IPPI_CALL( func( mat->data.ptr, mat_step, size, cn, coi,
                             &minVal, &maxVal, &minLoc, &maxLoc ));
        }
    }
    else
    {
        CV_CALL( matmask = cvGetMat( matmask, &maskstub ));

        if( !CV_IS_MASK_ARR( matmask ))
            CV_ERROR( CV_StsBadMask, "" );

        if( !CV_ARE_SIZES_EQ( mat, matmask ))
            CV_ERROR( CV_StsUnmatchedSizes, "" );

        mask_step = matmask->step;

        if( size.height == 1 )
            mat_step = mask_step = CV_STUB_STEP;

        if( CV_MAT_CN(type) == 1 || coi == 0 )
        {
            CvFunc2D_2A4P func = (CvFunc2D_2A4P)(minmaxmask_tab.fn_2d[depth]);

            if( !func )
                CV_ERROR( CV_StsBadArg, cvUnsupportedFormat );

            IPPI_CALL( func( mat->data.ptr, mat_step,
                             matmask->data.ptr, mask_step, size,
                             &minVal, &maxVal, &minLoc, &maxLoc ));
        }
        else
        {
            CvFunc2DnC_2A4P func = (CvFunc2DnC_2A4P)(minmaxmaskcoi_tab.fn_2d[depth]);

            if( !func )
                CV_ERROR( CV_StsBadArg, cvUnsupportedFormat );

            IPPI_CALL( func( mat->data.ptr, mat_step,
                             matmask->data.ptr, mask_step, size, cn, coi,
                             &minVal, &maxVal, &minLoc, &maxLoc ));
        }
    }

    /* for depths narrower than 32S (and for 32F) the low-level functions
       store their result as a float in the first 4 bytes of the double;
       reinterpret those bytes accordingly */
    if( depth < CV_32S || depth == CV_32F )
    {
        minVal = *(float*)&minVal;
        maxVal = *(float*)&maxVal;
    }

    /* all output arguments are optional */
    if( _minVal )
        *_minVal = minVal;

    if( _maxVal )
        *_maxVal = maxVal;

    if( _minLoc )
        *_minLoc = minLoc;

    if( _maxLoc )
        *_maxLoc = maxLoc;

    __END__;
}
/* Returns the per-channel mean of an array. Without a mask this is simply
   cvSum divided by the number of elements; with a mask the computation is
   dispatched through per-type function tables (initialized lazily) that
   average only the pixels where the mask is non-zero. */
CV_IMPL CvScalar
cvAvg( const void* img, const void* maskarr )
{
    CvScalar mean = {{0,0,0,0}};

    static CvBigFuncTable mean_tab;   /* masked mean, per full type  */
    static CvFuncTable meancoi_tab;   /* masked mean with COI, per depth */
    static int inittab = 0;

    CV_FUNCNAME("cvAvg");

    __BEGIN__;

    CvSize size;

    if( !maskarr )
    {
        /* unmasked: mean = sum / element count */
        CV_CALL( mean = cvSum(img));

        size = cvGetSize( img );
        size.width *= size.height;

        double scale = size.width ? 1./size.width : 0;
        int c;
        for( c = 0; c < 4; c++ )
            mean.val[c] *= scale;
    }
    else
    {
        int type, coi = 0;
        int src_step, msk_step;
        CvMat srcstub, mskstub;
        CvMat* src = (CvMat*)img;
        CvMat* msk = (CvMat*)maskarr;

        if( !inittab )
        {
            /* fill the dispatch tables on first use */
            icvInitMeanMRTable( &mean_tab );
            icvInitMeanCnCMRTable( &meancoi_tab );
            inittab = 1;
        }

        if( !CV_IS_MAT(src) )
            CV_CALL( src = cvGetMat( src, &srcstub, &coi ));

        if( !CV_IS_MAT(msk) )
            CV_CALL( msk = cvGetMat( msk, &mskstub ));

        if( !CV_IS_MASK_ARR(msk) )
            CV_ERROR( CV_StsBadMask, "" );

        if( !CV_ARE_SIZES_EQ( src, msk ) )
            CV_ERROR( CV_StsUnmatchedSizes, "" );

        type = CV_MAT_TYPE( src->type );
        size = cvGetMatSize( src );

        src_step = src->step;
        msk_step = msk->step;

        if( CV_IS_MAT_CONT( src->type & msk->type ))
        {
            /* both arrays continuous: fold into a single long row */
            size.width *= size.height;
            size.height = 1;
            src_step = msk_step = CV_STUB_STEP;
        }

        if( CV_MAT_CN(type) == 1 || coi == 0 )
        {
            CvFunc2D_2A1P func;

            if( CV_MAT_CN(type) > 4 )
                CV_ERROR( CV_StsOutOfRange,
                "The input array must have at most 4 channels unless COI is set" );

            func = (CvFunc2D_2A1P)(mean_tab.fn_2d[type]);
            if( !func )
                CV_ERROR( CV_StsBadArg, cvUnsupportedFormat );

            IPPI_CALL( func( src->data.ptr, src_step,
                             msk->data.ptr, msk_step, size, mean.val ));
        }
        else
        {
            /* multi-channel with COI selected: depth-indexed table */
            CvFunc2DnC_2A1P func =
                (CvFunc2DnC_2A1P)(meancoi_tab.fn_2d[CV_MAT_DEPTH(type)]);

            if( !func )
                CV_ERROR( CV_StsBadArg, cvUnsupportedFormat );

            IPPI_CALL( func( src->data.ptr, src_step,
                             msk->data.ptr, msk_step, size,
                             CV_MAT_CN(type), coi, mean.val ));
        }
    }

    __END__;

    return mean;
}
/* Refines corner locations to sub-pixel accuracy. For each input corner,
   iteratively solves the 2x2 linear system built from gradient products
   weighted by a Gaussian mask over a (2*win+1)x(2*win+1) window, shifting
   the window to the new estimate each iteration until the shift is below
   criteria.epsilon or criteria.max_iter iterations are done. An optional
   zeroZone around the center is excluded from the fit. Results are written
   back into "corners" in place. */
CV_IMPL void
cvFindCornerSubPix( const void* srcarr, CvPoint2D32f* corners,
                    int count, CvSize win, CvSize zeroZone,
                    CvTermCriteria criteria )
{
    cv::AutoBuffer<float> buffer;

    const int MAX_ITERS = 100;
    /* 3-tap separable derivative kernels (x-derivative = drv_x (x) drv_y) */
    const float drv_x[] = { -1.f, 0.f, 1.f };
    const float drv_y[] = { 0.f, 0.5f, 0.f };
    float *maskX;
    float *maskY;
    float *mask;
    float *src_buffer;
    float *gx_buffer;
    float *gy_buffer;
    int win_w = win.width * 2 + 1, win_h = win.height * 2 + 1;
    int win_rect_size = (win_w + 4) * (win_h + 4);
    double coeff;
    CvSize size, src_buf_size;
    int i, j, k, pt_i;
    int max_iters = 10;
    double eps = 0;

    CvMat stub, *src = (CvMat*)srcarr;
    src = cvGetMat( srcarr, &stub );

    if( CV_MAT_TYPE( src->type ) != CV_8UC1 )
        CV_Error( CV_StsBadMask, "" );

    if( !corners )
        CV_Error( CV_StsNullPtr, "" );

    if( count < 0 )
        CV_Error( CV_StsBadSize, "" );

    if( count == 0 )
        return;  /* nothing to refine */

    if( win.width <= 0 || win.height <= 0 )
        CV_Error( CV_StsBadSize, "" );

    size = cvGetMatSize( src );

    /* image must accommodate the window plus interpolation margin */
    if( size.width < win_w + 4 || size.height < win_h + 4 )
        CV_Error( CV_StsBadSize, "" );

    /* initialize variables, controlling loop termination */
    switch( criteria.type )
    {
    case CV_TERMCRIT_ITER:
        eps = 0.f;
        max_iters = criteria.max_iter;
        break;
    case CV_TERMCRIT_EPS:
        eps = criteria.epsilon;
        max_iters = MAX_ITERS;
        break;
    case CV_TERMCRIT_ITER | CV_TERMCRIT_EPS:
        eps = criteria.epsilon;
        max_iters = criteria.max_iter;
        break;
    default:
        assert( 0 );
        CV_Error( CV_StsBadFlag, "" );
    }

    eps = MAX( eps, 0 );
    eps *= eps;                 /* use square of error in comparsion operations. */

    max_iters = MAX( max_iters, 1 );
    max_iters = MIN( max_iters, MAX_ITERS );

    /* one shared scratch buffer: two 1-D masks, the 2-D mask, the sampled
       patch, and the two gradient patches */
    buffer.allocate( win_rect_size * 5 + win_w + win_h + 32 );

    /* assign pointers */
    maskX = buffer;
    maskY = maskX + win_w + 4;
    mask = maskY + win_h + 4;
    src_buffer = mask + win_w * win_h;
    gx_buffer = src_buffer + win_rect_size;
    gy_buffer = gx_buffer + win_rect_size;

    coeff = 1. / (win.width * win.width);

    /* calculate mask: separable Gaussian weights exp(-i^2/w^2) */
    for( i = -win.width, k = 0; i <= win.width; i++, k++ )
    {
        maskX[k] = (float)exp( -i * i * coeff );
    }

    if( win.width == win.height )
    {
        maskY = maskX;  /* square window: reuse the X weights */
    }
    else
    {
        coeff = 1. / (win.height * win.height);
        for( i = -win.height, k = 0; i <= win.height; i++, k++ )
        {
            maskY[k] = (float) exp( -i * i * coeff );
        }
    }

    /* outer product of the 1-D masks gives the 2-D weight mask */
    for( i = 0; i < win_h; i++ )
    {
        for( j = 0; j < win_w; j++ )
        {
            mask[i * win_w + j] = maskX[j] * maskY[i];
        }
    }

    /* make zero_zone: exclude the central dead zone from the fit */
    if( zeroZone.width >= 0 && zeroZone.height >= 0 &&
        zeroZone.width * 2 + 1 < win_w && zeroZone.height * 2 + 1 < win_h )
    {
        for( i = win.height - zeroZone.height; i <= win.height + zeroZone.height; i++ )
        {
            for( j = win.width - zeroZone.width; j <= win.width + zeroZone.width; j++ )
            {
                mask[i * win_w + j] = 0;
            }
        }
    }

    /* set sizes of image rectangles, used in convolutions */
    src_buf_size.width = win_w + 2;
    src_buf_size.height = win_h + 2;

    /* do optimization loop for all the points */
    for( pt_i = 0; pt_i < count; pt_i++ )
    {
        CvPoint2D32f cT = corners[pt_i], cI = cT;  /* cT: original, cI: current */
        int iter = 0;
        double err;

        do
        {
            CvPoint2D32f cI2;
            double a, b, c, bb1, bb2;

            /* sample the window around the current estimate with
               sub-pixel interpolation */
            IPPI_CALL( icvGetRectSubPix_8u32f_C1R( (uchar*)src->data.ptr, src->step, size,
                                        src_buffer, (win_w + 2) * sizeof( src_buffer[0] ),
                                        cvSize( win_w + 2, win_h + 2 ), cI ));

            /* calc derivatives */
            icvSepConvSmall3_32f( src_buffer, src_buf_size.width * sizeof(src_buffer[0]),
                                  gx_buffer, win_w * sizeof(gx_buffer[0]),
                                  src_buf_size, drv_x, drv_y, buffer );

            icvSepConvSmall3_32f( src_buffer, src_buf_size.width * sizeof(src_buffer[0]),
                                  gy_buffer, win_w * sizeof(gy_buffer[0]),
                                  src_buf_size, drv_y, drv_x, buffer );

            a = b = c = bb1 = bb2 = 0;

            /* process gradient: accumulate the weighted normal equations
               [a b; b c] * d = [bb1; bb2] */
            for( i = 0, k = 0; i < win_h; i++ )
            {
                double py = i - win.height;

                for( j = 0; j < win_w; j++, k++ )
                {
                    double m = mask[k];
                    double tgx = gx_buffer[k];
                    double tgy = gy_buffer[k];
                    double gxx = tgx * tgx * m;
                    double gxy = tgx * tgy * m;
                    double gyy = tgy * tgy * m;
                    double px = j - win.width;

                    a += gxx;
                    b += gxy;
                    c += gyy;

                    bb1 += gxx * px + gxy * py;
                    bb2 += gxy * px + gyy * py;
                }
            }

            {
                /* solve the 2x2 system via SVD-based inversion
                   (robust to a singular/ill-conditioned matrix) */
                double A[4];
                double InvA[4];
                CvMat matA, matInvA;

                A[0] = a;
                A[1] = A[2] = b;
                A[3] = c;

                cvInitMatHeader( &matA, 2, 2, CV_64F, A );
                cvInitMatHeader( &matInvA, 2, 2, CV_64FC1, InvA );

                cvInvert( &matA, &matInvA, CV_SVD );
                cI2.x = (float)(cI.x + InvA[0]*bb1 + InvA[1]*bb2);
                cI2.y = (float)(cI.y + InvA[2]*bb1 + InvA[3]*bb2);
            }

            /* squared shift of the estimate this iteration */
            err = (cI2.x - cI.x) * (cI2.x - cI.x) + (cI2.y - cI.y) * (cI2.y - cI.y);
            cI = cI2;
        }
        while( ++iter < max_iters && err > eps );

        /* if new point is too far from initial, it means poor convergence.
           leave initial point as the result */
        if( fabs( cI.x - cT.x ) > win.width || fabs( cI.y - cT.y ) > win.height )
        {
            cI = cT;
        }

        corners[pt_i] = cI;                /* store result */
    }
}
/* Updates a running average: acc = (1 - alpha)*acc + alpha*src, optionally
   only where the 8-bit mask is non-zero. "arrY" is the new observation,
   "arrU" the 32F accumulator of the same size and channel count. Dispatch
   goes through per-depth (unmasked) / per-type (masked) function tables. */
CV_IMPL void
cvRunningAvg( const void* arrY, void* arrU, double alpha, const void* maskarr )
{
    static CvFuncTable acc_tab;        /* unmasked update, per depth */
    static CvBigFuncTable accmask_tab; /* masked update, per full type */
    static int inittab = 0;

    CV_FUNCNAME( "cvRunningAvg" );

    __BEGIN__;

    int src_coi, acc_coi;
    int type;
    int src_step, acc_step, msk_step = 0;
    CvSize size;
    CvMat srcstub, *src = (CvMat*)arrY;
    CvMat accstub, *acc = (CvMat*)arrU;
    CvMat mskstub, *msk = (CvMat*)maskarr;

    if( !inittab )
    {
        /* lazy one-time table setup */
        icvInitAddWeightedTable( &acc_tab, &accmask_tab );
        inittab = 1;
    }

    CV_CALL( src = cvGetMat( src, &srcstub, &src_coi ));
    CV_CALL( acc = cvGetMat( acc, &accstub, &acc_coi ));

    /* COI is not supported by this function */
    if( src_coi != 0 || acc_coi != 0 )
        CV_ERROR( CV_BadCOI, "" );

    if( !CV_ARE_CNS_EQ( src, acc ))
        CV_ERROR( CV_StsUnmatchedFormats, "" );

    /* the accumulator must be 32-bit float */
    if( CV_MAT_DEPTH( acc->type ) != CV_32F )
        CV_ERROR( CV_BadDepth, "" );

    if( !CV_ARE_SIZES_EQ( src, acc ))
        CV_ERROR( CV_StsUnmatchedSizes, "" );

    size = cvGetMatSize( src );
    type = CV_MAT_TYPE( src->type );

    src_step = src->step;
    acc_step = acc->step;

    if( !msk )
    {
        CvAddWeightedFunc func = (CvAddWeightedFunc)acc_tab.fn_2d[CV_MAT_DEPTH(type)];

        if( !func )
            CV_ERROR( CV_StsUnsupportedFormat, "" );

        /* channels are interleaved, so widen the row instead of looping */
        size.width *= CV_MAT_CN(type);
        if( CV_IS_MAT_CONT( src->type & acc->type ))
        {
            /* both continuous: collapse to a single long row */
            size.width *= size.height;
            src_step = acc_step = CV_STUB_STEP;
            size.height = 1;
        }

        IPPI_CALL( func( src->data.ptr, src_step,
                         acc->data.ptr, acc_step, size, (float)alpha ));
    }
    else
    {
        CvAddWeightedMaskFunc func = (CvAddWeightedMaskFunc)accmask_tab.fn_2d[type];

        if( !func )
            CV_ERROR( CV_StsUnsupportedFormat, "" );

        CV_CALL( msk = cvGetMat( msk, &mskstub ));

        if( !CV_IS_MASK_ARR( msk ))
            CV_ERROR( CV_StsBadMask, "" );

        if( !CV_ARE_SIZES_EQ( src, msk ))
            CV_ERROR( CV_StsUnmatchedSizes, "" );

        msk_step = msk->step;

        if( CV_IS_MAT_CONT( src->type & acc->type & msk->type ))
        {
            /* all three continuous: collapse to a single long row */
            size.width *= size.height;
            src_step = acc_step = msk_step = CV_STUB_STEP;
            size.height = 1;
        }

        IPPI_CALL( func( src->data.ptr, src_step, msk->data.ptr, msk_step,
                         acc->data.ptr, acc_step, size, (float)alpha ));
    }

    __END__;
}
void OptFlowEMD::calculate_flow(IplImage* imageT, IplImage* imageTMinus1, IplImage* velx, IplImage* vely, IplImage* abs){ #ifdef __CV_BEGIN__ __CV_BEGIN__ #else __BEGIN__ #endif CV_FUNCNAME( "OptFlowGenGrad::calculate_flow" ); CvMat stubA, *srcA = (CvMat*)imageT; // stubA takes the new header data for the matrix according to ROI CvMat stubB, *srcB = (CvMat*)imageTMinus1; CvMat stubx, *vel_x = (CvMat*)velx; CvMat stuby, *vel_y = (CvMat*)vely; CvMat stubAbs, *abs_ = NULL; if (abs != NULL) abs_ = (CvMat*)abs; // see GetMat function doc: This returns a matrix header with the current image ROI! // this gives basically a view on the ROI, stubA takes the header data of the matrix // srcA is pointed to this new 'augmented' data-header CV_CALL( srcA = cvGetMat( srcA, &stubA )); CV_CALL( srcB = cvGetMat( srcB, &stubB )); CV_CALL( vel_x = cvGetMat( vel_x, &stubx )); CV_CALL( vel_y = cvGetMat( vel_y, &stuby )); if (abs_ != NULL) CV_CALL( abs_ = cvGetMat ( abs_, &stubAbs )); if( !CV_ARE_TYPES_EQ( srcA, srcB )) CV_ERROR( CV_StsUnmatchedFormats, "Source images have different formats" ); if( !CV_ARE_TYPES_EQ( vel_x, vel_y )) CV_ERROR( CV_StsUnmatchedFormats, "Destination images have different formats" ); if (abs_ != NULL) if (!CV_ARE_TYPES_EQ( vel_x, abs_)) CV_ERROR( CV_StsUnmatchedFormats, "Destination images have different formats" ); if( !CV_ARE_SIZES_EQ( srcA, srcB ) || !CV_ARE_SIZES_EQ( vel_x, vel_y ) || !CV_ARE_SIZES_EQ( srcA, vel_x )) CV_ERROR( CV_StsUnmatchedSizes, "Some images have different sizes" ); if(abs_ != NULL) if (!CV_ARE_SIZES_EQ( srcA, abs_)) CV_ERROR( CV_StsUnmatchedSizes, "Some images have different sizes" ); if( CV_MAT_TYPE( srcA->type ) != CV_8UC1) CV_ERROR( CV_StsUnsupportedFormat, "Source images must have 8uC1 type"); if( CV_MAT_TYPE( vel_x->type ) != CV_32FC1 ) CV_ERROR( CV_StsUnsupportedFormat, "Destination images must have 32fC1 type" ); if( srcA->step != srcB->step || vel_x->step != vel_y->step) CV_ERROR( CV_BadStep, "source and destination images have 
different step" ); if (abs_ != NULL) if (vel_x->step != abs_->step) CV_ERROR( CV_BadStep, "source and destination images have different step" ); if (abs_ != NULL){ IPPI_CALL( calcOptFlowEMD( (uchar*)srcA->data.ptr, (uchar*)srcB->data.ptr, srcA->step, cvGetMatSize( srcA ), vel_x->data.fl, vel_y->data.fl, vel_x->step, abs_->data.fl)); } else{ IPPI_CALL( calcOptFlowEMD( (uchar*)srcA->data.ptr, (uchar*)srcB->data.ptr, srcA->step, cvGetMatSize( srcA ), vel_x->data.fl, vel_y->data.fl, vel_x->step, NULL)); } #ifdef __CV_END__ __CV_END__ #else __END__ #endif }
/* Fills a connected component starting from seed_point with newVal.
   Two modes:
   - "simple" (no mask, zero lo/up diff, not mask-only): exact-value fill
     via icvFloodFill_CnIR;
   - "gradient": pixels join the component when they differ from a
     neighbor (or the seed, with CV_FLOODFILL_FIXED_RANGE) by at most
     lo_diff/up_diff, tracked through a border mask (user-supplied or a
     temporary one) via the icvFloodFillGrad_CnIR templates.
   Supported types: 8UC1/8UC3, 32SC1/32SC3, 32FC1/32FC3. comp (optional)
   receives the bounding box/area/average of the filled region. */
CV_IMPL void
cvFloodFill( CvArr* arr, CvPoint seed_point,
             CvScalar newVal, CvScalar lo_diff, CvScalar up_diff,
             CvConnectedComp* comp, int flags, CvArr* maskarr )
{
    cv::Ptr<CvMat> tempMask;                 /* owns the temporary mask, if any */
    std::vector<CvFFillSegment> buffer;      /* scanline segment stack */

    if( comp )
        memset( comp, 0, sizeof(*comp) );

    int i, type, depth, cn, is_simple;
    int buffer_size, connectivity = flags & 255;  /* low byte = connectivity */
    /* raw representation of newVal, viewable as bytes/ints/floats */
    union {
        uchar b[4];
        int i[4];
        float f[4];
        double _[4];
    } nv_buf;
    nv_buf._[0] = nv_buf._[1] = nv_buf._[2] = nv_buf._[3] = 0;

    /* per-depth storage for the converted lo/up thresholds */
    struct { cv::Vec3b b; cv::Vec3i i; cv::Vec3f f; } ld_buf, ud_buf;
    CvMat stub, *img = cvGetMat(arr, &stub);
    CvMat maskstub, *mask = (CvMat*)maskarr;
    CvSize size;

    type = CV_MAT_TYPE( img->type );
    depth = CV_MAT_DEPTH(type);
    cn = CV_MAT_CN(type);

    if( connectivity == 0 )
        connectivity = 4;
    else if( connectivity != 4 && connectivity != 8 )
        CV_Error( CV_StsBadFlag, "Connectivity must be 4, 0(=4) or 8" );

    /* simple mode only when no mask is involved and all diffs are zero */
    is_simple = mask == 0 && (flags & CV_FLOODFILL_MASK_ONLY) == 0;

    for( i = 0; i < cn; i++ )
    {
        if( lo_diff.val[i] < 0 || up_diff.val[i] < 0 )
            CV_Error( CV_StsBadArg, "lo_diff and up_diff must be non-negative" );
        is_simple &= fabs(lo_diff.val[i]) < DBL_EPSILON && fabs(up_diff.val[i]) < DBL_EPSILON;
    }

    size = cvGetMatSize( img );

    /* unsigned compare also rejects negative coordinates */
    if( (unsigned)seed_point.x >= (unsigned)size.width ||
        (unsigned)seed_point.y >= (unsigned)size.height )
        CV_Error( CV_StsOutOfRange, "Seed point is outside of image" );

    cvScalarToRawData( &newVal, &nv_buf, type, 0 );
    buffer_size = MAX( size.width, size.height ) * 2;
    buffer.resize( buffer_size );

    if( is_simple )
    {
        int elem_size = CV_ELEM_SIZE(type);
        const uchar* seed_ptr = img->data.ptr + img->step*seed_point.y + elem_size*seed_point.x;

        /* skip the fill entirely when the seed already equals newVal
           (filling would be a no-op and could loop) */
        for(i = 0; i < elem_size; i++)
            if (seed_ptr[i] != nv_buf.b[i])
                break;

        if (i != elem_size)
        {
            if( type == CV_8UC1 )
                icvFloodFill_CnIR(img->data.ptr, img->step, size, seed_point, nv_buf.b[0],
                                  comp, flags, &buffer);
            else if( type == CV_8UC3 )
                icvFloodFill_CnIR(img->data.ptr, img->step, size, seed_point, cv::Vec3b(nv_buf.b),
                                  comp, flags, &buffer);
            else if( type == CV_32SC1 )
                icvFloodFill_CnIR(img->data.ptr, img->step, size, seed_point, nv_buf.i[0],
                                  comp, flags, &buffer);
            else if( type == CV_32FC1 )
                icvFloodFill_CnIR(img->data.ptr, img->step, size, seed_point, nv_buf.f[0],
                                  comp, flags, &buffer);
            else if( type == CV_32SC3 )
                icvFloodFill_CnIR(img->data.ptr, img->step, size, seed_point, cv::Vec3i(nv_buf.i),
                                  comp, flags, &buffer);
            else if( type == CV_32FC3 )
                icvFloodFill_CnIR(img->data.ptr, img->step, size, seed_point, cv::Vec3f(nv_buf.f),
                                  comp, flags, &buffer);
            else
                CV_Error( CV_StsUnsupportedFormat, "" );
            return;
        }
    }

    /* gradient mode requires a mask that is 2px wider and taller */
    if( !mask )
    {
        /* created mask will be 8-byte aligned */
        tempMask = cvCreateMat( size.height + 2, (size.width + 9) & -8, CV_8UC1 );
        mask = tempMask;
    }
    else
    {
        mask = cvGetMat( mask, &maskstub );
        if( !CV_IS_MASK_ARR( mask ))
            CV_Error( CV_StsBadMask, "" );

        if( mask->width != size.width + 2 || mask->height != size.height + 2 )
            CV_Error( CV_StsUnmatchedSizes, "mask must be 2 pixel wider "
                                            "and 2 pixel taller than filled image" );
    }

    /* build the 1-pixel border of the mask (value 1 = barrier); a
       temporary mask is also cleared in its interior */
    int width = tempMask ? mask->step : size.width + 2;
    uchar* mask_row = mask->data.ptr + mask->step;
    memset( mask_row - mask->step, 1, width );  /* top border row */

    for( i = 1; i <= size.height; i++, mask_row += mask->step )
    {
        if( tempMask )
            memset( mask_row, 0, width );
        mask_row[0] = mask_row[size.width+1] = (uchar)1;  /* side borders */
    }
    memset( mask_row, 1, width );  /* bottom border row */

    /* convert the per-channel thresholds to the image depth */
    if( depth == CV_8U )
        for( i = 0; i < cn; i++ )
        {
            int t = cvFloor(lo_diff.val[i]);
            ld_buf.b[i] = CV_CAST_8U(t);
            t = cvFloor(up_diff.val[i]);
            ud_buf.b[i] = CV_CAST_8U(t);
        }
    else if( depth == CV_32S )
        for( i = 0; i < cn; i++ )
        {
            int t = cvFloor(lo_diff.val[i]);
            ld_buf.i[i] = t;
            t = cvFloor(up_diff.val[i]);
            ud_buf.i[i] = t;
        }
    else if( depth == CV_32F )
        for( i = 0; i < cn; i++ )
        {
            ld_buf.f[i] = (float)lo_diff.val[i];
            ud_buf.f[i] = (float)up_diff.val[i];
        }
    else
        CV_Error( CV_StsUnsupportedFormat, "" );

    /* dispatch to the typed gradient-fill template */
    if( type == CV_8UC1 )
        icvFloodFillGrad_CnIR<uchar, int, Diff8uC1>(
            img->data.ptr, img->step, mask->data.ptr, mask->step,
            size, seed_point, nv_buf.b[0],
            Diff8uC1(ld_buf.b[0], ud_buf.b[0]),
            comp, flags, &buffer);
    else if( type == CV_8UC3 )
        icvFloodFillGrad_CnIR<cv::Vec3b, cv::Vec3i, Diff8uC3>(
            img->data.ptr, img->step, mask->data.ptr, mask->step,
            size, seed_point, cv::Vec3b(nv_buf.b),
            Diff8uC3(ld_buf.b, ud_buf.b),
            comp, flags, &buffer);
    else if( type == CV_32SC1 )
        icvFloodFillGrad_CnIR<int, int, Diff32sC1>(
            img->data.ptr, img->step, mask->data.ptr, mask->step,
            size, seed_point, nv_buf.i[0],
            Diff32sC1(ld_buf.i[0], ud_buf.i[0]),
            comp, flags, &buffer);
    else if( type == CV_32SC3 )
        icvFloodFillGrad_CnIR<cv::Vec3i, cv::Vec3i, Diff32sC3>(
            img->data.ptr, img->step, mask->data.ptr, mask->step,
            size, seed_point, cv::Vec3i(nv_buf.i),
            Diff32sC3(ld_buf.i, ud_buf.i),
            comp, flags, &buffer);
    else if( type == CV_32FC1 )
        icvFloodFillGrad_CnIR<float, float, Diff32fC1>(
            img->data.ptr, img->step, mask->data.ptr, mask->step,
            size, seed_point, nv_buf.f[0],
            Diff32fC1(ld_buf.f[0], ud_buf.f[0]),
            comp, flags, &buffer);
    else if( type == CV_32FC3 )
        icvFloodFillGrad_CnIR<cv::Vec3f, cv::Vec3f, Diff32fC3>(
            img->data.ptr, img->step, mask->data.ptr, mask->step,
            size, seed_point, cv::Vec3f(nv_buf.f),
            Diff32fC3(ld_buf.f, ud_buf.f),
            comp, flags, &buffer);
    else
        CV_Error(CV_StsUnsupportedFormat, "");
}
/* cvMultiplyAcc: adds the per-pixel product of two images to a floating-point
 * accumulator: acc(x,y) += arrA(x,y) * arrB(x,y), optionally restricted to
 * pixels where mask(x,y) != 0.
 *
 * arrA, arrB  - input images; must have equal channel counts and sizes.
 * acc         - accumulator; must be 32-bit float, same channels/size as inputs.
 * maskarr     - optional 8-bit mask (may be NULL).
 *
 * Legacy error handling: CV_CALL/CV_ERROR/IPPI_CALL jump to the __END__ label
 * on failure, so all checks run before any processing starts. */
CV_IMPL void
cvMultiplyAcc( const void* arrA, const void* arrB,
               void* acc, const void* maskarr )
{
    /* dispatch tables, filled once on first call (init is not thread-safe) */
    static CvFuncTable acc_tab;
    static CvBigFuncTable accmask_tab;
    static int inittab = 0;

    CV_FUNCNAME( "cvMultiplyAcc" );

    __BEGIN__;

    int coi1, coi2, coi3;
    int type;
    int mat1_step, mat2_step, sum_step, mask_step = 0;
    CvSize size;
    CvMat stub1, *mat1 = (CvMat*)arrA;
    CvMat stub2, *mat2 = (CvMat*)arrB;
    CvMat sumstub, *sum = (CvMat*)acc;
    CvMat maskstub, *mask = (CvMat*)maskarr;

    if( !inittab )
    {
        icvInitAddProductTable( &acc_tab, &accmask_tab );
        inittab = 1;
    }

    CV_CALL( mat1 = cvGetMat( mat1, &stub1, &coi1 ));
    CV_CALL( mat2 = cvGetMat( mat2, &stub2, &coi2 ));
    CV_CALL( sum = cvGetMat( sum, &sumstub, &coi3 ));

    /* channel-of-interest selection is not supported on any argument */
    if( coi1 != 0 || coi2 != 0 || coi3 != 0 )
        CV_ERROR( CV_BadCOI, "" );

    if( !CV_ARE_CNS_EQ( mat1, mat2 ) || !CV_ARE_CNS_EQ( mat1, sum ))
        CV_ERROR( CV_StsUnmatchedFormats, "" );

    /* the accumulator must be single-precision floating point */
    if( CV_MAT_DEPTH( sum->type ) != CV_32F )
        CV_ERROR( CV_BadDepth, "" );

    if( !CV_ARE_SIZES_EQ( mat1, sum ) || !CV_ARE_SIZES_EQ( mat2, sum ))
        CV_ERROR( CV_StsUnmatchedSizes, "" );

    size = cvGetMatSize( mat1 );
    type = CV_MAT_TYPE( mat1->type );

    mat1_step = mat1->step;
    mat2_step = mat2->step;
    sum_step = sum->step;

    if( !mask )
    {
        /* unmasked path: the table is indexed by source depth only */
        CvFunc2D_3A func = (CvFunc2D_3A)acc_tab.fn_2d[CV_MAT_DEPTH(type)];

        if( !func )
            CV_ERROR( CV_StsUnsupportedFormat, "" );

        size.width *= CV_MAT_CN(type);

        /* when every matrix is continuous, flatten to one long row so the
           low-level kernel runs a single pass with no per-row stride math */
        if( CV_IS_MAT_CONT( mat1->type & mat2->type & sum->type ))
        {
            size.width *= size.height;
            mat1_step = mat2_step = sum_step = CV_STUB_STEP;
            size.height = 1;
        }

        IPPI_CALL( func( mat1->data.ptr, mat1_step, mat2->data.ptr, mat2_step,
                         sum->data.ptr, sum_step, size ));
    }
    else
    {
        /* masked path: the big table is indexed by full type (depth+channels) */
        CvFunc2D_4A func = (CvFunc2D_4A)accmask_tab.fn_2d[type];

        if( !func )
            CV_ERROR( CV_StsUnsupportedFormat, "" );

        CV_CALL( mask = cvGetMat( mask, &maskstub ));

        if( !CV_IS_MASK_ARR( mask ))
            CV_ERROR( CV_StsBadMask, "" );

        if( !CV_ARE_SIZES_EQ( mat1, mask ))
            CV_ERROR( CV_StsUnmatchedSizes, "" );

        mask_step = mask->step;

        /* same flattening optimization; the mask must be continuous too */
        if( CV_IS_MAT_CONT( mat1->type & mat2->type & sum->type &
                            mask->type ))
        {
            size.width *= size.height;
            mat1_step = mat2_step = sum_step = mask_step = CV_STUB_STEP;
            size.height = 1;
        }

        IPPI_CALL( func( mat1->data.ptr, mat1_step, mat2->data.ptr, mat2_step,
                         mask->data.ptr, mask_step, sum->data.ptr, sum_step,
                         size ));
    }

    __END__;
}
/* cvCalcMotionGradient: computes the gradient orientation of a motion history
 * image (MHI) and a mask of pixels where the orientation is valid.
 *
 * mhiimg        - single-channel 32F motion history image.
 * maskimg       - 8-bit output mask: 1 where orientation is valid, 0 elsewhere.
 * orientation   - single-channel 32F output with the gradient orientation
 *                 (cvCartToPolar is called with angle_in_degrees = 1).
 * delta1/delta2 - min/max allowed difference between the neighborhood maximum
 *                 and minimum of the MHI (swapped internally if delta1>delta2).
 * aperture_size - Sobel aperture; must be 3, 5 or 7.
 *
 * In-place operation with the MHI as output is rejected. */
CV_IMPL void
cvCalcMotionGradient( const CvArr* mhiimg, CvArr* maskimg,
                      CvArr* orientation,
                      double delta1, double delta2,
                      int aperture_size )
{
    /* temporary buffers; cv::Ptr releases them automatically on return */
    cv::Ptr<CvMat> dX_min, dY_max;
    CvMat mhistub, *mhi = cvGetMat(mhiimg, &mhistub);
    CvMat maskstub, *mask = cvGetMat(maskimg, &maskstub);
    CvMat orientstub, *orient = cvGetMat(orientation, &orientstub);
    CvMat dX_min_row, dY_max_row, orient_row, mask_row;
    CvSize size;
    int x, y;

    /* gradients below this threshold (scaled by aperture area) count as zero */
    float gradient_epsilon = 1e-4f * aperture_size * aperture_size;
    float min_delta, max_delta;

    if( !CV_IS_MASK_ARR( mask ))
        CV_Error( CV_StsBadMask, "" );

    if( aperture_size < 3 || aperture_size > 7 || (aperture_size & 1) == 0 )
        CV_Error( CV_StsOutOfRange, "aperture_size must be 3, 5 or 7" );

    if( delta1 <= 0 || delta2 <= 0 )
        CV_Error( CV_StsOutOfRange, "both delta's must be positive" );

    if( CV_MAT_TYPE( mhi->type ) != CV_32FC1 || CV_MAT_TYPE( orient->type ) != CV_32FC1 )
        CV_Error( CV_StsUnsupportedFormat,
        "MHI and orientation must be single-channel floating-point images" );

    if( !CV_ARE_SIZES_EQ( mhi, mask ) || !CV_ARE_SIZES_EQ( orient, mhi ))
        CV_Error( CV_StsUnmatchedSizes, "" );

    if( orient->data.ptr == mhi->data.ptr )
        CV_Error( CV_StsInplaceNotSupported, "orientation image must be different from MHI" );

    /* normalize so that delta1 <= delta2 */
    if( delta1 > delta2 )
    {
        double t;
        CV_SWAP( delta1, delta2, t );
    }

    size = cvGetMatSize( mhi );
    min_delta = (float)delta1;
    max_delta = (float)delta2;
    dX_min = cvCreateMat( mhi->rows, mhi->cols, CV_32F );
    dY_max = cvCreateMat( mhi->rows, mhi->cols, CV_32F );

    // calc Dx and Dy
    cvSobel( mhi, dX_min, 1, 0, aperture_size );
    cvSobel( mhi, dY_max, 0, 1, aperture_size );

    /* single-row headers; their data pointers are repositioned per row below */
    cvGetRow( dX_min, &dX_min_row, 0 );
    cvGetRow( dY_max, &dY_max_row, 0 );
    cvGetRow( orient, &orient_row, 0 );
    cvGetRow( mask, &mask_row, 0 );

    // calc gradient
    for( y = 0; y < size.height; y++ )
    {
        dX_min_row.data.ptr = dX_min->data.ptr + y*dX_min->step;
        dY_max_row.data.ptr = dY_max->data.ptr + y*dY_max->step;
        orient_row.data.ptr = orient->data.ptr + y*orient->step;
        mask_row.data.ptr = mask->data.ptr + y*mask->step;

        /* angle-only output (magnitude arg is NULL), angle_in_degrees = 1 */
        cvCartToPolar( &dX_min_row, &dY_max_row, 0, &orient_row, 1 );

        // make orientation zero where the gradient is very small
        for( x = 0; x < size.width; x++ )
        {
            float dY = dY_max_row.data.fl[x];
            float dX = dX_min_row.data.fl[x];

            if( fabs(dX) < gradient_epsilon && fabs(dY) < gradient_epsilon )
            {
                mask_row.data.ptr[x] = 0;
                /* integer 0 zeroes all bits of the float slot, i.e. 0.0f */
                orient_row.data.i[x] = 0;
            }
            else
                mask_row.data.ptr[x] = 1;
        }
    }

    /* Reuse the gradient buffers: after this, dX_min holds the neighborhood
       MINIMUM of the MHI (erode) and dY_max the neighborhood MAXIMUM (dilate),
       with a (aperture_size-1)/2 iteration count over the default 3x3 element. */
    cvErode( mhi, dX_min, 0, (aperture_size-1)/2);
    cvDilate( mhi, dY_max, 0, (aperture_size-1)/2);

    // mask off pixels which have little motion difference in their neighborhood
    for( y = 0; y < size.height; y++ )
    {
        dX_min_row.data.ptr = dX_min->data.ptr + y*dX_min->step;
        dY_max_row.data.ptr = dY_max->data.ptr + y*dY_max->step;
        mask_row.data.ptr = mask->data.ptr + y*mask->step;
        orient_row.data.ptr = orient->data.ptr + y*orient->step;

        for( x = 0; x < size.width; x++ )
        {
            /* d0 = (local max) - (local min) of the MHI around the pixel;
               reject pixels whose range falls outside [min_delta, max_delta] */
            float d0 = dY_max_row.data.fl[x] - dX_min_row.data.fl[x];

            if( mask_row.data.ptr[x] == 0 || d0 < min_delta || max_delta < d0 )
            {
                mask_row.data.ptr[x] = 0;
                orient_row.data.i[x] = 0;
            }
        }
    }
}
/* cvAcc: adds an image to an accumulator: sum(x,y) += mat(x,y), optionally
 * only where mask(x,y) != 0.
 *
 * arr     - input image/matrix.
 * sumarr  - accumulator, same size and channel count as the input.
 * maskarr - optional 8-bit mask (may be NULL).
 *
 * NOTE(review): the first depth check below rejects any accumulator whose
 * depth is not CV_32F, yet the later check ("Bad accumulator type") is
 * written to additionally permit a CV_64F accumulator when maskarr == 0.
 * As the code stands, that CV_64F branch is unreachable — one of the two
 * checks looks like a leftover. Whether the dispatch tables actually contain
 * 64F entries is not visible from this file section, so the contradiction is
 * flagged rather than resolved here; confirm against icvInitAddTable before
 * removing either check. */
CV_IMPL void
cvAcc( const void* arr, void* sumarr, const void* maskarr )
{
    /* dispatch tables, filled once on first call (init is not thread-safe) */
    static CvFuncTable acc_tab;
    static CvBigFuncTable accmask_tab;
    static int inittab = 0;

    CV_FUNCNAME( "cvAcc" );

    __BEGIN__;

    int type, sumdepth;
    int mat_step, sum_step, mask_step = 0;
    CvSize size;
    CvMat stub, *mat = (CvMat*)arr;
    CvMat sumstub, *sum = (CvMat*)sumarr;
    CvMat maskstub, *mask = (CvMat*)maskarr;

    if( !inittab )
    {
        icvInitAddTable( &acc_tab, &accmask_tab );
        inittab = 1;
    }

    /* normalize both arguments to CvMat headers; COI is not supported */
    if( !CV_IS_MAT( mat ) || !CV_IS_MAT( sum ))
    {
        int coi1 = 0, coi2 = 0;
        CV_CALL( mat = cvGetMat( mat, &stub, &coi1 ));
        CV_CALL( sum = cvGetMat( sum, &sumstub, &coi2 ));
        if( coi1 + coi2 != 0 )
            CV_ERROR( CV_BadCOI, "" );
    }

    if( CV_MAT_DEPTH( sum->type ) != CV_32F )
        CV_ERROR( CV_BadDepth, "" );

    if( !CV_ARE_CNS_EQ( mat, sum ))
        CV_ERROR( CV_StsUnmatchedFormats, "" );

    /* a CV_64F accumulator is only meant to be allowed when no mask is used
       (see the NOTE above: currently dead because of the earlier check) */
    sumdepth = CV_MAT_DEPTH( sum->type );
    if( sumdepth != CV_32F && (maskarr != 0 || sumdepth != CV_64F))
        CV_ERROR( CV_BadDepth, "Bad accumulator type" );

    if( !CV_ARE_SIZES_EQ( mat, sum ))
        CV_ERROR( CV_StsUnmatchedSizes, "" );

    size = cvGetMatSize( mat );
    type = CV_MAT_TYPE( mat->type );

    mat_step = mat->step;
    sum_step = sum->step;

    if( !mask )
    {
        /* unmasked path: the table is indexed by source depth only */
        CvFunc2D_2A func=(CvFunc2D_2A)acc_tab.fn_2d[CV_MAT_DEPTH(type)];

        if( !func )
            CV_ERROR( CV_StsUnsupportedFormat, "Unsupported type combination" );

        size.width *= CV_MAT_CN(type);

        /* both continuous -> process as one long row (single kernel pass) */
        if( CV_IS_MAT_CONT( mat->type & sum->type ))
        {
            size.width *= size.height;
            mat_step = sum_step = CV_STUB_STEP;
            size.height = 1;
        }

        IPPI_CALL( func( mat->data.ptr, mat_step, sum->data.ptr,
                         sum_step, size ));
    }
    else
    {
        /* masked path: the big table is indexed by full type (depth+channels) */
        CvFunc2D_3A func = (CvFunc2D_3A)accmask_tab.fn_2d[type];

        if( !func )
            CV_ERROR( CV_StsUnsupportedFormat, "" );

        CV_CALL( mask = cvGetMat( mask, &maskstub ));

        if( !CV_IS_MASK_ARR( mask ))
            CV_ERROR( CV_StsBadMask, "" );

        if( !CV_ARE_SIZES_EQ( mat, mask ))
            CV_ERROR( CV_StsUnmatchedSizes, "" );

        mask_step = mask->step;

        /* same flattening optimization; the mask must be continuous too */
        if( CV_IS_MAT_CONT( mat->type & sum->type & mask->type ))
        {
            size.width *= size.height;
            mat_step = sum_step = mask_step = CV_STUB_STEP;
            size.height = 1;
        }

        IPPI_CALL( func( mat->data.ptr, mat_step, mask->data.ptr, mask_step,
                         sum->data.ptr, sum_step, size ));
    }

    __END__;
}
/* cvGetQuadrangleSubPix: extracts a quadrangle (affine-warped rectangle) from
 * the source image with sub-pixel accuracy into dst.
 *
 * srcarr - source image; 1 or 3 channels.
 * dstarr - destination; same channel count as src. Either the depths match,
 *          or src is 8U and dst is 32F (the only supported mixed-depth combo).
 * mat    - 2x3 transformation matrix, CV_32FC1 or CV_64FC1; presumably maps
 *          destination pixel coordinates into the source image — confirm
 *          against the icvGetQuadrangleSubPix kernels.
 *
 * Legacy error handling: CV_CALL/CV_ERROR/IPPI_CALL jump to __END__. */
CV_IMPL void
cvGetQuadrangleSubPix( const void* srcarr, void* dstarr, const CvMat* mat )
{
    /* dispatch tables: gq_tab[0] for 1-channel, gq_tab[1] for 3-channel;
       filled once on first call (init is not thread-safe) */
    static CvFuncTable gq_tab[2];
    static int inittab = 0;

    CV_FUNCNAME( "cvGetQuadrangleSubPix" );

    __BEGIN__;

    CvMat srcstub, *src = (CvMat*)srcarr;
    CvMat dststub, *dst = (CvMat*)dstarr;
    CvSize src_size, dst_size;
    CvGetQuadrangleSubPixFunc func;
    float m[6];   /* the 2x3 transform flattened row-major */
    int k, cn;

    if( !inittab )
    {
        icvInitGetQuadrangleSubPixC1RTable( gq_tab + 0 );
        icvInitGetQuadrangleSubPixC3RTable( gq_tab + 1 );
        inittab = 1;
    }

    if( !CV_IS_MAT(src))
        CV_CALL( src = cvGetMat( src, &srcstub ));

    if( !CV_IS_MAT(dst))
        CV_CALL( dst = cvGetMat( dst, &dststub ));

    if( !CV_IS_MAT(mat))
        CV_ERROR( CV_StsBadArg, "map matrix is not valid" );

    cn = CV_MAT_CN( src->type );

    if( (cn != 1 && cn != 3) || !CV_ARE_CNS_EQ( src, dst ))
        CV_ERROR( CV_StsUnsupportedFormat, "" );

    src_size = cvGetMatSize( src );
    dst_size = cvGetMatSize( dst );

    /* historical restriction, deliberately disabled:
    if( dst_size.width > src_size.width || dst_size.height > src_size.height )
        CV_ERROR( CV_StsBadSize, "destination ROI must not be larger than source ROI" );*/

    if( mat->rows != 2 || mat->cols != 3 )
        CV_ERROR( CV_StsBadArg, "Transformation matrix must be 2x3" );

    /* copy the transform into float m[6]; the second row is read through the
       raw data pointer + step because the matrix may be non-continuous */
    if( CV_MAT_TYPE( mat->type ) == CV_32FC1 )
    {
        for( k = 0; k < 3; k++ )
        {
            m[k] = mat->data.fl[k];
            m[3 + k] = ((float*)(mat->data.ptr + mat->step))[k];
        }
    }
    else if( CV_MAT_TYPE( mat->type ) == CV_64FC1 )
    {
        for( k = 0; k < 3; k++ )
        {
            m[k] = (float)mat->data.db[k];
            m[3 + k] = (float)((double*)(mat->data.ptr + mat->step))[k];
        }
    }
    else
        CV_ERROR( CV_StsUnsupportedFormat,
        "The transformation matrix should have 32fC1 or 64fC1 type" );

    if( CV_ARE_DEPTHS_EQ( src, dst ))
    {
        /* equal-depth path: table indexed by the common depth */
        func = (CvGetQuadrangleSubPixFunc)(gq_tab[cn != 1].fn_2d[CV_MAT_DEPTH(src->type)]);
    }
    else
    {
        /* mixed-depth path: only 8U src -> 32F dst is supported; slot 1
           presumably holds the 8u->32f variant — confirm in the init tables */
        if( CV_MAT_DEPTH( src->type ) != CV_8U || CV_MAT_DEPTH( dst->type ) != CV_32F )
            CV_ERROR( CV_StsUnsupportedFormat, "" );

        func = (CvGetQuadrangleSubPixFunc)(gq_tab[cn != 1].fn_2d[1]);
    }

    if( !func )
        CV_ERROR( CV_StsUnsupportedFormat, "" );

    IPPI_CALL( func( src->data.ptr, src->step, src_size,
                     dst->data.ptr, dst->step, dst_size, m ));

    __END__;
}
/* cvDrlse_edge: runs `iter` iterations of an edge-based level-set evolution
 * update (DRLSE-style: distance-regularization + edge + area terms —
 * presumably after Li et al.'s DRLSE formulation; confirm against the paper).
 *
 * srcphi   - level-set function phi, CV_32FC1.
 * srcgrad  - edge indicator image g (from the image gradient), CV_32FC1,
 *            same size as phi.
 * dstarr   - output level-set function; phi is copied in and evolved in place
 *            in dst, so dstarr may alias srcphi.
 * lambda   - weight of the edge term.
 * mu       - weight of the distance-regularization term.
 * alfa     - weight of the (balloon) area term.
 * epsilon  - width parameter of the smoothed Dirac delta.
 * timestep - explicit Euler time step per iteration.
 * iter     - number of iterations.
 *
 * Per iteration (all element-wise on 32F images):
 *   phi += timestep * (mu*distRegTerm + lambda*edgeTerm + alfa*areaTerm)
 * where edgeTerm = dirac(phi) * (gx*Nx + gy*Ny) + dirac(phi)*g*curvature,
 * areaTerm = dirac(phi)*g, and (Nx,Ny) is the normalized gradient of phi.
 *
 * Fix vs. previous revision: phi_0 (a full-size CV_32FC1 matrix) was
 * allocated and released without ever being read or written; the dead
 * allocation has been removed. No other behavior changed. */
CV_IMPL void cvDrlse_edge(CvArr * srcphi,
                          CvArr * srcgrad,
                          CvArr * dstarr,
                          double lambda,
                          double mu,
                          double alfa,
                          double epsilon,
                          int timestep,
                          int iter)
{
    CV_FUNCNAME( "cvDrlse_edge" );

    __BEGIN__;

    CvMat sstub1, sstub2, *phi, *grad;
    CvMat dstub, *dst;
    CvMat *gradx=0, *grady=0, *phix=0, *phiy=0;
    CvMat *s=0, *Nx=0, *Ny=0, *curvature=0, *distRegTerm=0;
    CvMat *diracPhi=0, *areaTerm=0, *edgeTerm=0;
    CvMat *temp1=0, *temp2=0, *temp3=0, *ones=0;
    CvSize size;
    int i;

    CV_CALL( phi = cvGetMat(srcphi, &sstub1 ));
    CV_CALL( grad = cvGetMat(srcgrad, &sstub2 ));
    CV_CALL( dst = cvGetMat(dstarr, &dstub));

    if( CV_MAT_TYPE(phi->type) != CV_32FC1)
        CV_ERROR( CV_StsUnsupportedFormat, "Only-32bit, 1-channel input images are supported" );
    if( CV_MAT_TYPE(grad->type) != CV_32FC1)
        CV_ERROR( CV_StsUnsupportedFormat, "Only-32bit, 1-channel input images are supported" );
    if( !CV_ARE_SIZES_EQ( phi, grad ))
        CV_ERROR( CV_StsUnmatchedSizes, "The input images must have the same size" );

    size = cvGetMatSize( phi );

    /* workspace: one full-size 32F scratch matrix per intermediate quantity */
    gradx      = cvCreateMat(size.height, size.width, CV_32FC1 );
    grady      = cvCreateMat(size.height, size.width, CV_32FC1 );
    phix       = cvCreateMat(size.height, size.width, CV_32FC1 );
    phiy       = cvCreateMat(size.height, size.width, CV_32FC1 );
    Nx         = cvCreateMat(size.height, size.width, CV_32FC1 );
    Ny         = cvCreateMat(size.height, size.width, CV_32FC1 );
    s          = cvCreateMat(size.height, size.width, CV_32FC1 );
    curvature  = cvCreateMat(size.height, size.width, CV_32FC1 );
    distRegTerm= cvCreateMat(size.height, size.width, CV_32FC1 );
    diracPhi   = cvCreateMat(size.height, size.width, CV_32FC1 );
    areaTerm   = cvCreateMat(size.height, size.width, CV_32FC1 );
    edgeTerm   = cvCreateMat(size.height, size.width, CV_32FC1 );
    temp1      = cvCreateMat(size.height, size.width, CV_32FC1 );
    temp2      = cvCreateMat(size.height, size.width, CV_32FC1 );
    temp3      = cvCreateMat(size.height, size.width, CV_32FC1 );
    ones       = cvCreateMat(size.height, size.width, CV_32FC1 );

    cvSetZero(gradx);
    cvSetZero(grady);
    cvSetZero(phix);
    cvSetZero(phiy);
    cvSetZero(Nx);
    cvSetZero(Ny);
    cvSetZero(s);
    cvSetZero(curvature);
    cvSetZero(distRegTerm);
    cvSetZero(diracPhi);
    cvSetZero(areaTerm);
    cvSetZero(edgeTerm);
    cvSetZero(temp1);
    cvSetZero(temp2);
    cvSetZero(temp3);
    /* all-ones matrix: cvMul(x, ones, x, scale) is used below as an
       in-place multiply-by-scalar */
    cvSet(ones, cvScalar(1.0f));

    //--------------BEGIN----------------------
    /* gradient of the edge indicator g; the 0.25 scale presumably normalizes
       the Sobel kernel gain for aperture 1 — confirm against cvSobel docs */
    cvSobel(grad, gradx, 1, 0, 1);
    cvSobel(grad, grady, 0, 1, 1);
    cvMul(gradx, ones, gradx, 0.25f);
    cvMul(grady, ones, grady, 0.25f);

    /* evolve a copy of phi in dst */
    cvCopy(phi, dst);

    for(i=0; i<iter; i++){
        cvNeumannBoundCond(dst, dst);            /* zero-flux boundary */

        /* normalized gradient of phi: (Nx,Ny) = grad(phi) / |grad(phi)| */
        cvSobel(dst, phix, 1, 0, 1);
        cvSobel(dst, phiy, 0, 1, 1);
        cvCalS(dst,s);                           /* s = |grad(phi)| (regularized) */
        cvDiv(phix, s, Nx, 0.25f);
        cvDiv(phiy, s, Ny, 0.25f);

        cvCurvature(Nx, Ny, curvature);          /* div(N) */
        cvDistReg(dst, distRegTerm);             /* distance-regularization term */

        cvDirac(dst, diracPhi, epsilon);         //Compute driacPhi;
        cvMul(diracPhi, grad, areaTerm);         //Compute areaTerm

        cvMul(gradx, Nx, gradx);                 //------------------//
        cvMul(grady, Ny, grady);                 //    Computing     //
        cvAdd(gradx, grady, temp1);              //                  //
        cvMul(diracPhi, temp1, temp2);           //     edgeTerm     //
        cvMul(areaTerm, curvature, temp3);       //                  //
        cvAdd(temp2, temp3, edgeTerm);           //------------------//

        cvMul(distRegTerm, ones, distRegTerm, mu);      // distRegTerm = mu * distRegTerm
        cvMul(edgeTerm, ones, edgeTerm, lambda);        // edgeTerm = lambda * edgeTerm
        cvMul(areaTerm, ones, areaTerm, alfa);          // areaTerm = alfa * areaTerm

        cvAdd(distRegTerm, edgeTerm, temp1);
        cvAdd(temp1, areaTerm, temp2);                  // (distRegTerm + edgeTerm + areaTerm)
        cvMul(temp2, ones, temp2, double(timestep));    // timestep * (distRegTerm + edgeTerm + areaTerm)
        cvAdd(dst, temp2, dst);                         // phi = phi + timestep * (distRegTerm + edgeTerm + areaTerm)
    }
    //----------------END------------------------

    // Clean up
    cvReleaseMat(&ones);
    cvReleaseMat(&gradx);
    cvReleaseMat(&grady);
    cvReleaseMat(&phix);
    cvReleaseMat(&phiy);
    cvReleaseMat(&Nx);
    cvReleaseMat(&Ny);
    cvReleaseMat(&s);
    cvReleaseMat(&curvature);
    cvReleaseMat(&distRegTerm);
    cvReleaseMat(&diracPhi);
    cvReleaseMat(&areaTerm);
    cvReleaseMat(&edgeTerm);
    cvReleaseMat(&temp1);
    cvReleaseMat(&temp2);
    cvReleaseMat(&temp3);

    __END__;
}