/* Computes the convex hull of a 2D point set (legacy C API).
   array        - input points: a point CvSeq or a 1xN/Nx1 CvMat of 32s/32f points.
   hull_storage - CvMemStorage (hull returned as a new sequence) or a CvMat
                  (hull written in place into the matrix data).
   orientation  - CV_CLOCKWISE or CV_COUNTER_CLOCKWISE output order.
   return_points- nonzero: hull stores points; zero: hull stores pointers/indices.
   Returns the hull sequence (or 0 when the output is a matrix).
   Implementation: Sklansky's scan applied separately to the four monotone
   chains (top-left, top-right, bottom-left, bottom-right). */
CV_IMPL CvSeq*
cvConvexHull2( const CvArr* array, void* hull_storage,
               int orientation, int return_points )
{
    /* union lets the same pointer be viewed as CvContour* (to set ->rect)
       and CvSeq* (the return value) */
    union { CvContour* c; CvSeq* s; } hull;
    CvPoint** pointer = 0;      /* per-point pointers into the sequence data */
    CvPoint2D32f** pointerf = 0;/* same buffer reinterpreted for float input */
    int* stack = 0;             /* shared index stack for the Sklansky scans */

    CV_FUNCNAME( "cvConvexHull2" );

    hull.s = 0;

    __BEGIN__;

    CvMat* mat = 0;
    CvSeqReader reader;
    CvSeqWriter writer;
    CvContour contour_header;
    union { CvContour c; CvSeq s; } hull_header;
    CvSeqBlock block, hullblock;
    CvSeq* ptseq = 0;
    CvSeq* hullseq = 0;
    int is_float;
    int* t_stack;
    int t_count;
    int i, miny_ind = 0, maxy_ind = 0, total;
    int hulltype;
    int stop_idx;
    sklansky_func sklansky;

    if( CV_IS_SEQ( array ))
    {
        ptseq = (CvSeq*)array;
        if( !CV_IS_SEQ_POINT_SET( ptseq ))
            CV_ERROR( CV_StsBadArg, "Unsupported sequence type" );
        if( hull_storage == 0 )
            hull_storage = ptseq->storage;  /* default: same storage as input */
    }
    else
    {
        /* wrap the input matrix in a temporary point-sequence header */
        CV_CALL( ptseq = cvPointSeqFromMat(
            CV_SEQ_KIND_GENERIC, array, &contour_header, &block ));
    }

    if( CV_IS_STORAGE( hull_storage ))
    {
        /* output into memory storage: element type depends on return_points */
        if( return_points )
        {
            CV_CALL( hullseq = cvCreateSeq(
                CV_SEQ_KIND_CURVE|CV_SEQ_ELTYPE(ptseq)|
                CV_SEQ_FLAG_CLOSED|CV_SEQ_FLAG_CONVEX,
                sizeof(CvContour), sizeof(CvPoint),(CvMemStorage*)hull_storage ));
        }
        else
        {
            CV_CALL( hullseq = cvCreateSeq(
                CV_SEQ_KIND_CURVE|CV_SEQ_ELTYPE_PPOINT|
                CV_SEQ_FLAG_CLOSED|CV_SEQ_FLAG_CONVEX,
                sizeof(CvContour), sizeof(CvPoint*), (CvMemStorage*)hull_storage ));
        }
    }
    else
    {
        if( !CV_IS_MAT( hull_storage ))
            CV_ERROR(CV_StsBadArg, "Destination must be valid memory storage or matrix");

        mat = (CvMat*)hull_storage;

        /* NOTE(review): condition relies on `&&` binding tighter than `||`;
           intent is ((cols != 1 && rows != 1) || !continuous) — confirm and
           consider explicit parentheses to silence -Wparentheses. */
        if( mat->cols != 1 && mat->rows != 1 || !CV_IS_MAT_CONT(mat->type))
            CV_ERROR( CV_StsBadArg,
            "The hull matrix should be continuous and have a single row or a single column" );

        if( mat->cols + mat->rows - 1 < ptseq->total )
            CV_ERROR( CV_StsBadSize, "The hull matrix size might be not enough to fit the hull" );

        if( CV_MAT_TYPE(mat->type) != CV_SEQ_ELTYPE(ptseq) &&
            CV_MAT_TYPE(mat->type) != CV_32SC1 )
            CV_ERROR( CV_StsUnsupportedFormat,
            "The hull matrix must have the same type as input or 32sC1 (integers)" );

        /* view the caller's matrix as a sequence so the same writer code works */
        CV_CALL( hullseq = cvMakeSeqHeaderForArray(
            CV_SEQ_KIND_CURVE|CV_MAT_TYPE(mat->type)|CV_SEQ_FLAG_CLOSED,
            sizeof(contour_header), CV_ELEM_SIZE(mat->type), mat->data.ptr,
            mat->cols + mat->rows - 1, &hull_header.s, &hullblock ));

        cvClearSeq( hullseq );
    }

    total = ptseq->total;
    if( total == 0 )
    {
        if( mat )
            CV_ERROR( CV_StsBadSize, "Point sequence can not be empty if the output is matrix" );
        EXIT;  /* empty input + storage output -> empty hull, return 0 */
    }

    cvStartAppendToSeq( hullseq, &writer );

    is_float = CV_SEQ_ELTYPE(ptseq) == CV_32FC2;
    hulltype = CV_SEQ_ELTYPE(hullseq);
    sklansky = !is_float ? (sklansky_func)icvSklansky_32s :
                           (sklansky_func)icvSklansky_32f;

    CV_CALL( pointer = (CvPoint**)cvAlloc( ptseq->total*sizeof(pointer[0]) ));
    CV_CALL( stack = (int*)cvAlloc( (ptseq->total + 2)*sizeof(stack[0]) ));
    pointerf = (CvPoint2D32f**)pointer;

    cvStartReadSeq( ptseq, &reader );

    /* collect a pointer to every point so sorting never moves the data */
    for( i = 0; i < total; i++ )
    {
        pointer[i] = (CvPoint*)reader.ptr;
        CV_NEXT_SEQ_ELEM( ptseq->elem_size, reader );
    }

    // sort the point set by x-coordinate, find min and max y
    if( !is_float )
    {
        icvSortPointsByPointers_32s( pointer, total, 0 );
        for( i = 1; i < total; i++ )
        {
            int y = pointer[i]->y;
            if( pointer[miny_ind]->y > y )
                miny_ind = i;
            if( pointer[maxy_ind]->y < y )
                maxy_ind = i;
        }
    }
    else
    {
        icvSortPointsByPointers_32f( pointerf, total, 0 );
        for( i = 1; i < total; i++ )
        {
            float y = pointerf[i]->y;
            if( pointerf[miny_ind]->y > y )
                miny_ind = i;
            if( pointerf[maxy_ind]->y < y )
                maxy_ind = i;
        }
    }

    /* degenerate case: after sorting, first and last points coincide ->
       presumably all points are identical; emit a single-element hull */
    if( pointer[0]->x == pointer[total-1]->x &&
        pointer[0]->y == pointer[total-1]->y )
    {
        if( hulltype == CV_SEQ_ELTYPE_PPOINT )
        {
            CV_WRITE_SEQ_ELEM( pointer[0], writer );
        }
        else if( hulltype == CV_SEQ_ELTYPE_INDEX )
        {
            int index = 0;
            CV_WRITE_SEQ_ELEM( index, writer );
        }
        else
        {
            CvPoint pt = pointer[0][0];
            CV_WRITE_SEQ_ELEM( pt, writer );
        }
        goto finish_hull;
    }

    /*upper half */
    {
        int *tl_stack = stack;
        int tl_count = sklansky( pointer, 0, maxy_ind, tl_stack, -1, 1 );
        int *tr_stack = tl_stack + tl_count;
        int tr_count = sklansky( pointer, ptseq->total - 1, maxy_ind, tr_stack, -1, -1 );

        /* gather upper part of convex hull to output */
        if( orientation == CV_COUNTER_CLOCKWISE )
        {
            /* swap chains so they are emitted in the requested orientation */
            CV_SWAP( tl_stack, tr_stack, t_stack );
            CV_SWAP( tl_count, tr_count, t_count );
        }

        if( hulltype == CV_SEQ_ELTYPE_PPOINT )
        {
            for( i = 0; i < tl_count - 1; i++ )
                CV_WRITE_SEQ_ELEM( pointer[tl_stack[i]], writer );
            for( i = tr_count - 1; i > 0; i-- )
                CV_WRITE_SEQ_ELEM( pointer[tr_stack[i]], writer );
        }
        else if( hulltype == CV_SEQ_ELTYPE_INDEX )
        {
            CV_CALL( icvCalcAndWritePtIndices( pointer, tl_stack, 0, tl_count-1, ptseq, &writer ));
            CV_CALL( icvCalcAndWritePtIndices( pointer, tr_stack, tr_count-1, 0, ptseq, &writer ));
        }
        else
        {
            for( i = 0; i < tl_count - 1; i++ )
                CV_WRITE_SEQ_ELEM( pointer[tl_stack[i]][0], writer );
            for( i = tr_count - 1; i > 0; i-- )
                CV_WRITE_SEQ_ELEM( pointer[tr_stack[i]][0], writer );
        }

        /* remember a representative interior hull vertex of the upper chain;
           used below to detect the all-collinear case */
        stop_idx = tr_count > 2 ? tr_stack[1] :
                   tl_count > 2 ? tl_stack[tl_count - 2] : -1;
    }

    /* lower half */
    {
        int *bl_stack = stack;
        int bl_count = sklansky( pointer, 0, miny_ind, bl_stack, 1, -1 );
        int *br_stack = stack + bl_count;
        int br_count = sklansky( pointer, ptseq->total - 1, miny_ind, br_stack, 1, 1 );

        if( orientation != CV_COUNTER_CLOCKWISE )
        {
            CV_SWAP( bl_stack, br_stack, t_stack );
            CV_SWAP( bl_count, br_count, t_count );
        }

        if( stop_idx >= 0 )
        {
            int check_idx = bl_count > 2 ? bl_stack[1] :
                bl_count + br_count > 2 ? br_stack[2-bl_count] : -1;
            if( check_idx == stop_idx ||
                check_idx >= 0 && pointer[check_idx]->x == pointer[stop_idx]->x &&
                pointer[check_idx]->y == pointer[stop_idx]->y )
            {
                /* if all the points lie on the same line, then
                   the bottom part of the convex hull is the mirrored top part
                   (except the exteme points).*/
                bl_count = MIN( bl_count, 2 );
                br_count = MIN( br_count, 2 );
            }
        }

        if( hulltype == CV_SEQ_ELTYPE_PPOINT )
        {
            for( i = 0; i < bl_count - 1; i++ )
                CV_WRITE_SEQ_ELEM( pointer[bl_stack[i]], writer );
            for( i = br_count - 1; i > 0; i-- )
                CV_WRITE_SEQ_ELEM( pointer[br_stack[i]], writer );
        }
        else if( hulltype == CV_SEQ_ELTYPE_INDEX )
        {
            CV_CALL( icvCalcAndWritePtIndices( pointer, bl_stack, 0, bl_count-1, ptseq, &writer ));
            CV_CALL( icvCalcAndWritePtIndices( pointer, br_stack, br_count-1, 0, ptseq, &writer ));
        }
        else
        {
            for( i = 0; i < bl_count - 1; i++ )
                CV_WRITE_SEQ_ELEM( pointer[bl_stack[i]][0], writer );
            for( i = br_count - 1; i > 0; i-- )
                CV_WRITE_SEQ_ELEM( pointer[br_stack[i]][0], writer );
        }
    }

finish_hull:
    CV_CALL( cvEndWriteSeq( &writer ));

    if( mat )
    {
        /* matrix output: shrink the matrix to the actual hull size */
        if( mat->rows > mat->cols )
            mat->rows = hullseq->total;
        else
            mat->cols = hullseq->total;
    }
    else
    {
        hull.s = hullseq;
        /* compute/copy the bounding rect; second arg forces recalculation when
           the input header is temporary or not a full CvContour */
        hull.c->rect = cvBoundingRect( ptseq,
            ptseq->header_size < (int)sizeof(CvContour) ||
            &ptseq->flags == &contour_header.flags );
        /*if( ptseq != (CvSeq*)&contour_header )
            hullseq->v_prev = ptseq;*/
    }

    __END__;

    cvFree( &pointer );
    cvFree( &stack );

    return hull.s;
}
/* Computes the convex hull of a 2D point set (modern wrapper: validates the
   legacy inputs, then delegates the actual hull computation to cv::convexHull).
   array        - point CvSeq or CvMat of points.
   hull_storage - CvMemStorage (returns a new hull sequence) or CvMat
                  (hull written into the matrix in place, 0 returned).
   orientation  - CV_CLOCKWISE / CV_COUNTER_CLOCKWISE.
   return_points- nonzero: store points; zero: store pointers (seq output)
                  or indices (matrix output). */
CV_IMPL CvSeq*
cvConvexHull2( const CvArr* array, void* hull_storage,
               int orientation, int return_points )
{
    CvMat* mat = 0;
    CvContour contour_header;
    CvSeq hull_header;       /* stack-allocated header aliasing the output CvMat */
    CvSeqBlock block, hullblock;
    CvSeq* ptseq = 0;
    CvSeq* hullseq = 0;

    if( CV_IS_SEQ( array ))
    {
        ptseq = (CvSeq*)array;
        if( !CV_IS_SEQ_POINT_SET( ptseq ))
            CV_Error( CV_StsBadArg, "Unsupported sequence type" );
        if( hull_storage == 0 )
            hull_storage = ptseq->storage;   /* default: input's own storage */
    }
    else
    {
        /* wrap the matrix input in a temporary point-sequence header */
        ptseq = cvPointSeqFromMat( CV_SEQ_KIND_GENERIC, array, &contour_header, &block );
    }

    bool isStorage = isStorageOrMat(hull_storage);

    if(isStorage)
    {
        /* storage output: allocate the hull sequence with the right eltype */
        if( return_points )
        {
            hullseq = cvCreateSeq(CV_SEQ_KIND_CURVE|CV_SEQ_ELTYPE(ptseq)|
                CV_SEQ_FLAG_CLOSED|CV_SEQ_FLAG_CONVEX,
                sizeof(CvContour), sizeof(CvPoint),(CvMemStorage*)hull_storage );
        }
        else
        {
            hullseq = cvCreateSeq( CV_SEQ_KIND_CURVE|CV_SEQ_ELTYPE_PPOINT|
                CV_SEQ_FLAG_CLOSED|CV_SEQ_FLAG_CONVEX,
                sizeof(CvContour), sizeof(CvPoint*), (CvMemStorage*)hull_storage );
        }
    }
    else
    {
        mat = (CvMat*)hull_storage;

        if( (mat->cols != 1 && mat->rows != 1) || !CV_IS_MAT_CONT(mat->type))
            CV_Error( CV_StsBadArg,
            "The hull matrix should be continuous and have a single row or a single column" );

        if( mat->cols + mat->rows - 1 < ptseq->total )
            CV_Error( CV_StsBadSize, "The hull matrix size might be not enough to fit the hull" );

        if( CV_MAT_TYPE(mat->type) != CV_SEQ_ELTYPE(ptseq) &&
            CV_MAT_TYPE(mat->type) != CV_32SC1 )
            CV_Error( CV_StsUnsupportedFormat,
            "The hull matrix must have the same type as input or 32sC1 (integers)" );

        /* view the caller's matrix as a writable sequence */
        hullseq = cvMakeSeqHeaderForArray(
            CV_SEQ_KIND_CURVE|CV_MAT_TYPE(mat->type)|CV_SEQ_FLAG_CLOSED,
            sizeof(hull_header), CV_ELEM_SIZE(mat->type), mat->data.ptr,
            mat->cols + mat->rows - 1, &hull_header, &hullblock );
        cvClearSeq( hullseq );
    }

    int hulltype = CV_SEQ_ELTYPE(hullseq);
    int total = ptseq->total;
    if( total == 0 )
    {
        if( !isStorage )
            CV_Error( CV_StsBadSize,
            "Point sequence can not be empty if the output is matrix" );
        return 0;
    }

    cv::AutoBuffer<double> _ptbuf;
    cv::Mat h0;
    /* the C++ implementation does the real work; returnPoints iff the output
       element has 2 channels (points rather than scalar indices) */
    cv::convexHull(cv::cvarrToMat(ptseq, false, false, 0, &_ptbuf), h0,
                   orientation == CV_CLOCKWISE, CV_MAT_CN(hulltype) == 2);

    if( hulltype == CV_SEQ_ELTYPE_PPOINT )
    {
        /* translate returned indices into pointers to the original elements */
        const int* idx = h0.ptr<int>();
        int ctotal = (int)h0.total();
        for( int i = 0; i < ctotal; i++ )
        {
            void* ptr = cvGetSeqElem(ptseq, idx[i]);
            cvSeqPush( hullseq, &ptr );
        }
    }
    else
        cvSeqPushMulti(hullseq, h0.ptr(), (int)h0.total());

    if (isStorage)
    {
        return hullseq;
    }
    else
    {
        /* matrix output: shrink the matrix to the hull size, return nothing */
        if( mat->rows > mat->cols )
            mat->rows = hullseq->total;
        else
            mat->cols = hullseq->total;
        return 0;
    }
}
/* Computes eigenvalues and eigenvectors of a symmetric matrix via Jacobi
   iterations (legacy C API).
   srcarr    - square CV_32FC1 or CV_64FC1 matrix (contents of src may be
               modified by the underlying routine — see icvJacobiEigens_*).
   evectsarr - output matrix of eigenvectors, same size/type as src.
   evalsarr  - output vector of eigenvalues (src->rows x 1 or 1 x src->rows).
   eps       - iteration termination accuracy. */
CV_IMPL void
cvEigenVV( CvArr* srcarr, CvArr* evectsarr, CvArr* evalsarr, double eps )
{
    CV_FUNCNAME( "cvEigenVV" );

    __BEGIN__;

    CvMat sstub, *src = (CvMat*)srcarr;
    CvMat estub1, *evects = (CvMat*)evectsarr;
    CvMat estub2, *evals = (CvMat*)evalsarr;

    /* coerce IplImage/other CvArr inputs to CvMat headers */
    if( !CV_IS_MAT( src ))
        CV_CALL( src = cvGetMat( src, &sstub ));

    if( !CV_IS_MAT( evects ))
        CV_CALL( evects = cvGetMat( evects, &estub1 ));

    if( !CV_IS_MAT( evals ))
        CV_CALL( evals = cvGetMat( evals, &estub2 ));

    if( src->cols != src->rows )
        CV_ERROR( CV_StsUnmatchedSizes, "source is not quadratic matrix" );

    if( !CV_ARE_SIZES_EQ( src, evects) )
        CV_ERROR( CV_StsUnmatchedSizes, "eigenvectors matrix has inappropriate size" );

    /* eigenvalues may be a row or a column vector of length src->rows */
    if( (evals->rows != src->rows || evals->cols != 1) &&
        (evals->cols != src->rows || evals->rows != 1))
        CV_ERROR( CV_StsBadSize, "eigenvalues vector has inappropriate size" );

    if( !CV_ARE_TYPES_EQ( src, evects ) || !CV_ARE_TYPES_EQ( src, evals ))
        CV_ERROR( CV_StsUnmatchedFormats,
        "input matrix, eigenvalues and eigenvectors must have the same type" );

    if( !CV_IS_MAT_CONT( src->type & evals->type & evects->type ))
        CV_ERROR( CV_BadStep, "all the matrices must be continuous" );

    /* dispatch on depth to the single- or double-precision Jacobi solver */
    if( CV_MAT_TYPE(src->type) == CV_32FC1 )
    {
        IPPI_CALL( icvJacobiEigens_32f( src->data.fl,
        evects->data.fl, evals->data.fl, src->cols, (float)eps ));
    }
    else if( CV_MAT_TYPE(src->type) == CV_64FC1 )
    {
        IPPI_CALL( icvJacobiEigens_64d( src->data.db,
        evects->data.db, evals->data.db, src->cols, eps ));
    }
    else
    {
        CV_ERROR( CV_StsUnsupportedFormat, "Only 32fC1 and 64fC1 types are supported" );
    }

    CV_CHECK_NANS( evects );
    CV_CHECK_NANS( evals );

    __END__;
}
/* Randomly shuffles the elements of an array (legacy C API).
   arr         - matrix (or convertible CvArr) whose elements are permuted in place.
   rng         - random number generator state; a default RNG is used when NULL.
   iter_factor - the number of random pair swaps is about
                 cvRound(iter_factor * total_elements).
   Elements are swapped in batches of random index pairs.  When the element
   size is a multiple of sizeof(int) and alignment permits, swapping is done
   int-by-int, otherwise byte-by-byte.
   FIX: in the non-continuous branches the row of a linear element index was
   computed as idx/step (step is the row stride — in ints or bytes), while the
   column offset was computed as idx - row*cols.  For padded (non-continuous)
   matrices that addressed wrong elements and padding bytes.  The row must be
   idx/cols, matching the column arithmetic. */
CV_IMPL void
cvRandShuffle( CvArr* arr, CvRNG* rng, double iter_factor )
{
    CV_FUNCNAME( "cvRandShuffle" );

    __BEGIN__;

    const int sizeof_int = (int)sizeof(int);
    CvMat stub, *mat = (CvMat*)arr;
    int i, j, k, iters, delta = 0;
    int cont_flag, arr_size, elem_size, cols, step;
    const int pair_buf_sz = 100;
    /* batch buffer of random linear indices: pairs (buf[j], buf[j+1]) */
    int* pair_buf = (int*)cvStackAlloc( pair_buf_sz*sizeof(pair_buf[0])*2 );
    CvMat _pair_buf = cvMat( 1, pair_buf_sz*2, CV_32S, pair_buf );
    CvRNG _rng = cvRNG(-1);
    uchar* data = 0;
    int* idata = 0;

    if( !CV_IS_MAT(mat) )
        CV_CALL( mat = cvGetMat( mat, &stub ));

    if( !rng )
        rng = &_rng;

    cols = mat->cols;
    step = mat->step;
    arr_size = cols*mat->rows;
    iters = cvRound(iter_factor*arr_size)*2;  /* *2: indices consumed per swap */
    cont_flag = CV_IS_MAT_CONT(mat->type);
    elem_size = CV_ELEM_SIZE(mat->type);

    /* fast path: swap whole ints when size and alignment allow */
    if( elem_size % sizeof_int == 0 && (cont_flag || step % sizeof_int == 0) )
    {
        idata = mat->data.i;
        step /= sizeof_int;       /* step/elem_size now measured in ints */
        elem_size /= sizeof_int;
    }
    else
        data = mat->data.ptr;

    for( i = 0; i < iters; i += delta )
    {
        delta = MIN( iters - i, pair_buf_sz*2 );
        _pair_buf.cols = delta;
        /* uniform indices in [0, arr_size) */
        cvRandArr( rng, &_pair_buf, CV_RAND_UNI, cvRealScalar(0), cvRealScalar(arr_size) );

        if( cont_flag )
        {
            /* continuous data: linear index addresses elements directly */
            if( idata )
                for( j = 0; j < delta; j += 2 )
                {
                    int* p = idata + pair_buf[j]*elem_size,
                       * q = idata + pair_buf[j+1]*elem_size, t;
                    for( k = 0; k < elem_size; k++ )
                        CV_SWAP( p[k], q[k], t );
                }
            else
                for( j = 0; j < delta; j += 2 )
                {
                    uchar* p = data + pair_buf[j]*elem_size,
                         * q = data + pair_buf[j+1]*elem_size, t;
                    for( k = 0; k < elem_size; k++ )
                        CV_SWAP( p[k], q[k], t );
                }
        }
        else
        {
            /* padded rows: split the linear index into (row, col) and use the
               row stride to address the element */
            if( idata )
                for( j = 0; j < delta; j += 2 )
                {
                    int idx1 = pair_buf[j], idx2 = pair_buf[j+1], row1, row2;
                    int* p, *q, t;
                    row1 = idx1/cols;   /* was idx1/step: wrong units (see above) */
                    row2 = idx2/cols;
                    p = idata + row1*step + (idx1 - row1*cols)*elem_size;
                    q = idata + row2*step + (idx2 - row2*cols)*elem_size;
                    for( k = 0; k < elem_size; k++ )
                        CV_SWAP( p[k], q[k], t );
                }
            else
                for( j = 0; j < delta; j += 2 )
                {
                    int idx1 = pair_buf[j], idx2 = pair_buf[j+1], row1, row2;
                    uchar* p, *q, t;
                    row1 = idx1/cols;   /* was idx1/step: wrong units (see above) */
                    row2 = idx2/cols;
                    p = data + row1*step + (idx1 - row1*cols)*elem_size;
                    q = data + row2*step + (idx2 - row2*cols)*elem_size;
                    for( k = 0; k < elem_size; k++ )
                        CV_SWAP( p[k], q[k], t );
                }
        }
    }

    __END__;
}
/* Fills a CV_32SC1 or CV_32FC1 matrix with a linear ramp: element k (in
   row-major order) receives start + k*(end-start)/total.
   Returns arr on success, 0 on failure. */
CV_IMPL CvArr*
cvRange( CvArr* arr, double start, double end )
{
    int ok = 0;

    CV_FUNCNAME( "cvRange" );

    __BEGIN__;

    CvMat stub, *m = (CvMat*)arr;
    double v = start;          /* running value of the ramp */
    double dv;                 /* per-element increment */
    int depth, stride;
    int r, c, nrows, ncols;

    if( !CV_IS_MAT(m) )
        CV_CALL( m = cvGetMat( m, &stub) );

    nrows = m->rows;
    ncols = m->cols;
    depth = CV_MAT_TYPE(m->type);
    dv = (end - start)/(nrows*ncols);

    if( !CV_IS_MAT_CONT(m->type) )
    {
        /* padded rows: advance by the row stride measured in elements */
        stride = m->step / CV_ELEM_SIZE(depth);
    }
    else
    {
        /* continuous data: treat the whole matrix as one long row */
        ncols *= nrows;
        nrows = 1;
        stride = 1;
    }

    if( depth == CV_32SC1 )
    {
        int* iptr = m->data.i;
        int iv = cvRound(v), idv = cvRound(dv);

        if( fabs(v - iv) < DBL_EPSILON && fabs(dv - idv) < DBL_EPSILON )
        {
            /* start and delta are exact integers: pure integer arithmetic */
            for( r = 0; r < nrows; r++, iptr += stride )
                for( c = 0; c < ncols; c++, iv += idv )
                    iptr[c] = iv;
        }
        else
        {
            /* fractional ramp rounded per element */
            for( r = 0; r < nrows; r++, iptr += stride )
                for( c = 0; c < ncols; c++, v += dv )
                    iptr[c] = cvRound(v);
        }
    }
    else if( depth == CV_32FC1 )
    {
        float* fptr = m->data.fl;

        for( r = 0; r < nrows; r++, fptr += stride )
            for( c = 0; c < ncols; c++, v += dv )
                fptr[c] = (float)v;
    }
    else
        CV_ERROR( CV_StsUnsupportedFormat, "The function only supports 32sC1 and 32fC1 datatypes" );

    ok = 1;

    __END__;

    return ok ? arr : 0;
}
/* Computes per-element absolute difference: dst = |src1 - src2| (legacy C API).
   All three arrays must have identical size and type; COI is not supported.
   Dispatches through a lazily initialized per-depth function table.
   NOTE(review): the static-table initialization is not synchronized —
   presumably callers are expected to serialize the first call; confirm the
   project's threading assumptions. */
CV_IMPL void
cvAbsDiff( const void* srcarr1, const void* srcarr2, void* dstarr )
{
    static CvFuncTable adiff_tab;   /* per-depth implementations */
    static int inittab = 0;         /* one-shot lazy init flag */

    CV_FUNCNAME( "cvAbsDiff" );

    __BEGIN__;

    int coi1 = 0, coi2 = 0, coi3 = 0;
    CvMat srcstub1, *src1 = (CvMat*)srcarr1;
    CvMat srcstub2, *src2 = (CvMat*)srcarr2;
    CvMat dststub, *dst = (CvMat*)dstarr;
    int src1_step, src2_step, dst_step;
    CvSize size;
    int type;

    if( !inittab )
    {
        icvInitAbsDiffTable( &adiff_tab );
        inittab = 1;
    }

    CV_CALL( src1 = cvGetMat( src1, &srcstub1, &coi1 ));
    CV_CALL( src2 = cvGetMat( src2, &srcstub2, &coi2 ));
    CV_CALL( dst = cvGetMat( dst, &dststub, &coi3 ));

    if( coi1 != 0 || coi2 != 0 || coi3 != 0 )
        CV_ERROR( CV_BadCOI, "" );

    if( !CV_ARE_SIZES_EQ( src1, src2 ) )
        CV_ERROR_FROM_CODE( CV_StsUnmatchedSizes );

    size = icvGetMatSize( src1 );
    type = CV_MAT_TYPE(src1->type);

    if( !CV_ARE_SIZES_EQ( src1, dst ))
        CV_ERROR_FROM_CODE( CV_StsUnmatchedSizes );

    if( !CV_ARE_TYPES_EQ( src1, src2 ))
        CV_ERROR_FROM_CODE( CV_StsUnmatchedFormats );

    if( !CV_ARE_TYPES_EQ( src1, dst ))
        CV_ERROR_FROM_CODE( CV_StsUnmatchedFormats );

    /* process all channels as one wide single-channel row */
    size.width *= CV_MAT_CN( type );

    src1_step = src1->step;
    src2_step = src2->step;
    dst_step = dst->step;

    /* all continuous: collapse to a single row for one long inner loop */
    if( CV_IS_MAT_CONT( src1->type & src2->type & dst->type ))
    {
        size.width *= size.height;
        size.height = 1;
        src1_step = src2_step = dst_step = CV_STUB_STEP;
    }

    {
        CvFunc2D_3A func = (CvFunc2D_3A)
            (adiff_tab.fn_2d[CV_MAT_DEPTH(type)]);

        if( !func )
            CV_ERROR( CV_StsUnsupportedFormat, "" );

        IPPI_CALL( func( src1->data.ptr, src1_step, src2->data.ptr, src2_step,
                         dst->data.ptr, dst_step, size ));
    }

    __END__;
}
//the main function to update the background model static void icvUpdatePixelBackgroundGMM2( const CvArr* srcarr, CvArr* dstarr , CvPBGMMGaussian *pGMM, unsigned char *pUsedModes, //CvGaussBGStatModel2Params* pGMMPar, int nM, float fTb, float fTB, float fTg, float fVarInit, float fVarMax, float fVarMin, float fCT, float fTau, bool bShadowDetection, unsigned char nShadowDetection, float alpha) { CvMat sstub, *src = cvGetMat(srcarr, &sstub); CvMat dstub, *dst = cvGetMat(dstarr, &dstub); CvSize size = cvGetMatSize(src); int nD=CV_MAT_CN(src->type); //reshape if possible if( CV_IS_MAT_CONT(src->type & dst->type) ) { size.width *= size.height; size.height = 1; } int x, y; float data[CV_BGFG_MOG2_NDMAX]; float prune=-alpha*fCT; //general nD if (nD!=3) { switch (CV_MAT_DEPTH(src->type)) { case CV_8U: for( y = 0; y < size.height; y++ ) { uchar* sptr = src->data.ptr + src->step*y; uchar* pDataOutput = dst->data.ptr + dst->step*y; for( x = 0; x < size.width; x++, pGMM+=nM,pUsedModes++,pDataOutput++,sptr+=nD) { //convert data for (int iD=0;iD<nD;iD++) data[iD]=float(sptr[iD]); //update GMM model int result = _icvUpdateGMM(data,nD,pUsedModes,pGMM,nM,alpha, fTb, fTB, fTg, fVarInit, fVarMax, fVarMin,prune); //detect shadows in the foreground if (bShadowDetection) if (result==0) result= _icvRemoveShadowGMM(data,nD,(*pUsedModes),pGMM,fTb,fTB,fTau); //generate output (* pDataOutput)= (result==1) ? 0 : (result==2) ? 
(nShadowDetection) : 255; } } break; case CV_16S: for( y = 0; y < size.height; y++ ) { short* sptr = src->data.s + src->step*y; uchar* pDataOutput = dst->data.ptr + dst->step*y; for( x = 0; x < size.width; x++, pGMM+=nM,pUsedModes++,pDataOutput++,sptr+=nD) { //convert data for (int iD=0;iD<nD;iD++) data[iD]=float(sptr[iD]); //update GMM model int result = _icvUpdateGMM(data,nD,pUsedModes,pGMM,nM,alpha, fTb, fTB, fTg, fVarInit, fVarMax, fVarMin,prune); //detect shadows in the foreground if (bShadowDetection) if (result==0) result= _icvRemoveShadowGMM(data,nD,(*pUsedModes),pGMM,fTb,fTB,fTau); //generate output (* pDataOutput)= (result==1) ? 0 : (result==2) ? (nShadowDetection) : 255; } } break; case CV_16U: for( y = 0; y < size.height; y++ ) { unsigned short* sptr = (unsigned short*) (src->data.s + src->step*y); uchar* pDataOutput = dst->data.ptr + dst->step*y; for( x = 0; x < size.width; x++, pGMM+=nM,pUsedModes++,pDataOutput++,sptr+=nD) { //convert data for (int iD=0;iD<nD;iD++) data[iD]=float(sptr[iD]); //update GMM model int result = _icvUpdateGMM(data,nD,pUsedModes,pGMM,nM,alpha, fTb, fTB, fTg, fVarInit, fVarMax, fVarMin,prune); //detect shadows in the foreground if (bShadowDetection) if (result==0) result= _icvRemoveShadowGMM(data,nD,(*pUsedModes),pGMM,fTb,fTB,fTau); //generate output (* pDataOutput)= (result==1) ? 0 : (result==2) ? 
(nShadowDetection) : 255; } } break; case CV_32S: for( y = 0; y < size.height; y++ ) { int* sptr = src->data.i + src->step*y; uchar* pDataOutput = dst->data.ptr + dst->step*y; for( x = 0; x < size.width; x++, pGMM+=nM,pUsedModes++,pDataOutput++,sptr+=nD) { //convert data for (int iD=0;iD<nD;iD++) data[iD]=float(sptr[iD]); //update GMM model int result = _icvUpdateGMM(data,nD,pUsedModes,pGMM,nM,alpha, fTb, fTB, fTg, fVarInit, fVarMax, fVarMin,prune); //detect shadows in the foreground if (bShadowDetection) if (result==0) result= _icvRemoveShadowGMM(data,nD,(*pUsedModes),pGMM,fTb,fTB,fTau); //generate output (* pDataOutput)= (result==1) ? 0 : (result==2) ? (nShadowDetection) : 255; } } break; case CV_32F: for( y = 0; y < size.height; y++ ) { float* sptr = src->data.fl + src->step*y; uchar* pDataOutput = dst->data.ptr + dst->step*y; for( x = 0; x < size.width; x++, pGMM+=nM,pUsedModes++,pDataOutput++,sptr+=nD) { //update GMM model int result = _icvUpdateGMM(sptr,nD,pUsedModes,pGMM,nM,alpha, fTb, fTB, fTg, fVarInit, fVarMax, fVarMin,prune); //detect shadows in the foreground if (bShadowDetection) if (result==0) result= _icvRemoveShadowGMM(data,nD,(*pUsedModes),pGMM,fTb,fTB,fTau); //generate output (* pDataOutput)= (result==1) ? 0 : (result==2) ? (nShadowDetection) : 255; } } break; case CV_64F: for( y = 0; y < size.height; y++ ) { double* sptr = src->data.db + src->step*y; uchar* pDataOutput = dst->data.ptr + dst->step*y; for( x = 0; x < size.width; x++, pGMM+=nM,pUsedModes++,pDataOutput++,sptr+=nD) { //convert data for (int iD=0;iD<nD;iD++) data[iD]=float(sptr[iD]); //update GMM model int result = _icvUpdateGMM(data,nD,pUsedModes,pGMM,nM,alpha, fTb, fTB, fTg, fVarInit, fVarMax, fVarMin,prune); //detect shadows in the foreground if (bShadowDetection) if (result==0) result= _icvRemoveShadowGMM(data,nD,(*pUsedModes),pGMM,fTb,fTB,fTau); //generate output (* pDataOutput)= (result==1) ? 0 : (result==2) ? 
(nShadowDetection) : 255; } } break; } }else ///if (nD==3) - a bit faster { switch (CV_MAT_DEPTH(src->type)) { case CV_8U: for( y = 0; y < size.height; y++ ) { uchar* sptr = src->data.ptr + src->step*y; uchar* pDataOutput = dst->data.ptr + dst->step*y; for( x = 0; x < size.width; x++, pGMM+=nM,pUsedModes++,pDataOutput++,sptr+=nD) { //convert data data[0]=float(sptr[0]),data[1]=float(sptr[1]),data[2]=float(sptr[2]); //update GMM model int result = _icvUpdateGMM_C3(data[0],data[1],data[2],pUsedModes,pGMM,nM,alpha, fTb, fTB, fTg, fVarInit, fVarMax, fVarMin,prune); //detect shadows in the foreground if (bShadowDetection) if (result==0) result= _icvRemoveShadowGMM(data,nD,(*pUsedModes),pGMM,fTb,fTB,fTau); //generate output (* pDataOutput)= (result==1) ? 0 : (result==2) ? (nShadowDetection) : 255; } } break; case CV_16S: for( y = 0; y < size.height; y++ ) { short* sptr = src->data.s + src->step*y; uchar* pDataOutput = dst->data.ptr + dst->step*y; for( x = 0; x < size.width; x++, pGMM+=nM,pUsedModes++,pDataOutput++,sptr+=nD) { //convert data data[0]=float(sptr[0]),data[1]=float(sptr[1]),data[2]=float(sptr[2]); //update GMM model int result = _icvUpdateGMM_C3(data[0],data[1],data[2],pUsedModes,pGMM,nM,alpha, fTb, fTB, fTg, fVarInit, fVarMax, fVarMin,prune); //detect shadows in the foreground if (bShadowDetection) if (result==0) result= _icvRemoveShadowGMM(data,nD,(*pUsedModes),pGMM,fTb,fTB,fTau); //generate output (* pDataOutput)= (result==1) ? 0 : (result==2) ? 
(nShadowDetection) : 255; } } break; case CV_16U: for( y = 0; y < size.height; y++ ) { unsigned short* sptr = (unsigned short*) src->data.s + src->step*y; uchar* pDataOutput = dst->data.ptr + dst->step*y; for( x = 0; x < size.width; x++, pGMM+=nM,pUsedModes++,pDataOutput++,sptr+=nD) { //convert data data[0]=float(sptr[0]),data[1]=float(sptr[1]),data[2]=float(sptr[2]); //update GMM model int result = _icvUpdateGMM_C3(data[0],data[1],data[2],pUsedModes,pGMM,nM,alpha, fTb, fTB, fTg, fVarInit, fVarMax, fVarMin,prune); //detect shadows in the foreground if (bShadowDetection) if (result==0) result= _icvRemoveShadowGMM(data,nD,(*pUsedModes),pGMM,fTb,fTB,fTau); //generate output (* pDataOutput)= (result==1) ? 0 : (result==2) ? (nShadowDetection) : 255; } } break; case CV_32S: for( y = 0; y < size.height; y++ ) { int* sptr = src->data.i + src->step*y; uchar* pDataOutput = dst->data.ptr + dst->step*y; for( x = 0; x < size.width; x++, pGMM+=nM,pUsedModes++,pDataOutput++,sptr+=nD) { //convert data data[0]=float(sptr[0]),data[1]=float(sptr[1]),data[2]=float(sptr[2]); //update GMM model int result = _icvUpdateGMM_C3(data[0],data[1],data[2],pUsedModes,pGMM,nM,alpha, fTb, fTB, fTg, fVarInit, fVarMax, fVarMin,prune); //detect shadows in the foreground if (bShadowDetection) if (result==0) result= _icvRemoveShadowGMM(data,nD,(*pUsedModes),pGMM,fTb,fTB,fTau); //generate output (* pDataOutput)= (result==1) ? 0 : (result==2) ? 
(nShadowDetection) : 255; } } break; case CV_32F: for( y = 0; y < size.height; y++ ) { float* sptr = src->data.fl + src->step*y; uchar* pDataOutput = dst->data.ptr + dst->step*y; for( x = 0; x < size.width; x++, pGMM+=nM,pUsedModes++,pDataOutput++,sptr+=nD) { //update GMM model int result = _icvUpdateGMM_C3(sptr[0],sptr[1],sptr[2],pUsedModes,pGMM,nM,alpha, fTb, fTB, fTg, fVarInit, fVarMax, fVarMin,prune); //detect shadows in the foreground if (bShadowDetection) if (result==0) result= _icvRemoveShadowGMM(data,nD,(*pUsedModes),pGMM,fTb,fTB,fTau); //generate output (* pDataOutput)= (result==1) ? 0 : (result==2) ? (nShadowDetection) : 255; } } break; case CV_64F: for( y = 0; y < size.height; y++ ) { double* sptr = src->data.db + src->step*y; uchar* pDataOutput = dst->data.ptr + dst->step*y; for( x = 0; x < size.width; x++, pGMM+=nM,pUsedModes++,pDataOutput++,sptr+=nD) { //convert data data[0]=float(sptr[0]),data[1]=float(sptr[1]),data[2]=float(sptr[2]); //update GMM model int result = _icvUpdateGMM_C3(data[0],data[1],data[2],pUsedModes,pGMM,nM,alpha, fTb, fTB, fTg, fVarInit, fVarMax, fVarMin,prune); //detect shadows in the foreground if (bShadowDetection) if (result==0) result= _icvRemoveShadowGMM(data,nD,(*pUsedModes),pGMM,fTb,fTB,fTau); //generate output (* pDataOutput)= (result==1) ? 0 : (result==2) ? (nShadowDetection) : 255; } } break; } }//a bit faster for nD=3; }
/* motion templates */
/* Updates the motion history image (MHI):
     mhi(x,y) = timestamp              if silhouette(x,y) != 0
     mhi(x,y) = 0                      if mhi(x,y) < timestamp - mhi_duration
     mhi(x,y) unchanged                otherwise
   silhouette - 8U mask of the moving object; mhimg - 32F MHI, same size. */
CV_IMPL void
cvUpdateMotionHistory( const void* silhouette, void* mhimg,
                       double timestamp, double mhi_duration )
{
    CvMat silhstub, *silh = cvGetMat(silhouette, &silhstub);
    CvMat mhistub, *mhi = cvGetMat(mhimg, &mhistub);

    if( !CV_IS_MASK_ARR( silh ))
        CV_Error( CV_StsBadMask, "" );

    if( CV_MAT_TYPE( mhi->type ) != CV_32FC1 )
        CV_Error( CV_StsUnsupportedFormat, "" );

    if( !CV_ARE_SIZES_EQ( mhi, silh ))
        CV_Error( CV_StsUnmatchedSizes, "" );

    CvSize size = cvGetMatSize( mhi );

    /* both continuous: process the whole image as one long row */
    if( CV_IS_MAT_CONT( mhi->type & silh->type ))
    {
        size.width *= size.height;
        size.height = 1;
    }

    float ts = (float)timestamp;
    float delbound = (float)(timestamp - mhi_duration);  /* expiry threshold */
    int x, y;

#if CV_SSE2
    volatile bool useSIMD = cv::checkHardwareSupport(CV_CPU_SSE2);
#endif

    for( y = 0; y < size.height; y++ )
    {
        const uchar* silhData = silh->data.ptr + silh->step*y;
        float* mhiData = (float*)(mhi->data.ptr + mhi->step*y);
        x = 0;

#if CV_SSE2
        if( useSIMD )
        {
            __m128 ts4 = _mm_set1_ps(ts), db4 = _mm_set1_ps(delbound);
            /* 8 pixels per iteration (two 4-float lanes) */
            for( ; x <= size.width - 8; x += 8 )
            {
                /* widen 8 silhouette bytes to two float quads */
                __m128i z = _mm_setzero_si128();
                __m128i s = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i*)(silhData + x)), z);
                __m128 s0 = _mm_cvtepi32_ps(_mm_unpacklo_epi16(s, z)),
                       s1 = _mm_cvtepi32_ps(_mm_unpackhi_epi16(s, z));
                __m128 v0 = _mm_loadu_ps(mhiData + x), v1 = _mm_loadu_ps(mhiData + x + 4);
                __m128 fz = _mm_setzero_ps();

                /* zero out expired entries: v = (v >= delbound) ? v : 0 */
                v0 = _mm_and_ps(v0, _mm_cmpge_ps(v0, db4));
                v1 = _mm_and_ps(v1, _mm_cmpge_ps(v1, db4));

                /* xor-mask trick: where silhouette != 0, v ^= (v ^ ts) == ts */
                __m128 m0 = _mm_and_ps(_mm_xor_ps(v0, ts4), _mm_cmpneq_ps(s0, fz));
                __m128 m1 = _mm_and_ps(_mm_xor_ps(v1, ts4), _mm_cmpneq_ps(s1, fz));

                v0 = _mm_xor_ps(v0, m0);
                v1 = _mm_xor_ps(v1, m1);

                _mm_storeu_ps(mhiData + x, v0);
                _mm_storeu_ps(mhiData + x + 4, v1);
            }
        }
#endif

        /* scalar tail (and full row when SIMD is unavailable) */
        for( ; x < size.width; x++ )
        {
            float val = mhiData[x];
            val = silhData[x] ? ts : val < delbound ? 0 : val;
            mhiData[x] = val;
        }
    }
}
/* Computes the global motion direction (in degrees, [0,360)) from a
   per-pixel orientation image and a motion history image:
   1) histogram the orientations within the mask and take the dominant bin;
   2) refine it by the recency-weighted mean of the per-pixel deviations
      (only deviations within +/-90 degrees contribute). */
CV_IMPL double
cvCalcGlobalOrientation( const void* orientation, const void* maskimg,
                         const void* mhiimg, double curr_mhi_timestamp,
                         double mhi_duration )
{
    double angle = 0;
    int hist_size = 12;          /* 12 bins of 30 degrees each */
    CvHistogram* hist = 0;

    CV_FUNCNAME( "cvCalcGlobalOrientation" );

    __BEGIN__;

    CvMat mhistub, *mhi = (CvMat*)mhiimg;
    CvMat maskstub, *mask = (CvMat*)maskimg;
    CvMat orientstub, *orient = (CvMat*)orientation;
    void* _orient;
    float _ranges[] = { 0, 360 };
    float* ranges = _ranges;
    int base_orient;
    double shift_orient = 0, shift_weight = 0, fbase_orient;
    double a, b;                 /* linear map mhi value -> weight, see below */
    float delbound;
    CvMat mhi_row, mask_row, orient_row;
    int x, y, mhi_rows, mhi_cols;

    CV_CALL( mhi = cvGetMat( mhi, &mhistub ));
    CV_CALL( mask = cvGetMat( mask, &maskstub ));
    CV_CALL( orient = cvGetMat( orient, &orientstub ));

    if( !CV_IS_MASK_ARR( mask ))
        CV_ERROR( CV_StsBadMask, "" );

    if( CV_MAT_TYPE( mhi->type ) != CV_32FC1 || CV_MAT_TYPE( orient->type ) != CV_32FC1 )
        CV_ERROR( CV_StsUnsupportedFormat,
        "MHI and orientation must be single-channel floating-point images" );

    if( !CV_ARE_SIZES_EQ( mhi, mask ) || !CV_ARE_SIZES_EQ( orient, mhi ))
        CV_ERROR( CV_StsUnmatchedSizes, "" );

    if( mhi_duration <= 0 )
        CV_ERROR( CV_StsOutOfRange, "MHI duration must be positive" );

    if( orient->data.ptr == mhi->data.ptr )
        CV_ERROR( CV_StsInplaceNotSupported, "orientation image must be different from MHI" );

    // calculate histogram of different orientation values
    CV_CALL( hist = cvCreateHist( 1, &hist_size, CV_HIST_ARRAY, &ranges ));
    _orient = orient;
    cvCalcArrHist( &_orient, hist, 0, mask );

    // find the maximum index (the dominant orientation)
    cvGetMinMaxHistValue( hist, 0, 0, 0, &base_orient );
    base_orient *= 360/hist_size;   /* bin index -> degrees */

    // override timestamp with the maximum value in MHI
    cvMinMaxLoc( mhi, 0, &curr_mhi_timestamp, 0, 0, mask );

    // find the shift relative to the dominant orientation as weighted sum of relative angles
    a = 254. / 255. / mhi_duration;
    b = 1. - curr_mhi_timestamp * a;
    fbase_orient = base_orient;
    delbound = (float)(curr_mhi_timestamp - mhi_duration);
    mhi_rows = mhi->rows;
    mhi_cols = mhi->cols;

    /* all continuous: iterate as a single long row */
    if( CV_IS_MAT_CONT( mhi->type & mask->type & orient->type ))
    {
        mhi_cols *= mhi_rows;
        mhi_rows = 1;
    }

    cvGetRow( mhi, &mhi_row, 0 );
    cvGetRow( mask, &mask_row, 0 );
    cvGetRow( orient, &orient_row, 0 );

    /*
       a = 254/(255*dt)
       b = 1 - t*a = 1 - 254*t/(255*dur) =
       (255*dt - 254*t)/(255*dt) =
       (dt - (t - dt)*254)/(255*dt);
       --------------------------------------------------------
       ax + b = 254*x/(255*dt) + (dt - (t - dt)*254)/(255*dt) =
       (254*x + dt - (t - dt)*254)/(255*dt) =
       ((x - (t - dt))*254 + dt)/(255*dt) =
       (((x - (t - dt))/dt)*254 + 1)/255 = (((x - low_time)/dt)*254 + 1)/255
    */
    for( y = 0; y < mhi_rows; y++ )
    {
        mhi_row.data.ptr = mhi->data.ptr + mhi->step*y;
        mask_row.data.ptr = mask->data.ptr + mask->step*y;
        orient_row.data.ptr = orient->data.ptr + orient->step*y;

        for( x = 0; x < mhi_cols; x++ )
            if( mask_row.data.ptr[x] != 0 && mhi_row.data.fl[x] > delbound )
            {
                /*
                   orient in 0..360, base_orient in 0..360
                   -> (rel_angle = orient - base_orient) in -360..360.
                   rel_angle is translated to -180..180
                */
                double weight = mhi_row.data.fl[x]*a + b;  /* newer motion weighs more */
                int rel_angle = cvRound( orient_row.data.fl[x] - fbase_orient );

                rel_angle += (rel_angle < -180 ? 360 : 0);
                rel_angle += (rel_angle > 180 ? -360 : 0);

                if( abs(rel_angle) < 90 )
                {
                    shift_orient += weight*rel_angle;
                    shift_weight += weight;
                }
            }
    }

    // add the dominant orientation and the relative shift
    if( shift_weight == 0 )
        shift_weight = 0.01;    /* avoid division by zero; shift becomes ~0 */

    base_orient = base_orient + cvRound( shift_orient/shift_weight );
    /* wrap back into [0, 360) — the shift is within (-90, 90), one wrap suffices */
    base_orient -= (base_orient < 360 ? 0 : 360);
    base_orient += (base_orient >= 0 ? 0 : 360);

    angle = base_orient;

    __END__;

    cvReleaseHist( &hist );

    return angle;
}
/* Computes, in place, the activation function of the biased layer sums and
   simultaneously its derivative.
   _xf  - in: pre-activation sums (per-row samples); out: activation values.
   _df  - out: derivative of the activation at each element.
   bias - per-column bias, added to _xf before applying the activation.
   Both matrices must be continuous CV_64F of the same shape. */
void CvANN_MLP::calc_activ_func_deriv( CvMat* _xf, CvMat* _df,
                                       const double* bias ) const
{
    int i, j, n = _xf->rows, cols = _xf->cols;
    double* xf = _xf->data.db;
    double* df = _df->data.db;
    double scale, scale2 = f_param2;

    assert( CV_IS_MAT_CONT( _xf->type & _df->type ) );

    if( activ_func == IDENTITY )
    {
        /* f(x) = x + b, f'(x) = 1 */
        for( i = 0; i < n; i++, xf += cols, df += cols )
            for( j = 0; j < cols; j++ )
            {
                xf[j] += bias[j];
                df[j] = 1;
            }
        return;
    }
    else if( activ_func == GAUSSIAN )
    {
        /* prepare exponent arguments; derivative factor stored in df,
           completed after cvExp below */
        scale = -f_param1*f_param1;
        scale2 *= scale;
        for( i = 0; i < n; i++, xf += cols, df += cols )
            for( j = 0; j < cols; j++ )
            {
                double t = xf[j] + bias[j];
                df[j] = t*2*scale2;
                xf[j] = t*t*scale;
            }
    }
    else
    {
        /* sigmoid-family: xf <- -a*(x + b), exponentiated below */
        scale = -f_param1;
        for( i = 0; i < n; i++, xf += cols, df += cols )
            for( j = 0; j < cols; j++ )
                xf[j] = (xf[j] + bias[j])*scale;
    }

    cvExp( _xf, _xf );

    /* rewind to the start of the (continuous) data */
    n *= cols;
    xf -= n; df -= n;

    // ((1+exp(-ax))^-1)'=a*((1+exp(-ax))^-2)*exp(-ax);
    // ((1-exp(-ax))/(1+exp(-ax)))'=(a*exp(-ax)*(1+exp(-ax)) + a*exp(-ax)*(1-exp(-ax)))/(1+exp(-ax))^2=
    // 2*a*exp(-ax)/(1+exp(-ax))^2
    switch( activ_func )
    {
    case SIGMOID_SYM:
        scale *= -2*f_param2;
        /* 4-way unrolled loop; reciprocals of the four (1+e) terms are
           computed with a single division via the shared product d */
        for( i = 0; i <= n - 4; i += 4 )
        {
            double x0 = 1.+xf[i], x1 = 1.+xf[i+1], x2 = 1.+xf[i+2], x3 = 1.+xf[i+3];
            double a = x0*x1, b = x2*x3, d = 1./(a*b), t0, t1;
            a *= d; b *= d;          /* a = 1/(x2*x3)? no: a = 1/(x2*x3) pairing below */

            t0 = b*x1; t1 = b*x0;    /* t0 = 1/x0, t1 = 1/x1 */
            df[i] = scale*xf[i]*t0*t0;
            df[i+1] = scale*xf[i+1]*t1*t1;
            t0 *= scale2*(2 - x0); t1 *= scale2*(2 - x1);  /* (1-e)/(1+e) scaled */
            xf[i] = t0; xf[i+1] = t1;

            t0 = a*x3; t1 = a*x2;    /* t0 = 1/x2, t1 = 1/x3 */
            df[i+2] = scale*xf[i+2]*t0*t0;
            df[i+3] = scale*xf[i+3]*t1*t1;
            t0 *= scale2*(2 - x2); t1 *= scale2*(2 - x3);
            xf[i+2] = t0; xf[i+3] = t1;
        }
        /* scalar tail */
        for( ; i < n; i++ )
        {
            double t0 = 1./(1. + xf[i]);
            double t1 = scale*xf[i]*t0*t0;
            t0 *= scale2*(1. - xf[i]);
            df[i] = t1;
            xf[i] = t0;
        }
        break;

    case GAUSSIAN:
        /* finish derivative: df = (2*scale2*t) * exp(scale*t^2) */
        for( i = 0; i < n; i++ )
            df[i] *= xf[i];
        break;

    default: ;
    }
}
// Validates the training data and builds per-sample pointer tables used by the
// trainer.  On success fills *_ivecs / *_ovecs with row pointers into _inputs /
// _outputs (optionally permuted by _sample_idx) and *_sw with normalized sample
// weights (or NULL when no weights were given).  Returns true on success; on
// failure the partially built arrays are freed before returning.
// Ownership note: the pointer tables and weight array are cvAlloc'ed here and
// become the caller's responsibility via *_ivecs / *_ovecs / *_sw.
bool CvANN_MLP::prepare_to_train( const CvMat* _inputs, const CvMat* _outputs,
            const CvMat* _sample_weights, const CvMat* _sample_idx,
            CvVectors* _ivecs, CvVectors* _ovecs, double** _sw, int _flags )
{
    bool ok = false;
    CvMat* sample_idx = 0;
    CvVectors ivecs, ovecs;
    double* sw = 0;
    int count = 0;

    CV_FUNCNAME( "CvANN_MLP::prepare_to_train" );

    ivecs.data.ptr = ovecs.data.ptr = 0;
    assert( _ivecs && _ovecs );

    __BEGIN__;

    const int* sidx = 0;
    int i, sw_type = 0, sw_count = 0;
    int sw_step = 0;
    double sw_sum = 0;

    if( !layer_sizes )
        CV_ERROR( CV_StsError,
                  "The network has not been created. Use method create or the appropriate constructor" );

    // NOTE: these conditions rely on && binding tighter than || ; the intent is
    // !mat || (type != 32F && type != 64F) || wrong-cols.
    if( !CV_IS_MAT(_inputs) || CV_MAT_TYPE(_inputs->type) != CV_32FC1 &&
        CV_MAT_TYPE(_inputs->type) != CV_64FC1 || _inputs->cols != layer_sizes->data.i[0] )
        CV_ERROR( CV_StsBadArg,
        "input training data should be a floating-point matrix with"
        "the number of rows equal to the number of training samples and "
        "the number of columns equal to the size of 0-th (input) layer" );

    if( !CV_IS_MAT(_outputs) || CV_MAT_TYPE(_outputs->type) != CV_32FC1 &&
        CV_MAT_TYPE(_outputs->type) != CV_64FC1 ||
        _outputs->cols != layer_sizes->data.i[layer_sizes->cols - 1] )
        CV_ERROR( CV_StsBadArg,
        "output training data should be a floating-point matrix with"
        "the number of rows equal to the number of training samples and "
        "the number of columns equal to the size of last (output) layer" );

    if( _inputs->rows != _outputs->rows )
        CV_ERROR( CV_StsUnmatchedSizes, "The numbers of input and output samples do not match" );

    if( _sample_idx )
    {
        CV_CALL( sample_idx = cvPreprocessIndexArray( _sample_idx, _inputs->rows ));
        sidx = sample_idx->data.i;
        // cols + rows - 1 gives the element count of a 1-D (row or column) vector.
        count = sample_idx->cols + sample_idx->rows - 1;
    }
    else
        count = _inputs->rows;

    if( _sample_weights )
    {
        if( !CV_IS_MAT(_sample_weights) )
            CV_ERROR( CV_StsBadArg, "sample_weights (if passed) must be a valid matrix" );

        sw_type = CV_MAT_TYPE(_sample_weights->type);
        sw_count = _sample_weights->cols + _sample_weights->rows - 1;

        if( sw_type != CV_32FC1 && sw_type != CV_64FC1 ||
            _sample_weights->cols != 1 && _sample_weights->rows != 1 ||
            sw_count != count && sw_count != _inputs->rows )
            CV_ERROR( CV_StsBadArg,
            "sample_weights must be 1d floating-point vector containing weights "
            "of all or selected training samples" );

        // Element stride in weight units (handles non-continuous column vectors).
        sw_step = CV_IS_MAT_CONT(_sample_weights->type) ? 1 :
                  _sample_weights->step/CV_ELEM_SIZE(sw_type);

        CV_CALL( sw = (double*)cvAlloc( count*sizeof(sw[0]) ));
    }

    CV_CALL( ivecs.data.ptr = (uchar**)cvAlloc( count*sizeof(ivecs.data.ptr[0]) ));
    CV_CALL( ovecs.data.ptr = (uchar**)cvAlloc( count*sizeof(ovecs.data.ptr[0]) ));

    ivecs.type = CV_MAT_TYPE(_inputs->type);
    ovecs.type = CV_MAT_TYPE(_outputs->type);
    ivecs.count = ovecs.count = count;

    // Fill the pointer tables; no sample data is copied, only row addresses.
    for( i = 0; i < count; i++ )
    {
        int idx = sidx ? sidx[i] : i;
        ivecs.data.ptr[i] = _inputs->data.ptr + idx*_inputs->step;
        ovecs.data.ptr[i] = _outputs->data.ptr + idx*_outputs->step;
        if( sw )
        {
            // Weights may be indexed either per selected sample or per raw row.
            int si = sw_count == count ? i : idx;
            double w = sw_type == CV_32FC1 ?
                (double)_sample_weights->data.fl[si*sw_step] :
                _sample_weights->data.db[si*sw_step];
            sw[i] = w;
            if( w < 0 )
                CV_ERROR( CV_StsOutOfRange, "some of sample weights are negative" );
            sw_sum += w;
        }
    }

    // normalize weights so they sum to 1 (all-zero weights collapse to 0).
    if( sw )
    {
        sw_sum = sw_sum > DBL_EPSILON ? 1./sw_sum : 0;
        for( i = 0; i < count; i++ )
            sw[i] *= sw_sum;
    }

    calc_input_scale( &ivecs, _flags );
    CV_CALL( calc_output_scale( &ovecs, _flags ));

    ok = true;

    __END__;

    if( !ok )
    {
        // Error path: release everything allocated above (cvFree NULL-safe).
        cvFree( &ivecs.data.ptr );
        cvFree( &ovecs.data.ptr );
        cvFree( &sw );
    }

    cvReleaseMat( &sample_idx );
    *_ivecs = ivecs;
    *_ovecs = ovecs;
    *_sw = sw;

    return ok;
}
// Applies the layer activation function in place to `sums` (rows = samples,
// cols = neurons), adding bias[j] per column first.  Unlike
// calc_activ_func_deriv, no derivative is produced — this is the inference path.
// The matrix must be continuous (asserted) so the post-cvExp loops can treat it
// as one flat array.
void CvANN_MLP::calc_activ_func( CvMat* sums, const double* bias ) const
{
    int i, j, n = sums->rows, cols = sums->cols;
    double* data = sums->data.db;
    double scale = 0, scale2 = f_param2;

    // Pre-exponent scale per activation type.
    switch( activ_func )
    {
    case IDENTITY:
        scale = 1.;
        break;
    case SIGMOID_SYM:
        scale = -f_param1;
        break;
    case GAUSSIAN:
        scale = -f_param1*f_param1;
        break;
    default:
        ;
    }

    assert( CV_IS_MAT_CONT(sums->type) );

    if( activ_func != GAUSSIAN )
    {
        for( i = 0; i < n; i++, data += cols )
            for( j = 0; j < cols; j++ )
                data[j] = (data[j] + bias[j])*scale;

        // IDENTITY is finished here: f(x) = x + bias (scale is 1).
        if( activ_func == IDENTITY )
            return;
    }
    else
    {
        // GAUSSIAN: store -(a*t)^2 so cvExp yields exp(-(a*t)^2).
        for( i = 0; i < n; i++, data += cols )
            for( j = 0; j < cols; j++ )
            {
                double t = data[j] + bias[j];
                data[j] = t*t*scale;
            }
    }

    cvExp( sums, sums );

    n *= cols;
    data -= n;   // rewind pointer advanced by the loops above

    switch( activ_func )
    {
    case SIGMOID_SYM:
        // f(x) = scale2*(1 - e)/(1 + e) with e = exp(-a*x), unrolled x4;
        // d = scale2/(x0*x1*x2*x3) shares one division across four elements.
        for( i = 0; i <= n - 4; i += 4 )
        {
            double x0 = 1.+data[i], x1 = 1.+data[i+1], x2 = 1.+data[i+2], x3 = 1.+data[i+3];
            double a = x0*x1, b = x2*x3, d = scale2/(a*b), t0, t1;
            a *= d; b *= d;
            t0 = (2 - x0)*b*x1; t1 = (2 - x1)*b*x0;
            data[i] = t0; data[i+1] = t1;
            t0 = (2 - x2)*a*x3; t1 = (2 - x3)*a*x2;
            data[i+2] = t0; data[i+3] = t1;
        }
        // Scalar tail for remaining 0..3 elements.
        for( ; i < n; i++ )
        {
            double t = scale2*(1. - data[i])/(1. + data[i]);
            data[i] = t;
        }
        break;
    case GAUSSIAN:
        // f(x) = scale2 * exp(-(a*x)^2); the exponential is already in data.
        for( i = 0; i < n; i++ )
            data[i] = scale2*data[i];
        break;
    default:
        ;
    }
}
// Counts non-zero elements of a single-channel array (CvMat, IplImage via
// cvGetMat, or dense CvMatND).  Multi-channel input is only accepted together
// with a COI selecting one channel.  Dispatch goes through per-depth function
// tables filled on first call.
// NOTE(review): the one-time table init is not guarded by any lock — assumes
// the first call happens single-threaded; confirm against project threading rules.
CV_IMPL int
cvCountNonZero( const CvArr* arr )
{
    static CvFuncTable nz_tab;
    static CvFuncTable nzcoi_tab;
    static int inittab = 0;

    int count = 0;

    CV_FUNCNAME("cvCountNonZero");

    __BEGIN__;

    int type, coi = 0;
    int mat_step;
    CvSize size;
    CvMat stub, *mat = (CvMat*)arr;

    if( !inittab )
    {
        icvInitCountNonZeroC1RTable( &nz_tab );
        icvInitCountNonZeroCnCRTable( &nzcoi_tab );
        inittab = 1;
    }

    if( !CV_IS_MAT(mat) )
    {
        if( CV_IS_MATND(mat) )
        {
            // N-dimensional dense array: iterate plane by plane and accumulate.
            void* matnd = (void*)arr;
            CvMatND nstub;
            CvNArrayIterator iterator;
            CvFunc2D_1A1P func;

            CV_CALL( cvInitNArrayIterator( 1, &matnd, 0, &nstub, &iterator ));

            type = CV_MAT_TYPE(iterator.hdr[0]->type);

            if( CV_MAT_CN(type) != 1 )
                CV_ERROR( CV_BadNumChannels,
                    "Only single-channel array are supported here" );

            func = (CvFunc2D_1A1P)(nz_tab.fn_2d[CV_MAT_DEPTH(type)]);
            if( !func )
                CV_ERROR( CV_StsUnsupportedFormat, "" );

            do
            {
                int temp;
                IPPI_CALL( func( iterator.ptr[0], CV_STUB_STEP,
                                 iterator.size, &temp ));
                count += temp;
            }
            while( cvNextNArraySlice( &iterator ));
            EXIT;
        }
        else
            // Anything else (e.g. IplImage) is converted to a CvMat header;
            // coi picks up the image COI if one is set.
            CV_CALL( mat = cvGetMat( mat, &stub, &coi ));
    }

    type = CV_MAT_TYPE(mat->type);
    size = cvGetMatSize( mat );

    mat_step = mat->step;

    if( CV_IS_MAT_CONT( mat->type ))
    {
        // Continuous data: fold all rows into one long row for a single call.
        size.width *= size.height;
        size.height = 1;
        mat_step = CV_STUB_STEP;
    }

    if( CV_MAT_CN(type) == 1 || coi == 0 )
    {
        CvFunc2D_1A1P func = (CvFunc2D_1A1P)(nz_tab.fn_2d[CV_MAT_DEPTH(type)]);

        if( CV_MAT_CN(type) != 1 )
            CV_ERROR( CV_BadNumChannels,
            "The function can handle only a single channel at a time (use COI)");

        if( !func )
            CV_ERROR( CV_StsBadArg, cvUnsupportedFormat );

        IPPI_CALL( func( mat->data.ptr, mat_step, size, &count ));
    }
    else
    {
        // Multi-channel with COI: per-depth COI-aware counter.
        CvFunc2DnC_1A1P func = (CvFunc2DnC_1A1P)(nzcoi_tab.fn_2d[CV_MAT_DEPTH(type)]);
        if( !func )
            CV_ERROR( CV_StsBadArg, cvUnsupportedFormat );

        IPPI_CALL( func( mat->data.ptr, mat_step, size, CV_MAT_CN(type), coi, &count ));
    }

    __END__;

    return count;
}
// Sums all elements of an array (up to 4 channels), returning one total per
// channel in a CvScalar.  Handles dense CvMatND via slice iteration, small
// continuous 32F/64F single-channel mats via an inline loop, and everything
// else via per-type function tables (float variants take an accuracy hint).
// NOTE(review): one-time table init is unguarded — assumes first call is
// single-threaded; confirm against project threading rules.
CV_IMPL CvScalar
cvSum( const CvArr* arr )
{
    static CvBigFuncTable sum_tab;
    static CvFuncTable sumcoi_tab;
    static int inittab = 0;

    CvScalar sum = {{0,0,0,0}};

    CV_FUNCNAME("cvSum");

    __BEGIN__;

    int type, coi = 0;
    int mat_step;
    CvSize size;
    CvMat stub, *mat = (CvMat*)arr;

    if( !inittab )
    {
        icvInitSumRTable( &sum_tab );
        icvInitSumCnCRTable( &sumcoi_tab );
        inittab = 1;
    }

    if( !CV_IS_MAT(mat) )
    {
        if( CV_IS_MATND(mat) )
        {
            // N-dimensional dense array: sum each slice, accumulate per channel.
            void* matnd = (void*)mat;
            CvMatND nstub;
            CvNArrayIterator iterator;
            int pass_hint;

            CV_CALL( cvInitNArrayIterator( 1, &matnd, 0, &nstub, &iterator ));

            type = CV_MAT_TYPE(iterator.hdr[0]->type);

            if( CV_MAT_CN(type) > 4 )
                CV_ERROR( CV_StsOutOfRange, "The input array must have at most 4 channels" );

            // 32F kernels take an extra accuracy-hint argument, hence two paths.
            pass_hint = CV_MAT_DEPTH(type) == CV_32F;

            if( !pass_hint )
            {
                CvFunc2D_1A1P func = (CvFunc2D_1A1P)(sum_tab.fn_2d[type]);
                if( !func )
                    CV_ERROR( CV_StsUnsupportedFormat, "" );

                do
                {
                    CvScalar temp = {{0,0,0,0}};
                    IPPI_CALL( func( iterator.ptr[0], CV_STUB_STEP,
                                     iterator.size, temp.val ));
                    sum.val[0] += temp.val[0];
                    sum.val[1] += temp.val[1];
                    sum.val[2] += temp.val[2];
                    sum.val[3] += temp.val[3];
                }
                while( cvNextNArraySlice( &iterator ));
            }
            else
            {
                CvFunc2D_1A1P1I func = (CvFunc2D_1A1P1I)(sum_tab.fn_2d[type]);
                if( !func )
                    CV_ERROR( CV_StsUnsupportedFormat, "" );

                do
                {
                    CvScalar temp = {{0,0,0,0}};
                    IPPI_CALL( func( iterator.ptr[0], CV_STUB_STEP,
                                     iterator.size, temp.val, cvAlgHintAccurate ));
                    sum.val[0] += temp.val[0];
                    sum.val[1] += temp.val[1];
                    sum.val[2] += temp.val[2];
                    sum.val[3] += temp.val[3];
                }
                while( cvNextNArraySlice( &iterator ));
            }
            EXIT;
        }
        else
            CV_CALL( mat = cvGetMat( mat, &stub, &coi ));
    }

    type = CV_MAT_TYPE(mat->type);
    size = cvGetMatSize( mat );

    mat_step = mat->step;

    if( CV_IS_MAT_CONT( mat->type ))
    {
        size.width *= size.height;

        if( size.width <= CV_MAX_INLINE_MAT_OP_SIZE )
        {
            // Small continuous 1-channel float/double mats: sum inline,
            // walking backwards from the last element.  The do-while assumes
            // at least one element (width > 0 here).
            if( type == CV_32FC1 )
            {
                float* data = mat->data.fl;

                do
                {
                    sum.val[0] += data[size.width - 1];
                }
                while( --size.width );
                EXIT;
            }

            if( type == CV_64FC1 )
            {
                double* data = mat->data.db;

                do
                {
                    sum.val[0] += data[size.width - 1];
                }
                while( --size.width );
                EXIT;
            }
        }
        size.height = 1;
        mat_step = CV_STUB_STEP;
    }

    if( CV_MAT_CN(type) == 1 || coi == 0 )
    {
        int pass_hint = CV_MAT_DEPTH(type) == CV_32F;

        if( CV_MAT_CN(type) > 4 )
            CV_ERROR( CV_StsOutOfRange, "The input array must have at most 4 channels" );

        if( !pass_hint )
        {
            CvFunc2D_1A1P func = (CvFunc2D_1A1P)(sum_tab.fn_2d[type]);
            if( !func )
                CV_ERROR( CV_StsBadArg, cvUnsupportedFormat );

            IPPI_CALL( func( mat->data.ptr, mat_step, size, sum.val ));
        }
        else
        {
            CvFunc2D_1A1P1I func = (CvFunc2D_1A1P1I)(sum_tab.fn_2d[type]);
            if( !func )
                CV_ERROR( CV_StsBadArg, cvUnsupportedFormat );

            IPPI_CALL( func( mat->data.ptr, mat_step, size, sum.val, cvAlgHintAccurate ));
        }
    }
    else
    {
        // Multi-channel with COI: per-depth COI-aware kernel.
        CvFunc2DnC_1A1P func = (CvFunc2DnC_1A1P)(sumcoi_tab.fn_2d[CV_MAT_DEPTH(type)]);
        if( !func )
            CV_ERROR( CV_StsBadArg, cvUnsupportedFormat );

        IPPI_CALL( func( mat->data.ptr, mat_step, size, CV_MAT_CN(type), coi, sum.val ));
    }

    __END__;

    return sum;
}
/* it must have more than 3 points */ CV_IMPL CvSeq* cvConvexityDefects( const CvArr* array, const CvArr* hullarray, CvMemStorage* storage ) { CvSeq* defects = 0; CV_FUNCNAME( "cvConvexityDefects" ); __BEGIN__; int i, index; CvPoint* hull_cur; /* is orientation of hull different from contour one */ int rev_orientation; CvContour contour_header; union { CvContour c; CvSeq s; } hull_header; CvSeqBlock block, hullblock; CvSeq *ptseq = (CvSeq*)array, *hull = (CvSeq*)hullarray; CvSeqReader hull_reader; CvSeqReader ptseq_reader; CvSeqWriter writer; int is_index; if( CV_IS_SEQ( ptseq )) { if( !CV_IS_SEQ_POINT_SET( ptseq )) CV_ERROR( CV_StsUnsupportedFormat, "Input sequence is not a sequence of points" ); if( !storage ) storage = ptseq->storage; } else { CV_CALL( ptseq = cvPointSeqFromMat( CV_SEQ_KIND_GENERIC, array, &contour_header, &block )); } if( CV_SEQ_ELTYPE( ptseq ) != CV_32SC2 ) CV_ERROR( CV_StsUnsupportedFormat, "Floating-point coordinates are not supported here" ); if( CV_IS_SEQ( hull )) { int hulltype = CV_SEQ_ELTYPE( hull ); if( hulltype != CV_SEQ_ELTYPE_PPOINT && hulltype != CV_SEQ_ELTYPE_INDEX ) CV_ERROR( CV_StsUnsupportedFormat, "Convex hull must represented as a sequence " "of indices or sequence of pointers" ); if( !storage ) storage = hull->storage; } else { CvMat* mat = (CvMat*)hull; if( !CV_IS_MAT( hull )) CV_ERROR(CV_StsBadArg, "Convex hull is neither sequence nor matrix"); if( mat->cols != 1 && mat->rows != 1 || !CV_IS_MAT_CONT(mat->type) || CV_MAT_TYPE(mat->type) != CV_32SC1 ) CV_ERROR( CV_StsBadArg, "The matrix should be 1-dimensional and continuous array of int's" ); if( mat->cols + mat->rows - 1 > ptseq->total ) CV_ERROR( CV_StsBadSize, "Convex hull is larger than the point sequence" ); CV_CALL( hull = cvMakeSeqHeaderForArray( CV_SEQ_KIND_CURVE|CV_MAT_TYPE(mat->type)|CV_SEQ_FLAG_CLOSED, sizeof(CvContour), CV_ELEM_SIZE(mat->type), mat->data.ptr, mat->cols + mat->rows - 1, &hull_header.s, &hullblock )); } is_index = CV_SEQ_ELTYPE(hull) == 
CV_SEQ_ELTYPE_INDEX; if( !storage ) CV_ERROR( CV_StsNullPtr, "NULL storage pointer" ); CV_CALL( defects = cvCreateSeq( CV_SEQ_KIND_GENERIC, sizeof(CvSeq), sizeof(CvConvexityDefect), storage )); if( ptseq->total < 4 || hull->total < 3) { //CV_ERROR( CV_StsBadSize, // "point seq size must be >= 4, convex hull size must be >= 3" ); EXIT; } /* recognize co-orientation of ptseq and its hull */ { int sign = 0; int index1, index2, index3; if( !is_index ) { CvPoint* pos = *CV_SEQ_ELEM( hull, CvPoint*, 0 ); CV_CALL( index1 = cvSeqElemIdx( ptseq, pos )); pos = *CV_SEQ_ELEM( hull, CvPoint*, 1 ); CV_CALL( index2 = cvSeqElemIdx( ptseq, pos )); pos = *CV_SEQ_ELEM( hull, CvPoint*, 2 ); CV_CALL( index3 = cvSeqElemIdx( ptseq, pos )); } else {
// Normalizes src into dst.
//  - CV_MINMAX : affine-maps the value range of src to [min(a,b), max(a,b)].
//  - CV_L1/L2/C: divides by the corresponding norm so the result's norm is 1
//    (the a/b parameters are unused for these types).
// mask restricts both the statistics and the written pixels; when a mask is
// given the result is computed into a temporary and merged via cvCopy.
// Small continuous single-channel 32F/64F mats with no mask take an inline
// fast path that computes the norm and scales in one pass pair.
CV_IMPL void
cvNormalize( const CvArr* src, CvArr* dst,
             double a, double b, int norm_type, const CvArr* mask )
{
    CvMat* tmp = 0;

    CV_FUNCNAME( "cvNormalize" );

    __BEGIN__;

    double scale, shift;

    if( norm_type == CV_MINMAX )
    {
        double smin = 0, smax = 0;
        double dmin = MIN( a, b ), dmax = MAX( a, b );
        cvMinMaxLoc( src, &smin, &smax, 0, 0, mask );
        // Constant source (smax == smin) maps everything to dmin via scale 0.
        scale = (dmax - dmin)*(smax - smin > DBL_EPSILON ? 1./(smax - smin) : 0);
        shift = dmin - smin*scale;
    }
    else if( norm_type == CV_L2 || norm_type == CV_L1 || norm_type == CV_C )
    {
        CvMat *s = (CvMat*)src, *d = (CvMat*)dst;

        // Inline fast path: both mats valid, continuous, same type/size,
        // no mask, and small enough.
        if( CV_IS_MAT(s) && CV_IS_MAT(d) && CV_IS_MAT_CONT(s->type & d->type) &&
            CV_ARE_TYPES_EQ(s,d) && CV_ARE_SIZES_EQ(s,d) && !mask &&
            s->cols*s->rows <= CV_MAX_INLINE_MAT_OP_SIZE*CV_MAX_INLINE_MAT_OP_SIZE )
        {
            int i, len = s->cols*s->rows;
            double norm = 0, v;

            if( CV_MAT_TYPE(s->type) == CV_32FC1 )
            {
                const float* sptr = s->data.fl;
                float* dptr = d->data.fl;

                if( norm_type == CV_L2 )
                {
                    for( i = 0; i < len; i++ )
                    {
                        v = sptr[i];
                        norm += v*v;
                    }
                    norm = sqrt(norm);
                }
                else if( norm_type == CV_L1 )
                    for( i = 0; i < len; i++ )
                    {
                        v = fabs((double)sptr[i]);
                        norm += v;
                    }
                else
                    // CV_C: max absolute value.
                    for( i = 0; i < len; i++ )
                    {
                        v = fabs((double)sptr[i]);
                        norm = MAX(norm,v);
                    }

                // Zero norm -> output all zeros (scale 0), avoiding div-by-0.
                norm = norm > DBL_EPSILON ? 1./norm : 0.;
                for( i = 0; i < len; i++ )
                    dptr[i] = (float)(sptr[i]*norm);
                EXIT;
            }

            if( CV_MAT_TYPE(s->type) == CV_64FC1 )
            {
                const double* sptr = s->data.db;
                double* dptr = d->data.db;

                if( norm_type == CV_L2 )
                {
                    for( i = 0; i < len; i++ )
                    {
                        v = sptr[i];
                        norm += v*v;
                    }
                    norm = sqrt(norm);
                }
                else if( norm_type == CV_L1 )
                    for( i = 0; i < len; i++ )
                    {
                        v = fabs(sptr[i]);
                        norm += v;
                    }
                else
                    for( i = 0; i < len; i++ )
                    {
                        v = fabs(sptr[i]);
                        norm = MAX(norm,v);
                    }

                norm = norm > DBL_EPSILON ? 1./norm : 0.;
                for( i = 0; i < len; i++ )
                    dptr[i] = sptr[i]*norm;
                EXIT;
            }
        }
        // General path: let cvNorm handle masks / non-continuous layouts.
        scale = cvNorm( src, 0, norm_type, mask );
        scale = scale > DBL_EPSILON ? 1./scale : 0.;
        shift = 0;
    }
    else
        CV_ERROR( CV_StsBadArg, "Unknown/unsupported norm type" );

    if( !mask )
        cvConvertScale( src, dst, scale, shift );
    else
    {
        // Masked write: scale into a temporary, then copy only masked pixels.
        CvMat stub, *dmat;
        CV_CALL( dmat = cvGetMat(dst, &stub));
        CV_CALL( tmp = cvCreateMat(dmat->rows, dmat->cols, dmat->type) );
        cvConvertScale( src, tmp, scale, shift );
        cvCopy( tmp, dst, mask );
    }

    __END__;

    if( tmp )
        cvReleaseMat( &tmp );
}
// Applies a fixed-level (or Otsu-derived) threshold to src, writing to dst.
// Supports CV_THRESH_BINARY / BINARY_INV / TRUNC / TOZERO / TOZERO_INV on 8U
// and 32F data (multi-channel mats are reshaped to single-channel first).
// CV_THRESH_OTSU (8UC1 only) computes the threshold from the histogram and
// overrides `thresh`.  Returns the threshold actually used.
// When src and dst depths differ, dst must be 8UC1 and only BINARY/BINARY_INV
// are supported — implemented via cvCmpS + cvAndS.
// IPP function pointers (icv*_p) are used when available, with a plain C
// fallback (icvThresh_8u_C1R / icvThresh_32f_C1R).
CV_IMPL double
cvThreshold( const void* srcarr, void* dstarr, double thresh, double maxval, int type )
{
    CvHistogram* hist = 0;

    CV_FUNCNAME( "cvThreshold" );

    __BEGIN__;

    CvSize roi;
    int src_step, dst_step;
    CvMat src_stub, *src = (CvMat*)srcarr;
    CvMat dst_stub, *dst = (CvMat*)dstarr;
    CvMat src0, dst0;
    int coi1 = 0, coi2 = 0;
    int ithresh, imaxval, cn;
    bool use_otsu;

    CV_CALL( src = cvGetMat( src, &src_stub, &coi1 ));
    CV_CALL( dst = cvGetMat( dst, &dst_stub, &coi2 ));

    if( coi1 + coi2 )
        CV_ERROR( CV_BadCOI, "COI is not supported by the function" );

    if( !CV_ARE_CNS_EQ( src, dst ) )
        CV_ERROR( CV_StsUnmatchedFormats, "Both arrays must have equal number of channels" );

    cn = CV_MAT_CN(src->type);
    if( cn > 1 )
    {
        // Thresholding is per-element, so view multi-channel data as 1-channel.
        src = cvReshape( src, &src0, 1 );
        dst = cvReshape( dst, &dst0, 1 );
    }

    // CV_THRESH_OTSU is a flag above CV_THRESH_MASK; strip it off `type`.
    use_otsu = (type & ~CV_THRESH_MASK) == CV_THRESH_OTSU;
    type &= CV_THRESH_MASK;

    if( use_otsu )
    {
        float _ranges[] = { 0, 256 };
        float* ranges = _ranges;
        int hist_size = 256;
        void* srcarr0 = src;

        if( CV_MAT_TYPE(src->type) != CV_8UC1 )
            CV_ERROR( CV_StsNotImplemented, "Otsu method can only be used with 8uC1 images" );

        CV_CALL( hist = cvCreateHist( 1, &hist_size, CV_HIST_ARRAY, &ranges ));
        cvCalcArrHist( &srcarr0, hist );
        thresh = cvFloor(icvGetThreshVal_Otsu( hist ));
    }

    if( !CV_ARE_DEPTHS_EQ( src, dst ) )
    {
        // Mixed-depth case: emulate BINARY/BINARY_INV with compare + mask.
        if( CV_MAT_TYPE(dst->type) != CV_8UC1 )
            CV_ERROR( CV_StsUnsupportedFormat, "In case of different types destination should be 8uC1" );

        if( type != CV_THRESH_BINARY && type != CV_THRESH_BINARY_INV )
            CV_ERROR( CV_StsBadArg,
            "In case of different types only CV_THRESH_BINARY "
            "and CV_THRESH_BINARY_INV thresholding types are supported" );

        if( maxval < 0 )
        {
            CV_CALL( cvSetZero( dst ));
        }
        else
        {
            CV_CALL( cvCmpS( src, thresh, dst,
                type == CV_THRESH_BINARY ? CV_CMP_GT : CV_CMP_LE ));
            if( maxval < 255 )
                // cvCmpS writes 255 for "true"; AND clamps it down to maxval.
                CV_CALL( cvAndS( dst, cvScalarAll( maxval ), dst ));
        }
        EXIT;
    }

    if( !CV_ARE_SIZES_EQ( src, dst ) )
        CV_ERROR( CV_StsUnmatchedSizes, "" );

    roi = cvGetMatSize( src );
    if( CV_IS_MAT_CONT( src->type & dst->type ))
    {
        // Continuous data: process as a single long row.
        roi.width *= roi.height;
        roi.height = 1;
        src_step = dst_step = CV_STUB_STEP;
    }
    else
    {
        src_step = src->step;
        dst_step = dst->step;
    }

    switch( CV_MAT_DEPTH(src->type) )
    {
    case CV_8U:

        ithresh = cvFloor(thresh);
        imaxval = cvRound(maxval);
        if( type == CV_THRESH_TRUNC )
            imaxval = ithresh;
        imaxval = CV_CAST_8U(imaxval);

        if( ithresh < 0 || ithresh >= 255 )
        {
            // Threshold outside the 8-bit range: the result is either a
            // constant fill or an exact copy — no per-pixel work needed.
            if( type == CV_THRESH_BINARY || type == CV_THRESH_BINARY_INV ||
                ((type == CV_THRESH_TRUNC || type == CV_THRESH_TOZERO_INV) && ithresh < 0) ||
                (type == CV_THRESH_TOZERO && ithresh >= 255) )
            {
                int v = type == CV_THRESH_BINARY ? (ithresh >= 255 ? 0 : imaxval) :
                        type == CV_THRESH_BINARY_INV ? (ithresh >= 255 ? imaxval : 0) :
                        type == CV_THRESH_TRUNC ? imaxval : 0;

                cvSet( dst, cvScalarAll(v) );
                EXIT;
            }
            else
            {
                cvCopy( src, dst );
                EXIT;
            }
        }

        if( type == CV_THRESH_BINARY || type == CV_THRESH_BINARY_INV )
        {
            if( icvCompareC_8u_C1R_cv_p && icvAndC_8u_C1R_p )
            {
                IPPI_CALL( icvCompareC_8u_C1R_cv_p( src->data.ptr, src_step,
                    (uchar)ithresh, dst->data.ptr, dst_step, roi,
                    type == CV_THRESH_BINARY ? cvCmpGreater : cvCmpLessEq ));

                if( imaxval < 255 )
                    IPPI_CALL( icvAndC_8u_C1R_p( dst->data.ptr, dst_step,
                    (uchar)imaxval, dst->data.ptr, dst_step, roi ));
                EXIT;
            }
        }
        else if( type == CV_THRESH_TRUNC || type == CV_THRESH_TOZERO_INV )
        {
            if( icvThreshold_GTVal_8u_C1R_p )
            {
                IPPI_CALL( icvThreshold_GTVal_8u_C1R_p( src->data.ptr, src_step,
                    dst->data.ptr, dst_step, roi, (uchar)ithresh,
                    (uchar)(type == CV_THRESH_TRUNC ? ithresh : 0) ));
                EXIT;
            }
        }
        else
        {
            assert( type == CV_THRESH_TOZERO );
            if( icvThreshold_LTVal_8u_C1R_p )
            {
                // LTVal keeps values >= level, so shift the level up by one
                // to match TOZERO's strict "greater than thresh" semantics.
                ithresh = cvFloor(thresh+1.);
                ithresh = CV_CAST_8U(ithresh);
                IPPI_CALL( icvThreshold_LTVal_8u_C1R_p( src->data.ptr, src_step,
                    dst->data.ptr, dst_step, roi, (uchar)ithresh, 0 ));
                EXIT;
            }
        }

        // Plain C fallback when no IPP path fired.
        icvThresh_8u_C1R( src->data.ptr, src_step,
                          dst->data.ptr, dst_step, roi,
                          (uchar)ithresh, (uchar)imaxval, type );
        break;
    case CV_32F:

        if( type == CV_THRESH_TRUNC || type == CV_THRESH_TOZERO_INV )
        {
            if( icvThreshold_GTVal_32f_C1R_p )
            {
                IPPI_CALL( icvThreshold_GTVal_32f_C1R_p( src->data.fl, src_step,
                    dst->data.fl, dst_step, roi, (float)thresh,
                    type == CV_THRESH_TRUNC ? (float)thresh : 0 ));
                EXIT;
            }
        }
        else if( type == CV_THRESH_TOZERO )
        {
            if( icvThreshold_LTVal_32f_C1R_p )
            {
                // Nudge the level just above thresh to preserve strict ">".
                IPPI_CALL( icvThreshold_LTVal_32f_C1R_p( src->data.fl, src_step,
                    dst->data.fl, dst_step, roi, (float)(thresh*(1 + FLT_EPSILON)), 0 ));
                EXIT;
            }
        }

        icvThresh_32f_C1R( src->data.fl, src_step, dst->data.fl, dst_step, roi,
                           (float)thresh, (float)maxval, type );
        break;
    default:
        CV_ERROR( CV_BadDepth, cvUnsupportedFormat );
    }

    __END__;

    if( hist )
        cvReleaseHist( &hist );

    return thresh;
}
// Classic k-means clustering.
// samples_arr : N x dims floating-point matrix, one sample per row.
// labels_arr  : 1-D CV_32SC1 vector of N cluster indices (output; also used as
//               the random initial assignment).
// termcrit    : iteration count / epsilon stopping criteria (epsilon is
//               compared against the squared maximum center displacement).
// After convergence, any empty cluster is repopulated by stealing a sample
// from a cluster that has more than one member.
// FIX: the cleanup call previously read "cvReleaseMat( ¢ers );" — a
// mojibake corruption of "&centers" (HTML entity &cent;) that does not
// compile; restored to cvReleaseMat( &centers ).
CV_IMPL void
cvKMeans2( const CvArr* samples_arr, int cluster_count,
           CvArr* labels_arr, CvTermCriteria termcrit )
{
    CvMat* centers = 0;
    CvMat* old_centers = 0;
    CvMat* counters = 0;

    CV_FUNCNAME( "cvKMeans2" );

    __BEGIN__;

    CvMat samples_stub, labels_stub;
    CvMat* samples = (CvMat*)samples_arr;
    CvMat* labels = (CvMat*)labels_arr;
    CvMat* temp = 0;
    CvRNG rng = CvRNG(-1);
    int i, j, k, sample_count, dims;
    int ids_delta, iter;
    double max_dist;

    if( !CV_IS_MAT( samples ))
        CV_CALL( samples = cvGetMat( samples, &samples_stub ));

    if( !CV_IS_MAT( labels ))
        CV_CALL( labels = cvGetMat( labels, &labels_stub ));

    if( cluster_count < 1 )
        CV_ERROR( CV_StsOutOfRange, "Number of clusters should be positive" );

    if( CV_MAT_DEPTH(samples->type) != CV_32F || CV_MAT_TYPE(labels->type) != CV_32SC1 )
        CV_ERROR( CV_StsUnsupportedFormat,
        "samples should be floating-point matrix, cluster_idx - integer vector" );

    if( labels->rows != 1 && (labels->cols != 1 || !CV_IS_MAT_CONT(labels->type)) ||
        labels->rows + labels->cols - 1 != samples->rows )
        CV_ERROR( CV_StsUnmatchedSizes,
        "cluster_idx should be 1D vector of the same number of elements as samples' number of rows" );

    CV_CALL( termcrit = cvCheckTermCriteria( termcrit, 1e-6, 100 ));

    // Work with squared distances throughout.
    termcrit.epsilon *= termcrit.epsilon;

    sample_count = samples->rows;

    if( cluster_count > sample_count )
        cluster_count = sample_count;

    dims = samples->cols*CV_MAT_CN(samples->type);
    // Element stride for column-vector labels with row padding.
    // NOTE(review): for a row-vector label with nonzero step this computes
    // step/sizeof(int) — verify against the layouts cvGetMat can produce.
    ids_delta = labels->step ? labels->step/(int)sizeof(int) : 1;

    CV_CALL( centers = cvCreateMat( cluster_count, dims, CV_64FC1 ));
    CV_CALL( old_centers = cvCreateMat( cluster_count, dims, CV_64FC1 ));
    CV_CALL( counters = cvCreateMat( 1, cluster_count, CV_32SC1 ));

    // init centers: random initial assignment of samples to clusters.
    for( i = 0; i < sample_count; i++ )
        labels->data.i[i] = cvRandInt(&rng) % cluster_count;

    counters->cols = cluster_count; // cut down counters

    max_dist = termcrit.epsilon*2;

    for( iter = 0; iter < termcrit.max_iter; iter++ )
    {
        // compute centers: accumulate samples per cluster (loop unrolled x4).
        cvZero( centers );
        cvZero( counters );

        for( i = 0; i < sample_count; i++ )
        {
            float* s = (float*)(samples->data.ptr + i*samples->step);
            k = labels->data.i[i*ids_delta];
            double* c = (double*)(centers->data.ptr + k*centers->step);
            for( j = 0; j <= dims - 4; j += 4 )
            {
                double t0 = c[j] + s[j];
                double t1 = c[j+1] + s[j+1];
                c[j] = t0;
                c[j+1] = t1;
                t0 = c[j+2] + s[j+2];
                t1 = c[j+3] + s[j+3];
                c[j+2] = t0;
                c[j+3] = t1;
            }
            for( ; j < dims; j++ )
                c[j] += s[j];
            counters->data.i[k]++;
        }

        if( iter > 0 )
            max_dist = 0;

        for( k = 0; k < cluster_count; k++ )
        {
            double* c = (double*)(centers->data.ptr + k*centers->step);
            if( counters->data.i[k] != 0 )
            {
                double scale = 1./counters->data.i[k];
                for( j = 0; j < dims; j++ )
                    c[j] *= scale;
            }
            else
            {
                // Empty cluster: re-seed its center from a random sample.
                i = cvRandInt( &rng ) % sample_count;
                float* s = (float*)(samples->data.ptr + i*samples->step);
                for( j = 0; j < dims; j++ )
                    c[j] = s[j];
            }

            if( iter > 0 )
            {
                // Track the largest squared center displacement for the
                // epsilon stopping test.
                double dist = 0;
                double* c_o = (double*)(old_centers->data.ptr + k*old_centers->step);
                for( j = 0; j < dims; j++ )
                {
                    double t = c[j] - c_o[j];
                    dist += t*t;
                }
                if( max_dist < dist )
                    max_dist = dist;
            }
        }

        // assign labels: nearest center by squared Euclidean distance.
        for( i = 0; i < sample_count; i++ )
        {
            float* s = (float*)(samples->data.ptr + i*samples->step);
            int k_best = 0;
            double min_dist = DBL_MAX;

            for( k = 0; k < cluster_count; k++ )
            {
                double* c = (double*)(centers->data.ptr + k*centers->step);
                double dist = 0;

                j = 0;
                for( ; j <= dims - 4; j += 4 )
                {
                    double t0 = c[j] - s[j];
                    double t1 = c[j+1] - s[j+1];
                    dist += t0*t0 + t1*t1;
                    t0 = c[j+2] - s[j+2];
                    t1 = c[j+3] - s[j+3];
                    dist += t0*t0 + t1*t1;
                }
                for( ; j < dims; j++ )
                {
                    double t = c[j] - s[j];
                    dist += t*t;
                }

                if( min_dist > dist )
                {
                    min_dist = dist;
                    k_best = k;
                }
            }

            labels->data.i[i*ids_delta] = k_best;
        }

        if( max_dist < termcrit.epsilon )
            break;

        CV_SWAP( centers, old_centers, temp );
    }

    // Recount members and guarantee every cluster is non-empty by moving one
    // sample out of a cluster that still has at least two.
    cvZero( counters );
    for( i = 0; i < sample_count; i++ )
        counters->data.i[labels->data.i[i]]++;

    // ensure that we do not have empty clusters
    for( k = 0; k < cluster_count; k++ )
        if( counters->data.i[k] == 0 )
            for(;;)
            {
                i = cvRandInt(&rng) % sample_count;
                j = labels->data.i[i];
                if( counters->data.i[j] > 1 )
                {
                    labels->data.i[i] = k;
                    counters->data.i[j]--;
                    counters->data.i[k]++;
                    break;
                }
            }

    __END__;

    cvReleaseMat( &centers );      // was corrupted to "¢ers" — fixed
    cvReleaseMat( &old_centers );
    cvReleaseMat( &counters );
}
// Computes dst(i) = |src(i) - scalar| element-wise.
// Both arrays must have identical type and size; COI is not supported.
// The scalar is expanded to the element type (promoted to 32S for depths
// below CV_32S) before being handed to the per-depth kernel.
CV_IMPL void
cvAbsDiffS( const void* srcarr, void* dstarr, CvScalar scalar )
{
    // Per-depth kernel table, filled once on first use.
    static CvFuncTable adiffs_tab;
    static int inittab = 0;

    CV_FUNCNAME( "cvAbsDiffS" );

    __BEGIN__;

    CvMat src_hdr, dst_hdr;
    CvMat* src = (CvMat*)srcarr;
    CvMat* dst = (CvMat*)dstarr;
    int src_coi = 0, dst_coi = 0;
    int src_step = src->step;
    int dst_step = dst->step;
    int type, sctype;
    double scalar_buf[12];
    CvSize size;
    CvFunc2D_2A1P func;

    if( !inittab )
    {
        icvInitAbsDiffCTable( &adiffs_tab );
        inittab = 1;
    }

    CV_CALL( src = cvGetMat( src, &src_hdr, &src_coi ));
    CV_CALL( dst = cvGetMat( dst, &dst_hdr, &dst_coi ));

    if( src_coi != 0 || dst_coi != 0 )
        CV_ERROR( CV_BadCOI, "" );

    if( !CV_ARE_TYPES_EQ(src, dst) )
        CV_ERROR_FROM_CODE( CV_StsUnmatchedFormats );

    if( !CV_ARE_SIZES_EQ(src, dst) )
        CV_ERROR_FROM_CODE( CV_StsUnmatchedSizes );

    sctype = type = CV_MAT_TYPE( src->type );
    // Small integer depths get the scalar converted through 32S.
    if( CV_MAT_DEPTH(type) < CV_32S )
        sctype = (type & CV_MAT_CN_MASK) | CV_32SC1;

    size = icvGetMatSize( src );
    size.width *= CV_MAT_CN( type );
    src_step = src->step;
    dst_step = dst->step;

    if( CV_IS_MAT_CONT( src->type & dst->type ))
    {
        // Continuous layouts collapse into a single long row.
        size.width *= size.height;
        size.height = 1;
        src_step = dst_step = CV_STUB_STEP;
    }

    CV_CALL( cvScalarToRawData( &scalar, scalar_buf, sctype, 1 ));

    func = (CvFunc2D_2A1P)(adiffs_tab.fn_2d[CV_MAT_DEPTH(type)]);
    if( !func )
        CV_ERROR( CV_StsUnsupportedFormat, "" );

    IPPI_CALL( func( src->data.ptr, src_step,
                     dst->data.ptr, dst_step, size, scalar_buf ));

    __END__;
}
// type in {CV_TRAIN_ERROR, CV_TEST_ERROR} float CvGBTrees::calc_error( CvMLData* _data, int type, std::vector<float> *resp ) { float err = 0.0f; const CvMat* _sample_idx = (type == CV_TRAIN_ERROR) ? _data->get_train_sample_idx() : _data->get_test_sample_idx(); const CvMat* response = _data->get_responses(); int n = _sample_idx ? get_len(_sample_idx) : 0; n = (type == CV_TRAIN_ERROR && n == 0) ? _data->get_values()->rows : n; if (!n) return -FLT_MAX; float* pred_resp = 0; if (resp) { resp->resize(n); pred_resp = &((*resp)[0]); } else pred_resp = new float[n]; Sample_predictor predictor = Sample_predictor(this, pred_resp, _data->get_values(), _data->get_missing(), _sample_idx); //#ifdef HAVE_TBB // tbb::parallel_for(cv::BlockedRange(0,n), predictor, tbb::auto_partitioner()); //#else cv::parallel_for(cv::BlockedRange(0,n), predictor); //#endif int* sidx = _sample_idx ? _sample_idx->data.i : 0; int r_step = CV_IS_MAT_CONT(response->type) ? 1 : response->step / CV_ELEM_SIZE(response->type); if ( !problem_type() ) { for( int i = 0; i < n; i++ ) { int si = sidx ? sidx[i] : i; int d = fabs((double)pred_resp[i] - response->data.fl[si*r_step]) <= FLT_EPSILON ? 0 : 1; err += d; } err = err / (float)n * 100.0f; } else { for( int i = 0; i < n; i++ ) { int si = sidx ? sidx[i] : i; float d = pred_resp[i] - response->data.fl[si*r_step]; err += d*d; } err = err / (float)n; } return err; }