/* Builds the per-pixel undistortion lookup tables (mapx, mapy) for cvRemap().
 *
 * A           - 3x3 camera intrinsic matrix (32f or 64f); fx,fy on the diagonal,
 *               principal point (u0,v0) in the last column.
 * dist_coeffs - 1x4 or 4x1 vector (k1, k2, p1, p2): two radial and two
 *               tangential distortion coefficients.
 * mapxarr/mapyarr - output 32fC1 maps of identical size; for every destination
 *               pixel (u,v) they receive the corresponding source coordinates.
 */
CV_IMPL void
cvInitUndistortMap( const CvMat* A, const CvMat* dist_coeffs,
                    CvArr* mapxarr, CvArr* mapyarr )
{
    uchar* buffer = 0;

    CV_FUNCNAME( "cvInitUndistortMap" );

    __BEGIN__;

    float a[9], k[4];
    int coi1 = 0, coi2 = 0;
    CvMat mapxstub, *_mapx = (CvMat*)mapxarr;
    CvMat mapystub, *_mapy = (CvMat*)mapyarr;
    float *mapx, *mapy;
    CvMat _a = cvMat( 3, 3, CV_32F, a ), _k;
    int mapxstep, mapystep;
    int u, v;
    float u0, v0, fx, fy, _fx, _fy, k1, k2, p1, p2;
    CvSize size;

    CV_CALL( _mapx = cvGetMat( _mapx, &mapxstub, &coi1 ));
    CV_CALL( _mapy = cvGetMat( _mapy, &mapystub, &coi2 ));

    if( coi1 != 0 || coi2 != 0 )
        CV_ERROR( CV_BadCOI, "The function does not support COI" );

    if( CV_MAT_TYPE(_mapx->type) != CV_32FC1 )
        CV_ERROR( CV_StsUnsupportedFormat, "Both maps must have 32fC1 type" );

    if( !CV_ARE_TYPES_EQ( _mapx, _mapy ))
        CV_ERROR( CV_StsUnmatchedFormats, "" );

    if( !CV_ARE_SIZES_EQ( _mapx, _mapy ))
        CV_ERROR( CV_StsUnmatchedSizes, "" );

    /* note: && binds tighter than ||, so the depth check below reads
       "not a valid float OR double matrix" as intended */
    if( !CV_IS_MAT(A) || A->rows != 3 || A->cols != 3 ||
        CV_MAT_TYPE(A->type) != CV_32FC1 && CV_MAT_TYPE(A->type) != CV_64FC1 )
        CV_ERROR( CV_StsBadArg, "Intrinsic matrix must be a valid 3x3 floating-point matrix" );

    if( !CV_IS_MAT(dist_coeffs) || dist_coeffs->rows != 1 && dist_coeffs->cols != 1 ||
        dist_coeffs->rows*dist_coeffs->cols*CV_MAT_CN(dist_coeffs->type) != 4 ||
        CV_MAT_DEPTH(dist_coeffs->type) != CV_64F &&
        CV_MAT_DEPTH(dist_coeffs->type) != CV_32F )
        CV_ERROR( CV_StsBadArg, "Distortion coefficients must be 1x4 or 4x1 floating-point vector" );

    /* normalize both inputs to local float buffers a[] and k[] */
    cvConvert( A, &_a );
    _k = cvMat( dist_coeffs->rows, dist_coeffs->cols,
                CV_MAKETYPE(CV_32F, CV_MAT_CN(dist_coeffs->type)), k );
    cvConvert( dist_coeffs, &_k );

    u0 = a[2]; v0 = a[5];             /* principal point */
    fx = a[0]; fy = a[4];             /* focal lengths */
    _fx = 1.f/fx; _fy = 1.f/fy;       /* reciprocals for pixel->normalized */
    k1 = k[0]; k2 = k[1];             /* radial coefficients */
    p1 = k[2]; p2 = k[3];             /* tangential coefficients */

    mapxstep = _mapx->step ? _mapx->step : CV_STUB_STEP;
    mapystep = _mapy->step ? _mapy->step : CV_STUB_STEP;
    mapx = _mapx->data.fl;
    mapy = _mapy->data.fl;

    size = cvGetMatSize(_mapx);

    /* IPP fast path, currently disabled:
    if( icvUndistortGetSize_p && icvCreateMapCameraUndistort_32f_C1R_p )
    {
        int buf_size = 0;
        if( icvUndistortGetSize_p( size, &buf_size ) && buf_size > 0 )
        {
            CV_CALL( buffer = (uchar*)cvAlloc( buf_size ));
            if( icvCreateMapCameraUndistort_32f_C1R_p( mapx, mapxstep, mapy, mapystep, size,
                a[0], a[4], a[2], a[5], k[0], k[1], k[2], k[3], buffer ) >= 0 )
                EXIT;
        }
    } */

    /* steps were in bytes; convert to float elements */
    mapxstep /= sizeof(mapx[0]);
    mapystep /= sizeof(mapy[0]);

    for( v = 0; v < size.height; v++, mapx += mapxstep, mapy += mapystep )
    {
        /* y-dependent subexpressions hoisted out of the inner loop */
        float y = (v - v0)*_fy;
        float y2 = y*y;
        float _2p1y = 2*p1*y;
        float _3p1y2 = 3*p1*y2;
        float p2y2 = p2*y2;

        for( u = 0; u < size.width; u++ )
        {
            float x = (u - u0)*_fx;
            float x2 = x*x;
            float r2 = x2 + y2;
            /* radial factor d = 1 + k1*r^2 + k2*r^4 */
            float d = 1 + (k1 + k2*r2)*r2;
            /* distorted position: x*d + 2*p1*x*y + p2*(r^2 + 2*x^2), algebraically
               regrouped so the y-only terms come from the hoisted values above */
            float _u = fx*(x*(d + _2p1y) + p2y2 + (3*p2)*x2) + u0;
            float _v = fy*(y*(d + (2*p2)*x) + _3p1y2 + p1*x2) + v0;

            mapx[u] = _u;
            mapy[u] = _v;
        }
    }

    __END__;

    cvFree( &buffer );
}
// Decodes the PNG pixel data (header must have been read already) directly
// into img's buffer, configuring libpng transforms so the output matches
// img's depth and channel count. Returns false on any libpng error
// (reported via setjmp/longjmp) or if the decoder state is incomplete.
bool  PngDecoder::readData( Mat& img )
{
    bool result = false;
    // one row-pointer per scanline; png_read_image wants the whole table
    AutoBuffer<uchar*> _buffer(m_height);
    uchar** buffer = _buffer;
    int color = img.channels() > 1;
    uchar* data = img.data;
    int step = (int)img.step;

    if( m_png_ptr && m_info_ptr && m_end_info && m_width && m_height )
    {
        png_structp png_ptr = (png_structp)m_png_ptr;
        png_infop info_ptr = (png_infop)m_info_ptr;
        png_infop end_info = (png_infop)m_end_info;

        // libpng reports errors by longjmp-ing back here with nonzero
        if( setjmp( png_jmpbuf ( png_ptr ) ) == 0 )
        {
            int y;

            if( img.depth() == CV_8U && m_bit_depth == 16 )
                png_set_strip_16( png_ptr );          // 16-bit file -> 8-bit output
            else if( !isBigEndian() )
                png_set_swap( png_ptr );              // PNG is big-endian; swap on LE hosts

            if(img.channels() < 4)
            {
                /* observation: png_read_image() writes 400 bytes beyond
                 * end of data when reading a 400x118 color png
                 * "mpplus_sand.png".  OpenCV crashes even with demo
                 * programs.  Looking at the loaded image I'd say we get 4
                 * bytes per pixel instead of 3 bytes per pixel.  Test
                 * indicate that it is a good idea to always ask for
                 * stripping alpha..  18.11.2004 Axel Walthelm
                 */
                png_set_strip_alpha( png_ptr );
            }

            if( m_color_type == PNG_COLOR_TYPE_PALETTE )
                png_set_palette_to_rgb( png_ptr );    // expand palette to RGB

            // expand sub-byte grayscale to 8 bits (API renamed in libpng 1.4)
            if( m_color_type == PNG_COLOR_TYPE_GRAY && m_bit_depth < 8 )
#if PNG_LIBPNG_VER_MAJOR*100 + PNG_LIBPNG_VER_MINOR >= 104
                png_set_expand_gray_1_2_4_to_8( png_ptr );
#else
                png_set_gray_1_2_4_to_8( png_ptr );
#endif

            if( CV_MAT_CN(m_type) > 1 && color )
                png_set_bgr( png_ptr ); // convert RGB to BGR
            else if( color )
                png_set_gray_to_rgb( png_ptr ); // Gray->RGB
            else
                png_set_rgb_to_gray( png_ptr, 1, 0.299, 0.587 ); // RGB->Gray

            // commit the transform chain before reading rows
            png_read_update_info( png_ptr, info_ptr );

            // point every row at the caller's Mat so no extra copy is needed
            for( y = 0; y < m_height; y++ )
                buffer[y] = data + y*step;

            png_read_image( png_ptr, buffer );
            png_read_end( png_ptr, end_info );

            result = true;
        }
    }

    close();   // always release libpng structs / file handle
    return result;
}
// Computes the Laplacian of the source image.
// ksize 1/3 use a fixed 3x3 kernel via filter2D; larger ksize is computed as
// d2/dx2 + d2/dy2 with two separable Sobel second-derivative filters, processed
// in horizontal stripes to bound the working-buffer size.
void cv::Laplacian( InputArray _src, OutputArray _dst, int ddepth, int ksize,
                    double scale, double delta, int borderType )
{
    int stype = _src.type(), sdepth = CV_MAT_DEPTH(stype), cn = CV_MAT_CN(stype);
    if (ddepth < 0)
        ddepth = sdepth;      // negative ddepth means "same depth as source"
    _dst.create( _src.size(), CV_MAKETYPE(ddepth, cn) );

#ifdef HAVE_TEGRA_OPTIMIZATION
    // Tegra fast paths only cover the unscaled, unshifted case
    if (scale == 1.0 && delta == 0)
    {
        Mat src = _src.getMat(), dst = _dst.getMat();
        if (ksize == 1 && tegra::laplace1(src, dst, borderType))
            return;
        if (ksize == 3 && tegra::laplace3(src, dst, borderType))
            return;
        if (ksize == 5 && tegra::laplace5(src, dst, borderType))
            return;
    }
#endif

    if( ksize == 1 || ksize == 3 )
    {
        // K[0]: 4-neighbour Laplacian (ksize==1), K[1]: ksize==3 variant
        float K[2][9] =
        {
            { 0, 1, 0, 1, -4, 1, 0, 1, 0 },
            { 2, 0, 2, 0, -8, 0, 2, 0, 2 }
        };
        Mat kernel(3, 3, CV_32F, K[ksize == 3]);
        if( scale != 1 )
            kernel *= scale;    // fold the scale into the kernel itself
        filter2D( _src, _dst, ddepth, kernel, Point(-1, -1), delta, borderType );
    }
    else
    {
        // choose kernel and working depths wide enough to hold intermediates
        int ktype = std::max(CV_32F, std::max(ddepth, sdepth));
        int wdepth = sdepth == CV_8U && ksize <= 5 ? CV_16S : sdepth <= CV_32F ? CV_32F : CV_64F;
        int wtype = CV_MAKETYPE(wdepth, cn);
        Mat kd, ks;
        // kd = 2nd-derivative kernel, ks = smoothing kernel (order 0)
        getSobelKernels( kd, ks, 2, 0, ksize, false, ktype );

        CV_OCL_RUN(_dst.isUMat(),
                   ocl_Laplacian5(_src, _dst, kd, ks, scale, delta,
                                  borderType, wdepth, ddepth))

        const size_t STRIPE_SIZE = 1 << 14;   // ~16KB of source per stripe
        // fx: d2/dx2 then smooth in y; fy: smooth in x then d2/dy2
        Ptr<FilterEngine> fx = createSeparableLinearFilter(stype, wtype, kd, ks,
            Point(-1,-1), 0, borderType, borderType, Scalar() );
        Ptr<FilterEngine> fy = createSeparableLinearFilter(stype, wtype, ks, kd,
            Point(-1,-1), 0, borderType, borderType, Scalar() );

        Mat src = _src.getMat(), dst = _dst.getMat();
        int y = fx->start(src), dsty = 0, dy = 0;
        fy->start(src);
        const uchar* sptr = src.data + y*src.step;

        // stripe height: at least 1 row, at most the whole image
        int dy0 = std::min(std::max((int)(STRIPE_SIZE/(CV_ELEM_SIZE(stype)*src.cols)), 1), src.rows);
        Mat d2x( dy0 + kd.rows - 1, src.cols, wtype );
        Mat d2y( dy0 + kd.rows - 1, src.cols, wtype );

        for( ; dsty < src.rows; sptr += dy0*src.step, dsty += dy )
        {
            fx->proceed( sptr, (int)src.step, dy0, d2x.data, (int)d2x.step );
            dy = fy->proceed( sptr, (int)src.step, dy0, d2y.data, (int)d2y.step );
            if( dy > 0 )
            {
                Mat dstripe = dst.rowRange(dsty, dsty + dy);
                d2x.rows = d2y.rows = dy; // modify the headers, which should work
                d2x += d2y;               // Laplacian = d2/dx2 + d2/dy2
                d2x.convertTo( dstripe, ddepth, scale, delta );
            }
        }
    }
}
/// Returns the number of channels encoded in the type flags of this buffer.
inline int CudaMem::channels() const
{
    const int cn = CV_MAT_CN(flags);
    return cn;
}
// Shared implementation behind cvLoadImage/cvLoadImageM/imread.
// hdrtype selects the returned container: LOAD_CVMAT -> CvMat*,
// LOAD_IMAGE -> IplImage*, LOAD_MAT -> the caller-supplied cv::Mat*.
// Returns 0 (null) on any failure; on failure every allocated container
// is released first.
static void* imread_( const string& filename, int flags, int hdrtype, Mat* mat=0 )
{
    IplImage* image = 0;
    CvMat *matrix = 0;
    Mat temp, *data = &temp;   // decode target; aliases *mat for LOAD_MAT

    ImageDecoder decoder = findDecoder(filename);
    if( decoder.empty() )
        return 0;
    decoder->setSource(filename);
    if( !decoder->readHeader() )
        return 0;

    CvSize size;
    size.width = decoder->width();
    size.height = decoder->height();

    int type = decoder->type();
    // flags == -1 (IMREAD_UNCHANGED) keeps the decoder's native type
    if( flags != -1 )
    {
        // without ANYDEPTH, force 8-bit output
        if( (flags & CV_LOAD_IMAGE_ANYDEPTH) == 0 )
            type = CV_MAKETYPE(CV_8U, CV_MAT_CN(type));

        // COLOR forces 3 channels; ANYCOLOR keeps color only if the file has it
        if( (flags & CV_LOAD_IMAGE_COLOR) != 0 ||
           ((flags & CV_LOAD_IMAGE_ANYCOLOR) != 0 && CV_MAT_CN(type) > 1) )
            type = CV_MAKETYPE(CV_MAT_DEPTH(type), 3);
        else
            type = CV_MAKETYPE(CV_MAT_DEPTH(type), 1);
    }

    if( hdrtype == LOAD_CVMAT || hdrtype == LOAD_MAT )
    {
        if( hdrtype == LOAD_CVMAT )
        {
            matrix = cvCreateMat( size.height, size.width, type );
            temp = cvarrToMat(matrix);   // header only; decodes into matrix's data
        }
        else
        {
            mat->create( size.height, size.width, type );
            data = mat;                  // decode straight into the caller's Mat
        }
    }
    else
    {
        image = cvCreateImage( size, cvIplDepth(type), CV_MAT_CN(type) );
        temp = cvarrToMat(image);        // header only; decodes into image's data
    }

    if( !decoder->readData( *data ))
    {
        // decode failed: release whichever container was created
        cvReleaseImage( &image );
        cvReleaseMat( &matrix );
        if( mat )
            mat->release();
        return 0;
    }

    return hdrtype == LOAD_CVMAT ? (void*)matrix :
        hdrtype == LOAD_IMAGE ? (void*)image : (void*)mat;
}
/* Extracts a rectangle from srcarr with sub-pixel accuracy, centered at
 * `center`, into dstarr (whose size defines the rectangle size).
 * Supports 1- and 3-channel arrays; depths must either match or be the
 * special 8u source -> 32f destination combination. Dispatches to a
 * per-type low-level function from a lazily initialized function table. */
CV_IMPL void
cvGetRectSubPix( const void* srcarr, void* dstarr, CvPoint2D32f center )
{
    // gr_tab[0]: 1-channel functions, gr_tab[1]: 3-channel functions,
    // indexed by source depth; filled once on first call
    static CvFuncTable gr_tab[2];
    static int inittab = 0;
    CV_FUNCNAME( "cvGetRectSubPix" );

    __BEGIN__;

    CvMat srcstub, *src = (CvMat*)srcarr;
    CvMat dststub, *dst = (CvMat*)dstarr;
    CvSize src_size, dst_size;
    CvGetRectSubPixFunc func;
    int cn, src_step, dst_step;

    if( !inittab )
    {
        icvInitGetRectSubPixC1RTable( gr_tab + 0 );
        icvInitGetRectSubPixC3RTable( gr_tab + 1 );
        inittab = 1;
    }

    if( !CV_IS_MAT(src))
        CV_CALL( src = cvGetMat( src, &srcstub ));

    if( !CV_IS_MAT(dst))
        CV_CALL( dst = cvGetMat( dst, &dststub ));

    cn = CV_MAT_CN( src->type );

    if( (cn != 1 && cn != 3) || !CV_ARE_CNS_EQ( src, dst ))
        CV_ERROR( CV_StsUnsupportedFormat, "" );

    src_size = cvGetMatSize( src );
    dst_size = cvGetMatSize( dst );
    src_step = src->step ? src->step : CV_STUB_STEP;
    dst_step = dst->step ? dst->step : CV_STUB_STEP;

    // deliberately relaxed: the rectangle may extend outside the source
    //if( dst_size.width > src_size.width || dst_size.height > src_size.height )
    //    CV_ERROR( CV_StsBadSize, "destination ROI must be smaller than source ROI" );

    if( CV_ARE_DEPTHS_EQ( src, dst ))
    {
        // same-depth path: pick the function matching the source depth
        func = (CvGetRectSubPixFunc)(gr_tab[cn != 1].fn_2d[CV_MAT_DEPTH(src->type)]);
    }
    else
    {
        // mixed-depth path: only 8u source -> 32f destination is supported,
        // stored in table slot 1
        if( CV_MAT_DEPTH( src->type ) != CV_8U || CV_MAT_DEPTH( dst->type ) != CV_32F )
            CV_ERROR( CV_StsUnsupportedFormat, "" );
        func = (CvGetRectSubPixFunc)(gr_tab[cn != 1].fn_2d[1]);
    }

    if( !func )
        CV_ERROR( CV_StsUnsupportedFormat, "" );

    IPPI_CALL( func( src->data.ptr, src_step, src_size,
                     dst->data.ptr, dst_step, dst_size, center ));

    __END__;
}
/* Computes the integral image (and optionally the squared-sum and tilted-sum
 * integral images) of an 8uC1 source.
 *
 * image          - source, must be 8UC1
 * sumImage       - 32s sum image, one pixel larger than source in each dimension
 * sumSqImage     - optional 64s squared-sum image, same size as sumImage
 * tiltedSumImage - optional 45-degree tilted sum, same size/type as sumImage;
 *                  requires sumSqImage and single-channel data
 *
 * Fixes vs. previous revision (error reporting only, no algorithmic change):
 *  - CV_FUNCNAME used the stale name "cvIntegralImage", so error messages
 *    pointed at a non-existent function; now reports "cvIntegral".
 *  - missing space in a concatenated error string produced "...arrayand...".
 */
CV_IMPL void
cvIntegral( const CvArr* image, CvArr* sumImage,
            CvArr* sumSqImage, CvArr* tiltedSumImage )
{
    CV_FUNCNAME( "cvIntegral" );

    __BEGIN__;

    CvMat src_stub, *src = (CvMat*)image;
    CvMat sum_stub, *sum = (CvMat*)sumImage;
    CvMat sqsum_stub, *sqsum = (CvMat*)sumSqImage;
    CvMat tilted_stub, *tilted = (CvMat*)tiltedSumImage;
    int coi0 = 0, coi1 = 0, coi2 = 0, coi3 = 0;
    //int depth;
    int cn;
    int src_step, sum_step, sqsum_step, tilted_step;
    CvSize size;

    CV_CALL( src = cvGetMat( src, &src_stub, &coi0 ));
    CV_CALL( sum = cvGetMat( sum, &sum_stub, &coi1 ));

    // sum image carries an extra leading row/column of zeros
    if( sum->width != src->width + 1 || sum->height != src->height + 1 )
        CV_ERROR( CV_StsUnmatchedSizes, "" );

    if(CV_MAT_DEPTH(src->type)!=CV_8U || CV_MAT_CN(src->type)!=1)
        CV_ERROR( CV_StsUnsupportedFormat, "the source array must be 8UC1");

    if( CV_MAT_DEPTH( sum->type ) != CV_32S ||
        !CV_ARE_CNS_EQ( src, sum ))
        CV_ERROR( CV_StsUnsupportedFormat,
        "Sum array must have 32s type in case of 8u source array "
        "and the same number of channels as the source array" );

    if( sqsum )
    {
        CV_CALL( sqsum = cvGetMat( sqsum, &sqsum_stub, &coi2 ));
        if( !CV_ARE_SIZES_EQ( sum, sqsum ) )
            CV_ERROR( CV_StsUnmatchedSizes, "" );
        if( CV_MAT_DEPTH( sqsum->type ) != CV_64S || !CV_ARE_CNS_EQ( src, sqsum ))
            CV_ERROR( CV_StsUnsupportedFormat,
                      "Squares sum array must be 64s "
                      "and the same number of channels as the source array" );
    }

    if( tilted )
    {
        if( !sqsum )
            CV_ERROR( CV_StsNullPtr,
            "Squared sum array must be passed if tilted sum array is passed" );

        CV_CALL( tilted = cvGetMat( tilted, &tilted_stub, &coi3 ));
        if( !CV_ARE_SIZES_EQ( sum, tilted ) )
            CV_ERROR( CV_StsUnmatchedSizes, "" );
        if( !CV_ARE_TYPES_EQ( sum, tilted ) )
            CV_ERROR( CV_StsUnmatchedFormats,
                      "Sum and tilted sum must have the same types" );
        if( CV_MAT_CN(tilted->type) != 1 )
            CV_ERROR( CV_StsNotImplemented,
                      "Tilted sum can not be computed for multi-channel arrays" );
    }

    if( coi0 || coi1 || coi2 || coi3 )
        CV_ERROR( CV_BadCOI, "COI is not supported by the function" );

    //depth = CV_MAT_DEPTH(src->type);
    cn = CV_MAT_CN(src->type);
    size = cvGetMatSize(src);

    // CV_STUB_STEP stands in for "continuous single-row" arrays with step == 0
    src_step = src->step ? src->step : CV_STUB_STEP;
    sum_step = sum->step ? sum->step : CV_STUB_STEP;
    sqsum_step = !sqsum ? 0 : sqsum->step ? sqsum->step : CV_STUB_STEP;
    tilted_step = !tilted ? 0 : tilted->step ? tilted->step : CV_STUB_STEP;

    if( cn == 1 )
    {
        cvIntegralImage_8u32s64s_C1R( src->data.ptr, src_step,
                                      (int*)(sum->data.ptr), sum_step,
                                      sqsum ? (int64*)(sqsum->data.ptr) : 0, sqsum_step,
                                      tilted ? (int*)(tilted->data.ptr) : 0, tilted_step,
                                      size );
    }

    __END__;
}
// Configures the box filter: validates the src/dst type combination, lets the
// base class allocate the ring buffer, then selects the row (horizontal sum)
// and column (vertical sum) kernels matching the depth pair.
// Supported pairs: 8u->8u (normalized only), 8u->16s / 8u->32s (unnormalized
// only), 32f->32f (either).
void CvBoxFilter::init( int _max_width, int _src_type, int _dst_type,
                        bool _normalized, CvSize _ksize,
                        CvPoint _anchor, int _border_mode,
                        CvScalar _border_value )
{
    CV_FUNCNAME( "CvBoxFilter::init" );

    __BEGIN__;

    sum = 0;
    normalized = _normalized;

    // normalized: types must match exactly; unnormalized: only channel
    // counts must match (&& binds tighter than ||, so this reads as two
    // alternative failure conditions)
    if( normalized && CV_MAT_TYPE(_src_type) != CV_MAT_TYPE(_dst_type) ||
        !normalized && CV_MAT_CN(_src_type) != CV_MAT_CN(_dst_type))
        CV_ERROR( CV_StsUnmatchedFormats,
        "In case of normalized box filter input and output must have the same type.\n"
        "In case of unnormalized box filter the number of input and output channels must be the same" );

    // accumulator depth: integer sums for 8u input, double sums otherwise
    min_depth = CV_MAT_DEPTH(_src_type) == CV_8U ? CV_32S : CV_64F;

    CvBaseImageFilter::init( _max_width, _src_type, _dst_type, 1, _ksize,
                             _anchor, _border_mode, _border_value );

    // normalization factor: average over the kernel area, or plain sum
    scale = normalized ? 1./(ksize.width*ksize.height) : 1;

    if( CV_MAT_DEPTH(src_type) == CV_8U )
        x_func = (CvRowFilterFunc)icvSumRow_8u32s;
    else if( CV_MAT_DEPTH(src_type) == CV_32F )
        x_func = (CvRowFilterFunc)icvSumRow_32f64f;
    else
        CV_ERROR( CV_StsUnsupportedFormat, "Unknown/unsupported input image format" );

    if( CV_MAT_DEPTH(dst_type) == CV_8U )
    {
        if( !normalized )
            CV_ERROR( CV_StsBadArg, "Only normalized box filter can be used for 8u->8u transformation" );
        y_func = (CvColumnFilterFunc)icvSumCol_32s8u;
    }
    else if( CV_MAT_DEPTH(dst_type) == CV_16S )
    {
        if( normalized || CV_MAT_DEPTH(src_type) != CV_8U )
            CV_ERROR( CV_StsBadArg, "Only 8u->16s unnormalized box filter is supported in case of 16s output" );
        y_func = (CvColumnFilterFunc)icvSumCol_32s16s;
    }
    else if( CV_MAT_DEPTH(dst_type) == CV_32S )
    {
        if( normalized || CV_MAT_DEPTH(src_type) != CV_8U )
            CV_ERROR( CV_StsBadArg, "Only 8u->32s unnormalized box filter is supported in case of 32s output");
        y_func = (CvColumnFilterFunc)icvSumCol_32s32s;
    }
    else if( CV_MAT_DEPTH(dst_type) == CV_32F )
    {
        if( CV_MAT_DEPTH(src_type) != CV_32F )
            CV_ERROR( CV_StsBadArg, "Only 32f->32f box filter (normalized or not) is supported in case of 32f output" );
        y_func = (CvColumnFilterFunc)icvSumCol_64f32f;
    }
    else{
        CV_ERROR( CV_StsBadArg, "Unknown/unsupported destination image format" );
    }

    __END__;
}
/* C-API wrapper over cv::convexHull.
 * array        - input point set: a point sequence or a 1-row/1-column CvMat
 * hull_storage - either a CvMemStorage (result returned as a new sequence)
 *                or a CvMat (hull written in place, matrix header shrunk)
 * orientation  - CV_CLOCKWISE / CV_COUNTER_CLOCKWISE
 * return_points- nonzero: hull holds copies of the points;
 *                zero: hull holds pointers/indices into the input
 * Returns the hull sequence when writing to storage, 0 when writing to a
 * matrix (or when the input is empty). */
CV_IMPL CvSeq*
cvConvexHull2( const CvArr* array, void* hull_storage,
               int orientation, int return_points )
{
    CvMat* mat = 0;
    CvContour contour_header;
    CvSeq hull_header;
    CvSeqBlock block, hullblock;
    CvSeq* ptseq = 0;
    CvSeq* hullseq = 0;

    if( CV_IS_SEQ( array ))
    {
        ptseq = (CvSeq*)array;
        if( !CV_IS_SEQ_POINT_SET( ptseq ))
            CV_Error( CV_StsBadArg, "Unsupported sequence type" );
        if( hull_storage == 0 )
            hull_storage = ptseq->storage;   // default: same storage as input
    }
    else
    {
        // wrap the matrix in a temporary point-sequence header (no copy)
        ptseq = cvPointSeqFromMat( CV_SEQ_KIND_GENERIC, array, &contour_header, &block );
    }

    bool isStorage = isStorageOrMat(hull_storage);

    if(isStorage)
    {
        if( return_points )
        {
            // hull of point copies, same element type as the input
            hullseq = cvCreateSeq(CV_SEQ_KIND_CURVE|CV_SEQ_ELTYPE(ptseq)|
                                  CV_SEQ_FLAG_CLOSED|CV_SEQ_FLAG_CONVEX,
                                  sizeof(CvContour), sizeof(CvPoint),(CvMemStorage*)hull_storage );
        }
        else
        {
            // hull of pointers back into the input sequence
            hullseq = cvCreateSeq( CV_SEQ_KIND_CURVE|CV_SEQ_ELTYPE_PPOINT|
                                   CV_SEQ_FLAG_CLOSED|CV_SEQ_FLAG_CONVEX,
                                   sizeof(CvContour), sizeof(CvPoint*), (CvMemStorage*)hull_storage );
        }
    }
    else
    {
        mat = (CvMat*)hull_storage;

        if( (mat->cols != 1 && mat->rows != 1) || !CV_IS_MAT_CONT(mat->type))
            CV_Error( CV_StsBadArg,
            "The hull matrix should be continuous and have a single row or a single column" );

        if( mat->cols + mat->rows - 1 < ptseq->total )
            CV_Error( CV_StsBadSize, "The hull matrix size might be not enough to fit the hull" );

        // matrix may hold either point copies (same type as input) or indices
        if( CV_MAT_TYPE(mat->type) != CV_SEQ_ELTYPE(ptseq) &&
            CV_MAT_TYPE(mat->type) != CV_32SC1 )
            CV_Error( CV_StsUnsupportedFormat,
            "The hull matrix must have the same type as input or 32sC1 (integers)" );

        // expose the matrix data as a writable sequence header
        hullseq = cvMakeSeqHeaderForArray(
            CV_SEQ_KIND_CURVE|CV_MAT_TYPE(mat->type)|CV_SEQ_FLAG_CLOSED,
            sizeof(hull_header), CV_ELEM_SIZE(mat->type), mat->data.ptr,
            mat->cols + mat->rows - 1, &hull_header, &hullblock );
        cvClearSeq( hullseq );
    }

    int hulltype = CV_SEQ_ELTYPE(hullseq);
    int total = ptseq->total;
    if( total == 0 )
    {
        if( !isStorage )
            CV_Error( CV_StsBadSize,
            "Point sequence can not be empty if the output is matrix" );
        return 0;
    }

    cv::AutoBuffer<double> _ptbuf;
    cv::Mat h0;
    // returnPoints == true only when the hull elements are 2-channel points;
    // otherwise the C++ API returns indices
    cv::convexHull(cv::cvarrToMat(ptseq, false, false, 0, &_ptbuf), h0,
                   orientation == CV_CLOCKWISE, CV_MAT_CN(hulltype) == 2);

    if( hulltype == CV_SEQ_ELTYPE_PPOINT )
    {
        // translate returned indices into pointers to the original elements
        const int* idx = h0.ptr<int>();
        int ctotal = (int)h0.total();
        for( int i = 0; i < ctotal; i++ )
        {
            void* ptr = cvGetSeqElem(ptseq, idx[i]);
            cvSeqPush( hullseq, &ptr );
        }
    }
    else
        cvSeqPushMulti(hullseq, h0.ptr(), (int)h0.total());

    if (isStorage)
    {
        return hullseq;
    }
    else
    {
        // shrink the caller's matrix header to the actual hull size
        if( mat->rows > mat->cols )
            mat->rows = hullseq->total;
        else
            mat->cols = hullseq->total;
        return 0;
    }
}
// Column pass of the unnormalized 8u->16s box filter: maintains a sliding
// vertical sum over `ksize` row-sum buffers (produced by the row pass) and
// writes each completed window sum to dst.
// When the full kernel sum is guaranteed to fit in 16 bits (ktotal*255 < 32768,
// i.e. ktotal < 128) a plain truncating cast is used; otherwise values are
// saturated with CV_CAST_16S.
static void
icvSumCol_32s16s( const int** src, short* dst,
                  int dst_step, int count, void* params )
{
    CvBoxFilter* state = (CvBoxFilter*)params;
    int ksize = state->get_kernel_size().height;
    int ktotal = ksize*state->get_kernel_size().width;
    int i, width = state->get_width();
    int cn = CV_MAT_CN(state->get_src_type());
    int* sum = (int*)state->get_sum_buf();          // running column sums
    int* _sum_count = state->get_sum_count_ptr();   // rows accumulated so far
    int sum_count = *_sum_count;

    dst_step /= sizeof(dst[0]);   // bytes -> elements
    width *= cn;                  // process interleaved channels as one row
    src += sum_count;             // skip rows already folded into `sum`
    count += ksize - 1 - sum_count;

    for( ; count--; src++ )
    {
        const int* sp = src[0];
        if( sum_count+1 < ksize )
        {
            // warm-up: window not full yet, accumulate only (unrolled x2)
            for( i = 0; i <= width - 2; i += 2 )
            {
                int s0 = sum[i] + sp[i], s1 = sum[i+1] + sp[i+1];
                sum[i] = s0; sum[i+1] = s1;
            }
            for( ; i < width; i++ )
                sum[i] += sp[i];
            sum_count++;
        }
        else if( ktotal < 128 )
        {
            // steady state, no overflow possible: add newest row, emit,
            // then subtract the oldest row to slide the window
            const int* sm = src[-ksize+1];
            for( i = 0; i <= width - 2; i += 2 )
            {
                int s0 = sum[i] + sp[i], s1 = sum[i+1] + sp[i+1];
                dst[i] = (short)s0; dst[i+1] = (short)s1;
                s0 -= sm[i]; s1 -= sm[i+1];
                sum[i] = s0; sum[i+1] = s1;
            }
            for( ; i < width; i++ )
            {
                int s0 = sum[i] + sp[i];
                dst[i] = (short)s0;
                sum[i] = s0 - sm[i];
            }
            dst += dst_step;
        }
        else
        {
            // steady state with possible 16-bit overflow: saturate on store
            const int* sm = src[-ksize+1];
            for( i = 0; i <= width - 2; i += 2 )
            {
                int s0 = sum[i] + sp[i], s1 = sum[i+1] + sp[i+1];
                dst[i] = CV_CAST_16S(s0); dst[i+1] = CV_CAST_16S(s1);
                s0 -= sm[i]; s1 -= sm[i+1];
                sum[i] = s0; sum[i+1] = s1;
            }
            for( ; i < width; i++ )
            {
                int s0 = sum[i] + sp[i];
                dst[i] = CV_CAST_16S(s0);
                sum[i] = s0 - sm[i];
            }
            dst += dst_step;
        }
    }

    *_sum_count = sum_count;
}
// Column pass of the 32f->32f box filter (normalized or not): keeps a sliding
// vertical sum of double-precision row sums and writes each completed window
// to dst as float, multiplied by `scale` when the filter is normalized.
static void
icvSumCol_64f32f( const double** src, float* dst,
                  int dst_step, int count, void* params )
{
    CvBoxFilter* state = (CvBoxFilter*)params;
    int ksize = state->get_kernel_size().height;
    int i, width = state->get_width();
    int cn = CV_MAT_CN(state->get_src_type());
    double scale = state->get_scale();              // 1/(kw*kh) when normalized
    bool normalized = state->is_normalized();
    double* sum = (double*)state->get_sum_buf();    // running column sums
    int* _sum_count = state->get_sum_count_ptr();   // rows accumulated so far
    int sum_count = *_sum_count;

    dst_step /= sizeof(dst[0]);   // bytes -> elements
    width *= cn;                  // treat interleaved channels as one row
    src += sum_count;             // skip rows already folded into `sum`
    count += ksize - 1 - sum_count;

    for( ; count--; src++ )
    {
        const double* sp = src[0];
        if( sum_count+1 < ksize )
        {
            // warm-up: window not full yet, accumulate only (unrolled x2)
            for( i = 0; i <= width - 2; i += 2 )
            {
                double s0 = sum[i] + sp[i], s1 = sum[i+1] + sp[i+1];
                sum[i] = s0; sum[i+1] = s1;
            }
            for( ; i < width; i++ )
                sum[i] += sp[i];
            sum_count++;
        }
        else
        {
            // steady state: add newest row, emit (scaled if normalized),
            // subtract oldest row to slide the window down
            const double* sm = src[-ksize+1];
            if( normalized )
                for( i = 0; i <= width - 2; i += 2 )
                {
                    double s0 = sum[i] + sp[i], s1 = sum[i+1] + sp[i+1];
                    double t0 = s0*scale, t1 = s1*scale;
                    s0 -= sm[i]; s1 -= sm[i+1];
                    dst[i] = (float)t0; dst[i+1] = (float)t1;
                    sum[i] = s0; sum[i+1] = s1;
                }
            else
                for( i = 0; i <= width - 2; i += 2 )
                {
                    double s0 = sum[i] + sp[i], s1 = sum[i+1] + sp[i+1];
                    dst[i] = (float)s0; dst[i+1] = (float)s1;
                    s0 -= sm[i]; s1 -= sm[i+1];
                    sum[i] = s0; sum[i+1] = s1;
                }
            // scalar tail: note the multiply by scale is unconditional here,
            // which is still correct because scale == 1 when not normalized
            for( ; i < width; i++ )
            {
                double s0 = sum[i] + sp[i], t0 = s0*scale;
                sum[i] = s0 - sm[i];
                dst[i] = (float)t0;
            }
            dst += dst_step;
        }
    }

    *_sum_count = sum_count;
}
/// Channel count as exposed to OpenCL kernels: 3-channel matrices are
/// reported as 4 (data is stored with a padding channel); all other
/// channel counts are returned unchanged.
inline int oclMat::oclchannels() const
{
    const int cn = CV_MAT_CN(flags);
    if( cn == 3 )
        return 4;
    return cn;
}
/* Classic Lloyd k-means over the rows of `samples_arr` (32f matrix, one
 * sample per row). `labels_arr` (32sC1 vector with samples->rows elements)
 * receives the cluster index of each sample. Iterates until the maximum
 * squared center displacement drops below termcrit.epsilon^2 or
 * termcrit.max_iter is reached; finally guarantees no cluster is empty by
 * re-assigning random samples.
 *
 * BUGFIX vs. previous revision: the first cleanup call read
 * "cvReleaseMat( ¢ers )" — the '&' of "&centers" had been corrupted into
 * a cent-sign entity, which does not compile; restored to &centers.
 */
CV_IMPL void
cvKMeans2( const CvArr* samples_arr, int cluster_count,
           CvArr* labels_arr, CvTermCriteria termcrit )
{
    CvMat* centers = 0;
    CvMat* old_centers = 0;
    CvMat* counters = 0;

    CV_FUNCNAME( "cvKMeans2" );

    __BEGIN__;

    CvMat samples_stub, labels_stub;
    CvMat* samples = (CvMat*)samples_arr;
    CvMat* labels = (CvMat*)labels_arr;
    CvMat* temp = 0;
    CvRNG rng = CvRNG(-1);
    int i, j, k, sample_count, dims;
    int ids_delta, iter;
    double max_dist;

    if( !CV_IS_MAT( samples ))
        CV_CALL( samples = cvGetMat( samples, &samples_stub ));

    if( !CV_IS_MAT( labels ))
        CV_CALL( labels = cvGetMat( labels, &labels_stub ));

    if( cluster_count < 1 )
        CV_ERROR( CV_StsOutOfRange, "Number of clusters should be positive" );

    if( CV_MAT_DEPTH(samples->type) != CV_32F || CV_MAT_TYPE(labels->type) != CV_32SC1 )
        CV_ERROR( CV_StsUnsupportedFormat,
        "samples should be floating-point matrix, cluster_idx - integer vector" );

    if( labels->rows != 1 && (labels->cols != 1 || !CV_IS_MAT_CONT(labels->type)) ||
        labels->rows + labels->cols - 1 != samples->rows )
        CV_ERROR( CV_StsUnmatchedSizes,
        "cluster_idx should be 1D vector of the same number of elements as samples' number of rows" );

    CV_CALL( termcrit = cvCheckTermCriteria( termcrit, 1e-6, 100 ));

    // compare squared distances against squared epsilon
    termcrit.epsilon *= termcrit.epsilon;
    sample_count = samples->rows;

    if( cluster_count > sample_count )
        cluster_count = sample_count;

    // one sample = row of dims scalar values (channels are flattened)
    dims = samples->cols*CV_MAT_CN(samples->type);
    // element stride of the label vector (step==0 means continuous column)
    ids_delta = labels->step ? labels->step/(int)sizeof(int) : 1;

    CV_CALL( centers = cvCreateMat( cluster_count, dims, CV_64FC1 ));
    CV_CALL( old_centers = cvCreateMat( cluster_count, dims, CV_64FC1 ));
    CV_CALL( counters = cvCreateMat( 1, cluster_count, CV_32SC1 ));

    // init: random label for every sample
    for( i = 0; i < sample_count; i++ )
        labels->data.i[i] = cvRandInt(&rng) % cluster_count;

    counters->cols = cluster_count; // cut down counters
    max_dist = termcrit.epsilon*2;

    for( iter = 0; iter < termcrit.max_iter; iter++ )
    {
        // --- step 1: recompute centers as the mean of their members ---
        cvZero( centers );
        cvZero( counters );

        for( i = 0; i < sample_count; i++ )
        {
            float* s = (float*)(samples->data.ptr + i*samples->step);
            k = labels->data.i[i*ids_delta];
            double* c = (double*)(centers->data.ptr + k*centers->step);
            for( j = 0; j <= dims - 4; j += 4 )   // unrolled x4
            {
                double t0 = c[j] + s[j];
                double t1 = c[j+1] + s[j+1];

                c[j] = t0;
                c[j+1] = t1;

                t0 = c[j+2] + s[j+2];
                t1 = c[j+3] + s[j+3];

                c[j+2] = t0;
                c[j+3] = t1;
            }
            for( ; j < dims; j++ )
                c[j] += s[j];
            counters->data.i[k]++;
        }

        if( iter > 0 )
            max_dist = 0;

        for( k = 0; k < cluster_count; k++ )
        {
            double* c = (double*)(centers->data.ptr + k*centers->step);
            if( counters->data.i[k] != 0 )
            {
                double scale = 1./counters->data.i[k];
                for( j = 0; j < dims; j++ )
                    c[j] *= scale;
            }
            else
            {
                // empty cluster: re-seed its center from a random sample
                i = cvRandInt( &rng ) % sample_count;
                float* s = (float*)(samples->data.ptr + i*samples->step);
                for( j = 0; j < dims; j++ )
                    c[j] = s[j];
            }

            if( iter > 0 )
            {
                // track the largest squared center displacement
                double dist = 0;
                double* c_o = (double*)(old_centers->data.ptr + k*old_centers->step);
                for( j = 0; j < dims; j++ )
                {
                    double t = c[j] - c_o[j];
                    dist += t*t;
                }
                if( max_dist < dist )
                    max_dist = dist;
            }
        }

        // --- step 2: assign each sample to its nearest center ---
        for( i = 0; i < sample_count; i++ )
        {
            float* s = (float*)(samples->data.ptr + i*samples->step);
            int k_best = 0;
            double min_dist = DBL_MAX;

            for( k = 0; k < cluster_count; k++ )
            {
                double* c = (double*)(centers->data.ptr + k*centers->step);
                double dist = 0;

                j = 0;
                for( ; j <= dims - 4; j += 4 )    // unrolled x4
                {
                    double t0 = c[j] - s[j];
                    double t1 = c[j+1] - s[j+1];
                    dist += t0*t0 + t1*t1;
                    t0 = c[j+2] - s[j+2];
                    t1 = c[j+3] - s[j+3];
                    dist += t0*t0 + t1*t1;
                }
                for( ; j < dims; j++ )
                {
                    double t = c[j] - s[j];
                    dist += t*t;
                }

                if( min_dist > dist )
                {
                    min_dist = dist;
                    k_best = k;
                }
            }

            labels->data.i[i*ids_delta] = k_best;
        }

        if( max_dist < termcrit.epsilon )
            break;

        CV_SWAP( centers, old_centers, temp );
    }

    // recount cluster populations from the final labels
    cvZero( counters );
    for( i = 0; i < sample_count; i++ )
        counters->data.i[labels->data.i[i]]++;

    // ensure that we do not have empty clusters: steal a random sample
    // from a cluster that has more than one member
    for( k = 0; k < cluster_count; k++ )
        if( counters->data.i[k] == 0 )
            for(;;)
            {
                i = cvRandInt(&rng) % sample_count;
                j = labels->data.i[i];
                if( counters->data.i[j] > 1 )
                {
                    labels->data.i[i] = k;
                    counters->data.i[j]--;
                    counters->data.i[k]++;
                    break;
                }
            }

    __END__;

    cvReleaseMat( &centers );
    cvReleaseMat( &old_centers );
    cvReleaseMat( &counters );
}
/* Graph-cuts stereo correspondence: computes left/right disparity maps for an
 * 8uC1 rectified pair using iterative alpha-expansion moves over a randomly
 * shuffled disparity list, minimizing the energy until no expansion improves
 * it for a full cycle or maxIters passes complete.
 * If useDisparityGuess is nonzero, dispLeft/dispRight seed the solution;
 * otherwise the maps start fully occluded. */
CV_IMPL void cvFindStereoCorrespondenceGC( const CvArr* _left, const CvArr* _right,
    CvArr* _dispLeft, CvArr* _dispRight, CvStereoGCState* state, int useDisparityGuess )
{
    CvStereoGCState2 state2;
    state2.orphans = 0;
    state2.maxOrphans = 0;

    CV_FUNCNAME( "cvFindStereoCorrespondenceGC" );

    __BEGIN__;

    CvMat lstub, *left = cvGetMat( _left, &lstub );
    CvMat rstub, *right = cvGetMat( _right, &rstub );
    CvMat dlstub, *dispLeft = cvGetMat( _dispLeft, &dlstub );
    CvMat drstub, *dispRight = cvGetMat( _dispRight, &drstub );
    CvSize size;
    int iter, i, nZeroExpansions = 0;
    CvRNG rng = cvRNG(-1);
    int* disp;
    CvMat _disp;
    int64 E;

    CV_ASSERT( state != 0 );
    CV_ASSERT( CV_ARE_SIZES_EQ(left, right) && CV_ARE_TYPES_EQ(left, right) &&
               CV_MAT_TYPE(left->type) == CV_8UC1 );
    CV_ASSERT( !dispLeft ||
        (CV_ARE_SIZES_EQ(dispLeft, left) && CV_MAT_CN(dispLeft->type) == 1) );
    CV_ASSERT( !dispRight ||
        (CV_ARE_SIZES_EQ(dispRight, left) && CV_MAT_CN(dispRight->type) == 1) );

    size = cvGetSize(left);
    // (re)allocate the cached working buffers when the image size changed
    if( !state->left || state->left->width != size.width || state->left->height != size.height )
    {
        // pack the graph structs into integer matrices (ints per struct)
        int pcn = (int)(sizeof(GCVtx*)/sizeof(int));
        int vcn = (int)(sizeof(GCVtx)/sizeof(int));
        int ecn = (int)(sizeof(GCEdge)/sizeof(int));
        cvReleaseMat( &state->left );
        cvReleaseMat( &state->right );
        cvReleaseMat( &state->ptrLeft );
        cvReleaseMat( &state->ptrRight );
        cvReleaseMat( &state->dispLeft );
        cvReleaseMat( &state->dispRight );

        state->left = cvCreateMat( size.height, size.width, CV_8UC3 );
        state->right = cvCreateMat( size.height, size.width, CV_8UC3 );
        state->dispLeft = cvCreateMat( size.height, size.width, CV_16SC1 );
        state->dispRight = cvCreateMat( size.height, size.width, CV_16SC1 );
        state->ptrLeft = cvCreateMat( size.height, size.width, CV_32SC(pcn) );
        state->ptrRight = cvCreateMat( size.height, size.width, CV_32SC(pcn) );
        state->vtxBuf = cvCreateMat( 1, size.height*size.width*2, CV_32SC(vcn) );
        state->edgeBuf = cvCreateMat( 1, size.height*size.width*12 + 16, CV_32SC(ecn) );
    }

    if( !useDisparityGuess )
    {
        // no guess: start with everything marked occluded
        cvSet( state->dispLeft, cvScalarAll(OCCLUDED));
        cvSet( state->dispRight, cvScalarAll(OCCLUDED));
    }
    else
    {
        CV_ASSERT( dispLeft && dispRight );
        cvConvert( dispLeft, state->dispLeft );
        cvConvert( dispRight, state->dispRight );
    }

    // convert the floating-point parameters to fixed point (x DENOMINATOR)
    state2.Ithreshold = state->Ithreshold;
    state2.interactionRadius = state->interactionRadius;
    state2.lambda = cvRound(state->lambda*DENOMINATOR);
    state2.lambda1 = cvRound(state->lambda1*DENOMINATOR);
    state2.lambda2 = cvRound(state->lambda2*DENOMINATOR);
    state2.K = cvRound(state->K*DENOMINATOR);

    icvInitStereoConstTabs();
    icvInitGraySubpix( left, right, state->left, state->right );
    // random order of disparities for the expansion moves
    disp = (int*)cvStackAlloc( state->numberOfDisparities*sizeof(disp[0]) );
    _disp = cvMat( 1, state->numberOfDisparities, CV_32S, disp );
    cvRange( &_disp, state->minDisparity, state->minDisparity + state->numberOfDisparities );
    cvRandShuffle( &_disp, &rng );

    // derive unset parameters from the data-driven constant K
    if( state2.lambda < 0 && (state2.K < 0 || state2.lambda1 < 0 || state2.lambda2 < 0) )
    {
        float L = icvComputeK(state)*0.2f;
        state2.lambda = cvRound(L*DENOMINATOR);
    }

    if( state2.K < 0 )
        state2.K = state2.lambda*5;
    if( state2.lambda1 < 0 )
        state2.lambda1 = state2.lambda*3;
    if( state2.lambda2 < 0 )
        state2.lambda2 = state2.lambda;

    icvInitStereoTabs( &state2 );

    E = icvComputeEnergy( state, &state2, !useDisparityGuess );
    for( iter = 0; iter < state->maxIters; iter++ )
    {
        for( i = 0; i < state->numberOfDisparities; i++ )
        {
            int alpha = disp[i];
            int64 Enew = icvAlphaExpand( E, -alpha, state, &state2 );
            if( Enew < E )
            {
                nZeroExpansions = 0;   // progress made, reset stagnation counter
                E = Enew;
            }
            else if( ++nZeroExpansions >= state->numberOfDisparities )
                break;                 // a full cycle without improvement: converged
        }
    }

    if( dispLeft )
        cvConvert( state->dispLeft, dispLeft );
    if( dispRight )
        cvConvert( state->dispRight, dispRight );

    __END__;

    cvFree( &state2.orphans );
}
/** * Read an image into memory and return the information * * @param[in] filename File to load * @param[in] flags Flags * @param[in] mats Reference to C++ vector<Mat> object to hold the images * */ static bool imreadmulti_(const String& filename, int flags, std::vector<Mat>& mats) { /// Search for the relevant decoder to handle the imagery ImageDecoder decoder; #ifdef HAVE_GDAL if (flags != IMREAD_UNCHANGED && (flags & IMREAD_LOAD_GDAL) == IMREAD_LOAD_GDAL){ decoder = GdalDecoder().newDecoder(); } else{ #endif decoder = findDecoder(filename); #ifdef HAVE_GDAL } #endif /// if no decoder was found, return nothing. if (!decoder){ return 0; } /// set the filename in the driver decoder->setSource(filename); // read the header to make sure it succeeds if (!decoder->readHeader()) return 0; for (;;) { // grab the decoded type int type = decoder->type(); if( (flags & IMREAD_LOAD_GDAL) != IMREAD_LOAD_GDAL && flags != IMREAD_UNCHANGED ) { if ((flags & CV_LOAD_IMAGE_ANYDEPTH) == 0) type = CV_MAKETYPE(CV_8U, CV_MAT_CN(type)); if ((flags & CV_LOAD_IMAGE_COLOR) != 0 || ((flags & CV_LOAD_IMAGE_ANYCOLOR) != 0 && CV_MAT_CN(type) > 1)) type = CV_MAKETYPE(CV_MAT_DEPTH(type), 3); else type = CV_MAKETYPE(CV_MAT_DEPTH(type), 1); } // read the image data Mat mat(decoder->height(), decoder->width(), type); if (!decoder->readData(mat)) { // optionally rotate the data if EXIF' orientation flag says so if( (flags & IMREAD_IGNORE_ORIENTATION) == 0 && flags != IMREAD_UNCHANGED ) { ApplyExifOrientation(filename, mat); } break; } mats.push_back(mat); if (!decoder->nextPage()) { break; } } return !mats.empty(); }
/*F///////////////////////////////////////////////////////////////////////////////////////
//    Name:    cvMeanShift
//    Purpose: MeanShift algorithm
//    Context:
//    Parameters:
//      imgProb     - 2D object probability distribution
//      windowIn    - CvRect of CAMSHIFT Window intial size
//      numIters    - If CAMSHIFT iterates this many times, stop
//      windowOut   - Location, height and width of converged CAMSHIFT window
//      len         - If != NULL, return equivalent len
//      width       - If != NULL, return equivalent width
//      itersUsed   - Returns number of iterations CAMSHIFT took to converge
//    Returns:
//      The function itself returns the area found
//    Notes:
//F*/
// Implementation note: iteratively moves the window to the centroid of the
// probability mass inside it, clamping to the image, until the shift is
// below epsilon (squared) or max_iter iterations elapse. Returns the number
// of iterations performed; comp (if given) receives the final window and
// the zeroth moment of the last evaluated window as its area.
CV_IMPL int
cvMeanShift( const void* imgProb, CvRect windowIn,
             CvTermCriteria criteria, CvConnectedComp* comp )
{
    CvMoments moments;
    int i = 0, eps;
    CvMat stub, *mat = (CvMat*)imgProb;
    CvMat cur_win;
    CvRect cur_rect = windowIn;

    CV_FUNCNAME( "cvMeanShift" );

    if( comp )
        comp->rect = windowIn;   // pre-fill so the output is valid on early error

    moments.m00 = moments.m10 = moments.m01 = 0;

    __BEGIN__;

    CV_CALL( mat = cvGetMat( mat, &stub ));

    if( CV_MAT_CN( mat->type ) > 1 )
        CV_ERROR( CV_BadNumChannels, cvUnsupportedFormat );

    if( windowIn.height <= 0 || windowIn.width <= 0 )
        CV_ERROR( CV_StsBadArg, "Input window has non-positive sizes" );

    if( windowIn.x < 0 || windowIn.x + windowIn.width > mat->cols ||
        windowIn.y < 0 || windowIn.y + windowIn.height > mat->rows )
        CV_ERROR( CV_StsBadArg, "Initial window is not inside the image ROI" );

    CV_CALL( criteria = cvCheckTermCriteria( criteria, 1., 100 ));

    // compare squared displacement against squared epsilon
    eps = cvRound( criteria.epsilon * criteria.epsilon );

    for( i = 0; i < criteria.max_iter; i++ )
    {
        int dx, dy, nx, ny;
        double inv_m00;

        CV_CALL( cvGetSubRect( mat, &cur_win, cur_rect ));
        CV_CALL( cvMoments( &cur_win, &moments ));

        /* Calculating center of mass */
        if( fabs(moments.m00) < DBL_EPSILON )
            break;   // no mass in the window; nothing to track

        inv_m00 = moments.inv_sqrt_m00*moments.inv_sqrt_m00;
        // shift of the window so its center lands on the centroid
        // (window size never changes, so windowIn.{width,height} == cur_rect.{width,height})
        dx = cvRound( moments.m10 * inv_m00 - windowIn.width*0.5 );
        dy = cvRound( moments.m01 * inv_m00 - windowIn.height*0.5 );

        nx = cur_rect.x + dx;
        ny = cur_rect.y + dy;

        // clamp the moved window to the image bounds
        if( nx < 0 )
            nx = 0;
        else if( nx + cur_rect.width > mat->cols )
            nx = mat->cols - cur_rect.width;

        if( ny < 0 )
            ny = 0;
        else if( ny + cur_rect.height > mat->rows )
            ny = mat->rows - cur_rect.height;

        dx = nx - cur_rect.x;
        dy = ny - cur_rect.y;
        cur_rect.x = nx;
        cur_rect.y = ny;

        /* Check for coverage centers mass & window */
        if( dx*dx + dy*dy < eps )
            break;
    }

    __END__;

    if( comp )
    {
        comp->rect = cur_rect;
        comp->area = (float)moments.m00;
    }

    return i;
}
/**
 * Insert a format in an output stream. Only debugging, output could be truncated
 */
std::ostream &operator<<(std::ostream &stream, const colorspaces::Image::Format& fmt){
  const int channelCount = CV_MAT_CN(fmt.cvType);
  const int depthCode = CV_MAT_DEPTH(fmt.cvType);
  stream << "FMT(" << fmt.name
         << ";channels:" << channelCount
         << ";depth:" << depthCode
         << ")";
  return stream;
}
/* Convert QImage to cv::Mat.
 *
 * requiredMatType: desired output type; CV_CN_MAX channels means "keep the
 * source channel count". Supported depths: CV_8U, CV_16U, CV_32F; supported
 * channel counts: 1, 3, 4 (enforced by the Q_ASSERTs below).
 * requriedOrder [sic]: desired channel order of the output (gray/RGB/BGR/
 * RGBA/BGRA/ARGB, depending on channel count).
 *
 * Strategy: wrap the QImage data without copying (image2Mat_shared), then
 * adjust channel count/order, then adjust depth. Returns an empty Mat for a
 * null input image.
 */
cv::Mat image2Mat(const QImage &img, int requiredMatType, MatColorOrder requriedOrder)
{
    int targetDepth = CV_MAT_DEPTH(requiredMatType);
    int targetChannels = CV_MAT_CN(requiredMatType);
    Q_ASSERT(targetChannels==CV_CN_MAX || targetChannels==1 || targetChannels==3 || targetChannels==4);
    Q_ASSERT(targetDepth==CV_8U || targetDepth==CV_16U || targetDepth==CV_32F);

    if (img.isNull())
        return cv::Mat();

    //Find the closest image format that can be used in image2Mat_shared()
    QImage::Format format = findClosestFormat(img.format());
    QImage image = (format==img.format()) ? img : img.convertToFormat(format);

    /* mat0 aliases the QImage buffer; srcOrder reports its channel order. */
    MatColorOrder srcOrder;
    cv::Mat mat0 = image2Mat_shared(image, &srcOrder);

    //Adjust mat channells if needed.
    /* mat_adjustCn stays empty when no channel conversion is required. */
    cv::Mat mat_adjustCn;
    /* Full-opacity alpha value for the (intermediate) 8-bit data.
       NOTE(review): the 16U/32F values here are only used when synthesizing
       an alpha plane before depth conversion; depth is still 8U at this
       point (shared QImage data), so only the 255 case appears reachable —
       confirm against image2Mat_shared(). */
    const float maxAlpha = targetDepth==CV_8U ? 255 : (targetDepth==CV_16U ? 65535 : 1.0);
    if (targetChannels == CV_CN_MAX)
        targetChannels = mat0.channels();
    switch(targetChannels) {
    case 1:
        /* Any color source -> grayscale; ARGB is first repacked to BGRA. */
        if (mat0.channels() == 3) {
            cv::cvtColor(mat0, mat_adjustCn, CV_RGB2GRAY);
        } else if (mat0.channels() == 4) {
            if (srcOrder == MCO_BGRA)
                cv::cvtColor(mat0, mat_adjustCn, CV_BGRA2GRAY);
            else if (srcOrder == MCO_RGBA)
                cv::cvtColor(mat0, mat_adjustCn, CV_RGBA2GRAY);
            else//MCO_ARGB
                cv::cvtColor(argb2bgra(mat0), mat_adjustCn, CV_BGRA2GRAY);
        }
        break;
    case 3:
        if (mat0.channels() == 1) {
            cv::cvtColor(mat0, mat_adjustCn, requriedOrder == MCO_BGR ? CV_GRAY2BGR : CV_GRAY2RGB);
        } else if (mat0.channels() == 3) {
            /* 3-channel to 3-channel: only a swap between RGB and BGR. */
            if (requriedOrder != srcOrder)
                cv::cvtColor(mat0, mat_adjustCn, CV_RGB2BGR);
        } else if (mat0.channels() == 4) {
            if (srcOrder == MCO_ARGB) {
                /* No cvtColor code for ARGB; drop alpha via mixChannels.
                   Pairs are {srcIdx,dstIdx}: A(0) is skipped. */
                mat_adjustCn = cv::Mat(mat0.rows, mat0.cols, CV_MAKE_TYPE(mat0.type(), 3));
                int ARGB2RGB[] = {1,0, 2,1, 3,2};
                int ARGB2BGR[] = {1,2, 2,1, 3,0};
                cv::mixChannels(&mat0, 1, &mat_adjustCn, 1, requriedOrder == MCO_BGR ? ARGB2BGR : ARGB2RGB, 3);
            } else if (srcOrder == MCO_BGRA) {
                cv::cvtColor(mat0, mat_adjustCn, requriedOrder == MCO_BGR ? CV_BGRA2BGR : CV_BGRA2RGB);
            } else {//RGBA
                cv::cvtColor(mat0, mat_adjustCn, requriedOrder == MCO_BGR ? CV_RGBA2BGR : CV_RGBA2RGB);
            }
        }
        break;
    case 4:
        if (mat0.channels() == 1) {
            if (requriedOrder == MCO_ARGB) {
                /* Build ARGB from gray: synthesize an opaque alpha plane and
                   replicate the gray channel into R, G and B. */
                cv::Mat alphaMat(mat0.rows, mat0.cols, CV_MAKE_TYPE(mat0.type(), 1), cv::Scalar(maxAlpha));
                mat_adjustCn = cv::Mat(mat0.rows, mat0.cols, CV_MAKE_TYPE(mat0.type(), 4));
                cv::Mat in[] = {alphaMat, mat0};
                int from_to[] = {0,0, 1,1, 1,2, 1,3};
                cv::mixChannels(in, 2, &mat_adjustCn, 1, from_to, 4);
            } else if (requriedOrder == MCO_RGBA) {
                cv::cvtColor(mat0, mat_adjustCn, CV_GRAY2RGBA);
            } else {//MCO_BGRA
                cv::cvtColor(mat0, mat_adjustCn, CV_GRAY2BGRA);
            }
        } else if (mat0.channels() == 3) {
            if (requriedOrder == MCO_ARGB) {
                /* RGB -> ARGB: prepend an opaque alpha plane. */
                cv::Mat alphaMat(mat0.rows, mat0.cols, CV_MAKE_TYPE(mat0.type(), 1), cv::Scalar(maxAlpha));
                mat_adjustCn = cv::Mat(mat0.rows, mat0.cols, CV_MAKE_TYPE(mat0.type(), 4));
                cv::Mat in[] = {alphaMat, mat0};
                int from_to[] = {0,0, 1,1, 2,2, 3,3};
                cv::mixChannels(in, 2, &mat_adjustCn, 1, from_to, 4);
            } else if (requriedOrder == MCO_RGBA) {
                cv::cvtColor(mat0, mat_adjustCn, CV_RGB2RGBA);
            } else {//MCO_BGRA
                cv::cvtColor(mat0, mat_adjustCn, CV_RGB2BGRA);
            }
        } else if (mat0.channels() == 4) {
            if (srcOrder != requriedOrder)
                mat_adjustCn = adjustChannelsOrder(mat0, srcOrder, requriedOrder);
        }
        break;
    default:
        break;
    }

    //Adjust depth if needed.
    /* 8U output: clone when we still alias the QImage buffer (mat_adjustCn
       empty), otherwise the conversion already produced owned data. */
    if (targetDepth == CV_8U)
        return mat_adjustCn.empty() ? mat0.clone() : mat_adjustCn;

    if (mat_adjustCn.empty())
        mat_adjustCn = mat0;
    cv::Mat mat_adjustDepth;
    /* 8U -> 16U scales by 255 (0..255 -> 0..65025); 8U -> 32F scales to 0..1. */
    mat_adjustCn.convertTo(mat_adjustDepth, CV_MAKE_TYPE(targetDepth, mat_adjustCn.channels()), targetDepth == CV_16U ? 255.0 : 1/255.0);
    return mat_adjustDepth;
}
/* Extracts a quadrangular patch from an image using an affine 2x3 map.
 *
 * srcarr - source image (1 or 3 channels)
 * dstarr - destination patch; same channel count as source. Depths must be
 *          equal, or src 8u with dst 32f (handled by a dedicated table slot).
 * mat    - 2x3 affine transformation (32fC1 or 64fC1), copied into m[6].
 *
 * The low-level per-depth workers are looked up in lazily initialized
 * function tables (gq_tab[0] for 1 channel, gq_tab[1] for 3 channels).
 */
CV_IMPL void
cvGetQuadrangleSubPix( const void* srcarr, void* dstarr, const CvMat* mat )
{
    /* Dispatch tables, filled once on first call (not thread-safe init --
       legacy OpenCV convention). */
    static CvFuncTable gq_tab[2];
    static int inittab = 0;
    CV_FUNCNAME( "cvGetQuadrangleSubPix" );

    __BEGIN__;

    CvMat srcstub, *src = (CvMat*)srcarr;
    CvMat dststub, *dst = (CvMat*)dstarr;
    CvSize src_size, dst_size;
    CvGetQuadrangleSubPixFunc func;
    float m[6];          /* flattened 2x3 transform, row-major */
    int k, cn;

    if( !inittab )
    {
        icvInitGetQuadrangleSubPixC1RTable( gq_tab + 0 );
        icvInitGetQuadrangleSubPixC3RTable( gq_tab + 1 );
        inittab = 1;
    }

    if( !CV_IS_MAT(src))
        CV_CALL( src = cvGetMat( src, &srcstub ));

    if( !CV_IS_MAT(dst))
        CV_CALL( dst = cvGetMat( dst, &dststub ));

    if( !CV_IS_MAT(mat))
        CV_ERROR( CV_StsBadArg, "map matrix is not valid" );

    cn = CV_MAT_CN( src->type );

    /* Only 1- and 3-channel data are supported, and src/dst must match. */
    if( (cn != 1 && cn != 3) || !CV_ARE_CNS_EQ( src, dst ))
        CV_ERROR( CV_StsUnsupportedFormat, "" );

    src_size = cvGetMatSize( src );
    dst_size = cvGetMatSize( dst );

    /*if( dst_size.width > src_size.width || dst_size.height > src_size.height )
        CV_ERROR( CV_StsBadSize, "destination ROI must not be larger than source ROI" );*/

    if( mat->rows != 2 || mat->cols != 3 )
        CV_ERROR( CV_StsBadArg, "Transformation matrix must be 2x3" );

    /* Copy the transform into m[], converting from double if necessary.
       Row 1 is addressed via the raw step to honor non-contiguous layout. */
    if( CV_MAT_TYPE( mat->type ) == CV_32FC1 )
    {
        for( k = 0; k < 3; k++ )
        {
            m[k] = mat->data.fl[k];
            m[3 + k] = ((float*)(mat->data.ptr + mat->step))[k];
        }
    }
    else if( CV_MAT_TYPE( mat->type ) == CV_64FC1 )
    {
        for( k = 0; k < 3; k++ )
        {
            m[k] = (float)mat->data.db[k];
            m[3 + k] = (float)((double*)(mat->data.ptr + mat->step))[k];
        }
    }
    else
        CV_ERROR( CV_StsUnsupportedFormat, "The transformation matrix should have 32fC1 or 64fC1 type" );

    if( CV_ARE_DEPTHS_EQ( src, dst ))
    {
        /* Same depth: pick the worker by the source depth. */
        func = (CvGetQuadrangleSubPixFunc)(gq_tab[cn != 1].fn_2d[CV_MAT_DEPTH(src->type)]);
    }
    else
    {
        /* Mixed depths: only 8u -> 32f is supported; its worker is stored
           at slot 1 of the table. */
        if( CV_MAT_DEPTH( src->type ) != CV_8U || CV_MAT_DEPTH( dst->type ) != CV_32F )
            CV_ERROR( CV_StsUnsupportedFormat, "" );

        func = (CvGetQuadrangleSubPixFunc)(gq_tab[cn != 1].fn_2d[1]);
    }

    if( !func )
        CV_ERROR( CV_StsUnsupportedFormat, "" );

    IPPI_CALL( func( src->data.ptr, src->step, src_size,
                     dst->data.ptr, dst->step, dst_size, m ));

    __END__;
}
/* Shi-Tomasi / Harris corner detector ("good features to track").
 *
 * image         - single-channel input image
 * eigImage/tempImage - optional 32fC1 scratch images (allocated internally
 *                 when NULL); eigImage receives the corner response map
 * corners       - output array of at most *corner_count points
 * corner_count  - in: capacity; out: number of corners found
 * quality_level - fraction of the maximal response used as a threshold
 * min_distance  - minimal Euclidean distance between returned corners
 * maskImage     - optional 8u mask limiting where corners are picked
 * use_harris    - nonzero: Harris response, else minimal eigenvalue
 *
 * Pipeline: response map -> threshold at max*quality -> 3x3 dilation, then
 * local maxima (pixels equal to their dilated value) are collected, sorted
 * by strength and greedily accepted subject to the min-distance constraint.
 */
CV_IMPL void
cvGoodFeaturesToTrack( const void* image, void* eigImage, void* tempImage,
                       CvPoint2D32f* corners, int *corner_count,
                       double quality_level, double min_distance,
                       const void* maskImage, int block_size,
                       int use_harris, double harris_k )
{
    CvMat* _eigImg = 0;   /* internally allocated scratch (freed at the end) */
    CvMat* _tmpImg = 0;

    CV_FUNCNAME( "cvGoodFeaturesToTrack" );

    __BEGIN__;

    double max_val = 0;
    int max_count = 0;
    int count = 0;
    int x, y, i, k = 0;
    int min_dist;
    /* when selecting points, use integer coordinates */
    CvPoint *ptr = (CvPoint *) corners;

    /* process floating-point images using integer arithmetics */
    /* (float bit patterns of non-negative values compare like integers) */
    int *eig_data = 0;
    int *tmp_data = 0;
    int **ptr_data = 0;
    uchar *mask_data = 0;
    int  mask_step = 0;
    CvSize size;

    int coi1 = 0, coi2 = 0, coi3 = 0;
    CvMat  stub, *img = (CvMat*)image;
    CvMat  eig_stub, *eig = (CvMat*)eigImage;
    CvMat  tmp_stub, *tmp = (CvMat*)tempImage;
    CvMat  mask_stub, *mask = (CvMat*)maskImage;

    if( corner_count )
    {
        max_count = *corner_count;
        *corner_count = 0;
    }

    CV_CALL( img = cvGetMat( img, &stub, &coi1 ));

    /* Allocate the scratch images when the caller did not provide them. */
    if( eig )
    {
        CV_CALL( eig = cvGetMat( eig, &eig_stub, &coi2 ));
    }
    else
    {
        CV_CALL( _eigImg = cvCreateMat( img->rows, img->cols, CV_32FC1 ));
        eig = _eigImg;
    }

    if( tmp )
    {
        CV_CALL( tmp = cvGetMat( tmp, &tmp_stub, &coi3 ));
    }
    else
    {
        CV_CALL( _tmpImg = cvCreateMat( img->rows, img->cols, CV_32FC1 ));
        tmp = _tmpImg;
    }

    if( mask )
    {
        CV_CALL( mask = cvGetMat( mask, &mask_stub ));
        if( !CV_IS_MASK_ARR( mask ))
        {
            CV_ERROR( CV_StsBadMask, "" );
        }
    }

    if( coi1 != 0 || coi2 != 0 || coi3 != 0 )
        CV_ERROR( CV_BadCOI, "" );

    if( CV_MAT_CN(img->type) != 1 ||
        CV_MAT_CN(eig->type) != 1 ||
        CV_MAT_CN(tmp->type) != 1 )
        CV_ERROR( CV_BadNumChannels, cvUnsupportedFormat );

    if( CV_MAT_DEPTH(tmp->type) != CV_32F ||
        CV_MAT_DEPTH(eig->type) != CV_32F )
        CV_ERROR( CV_BadDepth, cvUnsupportedFormat );

    if( !corners || !corner_count )
        CV_ERROR( CV_StsNullPtr, "" );

    if( max_count <= 0 )
        CV_ERROR( CV_StsBadArg, "maximal corners number is non positive" );

    if( quality_level <= 0 || min_distance < 0 )
        CV_ERROR( CV_StsBadArg, "quality level or min distance are non positive" );

    /* Corner response map. */
    if( use_harris )
    {
        CV_CALL( cvCornerHarris( img, eig, block_size, 3, harris_k ));
    }
    else
    {
        CV_CALL( cvCornerMinEigenVal( img, eig, block_size, 3 ));
    }

    CV_CALL( cvMinMaxLoc( eig, 0, &max_val, 0, 0, mask ));
    CV_CALL( cvThreshold( eig, eig, max_val * quality_level, 0, CV_THRESH_TOZERO ));
    /* Dilated response used below for non-maximum suppression. */
    CV_CALL( cvDilate( eig, tmp ));

    min_dist = cvRound( min_distance * min_distance );

    size = cvGetMatSize( img );
    /* tmp is reused as storage for the pointer list of detected maxima. */
    ptr_data = (int**)(tmp->data.ptr);
    eig_data = (int*)(eig->data.ptr);
    tmp_data = (int*)(tmp->data.ptr);

    if( mask )
    {
        mask_data = (uchar*)(mask->data.ptr);
        mask_step = mask->step;
    }

    /* collect list of pointers to features - put them into temporary image */
    for( y = 1, k = 0; y < size.height - 1; y++ )
    {
        /* advance row pointers by byte step (hence the char*& casts) */
        (char*&)eig_data += eig->step;
        (char*&)tmp_data += tmp->step;
        mask_data += mask_step;   /* mask_step is 0 when there is no mask */

        for( x = 1; x < size.width - 1; x++ )
        {
            int val = eig_data[x];
            /* a pixel equal to its dilated value is a local maximum */
            if( val != 0 && val == tmp_data[x] && (!mask || mask_data[x]) )
                ptr_data[k++] = eig_data + x;
        }
    }

    /* sort candidates by decreasing response strength */
    icvSortFeatures( ptr_data, k, 0 );

    /* select the strongest features */
    for( i = 0; i < k; i++ )
    {
        /* recover (x, y) from the pointer offset into eig */
        int j = count, ofs = (int)((uchar*)(ptr_data[i]) - eig->data.ptr);
        y = ofs / eig->step;
        x = (ofs - y * eig->step)/sizeof(float);

        if( min_dist != 0 )
        {
            /* reject the candidate if it is too close to an accepted one */
            for( j = 0; j < count; j++ )
            {
                int dx = x - ptr[j].x;
                int dy = y - ptr[j].y;
                int dist = dx * dx + dy * dy;

                if( dist < min_dist )
                    break;
            }
        }

        if( j == count )
        {
            ptr[count].x = x;
            ptr[count].y = y;
            if( ++count >= max_count )
                break;
        }
    }

    /* convert points to floating-point format */
    for( i = 0; i < count; i++ )
    {
        assert( (unsigned)ptr[i].x < (unsigned)size.width &&
                (unsigned)ptr[i].y < (unsigned)size.height );

        corners[i].x = (float)ptr[i].x;
        corners[i].y = (float)ptr[i].y;
    }

    *corner_count = count;

    __END__;

    cvReleaseMat( &_eigImg );
    cvReleaseMat( &_tmpImg );
}
/** Returns the number of matrix channels encoded in the flags field. */
inline int GpuMat::channels() const
{
    const int cn = CV_MAT_CN(flags);
    return cn;
}
/* OpenCL path of matchTemplate for the CV_TM_CCOEFF_NORMED method.
 *
 * First computes the plain cross-correlation (CV_TM_CCORR) into _result,
 * then normalizes it on the GPU using integral images (sums and squared
 * sums) of the input and precomputed scalar statistics of the template.
 * Returns false when the OpenCL kernel is unavailable so the caller can
 * fall back to the CPU implementation.
 */
static bool matchTemplate_CCOEFF_NORMED(InputArray _image, InputArray _templ, OutputArray _result)
{
    matchTemplate(_image, _templ, _result, CV_TM_CCORR);

    UMat temp, image_sums, image_sqsums;
    /* 32f integral images provide per-window sums in O(1) inside the kernel. */
    integral(_image, image_sums, image_sqsums, CV_32F, CV_32F);

    int type = image_sums.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);

    /* Kernel specialized at build time for the element type / channel count. */
    ocl::Kernel k("matchTemplate_CCOEFF_NORMED", ocl::imgproc::match_template_oclsrc,
                  format("-D CCOEFF_NORMED -D type=%s -D elem_type=%s -D cn=%d",
                         ocl::typeToStr(type), ocl::typeToStr(depth), cn));
    if (k.empty())
        return false;

    UMat templ = _templ.getUMat();
    Size size = _image.size(), tsize = templ.size();
    /* Result size is (W - w + 1) x (H - h + 1), as for all matchTemplate modes. */
    _result.create(size.height - templ.rows + 1, size.width - templ.cols + 1, CV_32F);
    UMat result = _result.getUMat();

    float scale = 1.f / tsize.area();

    if (cn == 1)
    {
        /* Template statistics: sum and (centered) sum of squares. */
        float templ_sum = (float)sum(templ)[0];

        multiply(templ, templ, temp, 1, CV_32F);
        float templ_sqsum = (float)sum(temp)[0];

        templ_sqsum -= scale * templ_sum * templ_sum;
        templ_sum   *= scale;

        /* A (near-)constant template has zero variance: the correlation
           coefficient is undefined, report perfect match everywhere. */
        if (templ_sqsum < DBL_EPSILON)
        {
            result = Scalar::all(1);
            return true;
        }

        k.args(ocl::KernelArg::ReadOnlyNoSize(image_sums),
               ocl::KernelArg::ReadOnlyNoSize(image_sqsums),
               ocl::KernelArg::ReadWrite(result), templ.rows, templ.cols,
               scale, templ_sum, templ_sqsum);
    }
    else
    {
        /* Multi-channel variant: per-channel sums, single pooled variance. */
        Vec4f templ_sum = Vec4f::all(0), templ_sqsum = Vec4f::all(0);

        templ_sum = sum(templ);

        multiply(templ, templ, temp, 1, CV_32F);
        templ_sqsum = sum(temp);

        float templ_sqsum_sum = 0;
        for (int i = 0; i < cn; i ++)
            templ_sqsum_sum += templ_sqsum[i] - scale * templ_sum[i] * templ_sum[i];

        templ_sum *= scale;

        if (templ_sqsum_sum < DBL_EPSILON)
        {
            result = Scalar::all(1);
            return true;
        }

        /* 2-channel kernels take 2 sums; 3/4-channel kernels take all 4. */
        if (cn == 2)
            k.args(ocl::KernelArg::ReadOnlyNoSize(image_sums),
                   ocl::KernelArg::ReadOnlyNoSize(image_sqsums),
                   ocl::KernelArg::ReadWrite(result), templ.rows, templ.cols,
                   scale, templ_sum[0], templ_sum[1], templ_sqsum_sum);
        else
            k.args(ocl::KernelArg::ReadOnlyNoSize(image_sums),
                   ocl::KernelArg::ReadOnlyNoSize(image_sqsums),
                   ocl::KernelArg::ReadWrite(result), templ.rows, templ.cols,
                   scale, templ_sum[0], templ_sum[1], templ_sum[2], templ_sum[3],
                   templ_sqsum_sum);
    }

    size_t globalsize[2] = { result.cols, result.rows };
    return k.run(2, globalsize, NULL, false);
}
bool SunRasterDecoder::readHeader() { bool result = false; if( !m_strm.open( m_filename )) return false; try { m_strm.skip( 4 ); m_width = m_strm.getDWord(); m_height = m_strm.getDWord(); m_bpp = m_strm.getDWord(); int palSize = 3*(1 << m_bpp); m_strm.skip( 4 ); m_encoding = (SunRasType)m_strm.getDWord(); m_maptype = (SunRasMapType)m_strm.getDWord(); m_maplength = m_strm.getDWord(); if( m_width > 0 && m_height > 0 && (m_bpp == 1 || m_bpp == 8 || m_bpp == 24 || m_bpp == 32) && (m_type == RAS_OLD || m_type == RAS_STANDARD || (m_type == RAS_BYTE_ENCODED && m_bpp == 8) || m_type == RAS_FORMAT_RGB) && ((m_maptype == RMT_NONE && m_maplength == 0) || (m_maptype == RMT_EQUAL_RGB && m_maplength <= palSize && m_bpp <= 8))) { memset( m_palette, 0, sizeof(m_palette)); if( m_maplength != 0 ) { uchar buffer[256*3]; if( m_strm.getBytes( buffer, m_maplength ) == m_maplength ) { int i; palSize = m_maplength/3; for( i = 0; i < palSize; i++ ) { m_palette[i].b = buffer[i + 2*palSize]; m_palette[i].g = buffer[i + palSize]; m_palette[i].r = buffer[i]; m_palette[i].a = 0; } m_type = IsColorPalette( m_palette, m_bpp ) ? CV_8UC3 : CV_8UC1; m_offset = m_strm.getPos(); assert( m_offset == 32 + m_maplength ); result = true; } } else { m_type = m_bpp > 8 ? CV_8UC3 : CV_8UC1; if( CV_MAT_CN(m_type) == 1 ) FillGrayPalette( m_palette, m_bpp ); m_offset = m_strm.getPos(); assert( m_offset == 32 + m_maplength ); result = true; } } } catch(...) { } if( !result ) { m_offset = -1; m_width = m_height = -1; m_strm.close(); } return result; }
/* FFT-based cross-correlation of img with templ, written into corr.
 *
 * corrsize/ctype describe the output; anchor/delta/borderType control the
 * window placement, an additive offset (single-channel output only, see the
 * assertion) and the virtual border used when the image ROI is smaller than
 * the correlation window. The image is processed in tiles: each tile and
 * each template plane are transformed with dft(), multiplied in the
 * frequency domain (mulSpectrums with conjugation => correlation), inverted,
 * and the per-channel results are summed (or scattered with mixChannels when
 * the output is multi-channel).
 */
void crossCorr( const Mat& img, const Mat& _templ, Mat& corr,
                Size corrsize, int ctype,
                Point anchor, double delta, int borderType )
{
    /* Tile size heuristic: ~4.5x the template, at least ~256 px per side. */
    const double blockScale = 4.5;
    const int minBlockSize = 256;
    std::vector<uchar> buf;

    Mat templ = _templ;
    int depth = img.depth(), cn = img.channels();
    int tdepth = templ.depth(), tcn = templ.channels();
    int cdepth = CV_MAT_DEPTH(ctype), ccn = CV_MAT_CN(ctype);

    CV_Assert( img.dims <= 2 && templ.dims <= 2 && corr.dims <= 2 );

    /* Promote the template so that it can be multiplied with the image
       spectrum (at least 32f, or the image depth if that is higher). */
    if( depth != tdepth && tdepth != std::max(CV_32F, depth) )
    {
        _templ.convertTo(templ, std::max(CV_32F, depth));
        tdepth = templ.depth();
    }

    CV_Assert( depth == tdepth || tdepth == CV_32F);
    CV_Assert( corrsize.height <= img.rows + templ.rows - 1 &&
               corrsize.width <= img.cols + templ.cols - 1 );

    /* delta is only supported for single-channel output. */
    CV_Assert( ccn == 1 || delta == 0 );

    corr.create(corrsize, ctype);

    /* Working depth of the DFT buffers. */
    int maxDepth = depth > CV_8S ? CV_64F : std::max(std::max(CV_32F, tdepth), cdepth);
    Size blocksize, dftsize;

    blocksize.width = cvRound(templ.cols*blockScale);
    blocksize.width = std::max( blocksize.width, minBlockSize - templ.cols + 1 );
    blocksize.width = std::min( blocksize.width, corr.cols );
    blocksize.height = cvRound(templ.rows*blockScale);
    blocksize.height = std::max( blocksize.height, minBlockSize - templ.rows + 1 );
    blocksize.height = std::min( blocksize.height, corr.rows );

    /* DFT size must cover block + template - 1 (linear convolution). */
    dftsize.width = std::max(getOptimalDFTSize(blocksize.width + templ.cols - 1), 2);
    dftsize.height = getOptimalDFTSize(blocksize.height + templ.rows - 1);
    if( dftsize.width <= 0 || dftsize.height <= 0 )
        CV_Error( CV_StsOutOfRange, "the input arrays are too big" );

    // recompute block size
    blocksize.width = dftsize.width - templ.cols + 1;
    blocksize.width = MIN( blocksize.width, corr.cols );
    blocksize.height = dftsize.height - templ.rows + 1;
    blocksize.height = MIN( blocksize.height, corr.rows );

    /* One DFT plane per template channel, stacked vertically. */
    Mat dftTempl( dftsize.height*tcn, dftsize.width, maxDepth );
    Mat dftImg( dftsize, maxDepth );

    /* Shared scratch buffer sized for the largest single-plane conversion. */
    int i, k, bufSize = 0;
    if( tcn > 1 && tdepth != maxDepth )
        bufSize = templ.cols*templ.rows*CV_ELEM_SIZE(tdepth);

    if( cn > 1 && depth != maxDepth )
        bufSize = std::max( bufSize, (blocksize.width + templ.cols - 1)*
            (blocksize.height + templ.rows - 1)*CV_ELEM_SIZE(depth));

    if( (ccn > 1 || cn > 1) && cdepth != maxDepth )
        bufSize = std::max( bufSize, blocksize.width*blocksize.height*CV_ELEM_SIZE(cdepth));

    buf.resize(bufSize);

    // compute DFT of each template plane
    for( k = 0; k < tcn; k++ )
    {
        int yofs = k*dftsize.height;
        Mat src = templ;
        Mat dst(dftTempl, Rect(0, yofs, dftsize.width, dftsize.height));
        Mat dst1(dftTempl, Rect(0, yofs, templ.cols, templ.rows));

        if( tcn > 1 )
        {
            /* Extract channel k (into dst1 directly when depths match). */
            src = tdepth == maxDepth ? dst1 : Mat(templ.size(), tdepth, &buf[0]);
            int pairs[] = {k, 0};
            mixChannels(&templ, 1, &src, 1, pairs, 1);
        }

        if( dst1.data != src.data )
            src.convertTo(dst1, dst1.depth());

        /* Zero the right padding; dft() itself handles the rows below via
           the nonzero-rows argument. */
        if( dst.cols > templ.cols )
        {
            Mat part(dst, Range(0, templ.rows), Range(templ.cols, dst.cols));
            part = Scalar::all(0);
        }
        dft(dst, dst, 0, templ.rows);
    }

    int tileCountX = (corr.cols + blocksize.width - 1)/blocksize.width;
    int tileCountY = (corr.rows + blocksize.height - 1)/blocksize.height;
    int tileCount = tileCountX * tileCountY;

    Size wholeSize = img.size();
    Point roiofs(0,0);
    Mat img0 = img;

    /* Unless BORDER_ISOLATED is requested, expand the ROI to the whole
       parent image so real pixels are used instead of synthetic border. */
    if( !(borderType & BORDER_ISOLATED) )
    {
        img.locateROI(wholeSize, roiofs);
        img0.adjustROI(roiofs.y, wholeSize.height-img.rows-roiofs.y,
                       roiofs.x, wholeSize.width-img.cols-roiofs.x);
    }
    borderType |= BORDER_ISOLATED;

    // calculate correlation by blocks
    for( i = 0; i < tileCount; i++ )
    {
        int x = (i%tileCountX)*blocksize.width;
        int y = (i/tileCountX)*blocksize.height;

        /* bsz: output tile; dsz: input window needed to produce it. */
        Size bsz(std::min(blocksize.width, corr.cols - x),
                 std::min(blocksize.height, corr.rows - y));
        Size dsz(bsz.width + templ.cols - 1, bsz.height + templ.rows - 1);
        int x0 = x - anchor.x + roiofs.x, y0 = y - anchor.y + roiofs.y;
        int x1 = std::max(0, x0), y1 = std::max(0, y0);
        int x2 = std::min(img0.cols, x0 + dsz.width);
        int y2 = std::min(img0.rows, y0 + dsz.height);
        Mat src0(img0, Range(y1, y2), Range(x1, x2));
        Mat dst(dftImg, Rect(0, 0, dsz.width, dsz.height));
        Mat dst1(dftImg, Rect(x1-x0, y1-y0, x2-x1, y2-y1));
        Mat cdst(corr, Rect(x, y, bsz.width, bsz.height));

        for( k = 0; k < cn; k++ )
        {
            Mat src = src0;
            dftImg = Scalar::all(0);

            if( cn > 1 )
            {
                src = depth == maxDepth ? dst1 : Mat(y2-y1, x2-x1, depth, &buf[0]);
                int pairs[] = {k, 0};
                mixChannels(&src0, 1, &src, 1, pairs, 1);
            }

            if( dst1.data != src.data )
                src.convertTo(dst1, dst1.depth());

            /* Pad with border pixels when the window sticks out of the image. */
            if( x2 - x1 < dsz.width || y2 - y1 < dsz.height )
                copyMakeBorder(dst1, dst, y1-y0, dst.rows-dst1.rows-(y1-y0),
                               x1-x0, dst.cols-dst1.cols-(x1-x0), borderType);

            dft( dftImg, dftImg, 0, dsz.height );
            Mat dftTempl1(dftTempl, Rect(0, tcn > 1 ? k*dftsize.height : 0,
                                         dftsize.width, dftsize.height));
            /* conjB = true turns convolution into correlation. */
            mulSpectrums(dftImg, dftTempl1, dftImg, 0, true);
            dft( dftImg, dftImg, DFT_INVERSE + DFT_SCALE, bsz.height );

            src = dftImg(Rect(0, 0, bsz.width, bsz.height));

            if( ccn > 1 )
            {
                /* Multi-channel output: write this channel's correlation
                   into channel k of the output tile. */
                if( cdepth != maxDepth )
                {
                    Mat plane(bsz, cdepth, &buf[0]);
                    src.convertTo(plane, cdepth, 1, delta);
                    src = plane;
                }
                int pairs[] = {0, k};
                mixChannels(&src, 1, &cdst, 1, pairs, 1);
            }
            else
            {
                /* Single-channel output: accumulate the per-channel results. */
                if( k == 0 )
                    src.convertTo(cdst, cdepth, 1, delta);
                else
                {
                    if( maxDepth != cdepth )
                    {
                        Mat plane(bsz, cdepth, &buf[0]);
                        src.convertTo(plane, cdepth);
                        src = plane;
                    }
                    add(src, cdst, cdst);
                }
            }
        }
    }
}
/* Decodes an in-memory encoded image buffer.
 *
 * buf     - continuous Mat holding the encoded bytes
 * flags   - CV_LOAD_IMAGE_* flags (-1 means "as is")
 * hdrtype - LOAD_CVMAT / LOAD_IMAGE / LOAD_MAT selects the returned header
 * mat     - output Mat, used only for LOAD_MAT
 *
 * Returns a pointer to the created CvMat / IplImage / the given Mat, or 0
 * on failure. If the decoder cannot read directly from memory, the buffer
 * is spilled to a temporary file which is removed afterwards.
 */
static void*
imdecode_( const Mat& buf, int flags, int hdrtype, Mat* mat=0 )
{
    CV_Assert(buf.data && buf.isContinuous());
    IplImage* image = 0;
    CvMat *matrix = 0;
    Mat temp, *data = &temp;
    string filename = tempfile();
    bool removeTempFile = false;

    ImageDecoder decoder = findDecoder(buf);
    if( decoder.empty() )
        return 0;

    /* Fallback for decoders without in-memory support: write the buffer to
       a temp file and point the decoder at it. */
    if( !decoder->setSource(buf) )
    {
        FILE* f = fopen( filename.c_str(), "wb" );
        if( !f )
            return 0;
        removeTempFile = true;
        size_t bufSize = buf.cols*buf.rows*buf.elemSize();
        fwrite( &buf.data[0], 1, bufSize, f );
        fclose(f);
        decoder->setSource(filename);
    }

    if( !decoder->readHeader() )
    {
        if( removeTempFile )
            remove(filename.c_str());
        return 0;
    }

    CvSize size;
    size.width = decoder->width();
    size.height = decoder->height();

    /* Translate the load flags into the requested output type
       (8-bit unless ANYDEPTH; 3 channels for COLOR, 1 otherwise). */
    int type = decoder->type();
    if( flags != -1 )
    {
        if( (flags & CV_LOAD_IMAGE_ANYDEPTH) == 0 )
            type = CV_MAKETYPE(CV_8U, CV_MAT_CN(type));

        if( (flags & CV_LOAD_IMAGE_COLOR) != 0 ||
            ((flags & CV_LOAD_IMAGE_ANYCOLOR) != 0 && CV_MAT_CN(type) > 1) )
            type = CV_MAKETYPE(CV_MAT_DEPTH(type), 3);
        else
            type = CV_MAKETYPE(CV_MAT_DEPTH(type), 1);
    }

    /* Allocate the requested header; 'temp' aliases the C structures so
       the decoder can always write through a Mat. */
    if( hdrtype == LOAD_CVMAT || hdrtype == LOAD_MAT )
    {
        if( hdrtype == LOAD_CVMAT )
        {
            matrix = cvCreateMat( size.height, size.width, type );
            temp = cvarrToMat(matrix);
        }
        else
        {
            mat->create( size.height, size.width, type );
            data = mat;
        }
    }
    else
    {
        image = cvCreateImage( size, cvIplDepth(type), CV_MAT_CN(type) );
        temp = cvarrToMat(image);
    }

    bool code = decoder->readData( *data );
    if( removeTempFile )
        remove(filename.c_str());

    /* On failure release whichever header was allocated. */
    if( !code )
    {
        cvReleaseImage( &image );
        cvReleaseMat( &matrix );
        if( mat )
            mat->release();
        return 0;
    }

    return hdrtype == LOAD_CVMAT ? (void*)matrix :
        hdrtype == LOAD_IMAGE ? (void*)image : (void*)mat;
}
/* Applies a fixed-level (or Otsu-determined) threshold to an array.
 *
 * srcarr/dstarr - 8u or 32f arrays; multi-channel data is reshaped to one
 *                 channel and thresholded per scalar. When depths differ,
 *                 only BINARY/BINARY_INV into an 8uC1 destination is allowed.
 * thresh/maxval - threshold level and the value used by BINARY modes.
 * type          - CV_THRESH_* mode, optionally OR-ed with CV_THRESH_OTSU.
 *
 * Returns the threshold actually used (computed one for Otsu). Fast IPP
 * paths are used when the corresponding function pointers are loaded;
 * otherwise the generic icvThresh_* implementations run.
 */
CV_IMPL double
cvThreshold( const void* srcarr, void* dstarr, double thresh, double maxval, int type )
{
    CvHistogram* hist = 0;   /* only allocated for the Otsu path */

    CV_FUNCNAME( "cvThreshold" );

    __BEGIN__;

    CvSize roi;
    int src_step, dst_step;
    CvMat src_stub, *src = (CvMat*)srcarr;
    CvMat dst_stub, *dst = (CvMat*)dstarr;
    CvMat src0, dst0;
    int coi1 = 0, coi2 = 0;
    int ithresh, imaxval, cn;
    bool use_otsu;

    CV_CALL( src = cvGetMat( src, &src_stub, &coi1 ));
    CV_CALL( dst = cvGetMat( dst, &dst_stub, &coi2 ));

    if( coi1 + coi2 )
        CV_ERROR( CV_BadCOI, "COI is not supported by the function" );

    if( !CV_ARE_CNS_EQ( src, dst ) )
        CV_ERROR( CV_StsUnmatchedFormats, "Both arrays must have equal number of channels" );

    /* Thresholding is per-scalar: fold channels into extra columns. */
    cn = CV_MAT_CN(src->type);
    if( cn > 1 )
    {
        src = cvReshape( src, &src0, 1 );
        dst = cvReshape( dst, &dst0, 1 );
    }

    use_otsu = (type & ~CV_THRESH_MASK) == CV_THRESH_OTSU;
    type &= CV_THRESH_MASK;

    if( use_otsu )
    {
        /* Compute the optimal threshold from a 256-bin histogram. */
        float _ranges[] = { 0, 256 };
        float* ranges = _ranges;
        int hist_size = 256;
        void* srcarr0 = src;

        if( CV_MAT_TYPE(src->type) != CV_8UC1 )
            CV_ERROR( CV_StsNotImplemented, "Otsu method can only be used with 8uC1 images" );

        CV_CALL( hist = cvCreateHist( 1, &hist_size, CV_HIST_ARRAY, &ranges ));
        cvCalcArrHist( &srcarr0, hist );
        thresh = cvFloor(icvGetThreshVal_Otsu( hist ));
    }

    if( !CV_ARE_DEPTHS_EQ( src, dst ) )
    {
        /* Mixed-depth path: express BINARY(_INV) via compare + mask. */
        if( CV_MAT_TYPE(dst->type) != CV_8UC1 )
            CV_ERROR( CV_StsUnsupportedFormat, "In case of different types destination should be 8uC1" );

        if( type != CV_THRESH_BINARY && type != CV_THRESH_BINARY_INV )
            CV_ERROR( CV_StsBadArg,
            "In case of different types only CV_THRESH_BINARY "
            "and CV_THRESH_BINARY_INV thresholding types are supported" );

        if( maxval < 0 )
        {
            CV_CALL( cvSetZero( dst ));
        }
        else
        {
            /* cvCmpS produces 0/255; AND with maxval rescales when needed. */
            CV_CALL( cvCmpS( src, thresh, dst, type == CV_THRESH_BINARY ? CV_CMP_GT : CV_CMP_LE ));
            if( maxval < 255 )
                CV_CALL( cvAndS( dst, cvScalarAll( maxval ), dst ));
        }
        EXIT;
    }

    if( !CV_ARE_SIZES_EQ( src, dst ) )
        CV_ERROR( CV_StsUnmatchedSizes, "" );

    roi = cvGetMatSize( src );
    /* Contiguous data can be processed as a single long row. */
    if( CV_IS_MAT_CONT( src->type & dst->type ))
    {
        roi.width *= roi.height;
        roi.height = 1;
        src_step = dst_step = CV_STUB_STEP;
    }
    else
    {
        src_step = src->step;
        dst_step = dst->step;
    }

    switch( CV_MAT_DEPTH(src->type) )
    {
    case CV_8U:
        ithresh = cvFloor(thresh);
        imaxval = cvRound(maxval);
        if( type == CV_THRESH_TRUNC )
            imaxval = ithresh;
        imaxval = CV_CAST_8U(imaxval);

        /* Threshold outside [0, 254]: every pixel falls on one side, so the
           result is either a constant fill or a plain copy. */
        if( ithresh < 0 || ithresh >= 255 )
        {
            if( type == CV_THRESH_BINARY || type == CV_THRESH_BINARY_INV ||
                ((type == CV_THRESH_TRUNC || type == CV_THRESH_TOZERO_INV) && ithresh < 0) ||
                (type == CV_THRESH_TOZERO && ithresh >= 255) )
            {
                int v = type == CV_THRESH_BINARY ? (ithresh >= 255 ? 0 : imaxval) :
                        type == CV_THRESH_BINARY_INV ? (ithresh >= 255 ? imaxval : 0) :
                        type == CV_THRESH_TRUNC ? imaxval : 0;
                cvSet( dst, cvScalarAll(v) );
                EXIT;
            }
            else
            {
                cvCopy( src, dst );
                EXIT;
            }
        }

        /* IPP fast paths (optional, via dynamically loaded pointers). */
        if( type == CV_THRESH_BINARY || type == CV_THRESH_BINARY_INV )
        {
            if( icvCompareC_8u_C1R_cv_p && icvAndC_8u_C1R_p )
            {
                IPPI_CALL( icvCompareC_8u_C1R_cv_p( src->data.ptr, src_step,
                    (uchar)ithresh, dst->data.ptr, dst_step, roi,
                    type == CV_THRESH_BINARY ? cvCmpGreater : cvCmpLessEq ));
                if( imaxval < 255 )
                    IPPI_CALL( icvAndC_8u_C1R_p( dst->data.ptr, dst_step,
                        (uchar)imaxval, dst->data.ptr, dst_step, roi ));
                EXIT;
            }
        }
        else if( type == CV_THRESH_TRUNC || type == CV_THRESH_TOZERO_INV )
        {
            if( icvThreshold_GTVal_8u_C1R_p )
            {
                IPPI_CALL( icvThreshold_GTVal_8u_C1R_p( src->data.ptr, src_step,
                    dst->data.ptr, dst_step, roi, (uchar)ithresh,
                    (uchar)(type == CV_THRESH_TRUNC ? ithresh : 0) ));
                EXIT;
            }
        }
        else
        {
            assert( type == CV_THRESH_TOZERO );
            if( icvThreshold_LTVal_8u_C1R_p )
            {
                /* IPP's LTVal is "less than": shift by one to keep the
                   "greater than thresh survives" semantics. */
                ithresh = cvFloor(thresh+1.);
                ithresh = CV_CAST_8U(ithresh);
                IPPI_CALL( icvThreshold_LTVal_8u_C1R_p( src->data.ptr, src_step,
                    dst->data.ptr, dst_step, roi, (uchar)ithresh, 0 ));
                EXIT;
            }
        }

        /* Generic 8u fallback. */
        icvThresh_8u_C1R( src->data.ptr, src_step,
            dst->data.ptr, dst_step, roi,
            (uchar)ithresh, (uchar)imaxval, type );
        break;
    case CV_32F:
        if( type == CV_THRESH_TRUNC || type == CV_THRESH_TOZERO_INV )
        {
            if( icvThreshold_GTVal_32f_C1R_p )
            {
                IPPI_CALL( icvThreshold_GTVal_32f_C1R_p( src->data.fl, src_step,
                    dst->data.fl, dst_step, roi, (float)thresh,
                    type == CV_THRESH_TRUNC ? (float)thresh : 0 ));
                EXIT;
            }
        }
        else if( type == CV_THRESH_TOZERO )
        {
            if( icvThreshold_LTVal_32f_C1R_p )
            {
                /* Nudge the level up one ULP-ish so strict '>' is preserved. */
                IPPI_CALL( icvThreshold_LTVal_32f_C1R_p( src->data.fl, src_step,
                    dst->data.fl, dst_step, roi, (float)(thresh*(1 + FLT_EPSILON)), 0 ));
                EXIT;
            }
        }

        /* Generic 32f fallback (also used for BINARY/BINARY_INV). */
        icvThresh_32f_C1R( src->data.fl, src_step,
            dst->data.fl, dst_step, roi,
            (float)thresh, (float)maxval, type );
        break;
    default:
        CV_ERROR( CV_BadDepth, cvUnsupportedFormat );
    }

    __END__;

    if( hist )
        cvReleaseHist( &hist );

    return thresh;
}
/* Reads the PNG IHDR chunk and derives the OpenCV output type.
 *
 * Sets m_width/m_height/m_color_type/m_bit_depth and m_type (8uC1/8uC3/8uC4,
 * promoted to 16u for 16-bit files). Input comes either from the in-memory
 * buffer (m_buf) or from the file m_filename. libpng errors longjmp back to
 * the setjmp() below, leaving result == false. All allocated libpng structs
 * are stored on the object and released via close() on failure.
 */
bool  PngDecoder::readHeader()
{
    bool result = false;
    close();   /* release any state left from a previous decode */

    png_structp png_ptr = png_create_read_struct( PNG_LIBPNG_VER_STRING, 0, 0, 0 );

    if( png_ptr )
    {
        png_infop info_ptr = png_create_info_struct( png_ptr );
        png_infop end_info = png_create_info_struct( png_ptr );

        /* Stash the handles so close() can free them even on error paths. */
        m_png_ptr = png_ptr;
        m_info_ptr = info_ptr;
        m_end_info = end_info;
        m_buf_pos = 0;

        if( info_ptr && end_info )
        {
            /* libpng reports errors via longjmp to this point. */
            if( setjmp( png_jmpbuf( png_ptr ) ) == 0 )
            {
                if( !m_buf.empty() )
                    png_set_read_fn(png_ptr, this, (png_rw_ptr)readDataFromBuf );
                else
                {
                    m_f = fopen( m_filename.c_str(), "rb" );
                    if( m_f )
                        png_init_io( png_ptr, m_f );
                }

                if( !m_buf.empty() || m_f )
                {
                    png_uint_32 width, height;
                    int bit_depth, color_type;

                    png_read_info( png_ptr, info_ptr );

                    png_get_IHDR( png_ptr, info_ptr, &width, &height,
                                  &bit_depth, &color_type, 0, 0, 0 );

                    m_width = (int)width;
                    m_height = (int)height;
                    m_color_type = color_type;
                    m_bit_depth = bit_depth;

                    if( bit_depth <= 8 || bit_depth == 16 )
                    {
                        /* Map PNG color type to the OpenCV pixel type;
                           palettes are expanded to RGB during decoding. */
                        switch(color_type)
                        {
                        case PNG_COLOR_TYPE_RGB:
                        case PNG_COLOR_TYPE_PALETTE:
                            m_type = CV_8UC3;
                            break;
                        case PNG_COLOR_TYPE_RGB_ALPHA:
                            m_type = CV_8UC4;
                            break;
                        default:
                            m_type = CV_8UC1;
                        }
                        if( bit_depth == 16 )
                            m_type = CV_MAKETYPE(CV_16U, CV_MAT_CN(m_type));
                        result = true;
                    }
                }
            }
        }
    }

    if( !result )
        close();

    return result;
}
/** * Read an image into memory and return the information * * @param[in] filename File to load * @param[in] flags Flags * @param[in] hdrtype { LOAD_CVMAT=0, * LOAD_IMAGE=1, * LOAD_MAT=2 * } * @param[in] mat Reference to C++ Mat object (If LOAD_MAT) * @param[in] scale_denom Scale value * */ static void* imread_( const String& filename, int flags, int hdrtype, Mat* mat=0 ) { IplImage* image = 0; CvMat *matrix = 0; Mat temp, *data = &temp; /// Search for the relevant decoder to handle the imagery ImageDecoder decoder; #ifdef HAVE_GDAL if(flags != IMREAD_UNCHANGED && (flags & IMREAD_LOAD_GDAL) == IMREAD_LOAD_GDAL ){ decoder = GdalDecoder().newDecoder(); }else{ #endif decoder = findDecoder( filename ); #ifdef HAVE_GDAL } #endif /// if no decoder was found, return nothing. if( !decoder ){ return 0; } int scale_denom = 1; if( flags > IMREAD_LOAD_GDAL ) { if( flags & IMREAD_REDUCED_GRAYSCALE_2 ) scale_denom = 2; else if( flags & IMREAD_REDUCED_GRAYSCALE_4 ) scale_denom = 4; else if( flags & IMREAD_REDUCED_GRAYSCALE_8 ) scale_denom = 8; } /// set the scale_denom in the driver decoder->setScale( scale_denom ); /// set the filename in the driver decoder->setSource( filename ); // read the header to make sure it succeeds if( !decoder->readHeader() ) return 0; // established the required input image size CvSize size; size.width = decoder->width(); size.height = decoder->height(); // grab the decoded type int type = decoder->type(); if( (flags & IMREAD_LOAD_GDAL) != IMREAD_LOAD_GDAL && flags != IMREAD_UNCHANGED ) { if( (flags & CV_LOAD_IMAGE_ANYDEPTH) == 0 ) type = CV_MAKETYPE(CV_8U, CV_MAT_CN(type)); if( (flags & CV_LOAD_IMAGE_COLOR) != 0 || ((flags & CV_LOAD_IMAGE_ANYCOLOR) != 0 && CV_MAT_CN(type) > 1) ) type = CV_MAKETYPE(CV_MAT_DEPTH(type), 3); else type = CV_MAKETYPE(CV_MAT_DEPTH(type), 1); } if( hdrtype == LOAD_CVMAT || hdrtype == LOAD_MAT ) { if( hdrtype == LOAD_CVMAT ) { matrix = cvCreateMat( size.height, size.width, type ); temp = cvarrToMat( matrix ); } else { 
mat->create( size.height, size.width, type ); data = mat; } } else { image = cvCreateImage( size, cvIplDepth(type), CV_MAT_CN(type) ); temp = cvarrToMat( image ); } // read the image data if( !decoder->readData( *data )) { cvReleaseImage( &image ); cvReleaseMat( &matrix ); if( mat ) mat->release(); return 0; } if( decoder->setScale( scale_denom ) > 1 ) // if decoder is JpegDecoder then decoder->setScale always returns 1 { resize( *mat, *mat, Size( size.width / scale_denom, size.height / scale_denom ) ); } return hdrtype == LOAD_CVMAT ? (void*)matrix : hdrtype == LOAD_IMAGE ? (void*)image : (void*)mat; }
/* Per-element absolute difference: dst(i) = |src1(i) - src2(i)|.
 * All three arrays must have identical sizes and types; the actual work is
 * delegated to a per-depth worker from a lazily initialized function table.
 */
CV_IMPL void
cvAbsDiff( const void* srcarr1, const void* srcarr2, void* dstarr )
{
    static CvFuncTable adiff_tab;
    static int inittab = 0;

    CV_FUNCNAME( "cvAbsDiff" );

    __BEGIN__;

    int coi1 = 0, coi2 = 0, coi3 = 0;
    CvMat stub_a, *in1 = (CvMat*)srcarr1;
    CvMat stub_b, *in2 = (CvMat*)srcarr2;
    CvMat stub_d, *out = (CvMat*)dstarr;
    int step1, step2, step_out;
    CvSize roi;
    int type;

    /* One-time initialization of the per-depth dispatch table. */
    if( !inittab )
    {
        icvInitAbsDiffTable( &adiff_tab );
        inittab = 1;
    }

    CV_CALL( in1 = cvGetMat( in1, &stub_a, &coi1 ));
    CV_CALL( in2 = cvGetMat( in2, &stub_b, &coi2 ));
    CV_CALL( out = cvGetMat( out, &stub_d, &coi3 ));

    if( coi1 != 0 || coi2 != 0 || coi3 != 0 )
        CV_ERROR( CV_BadCOI, "" );

    if( !CV_ARE_SIZES_EQ( in1, in2 ) )
        CV_ERROR_FROM_CODE( CV_StsUnmatchedSizes );

    roi  = icvGetMatSize( in1 );
    type = CV_MAT_TYPE(in1->type);

    if( !CV_ARE_SIZES_EQ( in1, out ))
        CV_ERROR_FROM_CODE( CV_StsUnmatchedSizes );

    if( !CV_ARE_TYPES_EQ( in1, in2 ))
        CV_ERROR_FROM_CODE( CV_StsUnmatchedFormats );

    if( !CV_ARE_TYPES_EQ( in1, out ))
        CV_ERROR_FROM_CODE( CV_StsUnmatchedFormats );

    /* Treat every channel as an independent scalar column. */
    roi.width *= CV_MAT_CN( type );

    step1    = in1->step;
    step2    = in2->step;
    step_out = out->step;

    /* Contiguous arrays collapse into a single long row. */
    if( CV_IS_MAT_CONT( in1->type & in2->type & out->type ))
    {
        roi.width *= roi.height;
        roi.height = 1;
        step1 = step2 = step_out = CV_STUB_STEP;
    }

    {
        CvFunc2D_3A func = (CvFunc2D_3A)(adiff_tab.fn_2d[CV_MAT_DEPTH(type)]);

        if( !func )
            CV_ERROR( CV_StsUnsupportedFormat, "" );

        IPPI_CALL( func( in1->data.ptr, step1, in2->data.ptr, step2,
                         out->data.ptr, step_out, roi ));
    }

    __END__;
}
/* Corrects radial and tangential lens distortion of an 8-bit image.
 *
 * _src/_dst    - 8-bit images of identical size and type (not in-place)
 * A            - 3x3 camera intrinsic matrix (32f or 64f)
 * dist_coeffs  - 4 distortion coefficients (k1, k2, p1, p2), 1x4 or 4x1
 *
 * When the tangential coefficients are (near) zero and the IPP radial
 * undistortion entry points are loaded, the IPP fast path is used;
 * otherwise the generic icvUnDistort_8u_CnR implementation runs.
 */
CV_IMPL void
cvUndistort2( const CvArr* _src, CvArr* _dst, const CvMat* A, const CvMat* dist_coeffs )
{
    static int inittab = 0;
    uchar* buffer = 0;   /* scratch for the IPP path, freed after __END__ */

    CV_FUNCNAME( "cvUndistort2" );

    __BEGIN__;

    float a[9], k[4];    /* intrinsics and distortion, converted to float */
    int coi1 = 0, coi2 = 0;
    CvMat srcstub, *src = (CvMat*)_src;
    CvMat dststub, *dst = (CvMat*)_dst;
    CvMat _a = cvMat( 3, 3, CV_32F, a ), _k;
    int cn, src_step, dst_step;
    CvSize size;

    /* One-time interpolation table setup (legacy, not thread-safe). */
    if( !inittab )
    {
        icvInitLinearCoeffTab();
        icvInitCubicCoeffTab();
        inittab = 1;
    }

    CV_CALL( src = cvGetMat( src, &srcstub, &coi1 ));
    CV_CALL( dst = cvGetMat( dst, &dststub, &coi2 ));

    if( coi1 != 0 || coi2 != 0 )
        CV_ERROR( CV_BadCOI, "The function does not support COI" );

    if( CV_MAT_DEPTH(src->type) != CV_8U )
        CV_ERROR( CV_StsUnsupportedFormat, "Only 8-bit images are supported" );

    if( src->data.ptr == dst->data.ptr )
        CV_ERROR( CV_StsNotImplemented, "In-place undistortion is not implemented" );

    if( !CV_ARE_TYPES_EQ( src, dst ))
        CV_ERROR( CV_StsUnmatchedFormats, "" );

    if( !CV_ARE_SIZES_EQ( src, dst ))
        CV_ERROR( CV_StsUnmatchedSizes, "" );

    if( !CV_IS_MAT(A) || A->rows != 3 || A->cols != 3 ||
        CV_MAT_TYPE(A->type) != CV_32FC1 && CV_MAT_TYPE(A->type) != CV_64FC1 )
        CV_ERROR( CV_StsBadArg, "Intrinsic matrix must be a valid 3x3 floating-point matrix" );

    if( !CV_IS_MAT(dist_coeffs) || dist_coeffs->rows != 1 && dist_coeffs->cols != 1 ||
        dist_coeffs->rows*dist_coeffs->cols*CV_MAT_CN(dist_coeffs->type) != 4 ||
        CV_MAT_DEPTH(dist_coeffs->type) != CV_64F &&
        CV_MAT_DEPTH(dist_coeffs->type) != CV_32F )
        CV_ERROR( CV_StsBadArg, "Distortion coefficients must be 1x4 or 4x1 floating-point vector" );

    /* Convert intrinsics and distortion to float working copies a[], k[]. */
    cvConvert( A, &_a );
    _k = cvMat( dist_coeffs->rows, dist_coeffs->cols,
                CV_MAKETYPE(CV_32F, CV_MAT_CN(dist_coeffs->type)), k );
    cvConvert( dist_coeffs, &_k );

    cn = CV_MAT_CN(src->type);
    size = cvGetMatSize(src);

    src_step = src->step ? src->step : CV_STUB_STEP;
    dst_step = dst->step ? dst->step : CV_STUB_STEP;

    /* IPP fast path: radial-only distortion (p1 ~ p2 ~ 0). */
    if( fabs((double)k[2]) < 1e-5 && fabs((double)k[3]) < 1e-5 && icvUndistortGetSize_p )
    {
        int buf_size = 0;
        CvUndistortRadialIPPFunc func = cn == 1 ?
            (CvUndistortRadialIPPFunc)icvUndistortRadial_8u_C1R_p :
            (CvUndistortRadialIPPFunc)icvUndistortRadial_8u_C3R_p;

        if( func && icvUndistortGetSize_p( size, &buf_size ) >= 0 && buf_size > 0 )
        {
            CV_CALL( buffer = (uchar*)cvAlloc( buf_size ));
            /* a[0]=fx, a[4]=fy, a[2]=cx, a[5]=cy; fall through to the
               generic path if IPP reports failure. */
            if( func( src->data.ptr, src_step, dst->data.ptr,
                      dst_step, size, a[0], a[4], a[2], a[5],
                      k[0], k[1], buffer ) >= 0 )
                EXIT;
        }
    }

    /* Generic path: handles tangential distortion and any channel count. */
    icvUnDistort_8u_CnR( src->data.ptr, src_step,
        dst->data.ptr, dst_step, size, a, k, cn );

    __END__;

    cvFree( &buffer );
}