예제 #1
0
/* Check that two CvArr-like inputs are images with identical dimensions.
 * Returns 1 when both src1 and src2 are IplImages of equal width and
 * height; returns 0 otherwise, reporting the reason via BKE_report.
 * NOTE(review): 1<<4 appears to be a Blender report-level flag — confirm
 * against the RPT_* enum at the call sites. */
int BOCV_checkAreSameType(void* src1, void* src2)
{
	if (!CV_IS_IMAGE(src1)) {
		BKE_report(NULL, 1<<4, "Src1 is not an image");
		/* Here need code to mat and sequence... */
		return 0;
	}
	if (!CV_IS_IMAGE(src2)) {
		/* Bug fix: this report used to come AFTER "return 0;" and was
		 * therefore unreachable — src2 failures were silent. */
		BKE_report(NULL, 1<<4, "Src2 is not an image");
		return 0;
	}
	IplImage* isrc1 = (IplImage*)src1;
	IplImage* isrc2 = (IplImage*)src2;
	if (isrc1->width != isrc2->width || isrc1->height != isrc2->height) {
		BKE_report(NULL, 1<<4, "Inputs have different sizes");
		return 0;
	}
	return 1;
}
예제 #2
0
int BOCV_checkSameNChannels(void* src1, void* src2){
    if(CV_IS_IMAGE(src1) && CV_IS_IMAGE(src2)){
        if(((IplImage*)src1)->nChannels != ((IplImage*)src2)->nChannels ){
                BKE_report(NULL, 1<<4,"Inputs have different num channels");
                return 0;	
        }
        return 1;
    }else{
        return 0;
    }
}
예제 #3
0
/* Destructor: release every OpenCV resource this object may still own.
 * The CV_IS_* guards keep the release calls safe when a member was never
 * allocated (or holds stale/invalid data). */
MouthContours::~MouthContours() {
    if (CV_IS_IMAGE(imgGrey))
        cvReleaseImage(&imgGrey);

    if (CV_IS_IMAGE(imgTempl))
        cvReleaseImage(&imgTempl);

    if (CV_IS_STORAGE(storageTeeth))
        cvReleaseMemStorage(&storageTeeth);
}
예제 #4
0
/* Convert a retrieved object into a CvMat.
 * A CvMat input is returned unchanged; an IplImage is deep-copied into a
 * freshly allocated matrix and then released; any other non-NULL object
 * is released and reported as unsupported. Returns 0 on error. */
static CvMat*
icvRetrieveMatrix( void* obj )
{
    CvMat* m = 0;

    CV_FUNCNAME( "icvRetrieveMatrix" );

    __BEGIN__;

    if( CV_IS_MAT(obj) )
        m = (CvMat*)obj;
    else if( CV_IS_IMAGE(obj) )
    {
        IplImage* img = (IplImage*)obj;
        /* cvGetMat only fills a stack header around the image data, so
           the pixels are copied before the image is released */
        CvMat hdr, *src = cvGetMat( img, &hdr );
        CV_CALL( m = cvCreateMat( src->rows, src->cols, src->type ));
        CV_CALL( cvCopy( src, m ));
        cvReleaseImage( &img );
    }
    else if( obj )
    {
        /* unknown object type: free it, then raise */
        cvRelease( &obj );
        CV_ERROR( CV_StsUnsupportedFormat, "The object is neither an image, nor a matrix" );
    }

    __END__;

    return m;
}
예제 #5
0
File: image.cpp  Project: 93sam/opencv
/* Convert a retrieved object into an IplImage.
 * - If obj is already an image it is returned as-is.
 * - If obj is a CvMat, an image HEADER is wrapped around the matrix
 *   data; buffer ownership is stolen from the matrix (its data pointer
 *   is cleared before release) and the matrix header is freed.
 * - Any other non-NULL object is released and reported as an error. */
static IplImage*
icvRetrieveImage( void* obj )
{
    if( CV_IS_IMAGE(obj) )
        return (IplImage*)obj;

    if( CV_IS_MAT(obj) )
    {
        CvMat* mat = (CvMat*)obj;
        IplImage* img = cvCreateImageHeader( cvSize(mat->cols, mat->rows),
                                             CV_MAT_DEPTH(mat->type),
                                             CV_MAT_CN(mat->type) );
        cvSetData( img, mat->data.ptr, mat->step );
        /* hand the reference counter to the image so the pixel buffer is
           freed together with it */
        img->imageDataOrigin = (char*)mat->refcount;
        mat->data.ptr = 0;
        mat->step = 0;
        cvReleaseMat( &mat );
        return img;
    }

    if( obj )
    {
        cvRelease( &obj );
        CV_Error( CV_StsUnsupportedFormat, "The object is neither an image, nor a matrix" );
    }

    return 0;
}
예제 #6
0
/* Encode `arr` with the codec selected by file extension `ext` and return
 * the encoded bytes as a freshly allocated 1xN CV_8U CvMat.
 * _params is a list of (id, value) pairs terminated by a non-positive id,
 * forwarded to the encoder. Returns 0 when encoding fails. */
CV_IMPL CvMat*
cvEncodeImage( const char* ext, const CvArr* arr, const int* _params )
{
    int i = 0;
    if( _params )
    {
        /* count the encoder parameters; i ends up as the number of ints
           in the (id, value) pair list */
        for( ; _params[i] > 0; i += 2 )
            ;
    }
    cv::Mat img = cv::cvarrToMat(arr);
    /* bottom-left-origin IplImages are stored upside down; flip so the
       encoded file comes out the right way up */
    if( CV_IS_IMAGE(arr) && ((const IplImage*)arr)->origin == IPL_ORIGIN_BL )
    {
        cv::Mat temp;
        cv::flip(img, temp, 0);
        img = temp;
    }
    cv::vector<uchar> buf;

    bool code = cv::imencode(ext, img, buf,
                             i > 0 ? std::vector<int>(_params, _params+i) : std::vector<int>() );
    if( !code )
        return 0;
    CvMat* _buf = cvCreateMat(1, (int)buf.size(), CV_8U);
    /* NOTE(review): assumes a successful imencode produced at least one
       byte — &buf[0] would be invalid on an empty vector; confirm */
    memcpy( _buf->data.ptr, &buf[0], buf.size() );

    return _buf;
}
예제 #7
0
/* Reset the viewer to its initial, empty state: release every OpenCV
 * image and heap-allocated Qt buffer, restore the default tool settings,
 * recreate the gray placeholder canvas and schedule a repaint.
 * Always returns true. */
bool ImageViewer::deleteImage()
{
    /* release OpenCV images only when they hold a valid image header */
    if (CV_IS_IMAGE(_ocvImage))
    {
        cvReleaseImage(&_ocvImage);
        _ocvImage = NULL;
    }
    if (CV_IS_IMAGE(labelImage))
    {
        cvReleaseImage(&labelImage);
        labelImage = NULL;
    }
    if(CV_IS_IMAGE(_srcOcvImage))
    {
        cvReleaseImage(&_srcOcvImage);
        _srcOcvImage = NULL;
    }

    /* DELETEPTR presumably deletes and nulls the pointer — see its
       definition elsewhere in the project */
    DELETEPTR( _displayImage  );
    DELETEPTR( _labelMapImage );

    DELETEPTR( _result_display );
    DELETEPTR( _result_save    );

    /* fresh 500x500 mid-gray placeholder canvas */
    _displayImage = new QImage(500, 500, QImage::Format_RGB32);
    _displayImage->fill(qRgb(128, 128, 128));

    /* restore default tool/editing state */
    thickness       = 5;
    bPaintable      = false;
    lastPos         = QPoint(-1, -1);
    updateObjectCount(0);
    isEraser = false;
    m_method = -1;
    _orgWidth = 0;
    _orgHeight = 0;

    DELETEPTR( brushInterface  );

    _polygonPointList.clear();
    _polygonPointList_cv.clear();

    update();   // ask Qt to repaint the widget

    return true;
}
예제 #8
0
/* Build a cv::Mat view (or deep copy) of an IplImage, honoring the
 * image ROI and channel-of-interest (COI). With copyData == false the
 * returned Mat aliases the IplImage pixels — the image must outlive it. */
static Mat iplImageToMat(const IplImage* img, bool copyData)
{
    Mat m;

    if( !img )
        return m;

    m.dims = 2;
    CV_DbgAssert(CV_IS_IMAGE(img) && img->imageData != 0);

    int imgdepth = IPL2CV_DEPTH(img->depth);
    size_t esz;
    m.step[0] = img->widthStep;

    if(!img->roi)
    {
        /* whole image: interleaved pixel order required */
        CV_Assert(img->dataOrder == IPL_DATA_ORDER_PIXEL);
        m.flags = Mat::MAGIC_VAL + CV_MAKETYPE(imgdepth, img->nChannels);
        m.rows = img->height;
        m.cols = img->width;
        m.datastart = m.data = (uchar*)img->imageData;
        esz = CV_ELEM_SIZE(m.flags);
    }
    else
    {
        /* ROI present: either interleaved data, or planar data with a
           selected COI (then a single plane is exposed) */
        CV_Assert(img->dataOrder == IPL_DATA_ORDER_PIXEL || img->roi->coi != 0);
        bool selectedPlane = img->roi->coi && img->dataOrder == IPL_DATA_ORDER_PLANE;
        m.flags = Mat::MAGIC_VAL + CV_MAKETYPE(imgdepth, selectedPlane ? 1 : img->nChannels);
        m.rows = img->roi->height;
        m.cols = img->roi->width;
        esz = CV_ELEM_SIZE(m.flags);
        /* start of data = plane offset (planar COI) + ROI row/column offset */
        m.datastart = m.data = (uchar*)img->imageData +
            (selectedPlane ? (img->roi->coi - 1)*m.step*img->height : 0) +
            img->roi->yOffset*m.step[0] + img->roi->xOffset*esz;
    }
    m.datalimit = m.datastart + m.step.p[0]*m.rows;
    m.dataend = m.datastart + m.step.p[0]*(m.rows-1) + esz*m.cols;
    m.step[1] = esz;
    m.updateContinuityFlag();

    if( copyData )
    {
        /* keep the aliasing header in m2, then rebuild m as an owner */
        Mat m2 = m;
        m.release();
        if( !img->roi || !img->roi->coi ||
            img->dataOrder == IPL_DATA_ORDER_PLANE)
            m2.copyTo(m);
        else
        {
            /* interleaved data with COI set: extract just that channel */
            int ch[] = {img->roi->coi - 1, 0};
            m.create(m2.rows, m2.cols, m2.type());
            mixChannels(&m2, 1, &m, 1, ch, 1);
        }
    }

    return m;
}
예제 #9
0
/* Allocate a new CvArr with the same geometry as src.
 * Only IplImage inputs are handled: the result is a freshly allocated
 * image of identical size, depth and channel count (pixel data is NOT
 * copied). Returns NULL for any other input type. */
CvArr* BOCV_CreateArrFrom(void* src)
{
	if (!CV_IS_IMAGE(src))
		return NULL; /* TODO: support CvMat and CvSeq inputs */

	IplImage* img = (IplImage*)src;
	CvSize sz = cvSize(img->width, img->height);
	return (CvArr*)cvCreateImage(sz, img->depth, img->nChannels);
}
예제 #10
0
File: deriv.cpp  Project: HanaLeeHn/opencv
/* C-API wrapper for cv::Sobel. Source and destination must agree in
 * size and channel count. For bottom-origin IplImages an odd dy flips
 * the sign of the vertical derivative, so the result is negated to
 * compensate. */
CV_IMPL void
cvSobel( const void* srcarr, void* dstarr, int dx, int dy, int aperture_size )
{
    cv::Mat src = cv::cvarrToMat(srcarr);
    cv::Mat dst = cv::cvarrToMat(dstarr);

    CV_Assert( src.size() == dst.size() && src.channels() == dst.channels() );

    cv::Sobel( src, dst, dst.depth(), dx, dy, aperture_size, 1, 0, cv::BORDER_REPLICATE );

    bool bottomOrigin = CV_IS_IMAGE(srcarr) && ((IplImage*)srcarr)->origin != 0;
    if( bottomOrigin && dy % 2 != 0 )
        dst *= -1;
}
예제 #11
0
/* Insert the single-channel array _ch into channel `coi` of `arr`.
 * A negative coi means "use the COI currently selected on the IplImage".
 * Sizes and depths must match and coi must be a valid channel index. */
void insertImageCOI(InputArray _ch, CvArr* arr, int coi)
{
    Mat ch = _ch.getMat();
    Mat mat = cvarrToMat(arr, false, true, 1);
    if( coi < 0 )
    {
        CV_Assert( CV_IS_IMAGE(arr) );
        coi = cvGetImageCOI((const IplImage*)arr) - 1;
    }
    CV_Assert( ch.size == mat.size && ch.depth() == mat.depth() &&
               0 <= coi && coi < mat.channels() );
    int fromTo[] = { 0, coi };   /* source channel 0 -> destination channel coi */
    mixChannels( &ch, 1, &mat, 1, fromTo, 1 );
}
예제 #12
0
int BOCV_checkMask(void* src1, void* mask){
    if(!BOCV_checkAreSameType(src1, mask)){
        return 0; 
    }
    if(CV_IS_IMAGE(mask)){
            if(((IplImage*)mask)->nChannels!=1){
                BKE_report(NULL, 1<<4,"Mask must be 1 channel, please convert to gray");
                return 0;
            }
    }
    return 1;
}
예제 #13
0
File: Blob.cpp  Project: ashokzg/billiards
/**
- FUNCTION: Mean
- FUNCTIONALITY: Get the blob's mean gray level in the input image
- PARAMETERS:
	- image: image from which the gray level is extracted
- RESULT:
	- mean gray value inside the blob (also cached in m_meanGray;
	  m_stdDevGray is updated as a side effect)
- RESTRICTIONS:
	- 
- AUTHOR: rborras
- CREATION DATE: 2008/05/06
- MODIFICATION: Date. Author. Description.
*/
double CBlob::Mean( IplImage *image )
{
	// it is calculated?
/*	if( m_meanGray != -1 )
	{
		return m_meanGray;
	}
*/	
	// Create a mask with same size as blob bounding box
	IplImage *mask;
	CvScalar mean, std;
	CvPoint offset;

	GetBoundingBox();
	
	// degenerate blob or invalid image: report 0 and bail out
	if (m_boundingBox.height == 0 ||m_boundingBox.width == 0 || !CV_IS_IMAGE( image ))
	{
		m_meanGray = 0;
		return m_meanGray;
	}

	// apply ROI and mask to input image to compute mean gray and standard deviation
	mask = cvCreateImage( cvSize(m_boundingBox.width, m_boundingBox.height), IPL_DEPTH_8U, 1);
	cvSetZero(mask);

	// shift contour coordinates into the bounding-box-local mask
	offset.x = -m_boundingBox.x;
	offset.y = -m_boundingBox.y;

	// draw contours on mask
	cvDrawContours( mask, m_externalContour.GetContourPoints(), CV_RGB(255,255,255), CV_RGB(255,255,255),0, CV_FILLED, 8,
					offset );

	// draw internal contours (holes) in black so they are excluded
	t_contourList::iterator it = m_internalContours.begin();
	while(it != m_internalContours.end() )
	{
		cvDrawContours( mask, (*it).GetContourPoints(), CV_RGB(0,0,0), CV_RGB(0,0,0),0, CV_FILLED, 8,
					offset );
		it++;
	}

	// restrict the statistics to the bounding box, then restore the ROI
	cvSetImageROI( image, m_boundingBox );
	cvAvgSdv( image, &mean, &std, mask );
	
	m_meanGray = mean.val[0];
	m_stdDevGray = std.val[0];

	cvReleaseImage( &mask );
	cvResetImageROI( image );

	return m_meanGray;
}
예제 #14
0
/* Save an array to `filename`. _params is a list of (id, value) pairs
 * terminated by a non-positive id, forwarded to the encoder.
 * Bottom-left-origin IplImages are flagged so the writer flips them. */
CV_IMPL int
cvSaveImage( const char* filename, const CvArr* arr, const int* _params )
{
    int nparams = 0;
    if( _params )
    {
        while( _params[nparams] > 0 )
            nparams += 2;
    }
    bool flip = CV_IS_IMAGE(arr) && ((const IplImage*)arr)->origin == IPL_ORIGIN_BL;
    return cv::imwrite_( filename, cv::cvarrToMat(arr),
                         nparams > 0 ? cv::vector<int>(_params, _params + nparams)
                                     : cv::vector<int>(),
                         flip );
}
예제 #15
0
/* Extract channel `coi` of `arr` into the single-channel output _ch.
 * A negative coi means "use the COI currently selected on the IplImage".
 * coi must index an existing channel of arr. */
void extractImageCOI(const CvArr* arr, OutputArray _ch, int coi)
{
    Mat mat = cvarrToMat(arr, false, true, 1);
    _ch.create(mat.dims, mat.size, mat.depth());
    Mat ch = _ch.getMat();
    if( coi < 0 )
    {
        CV_Assert( CV_IS_IMAGE(arr) );
        coi = cvGetImageCOI((const IplImage*)arr) - 1;
    }
    CV_Assert( 0 <= coi && coi < mat.channels() );
    int fromTo[] = { coi, 0 };   /* source channel coi -> destination channel 0 */
    mixChannels( &mat, 1, &ch, 1, fromTo, 1 );
}
예제 #16
0
// get essential information about image ROI or CvMat data
/* Fills any of data/step/roi_size that are non-NULL:
 *  - for a CvMat: data pointer, row step and full matrix size;
 *  - for an IplImage: ROI-adjusted data pointer (via cvGetPtrAt), row
 *    step and the ROI size (full image size when no ROI is set);
 *  - anything else raises CV_StsBadArg. */
CV_IMPL void
cvGetRawData( const CvArr* arr, uchar** data, int* step, CvSize* roi_size )
{
    CV_FUNCNAME( "cvGetRawData" );

    __BEGIN__;

    if( CV_IS_ARR( arr ))
    {
        CvMat *mat = (CvMat*)arr;

        if( step )
            *step = mat->step;

        if( data )
            *data = mat->data.ptr;

        if( roi_size )
            *roi_size = icvGetMatSize( mat );
    }
    else if( CV_IS_IMAGE( arr ))
    {
        IplImage* img = (IplImage*)arr;

        if( step )
            *step = img->widthStep;

        if( data )
            /* cvGetPtrAt(…, 0, 0) accounts for the image ROI offset */
            CV_CALL( *data = cvGetPtrAt( img, 0, 0 ));

        if( roi_size )
        {
            if( img->roi )
            {
                *roi_size = cvSize( img->roi->width, img->roi->height );
            }
            else
            {
                *roi_size = cvSize( img->width, img->height );
            }
        }
    }
    else
    {
        CV_ERROR( CV_StsBadArg, "" );
    }

    __END__;
}
예제 #17
0
/* Create a Mixture-of-Gaussians background model wrapping the C++
 * cv::BackgroundSubtractorMOG implementation.
 * parameters may be NULL, in which case the CV_BGFG_MOG_* defaults from
 * cvaux.h are used. The model is primed with first_frame before return. */
CV_IMPL CvBGStatModel*
cvCreateGaussianBGModel( IplImage* first_frame, CvGaussBGStatModelParams* parameters )
{
    CvGaussBGStatModelParams params;

    CV_Assert( CV_IS_IMAGE(first_frame) );

    //init parameters
    if( parameters == NULL )
    {                        // These constants are defined in cvaux/include/cvaux.h
        params.win_size      = CV_BGFG_MOG_WINDOW_SIZE;
        params.bg_threshold  = CV_BGFG_MOG_BACKGROUND_THRESHOLD;

        params.std_threshold = CV_BGFG_MOG_STD_THRESHOLD;
        params.weight_init   = CV_BGFG_MOG_WEIGHT_INIT;

        params.variance_init = CV_BGFG_MOG_SIGMA_INIT*CV_BGFG_MOG_SIGMA_INIT;
        params.minArea       = CV_BGFG_MOG_MINAREA;
        params.n_gauss       = CV_BGFG_MOG_NGAUSSIANS;
    }
    else
        params = *parameters;

    CvGaussBGModel* bg_model = new CvGaussBGModel;
    memset( bg_model, 0, sizeof(*bg_model) );
    bg_model->type = CV_BG_MODEL_MOG;
    bg_model->release = (CvReleaseBGStatModel)icvReleaseGaussianBGModel;
    bg_model->update = (CvUpdateBGStatModel)icvUpdateGaussianBGModel;

    bg_model->params = params;

    /* the C++ subtractor is heap-wrapped in a Ptr so the C struct can
       hold it as an opaque pointer; released in icvReleaseGaussianBGModel */
    cv::Ptr<cv::BackgroundSubtractor> mog = cv::createBackgroundSubtractorMOG(params.win_size, params.n_gauss,
                                                                              params.bg_threshold);
    cv::Ptr<cv::BackgroundSubtractor>* pmog = new cv::Ptr<cv::BackgroundSubtractor>;
    *pmog = mog;
    bg_model->mog = pmog;

    CvSize sz = cvGetSize(first_frame);
    bg_model->background = cvCreateImage(sz, IPL_DEPTH_8U, first_frame->nChannels);
    bg_model->foreground = cvCreateImage(sz, IPL_DEPTH_8U, 1);

    bg_model->countFrames = 0;

    /* prime the model with the first frame (learning rate 1) */
    icvUpdateGaussianBGModel( first_frame, bg_model, 1 );

    return (CvBGStatModel*)bg_model;
}
예제 #18
0
/* Create a Mixture-of-Gaussians background model (legacy variant that
 * stores its per-pixel state behind the g_point field).
 * parameters may be NULL, in which case the CV_BGFG_MOG_* defaults from
 * cvaux.h are used. The model is primed with first_frame before return. */
CV_IMPL CvBGStatModel*
cvCreateGaussianBGModel( IplImage* first_frame, CvGaussBGStatModelParams* parameters )
{
    CvGaussBGStatModelParams params;
    
    CV_Assert( CV_IS_IMAGE(first_frame) );
    
    //init parameters
    if( parameters == NULL )
    {                        /* These constants are defined in cvaux/include/cvaux.h: */
        params.win_size      = CV_BGFG_MOG_WINDOW_SIZE;
        params.bg_threshold  = CV_BGFG_MOG_BACKGROUND_THRESHOLD;

        params.std_threshold = CV_BGFG_MOG_STD_THRESHOLD;
        params.weight_init   = CV_BGFG_MOG_WEIGHT_INIT;

        params.variance_init = CV_BGFG_MOG_SIGMA_INIT*CV_BGFG_MOG_SIGMA_INIT;
        params.minArea       = CV_BGFG_MOG_MINAREA;
        params.n_gauss       = CV_BGFG_MOG_NGAUSSIANS;
    }
    else
        params = *parameters;
    
    CvGaussBGModel* bg_model = new CvGaussBGModel;
    memset( bg_model, 0, sizeof(*bg_model) );
    bg_model->type = CV_BG_MODEL_MOG;
    bg_model->release = (CvReleaseBGStatModel)icvReleaseGaussianBGModel;
    bg_model->update = (CvUpdateBGStatModel)icvUpdateGaussianBGModel;
    
    bg_model->params = params;
    
    //prepare storages
    /* NOTE(review): g_point smuggles a cv::Mat* through a CvGaussBGPoint*
       cast — the matching icvRelease/icvUpdate must cast it back; verify */
    bg_model->g_point = (CvGaussBGPoint*)new cv::Mat();
    
    bg_model->background = cvCreateImage(cvSize(first_frame->width,
        first_frame->height), IPL_DEPTH_8U, first_frame->nChannels);
    bg_model->foreground = cvCreateImage(cvSize(first_frame->width,
        first_frame->height), IPL_DEPTH_8U, 1);
    
    bg_model->storage = cvCreateMemStorage();
    
    bg_model->countFrames = 0;
    
    /* prime the model with the first frame (learning rate 1) */
    icvUpdateGaussianBGModel( first_frame, bg_model, 1 );
    
    return (CvBGStatModel*)bg_model;
}
예제 #19
0
/* Sum all pixels of an IplImage per channel, dispatching on channel
 * count and pixel depth to the ICV_SUM_* macro implementations.
 * Only 1- and 3-channel images are handled; unsupported layouts return
 * the zero scalar. The *_F variants accumulate in floating point. */
static
CvScalar icvSumIplImage(const IplImage *img)
{
	assert(CV_IS_IMAGE(img));
	
    CvScalar sum = {{0,0,0,0}};
	
	if(img->nChannels == 1)
	{
		switch(img->depth)
		{
		case IPL_DEPTH_8U : ICV_SUM_C1(img, sum, uchar); break;
		case IPL_DEPTH_8S : ICV_SUM_C1(img, sum, char); break;
		case IPL_DEPTH_16U: ICV_SUM_C1(img, sum, unsigned short); break;
		case IPL_DEPTH_16S: ICV_SUM_C1(img, sum, short); break;
		case IPL_DEPTH_32S: ICV_SUM_C1(img, sum, int); break;
		
		case IPL_DEPTH_32F: ICV_SUM_C1F(img, sum, float); break;
		case IPL_DEPTH_64F: ICV_SUM_C1F(img, sum, double); break;
		
		default: break;   /* unknown depth: leave sum at zero */
		}
	}
	else if(img->nChannels == 3)
	{
		switch(img->depth)
		{
		case IPL_DEPTH_8U : ICV_SUM_C3(img, sum, uchar); break;
		case IPL_DEPTH_8S : ICV_SUM_C3(img, sum, char); break;
		case IPL_DEPTH_16U: ICV_SUM_C3(img, sum, unsigned short); break;
		case IPL_DEPTH_16S: ICV_SUM_C3(img, sum, short); break;
		case IPL_DEPTH_32S: ICV_SUM_C3(img, sum, int); break;
		
		case IPL_DEPTH_32F: ICV_SUM_C3F(img, sum, float); break;
		case IPL_DEPTH_64F: ICV_SUM_C3F(img, sum, double); break;
		
		default: break;   /* unknown depth: leave sum at zero */
		}
	}
	return sum;
}
예제 #20
0
/* Convert any legacy CvArr (CvMat, CvMatND, IplImage, CvSeq) into a
 * cv::Mat. With copyData == false the result aliases the source data
 * where possible. coiMode == 0 rejects images with an active COI.
 * abuf, when given, provides storage for the no-copy CvSeq path. */
Mat cvarrToMat(const CvArr* arr, bool copyData,
               bool /*allowND*/, int coiMode, AutoBuffer<double>* abuf )
{
    if( !arr )
        return Mat();
    if( CV_IS_MAT_HDR_Z(arr) )
        return cvMatToMat((const CvMat*)arr, copyData);
    if( CV_IS_MATND(arr) )
        return cvMatNDToMat((const CvMatND*)arr, copyData );
    if( CV_IS_IMAGE(arr) )
    {
        const IplImage* iplimg = (const IplImage*)arr;
        if( coiMode == 0 && iplimg->roi && iplimg->roi->coi > 0 )
            CV_Error(CV_BadCOI, "COI is not supported by the function");
        return iplImageToMat(iplimg, copyData);
    }
    if( CV_IS_SEQ(arr) )
    {
        CvSeq* seq = (CvSeq*)arr;
        int total = seq->total, type = CV_MAT_TYPE(seq->flags), esz = seq->elem_size;
        if( total == 0 )
            return Mat();
        CV_Assert(total > 0 && CV_ELEM_SIZE(seq->flags) == esz);
        /* single-block sequence can be wrapped without copying */
        if(!copyData && seq->first->next == seq->first)
            return Mat(total, 1, type, seq->first->data);
        if( abuf )
        {
            /* caller-provided scratch buffer: flatten the sequence there */
            abuf->allocate(((size_t)total*esz + sizeof(double)-1)/sizeof(double));
            double* bufdata = abuf->data();
            cvCvtSeqToArray(seq, bufdata, CV_WHOLE_SEQ);
            return Mat(total, 1, type, bufdata);
        }

        Mat buf(total, 1, type);
        cvCvtSeqToArray(seq, buf.ptr(), CV_WHOLE_SEQ);
        return buf;
    }
    CV_Error(CV_StsBadArg, "Unknown array type");
}
예제 #21
0
// Find the largest contour (by bounding-box area) in the binary image surMark.
// surMark must be a single-channel 8-bit image; returns NULL for invalid
// input or when no contour exists. Contour memory is taken from `storage`.
CvSeq* GetMaxContour(IplImage *surMark, CvMemStorage* storage)
{
	if (!CV_IS_IMAGE(surMark) || surMark->nChannels != 1 || surMark->depth != IPL_DEPTH_8U)
		return NULL;

	// cvFindContours modifies its input image, so work on a clone
	IplImage *work = cvCloneImage(surMark);
	CvSeq* contours = 0;
	cvFindContours( work, storage, &contours, sizeof(CvContour), CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE );
	cvReleaseImage(&work);

	CvSeq* best = 0;
	double bestArea = 0;
	for (CvSeq* c = contours; c != 0; c = c->h_next)
	{
		CvRect rc = ((CvContour*)c)->rect;
		double area = rc.width * rc.height;
		if (area > bestArea)
		{
			bestArea = area;
			best = c;
		}
	}
	return best;
}
예제 #22
0
File: image.cpp  Project: 93sam/opencv
/* Convert a retrieved object into a CvMat.
 * Matrices are returned as-is; images are deep-copied into a newly
 * allocated matrix and then released; anything else non-NULL is
 * released and reported as unsupported. Returns 0 otherwise. */
static CvMat*
icvRetrieveMatrix( void* obj )
{
    if( CV_IS_MAT(obj) )
        return (CvMat*)obj;

    if( CV_IS_IMAGE(obj) )
    {
        IplImage* img = (IplImage*)obj;
        CvMat hdr;
        CvMat* src = cvGetMat( img, &hdr );
        CvMat* m = cvCreateMat( src->rows, src->cols, src->type );
        cvCopy( src, m );
        cvReleaseImage( &img );
        return m;
    }

    if( obj )
    {
        cvRelease( &obj );
        CV_Error( CV_StsUnsupportedFormat, "The object is neither an image, nor a matrix" );
    }

    return 0;
}
예제 #23
0
/* Minimal cvSaveImage replacement: encodes an 8-bit image to JPEG and
 * writes the bytes to `filename`. Returns 1 on success, 0 on failure.
 * Bug fix: extensions other than .jpg/.jpeg used to fall through with an
 * empty `data` buffer, writing a ZERO-BYTE file and still returning
 * success; they now fail up front. */
CV_IMPL
int cvSaveImage( const char* filename, const CvArr* arr )
{
	if(!filename || !filename[0] || !arr) {
		return 0;
	}

    if(!CV_IS_IMAGE(arr)) {
		return 0;
	}
	IplImage* img = (IplImage*)arr;
	std::string data;

	/* the JPEG encoder below only handles 8-bit images */
	if(img->depth != IPL_DEPTH_8U) {
		return 0;
	}

	std::string lowerName = icvLowerString(filename);
	if(!icvHasSuffixString(lowerName, ".jpg") && !icvHasSuffixString(lowerName, ".jpeg")) {
		/* unsupported extension — refuse instead of writing an empty file */
		return 0;
	}

	/* the encoder expects RGB while IplImage stores BGR: swap in place,
	   encode, then swap back so the caller's image is left untouched */
	cvConvertImage(img, img, CV_CVTIMG_SWAP_RB);
	if(!icvEncodeJpg(&data,
		img->imageData, img->imageSize,
		img->width, img->height, img->nChannels,
		90, img->widthStep
	)) {
		cvConvertImage(img, img, CV_CVTIMG_SWAP_RB);
		return 0;
	}
	cvConvertImage(img, img, CV_CVTIMG_SWAP_RB);

	if(!icvSaveFileData(filename, data.data(), data.size())) {
		return 0;
	}

	return 1;
}
예제 #24
0
/* Sum all elements of `arr` per channel, dispatching to the IplImage or
 * plain-CvMat implementation. Multi-dimensional CvMatND inputs raise
 * CV_StsBadArg. Returns the zero scalar for unrecognized inputs.
 * Bug fix: removed a leftover `assert(false);` that aborted EVERY call
 * in debug builds, and dropped the dead commented-out locals. */
CV_IMPL CvScalar
cvSum( const CvArr* arr )
{
    CvScalar sum = {{0,0,0,0}};

    CV_FUNCNAME("cvSum");

    __BEGIN__;

    CvMat *mat = (CvMat*)arr;

	if(CV_IS_IMAGE(arr))
	{
		sum = icvSumIplImage((const IplImage*)arr);
		EXIT;
	}
	else if(CV_IS_MAT(mat))
	{
        if( CV_IS_MATND(mat) )
        {
            CV_ERROR( CV_StsBadArg, "Only mat are supported here" );
            EXIT;
        }
		
		sum = icvSumMat(mat);
		EXIT;
	}

    __END__;

    return  sum;
}
예제 #25
0
/**
- FUNCTION: ComponentLabeling
- FUNCTIONALITY: Computes the binary connected components (blobs) of an image
  using 8-connectivity
- PARAMETERS:
	- inputImage: image to segment (pixel values different than blobColor are treated as background)
	- maskImage: if not NULL, all the pixels equal to 0 in mask are skipped in input image
	- backgroundColor: color of background (ignored pixels)
	- blobs: blob vector destination
- RESULT:
	- 
- RESTRICTIONS:
	- 
- AUTHOR: rborras
- CREATION DATE: 2008/04/21
- MODIFICATION: Date. Author. Description.
- NOTE: Algorithm based on "A linear-time component labeling algorithm using contour tracing technique", 
		F.Chang et al
*/
bool ComponentLabeling(	IplImage* inputImage,
						IplImage* maskImage,
						unsigned char backgroundColor,
						Blob_vector &blobs )
{
	int i,j;
	// row major vector with visited points 
	bool *visitedPoints, *pVisitedPoints, internalContour, externalContour;
	unsigned char *pInputImage, *pMask, *pAboveInputImage, *pBelowInputImage,
				  *pAboveMask, *pBelowMask;
	int imageWidth, imageHeight, currentLabel, contourLabel;
	// row major vector with labelled image 
	t_labelType *labelledImage, *pLabels;
	//! current blob pointer
	CBlob *currentBlob;
	CvSize imageSizes;
	CvPoint currentPoint;

	// verify input image
	if( !CV_IS_IMAGE( inputImage ) )
		return false;

	// verify that input image and mask image has same size
	if( maskImage )
	{
		if( !CV_IS_IMAGE(maskImage) || 
			maskImage->width != inputImage->width || 
			maskImage->height != inputImage->height )
		return false;
	}
	else
	{
		// NOTE(review): these NULL pointers are still incremented in the
		// inner loop below (never dereferenced) — technically UB, works
		// in practice; confirm before refactoring
		pMask = NULL;
		pAboveMask = NULL;
		pBelowMask = NULL;
	}

	imageSizes = cvSize(inputImage->width,inputImage->height);
	
	imageWidth = inputImage->width;
	imageHeight = inputImage->height;

	// create auxiliary buffers
	labelledImage = (t_labelType*) malloc( inputImage->width * inputImage->height * sizeof(t_labelType) );
	visitedPoints = (bool*) malloc( inputImage->width * inputImage->height * sizeof(bool) );

	// initialize it to 0
	memset(labelledImage, 0, inputImage->width * inputImage->height * sizeof(t_labelType) ) ;
	memset(visitedPoints, false, inputImage->width * inputImage->height * sizeof(bool) ) ;

	// initialize pointers and label counter
	pLabels = labelledImage;
	pVisitedPoints = visitedPoints;
	currentLabel = 1;

	for (j = 0; j < imageHeight; j++ )
	{
		// don't verify if we area on first or last row, it will verified on pointer access
		pAboveInputImage = (unsigned char*) inputImage->imageData + (j-1) * inputImage->widthStep;
		pBelowInputImage = (unsigned char*) inputImage->imageData + (j+1) * inputImage->widthStep;
	
		pInputImage = (unsigned char*) inputImage->imageData + j * inputImage->widthStep;

		if( maskImage )
		{
			pMask = (unsigned char*) maskImage->imageData + j * maskImage->widthStep;
			// don't verify if we area on first or last row, it will verified on pointer access
			pAboveMask = (unsigned char*) maskImage->imageData + (j-1) * maskImage->widthStep;
			pBelowMask = (unsigned char*) maskImage->imageData + (j+1) * maskImage->widthStep;

		}
		
		for (i = 0; i < imageWidth; i++, pInputImage++, pMask++, pAboveInputImage++, pBelowInputImage++,
										 pAboveMask++, pBelowMask++ )
		{
			// ignore background pixels or 0 pixels in mask
			if ( (*pInputImage == backgroundColor) || (maskImage && *pMask == 0 ))
			{
				pLabels++;
				pVisitedPoints++;
				continue;
			}
			
			// new external contour: current label == 0 and above pixel is background
			if( j > 0 )
			{
				externalContour = ((*pAboveInputImage == backgroundColor) || 
								  (maskImage && *pAboveMask == 0)) && 
								  (*pLabels == 0);
			}
			else
				externalContour = (*pLabels == 0);

			// new internal contour: below pixel is background and not visited
			if( !externalContour && j < imageHeight - 1 )
			{
				internalContour = *pBelowInputImage == backgroundColor &&
								  !GET_BELOW_VISITEDPIXEL( pVisitedPoints, imageWidth);
			}
			else
			{
				internalContour = false;
			}
			
			
			if( externalContour )
			{
				currentPoint = cvPoint(i,j);
				// assign label to labelled image
				*pLabels = currentLabel;
				
				// create new blob
				currentBlob = new CBlob(currentLabel, currentPoint, imageSizes );

				// contour tracing with currentLabel
				contourTracing( inputImage, maskImage, currentPoint, 
								labelledImage, visitedPoints, 
								currentLabel, false, backgroundColor, currentBlob->GetExternalContour() );

				// add new created blob
				blobs.push_back(currentBlob);

				currentLabel++;
			}
			else 
			{
				if( internalContour )
				{
					currentPoint = cvPoint(i,j);

					if( *pLabels == 0 )
					{
						// take left neightbour value as current
						// NOTE(review): contourLabel stays uninitialized
						// when i == 0 here — relies on the subsequent
						// contourLabel>0 check reading garbage; confirm
						if( i > 0 )
							contourLabel = *(pLabels - 1);
					}
					else
					{
						contourLabel = *pLabels;
					}

					if(contourLabel>0)
					{
						currentBlob = blobs[contourLabel-1];
						CBlobContour newContour(currentPoint, currentBlob->GetStorage());
						

						// contour tracing with contourLabel
						contourTracing( inputImage, maskImage, currentPoint, labelledImage, visitedPoints,
										contourLabel, true, backgroundColor, &newContour ); 

						currentBlob->AddInternalContour( newContour );
					}
				}
				// neither internal nor external contour
				else
				{
					// take left neightbour value as current if it is not labelled
					if( i > 0 && *pLabels == 0 )
						*pLabels = *(pLabels - 1);
				}

			}
			
			pLabels++;
			pVisitedPoints++;

		}
	}


	// free auxiliary buffers
	free( labelledImage );
	free( visitedPoints );

	return true;
}
예제 #26
0
/* Segment pig blobs in srcImg and draw them filled into maskImg.
 * srcImg must be 3-channel, maskImg single-channel of the same size;
 * userdata carries the lookup table, background model and storage.
 * Returns true when a dominant contour was found.
 * Bug fix: the early "no contour" exit used to `return false` without
 * releasing mask/tmpImg/imgs[], leaking four images per failed frame. */
bool FindPigBlobs(IplImage *srcImg, IplImage *maskImg, void* userdata)
{
	assert(CV_IS_IMAGE(srcImg) && CV_IS_IMAGE(maskImg) && srcImg->nChannels==3 && maskImg->nChannels==1 
		&& srcImg->width==maskImg->width && srcImg->height==maskImg->height && userdata);

	findpigblobs_userdata* param = (findpigblobs_userdata*)userdata;
	CvGaussBGModel *bg_model = param->bg_model;
	int *table = param->LUPTable;
	int rows = param->rows;
	int cols = param->cols;
	CvMemStorage *storage = param->storage;
	int& nModelFrames = param->modelframes;
	bool isRemoveShadow = param->isRemoveShadow;
	nModelFrames = nModelFrames < 0 ? 0 : nModelFrames;

	IplImage *mask = cvCreateImage(cvGetSize(srcImg), IPL_DEPTH_8U, 1);
	IplImage *tmpImg = cvCreateImage(cvSize(srcImg->width, srcImg->height), srcImg->depth, srcImg->nChannels);
	IplImage *imgs[4];
	for(int i=0; i<4; ++i)
	{
		imgs[i] = cvCreateImage(cvSize(srcImg->width, srcImg->height), srcImg->depth, 1);
	}

	cvZero(maskImg);
	/* build the lookup-table input planes: Y/Cr/Cb from YCrCb plus S from HSV */
	cvCvtColor(srcImg, tmpImg, CV_BGR2YCrCb);
	cvSplit(tmpImg, imgs[0], imgs[1], imgs[2], 0);
	cvCvtColor(srcImg, tmpImg, CV_BGR2HSV);
	cvSplit(tmpImg, 0, imgs[3], 0, 0);

	cvLookUpTable(table, rows, cols, 5, imgs, mask);
	cvErode(mask, mask, NULL, 1);
	cvDilate(mask, mask, 0, 4);
	cvClearMemStorage(storage);
	CvSeq *contour = GetMaxContour(mask, storage);
	bool found = (contour != 0);

	if (found)
	{
		cvDrawContours(maskImg, contour, cvScalarAll(255), cvScalarAll(255), -1, CV_FILLED, 8);

		CvRect maxcontourRc = ((CvContour*)contour)->rect;
		cvUpdateBGStatModel(srcImg, (CvBGStatModel*)bg_model, -1.0);
		if (isRemoveShadow){
			// average-background method; tmpImg receives the background estimate
			bool b = GetAvgBackgroudImg(srcImg, tmpImg, 0.1);
			assert(b);
		}

		if (nModelFrames > 20){  // background is only valid after 20 frames
			nModelFrames = 21;
			if (isRemoveShadow){
				// detect shadows against the averaged background
				bool b = ShadowDetect(srcImg, tmpImg, bg_model->foreground, 0.5, 1, 20, 20, mask);
				assert(b);
				// binary foreground with shadow pixels removed
				cvSub(bg_model->foreground, mask, mask);
			}else{
				cvCopy(bg_model->foreground, mask);
			}
			cvErode(mask, mask);
			cvDilate(mask, mask, 0, 5);
			cvClearMemStorage(storage);
			cvFindContours( mask, storage, &contour, sizeof(CvContour), CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE );
			for( ; contour != 0; contour = contour->h_next ){
				CvRect rc = ((CvContour*)contour)->rect;
				// drop false detections located in the heated-shelter area
				if(!CommonArea(rc, maxcontourRc, 0.3) && rc.width*rc.height > mask->imageSize*0.008
					&& rc.x+rc.width > 40 && rc.y+rc.height > 40)
				{
					cvDrawContours(maskImg, contour, cvScalarAll(255), cvScalarAll(255), -1, CV_FILLED, 8);
				}
			}
		}else{
			nModelFrames++;
		}
	}

	/* release the temporaries on every path */
	cvReleaseImage(&mask);
	cvReleaseImage(&tmpImg);
	for(int i=0; i<4; ++i)
	{
		cvReleaseImage(&imgs[i]);
	}
	return found;
}
예제 #27
0
// Function cvCreateFGDStatModel initializes foreground detection process
// parameters:
//      first_frame - frame from video sequence
//      parameters  - (optional) if NULL default parameters of the algorithm will be used
//      p_model     - pointer to CvFGDStatModel structure
CV_IMPL CvBGStatModel*
cvCreateFGDStatModel( IplImage* first_frame, CvFGDStatModelParams* parameters )
{
    CvFGDStatModel* p_model = 0;
    
    CV_FUNCNAME( "cvCreateFGDStatModel" );

    __BEGIN__;
    
    int i, j, k, pixel_count, buf_size;
    CvFGDStatModelParams params;

    if( !CV_IS_IMAGE(first_frame) )
        CV_ERROR( CV_StsBadArg, "Invalid or NULL first_frame parameter" );

    if (first_frame->nChannels != 3)
        CV_ERROR( CV_StsBadArg, "first_frame must have 3 color channels" );

    // Initialize parameters:
    if( parameters == NULL )
    {
        params.Lc      = CV_BGFG_FGD_LC;
        params.N1c     = CV_BGFG_FGD_N1C;
        params.N2c     = CV_BGFG_FGD_N2C;

        params.Lcc     = CV_BGFG_FGD_LCC;
        params.N1cc    = CV_BGFG_FGD_N1CC;
        params.N2cc    = CV_BGFG_FGD_N2CC;

        params.delta   = CV_BGFG_FGD_DELTA;

        params.alpha1  = CV_BGFG_FGD_ALPHA_1;
        params.alpha2  = CV_BGFG_FGD_ALPHA_2;
        params.alpha3  = CV_BGFG_FGD_ALPHA_3;

        params.T       = CV_BGFG_FGD_T;
        params.minArea = CV_BGFG_FGD_MINAREA;

        params.is_obj_without_holes = 1;
        params.perform_morphing     = 1;
    }
    else
    {
        params = *parameters;
    }

    CV_CALL( p_model = (CvFGDStatModel*)cvAlloc( sizeof(*p_model) ));
    memset( p_model, 0, sizeof(*p_model) );
    p_model->type = CV_BG_MODEL_FGD;
    p_model->release = (CvReleaseBGStatModel)icvReleaseFGDStatModel;
    p_model->update = (CvUpdateBGStatModel)icvUpdateFGDStatModel;;
    p_model->params = params;

    // Initialize storage pools:
    pixel_count = first_frame->width * first_frame->height;
    
    buf_size = pixel_count*sizeof(p_model->pixel_stat[0]);
    CV_CALL( p_model->pixel_stat = (CvBGPixelStat*)cvAlloc(buf_size) );
    memset( p_model->pixel_stat, 0, buf_size );
    
    // ctable/cctable for ALL pixels are allocated as one big slab hanging
    // off pixel_stat[0]; the loop below carves per-pixel slices out of it
    buf_size = pixel_count*params.N2c*sizeof(p_model->pixel_stat[0].ctable[0]);
    CV_CALL( p_model->pixel_stat[0].ctable = (CvBGPixelCStatTable*)cvAlloc(buf_size) );
    memset( p_model->pixel_stat[0].ctable, 0, buf_size );

    buf_size = pixel_count*params.N2cc*sizeof(p_model->pixel_stat[0].cctable[0]);
    CV_CALL( p_model->pixel_stat[0].cctable = (CvBGPixelCCStatTable*)cvAlloc(buf_size) );
    memset( p_model->pixel_stat[0].cctable, 0, buf_size );

    for(     i = 0, k = 0; i < first_frame->height; i++ ) {
        for( j = 0;        j < first_frame->width;  j++, k++ )
        {
            p_model->pixel_stat[k].ctable = p_model->pixel_stat[0].ctable + k*params.N2c;
            p_model->pixel_stat[k].cctable = p_model->pixel_stat[0].cctable + k*params.N2cc;
        }
    }

    // Init temporary images:
    CV_CALL( p_model->Ftd = cvCreateImage(cvSize(first_frame->width, first_frame->height), IPL_DEPTH_8U, 1));
    CV_CALL( p_model->Fbd = cvCreateImage(cvSize(first_frame->width, first_frame->height), IPL_DEPTH_8U, 1));
    CV_CALL( p_model->foreground = cvCreateImage(cvSize(first_frame->width, first_frame->height), IPL_DEPTH_8U, 1));

    CV_CALL( p_model->background = cvCloneImage(first_frame));
    CV_CALL( p_model->prev_frame = cvCloneImage(first_frame));
    CV_CALL( p_model->storage = cvCreateMemStorage());

    __END__;

    // roll back the partially built model if any CV_CALL above failed
    if( cvGetErrStatus() < 0 )
    {
        CvBGStatModel* base_ptr = (CvBGStatModel*)p_model;

        if( p_model && p_model->release )
            p_model->release( &base_ptr );
        else
            cvFree( &p_model );
        p_model = 0;
    }

    return (CvBGStatModel*)p_model;
}
예제 #28
0
/* dst = src */
/* Legacy copy: handles sparse->sparse by rebuilding the destination's
 * hash table node by node; all other array pairs are routed through
 * cv::Mat, honoring image COI (via mixChannels) and an optional mask. */
CV_IMPL void
cvCopy( const void* srcarr, void* dstarr, const void* maskarr )
{
    if( CV_IS_SPARSE_MAT(srcarr) && CV_IS_SPARSE_MAT(dstarr))
    {
        CV_Assert( maskarr == 0 );
        CvSparseMat* src1 = (CvSparseMat*)srcarr;
        CvSparseMat* dst1 = (CvSparseMat*)dstarr;
        CvSparseMatIterator iterator;
        CvSparseNode* node;

        dst1->dims = src1->dims;
        memcpy( dst1->size, src1->size, src1->dims*sizeof(src1->size[0]));
        dst1->valoffset = src1->valoffset;
        dst1->idxoffset = src1->idxoffset;
        cvClearSet( dst1->heap );

        /* grow the destination hash table when the source has too many
           active nodes for it */
        if( src1->heap->active_count >= dst1->hashsize*CV_SPARSE_HASH_RATIO )
        {
            cvFree( &dst1->hashtable );
            dst1->hashsize = src1->hashsize;
            dst1->hashtable =
                (void**)cvAlloc( dst1->hashsize*sizeof(dst1->hashtable[0]));
        }

        memset( dst1->hashtable, 0, dst1->hashsize*sizeof(dst1->hashtable[0]));

        /* clone every source node and push it onto its hash bucket */
        for( node = cvInitSparseMatIterator( src1, &iterator );
             node != 0; node = cvGetNextSparseNode( &iterator ))
        {
            CvSparseNode* node_copy = (CvSparseNode*)cvSetNew( dst1->heap );
            int tabidx = node->hashval & (dst1->hashsize - 1);
            CV_MEMCPY_AUTO( node_copy, node, dst1->heap->elem_size );
            node_copy->next = (CvSparseNode*)dst1->hashtable[tabidx];
            dst1->hashtable[tabidx] = node_copy;
        }
        return;
    }
    cv::Mat src = cv::cvarrToMat(srcarr, false, true, 1), dst = cv::cvarrToMat(dstarr, false, true, 1);
    CV_Assert( src.depth() == dst.depth() && src.size() == dst.size() );
    
    /* COI on either image means only one channel takes part in the copy */
    int coi1 = 0, coi2 = 0;
    if( CV_IS_IMAGE(srcarr) )
        coi1 = cvGetImageCOI((const IplImage*)srcarr);
    if( CV_IS_IMAGE(dstarr) )
        coi2 = cvGetImageCOI((const IplImage*)dstarr);
    
    if( coi1 || coi2 )
    {
        CV_Assert( (coi1 != 0 || src.channels() == 1) &&
            (coi2 != 0 || dst.channels() == 1) );
        
        int pair[] = { std::max(coi1-1, 0), std::max(coi2-1, 0) };
        cv::mixChannels( &src, 1, &dst, 1, pair, 1 );
        return;
    }
    else
        CV_Assert( src.channels() == dst.channels() );
    
    if( !maskarr )
        src.copyTo(dst);
    else
        src.copyTo(dst, cv::cvarrToMat(maskarr));
}
예제 #29
0
/*
 * Locate the calibration etalon (currently only a chessboard) in one frame
 * per camera.
 *
 * mats: array of `cameraCount` input images (CvMat* or IplImage*).
 * Returns true only when the etalon was found in ALL camera views; on any
 * failure (bad args, unsupported etalon type, pattern not found) returns
 * false. Side effects: fills latestPoints[i]/latestCounts[i] (count is
 * negated when detection failed), may update imgSize and (re)allocate the
 * grayImg/tempImg work buffers and the scratch memory storage.
 */
bool CvCalibFilter::FindEtalon( CvMat** mats )
{
    bool result = true;

    if( !mats || etalonPointCount == 0 )
    {
        assert(0);
        result = false;
    }

    if( result )
    {
        int i, tempPointCount0 = etalonPointCount*2;

        // Lazily allocate per-camera corner buffers.
        // BUGFIX: size by the element type (latestPoints[0][0], a
        // CvPoint2D32f), not by the pointer type sizeof(latestPoints[0]).
        // The old expression only worked because of the extra *2 slack and
        // because sizeof(void*) == sizeof(CvPoint2D32f) on 64-bit targets.
        for( i = 0; i < cameraCount; i++ )
        {
            if( !latestPoints[i] )
                latestPoints[i] = (CvPoint2D32f*)
                    cvAlloc( tempPointCount0*2*sizeof(latestPoints[0][0]));
        }

        for( i = 0; i < cameraCount; i++ )
        {
            CvSize size;
            int tempPointCount = tempPointCount0;
            bool found = false;

            if( !CV_IS_MAT(mats[i]) && !CV_IS_IMAGE(mats[i]))
            {
                assert(0);
                // BUGFIX: report failure to the caller; previously a release
                // build (assert compiled out) could still return true here.
                result = false;
                break;
            }

            size = cvGetSize(mats[i]);

            if( size.width != imgSize.width || size.height != imgSize.height )
            {
                imgSize = size;
            }

            // (Re)create the grayscale work buffers when the frame size changed.
            if( !grayImg || grayImg->width != imgSize.width ||
                grayImg->height != imgSize.height )
            {
                cvReleaseMat( &grayImg );
                cvReleaseMat( &tempImg );
                grayImg = cvCreateMat( imgSize.height, imgSize.width, CV_8UC1 );
                tempImg = cvCreateMat( imgSize.height, imgSize.width, CV_8UC1 );
            }

            if( !storage )
                storage = cvCreateMemStorage();

            switch( etalonType )
            {
            case CV_CALIB_ETALON_CHESSBOARD:
                if( CV_MAT_CN(cvGetElemType(mats[i])) == 1 )
                    cvCopy( mats[i], grayImg );
                else
                    cvCvtColor( mats[i], grayImg, CV_BGR2GRAY );
                found = cvFindChessBoardCornerGuesses( grayImg, tempImg, storage,
                                                       cvSize( cvRound(etalonParams[0]),
                                                       cvRound(etalonParams[1])),
                                                       latestPoints[i], &tempPointCount ) != 0;
                // Refine the detected corners to sub-pixel accuracy.
                if( found )
                    cvFindCornerSubPix( grayImg, latestPoints[i], tempPointCount,
                                        cvSize(5,5), cvSize(-1,-1),
                                        cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,10,0.1));
                break;
            default:
                assert(0);
                result = false;
                break;
            }

            // Negative count signals "pattern not found" to downstream code.
            latestCounts[i] = found ? tempPointCount : -tempPointCount;
            result = result && found;
        }
    }

    if( storage )
        cvClearMemStorage( storage );

    return result;
}
예제 #30
0
/* Reset every published mouth statistic to the "nothing detected" state. */
static void resetMouthDetectionState()
{
    MouthContours::MouthHH = 0;
    MouthContours::MouthMM = 0;
    MouthContours::MouthSS = 0;
    MouthContours::TeethArcLength = 0;
    MouthContours::TeethAreaContour = 0;
}

/*
 * Detect teeth/mouth contours inside the mouthSearch region of img.
 *
 * img:  source BGR frame (read-only; copied into imgTempl).
 * drw:  frame the detected contours are drawn onto (its ROI is temporarily
 *       set to mouthSearch and reset before returning).
 * mouthSearch: region of interest to scan for contours.
 *
 * On success publishes arc length, contour area and a wall-clock timestamp
 * through the MouthContours static members (the values of the LAST accepted
 * contour win); on any failure the statistics are zeroed.
 */
void MouthContours::execute(IplImage* img, IplImage* drw, CvRect mouthSearch){

    CvSeq* contours = 0;
    // Release work images from a previous call before reallocating them.
    if(CV_IS_IMAGE(imgGrey)){
        cvReleaseImage(&imgGrey);
    }
    if(CV_IS_IMAGE(imgTempl)){
        cvReleaseImage(&imgTempl);
    }
    allocateOnDemand( &storageTeeth );
    allocateOnDemand( &imgTempl, cvSize( img->width, img->height ), IPL_DEPTH_8U, 3 );
    cvCopy( img,  imgTempl, 0 );
    allocateOnDemand( &imgGrey, cvSize( img->width, img->height ), IPL_DEPTH_8U, 1 );

    if(CV_IS_STORAGE((storageTeeth))){
        // NOTE: the former cvCreateSeq() here was dead code — cvFindContours
        // overwrites `contours` before it is ever read.
        cvCvtColor( imgTempl, imgGrey, CV_BGR2GRAY );
        int sigma = 1;
        int ksize = (sigma*5)|1;   // odd kernel size derived from sigma
        cvSetImageROI(imgGrey, mouthSearch);
        cvSetImageROI(drw, mouthSearch);

        cvSmooth( imgGrey , imgGrey, CV_GAUSSIAN, ksize, ksize, sigma, sigma);
        //cvEqualizeHist( small_img_grey, small_img_grey );
        cvCanny( imgGrey, imgGrey, 70, 70, 3 );

        // Close small gaps in the edge map before contour extraction.
        cvDilate( imgGrey, imgGrey, NULL, 1 );
        cvErode( imgGrey, imgGrey, NULL, 1 );

        cvFindContours( imgGrey, storageTeeth, &contours, sizeof(CvContour), CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0) );
        if(CV_IS_SEQ(contours)){
            contours = cvApproxPoly( contours, sizeof(CvContour), storageTeeth, CV_POLY_APPROX_DP, 5, 1 );
            if( contours->total > 0 ){
                for( ;contours; contours = contours->h_next ){
                    // Skip degenerate polygons (fewer than 4 vertices).
                    if( contours->total <  4 )
                        continue;

                    cvDrawContours( drw, contours, CV_RGB(255,0,0), CV_RGB(0,255,0), 5, 1, CV_AA, cvPoint(0,0) );
                    MouthContours::TeethArcLength = cvArcLength(  contours, CV_WHOLE_SEQ, -1);
                    MouthContours::TeethAreaContour = cvContourArea( contours, CV_WHOLE_SEQ);
                    // Stamp the detection with the current local time.
                    time_t ltime;
                    struct tm *Tm;
                    ltime=time(NULL);
                    Tm=localtime(&ltime);
                    MouthContours::MouthHH = Tm->tm_hour;
                    MouthContours::MouthMM = Tm->tm_min;
                    MouthContours::MouthSS = Tm->tm_sec;

                }
            }else{
                resetMouthDetectionState();
            }

        }else{
            resetMouthDetectionState();
        }

        cvClearMemStorage( storageTeeth );

    }
    cvResetImageROI(imgGrey);
    cvResetImageROI(drw);

}