Code example #1
File: Blob.cpp Project: srgblnch/ISL
Blob::Blob( const Blob &_src )
: perimeter_(*this),
  area_(*this),
  ellipse_(*this),
  bounding_rect_(*this)
{
  CV_FUNCNAME( "Blob::Blob(const Blob &)" );
  __BEGIN__;

  ISL_CALL( this->alloc() );
  ISL_CALL( this->copy_edges(_src.edges_) );
  
  this->perimeter_.copy_data(_src.perimeter_);
  this->area_.copy_data(_src.area_);
  this->ellipse_.copy_data(_src.ellipse_);
  this->bounding_rect_.copy_data(_src.bounding_rect_);

  __END__;
  
  if( cvGetErrStatus() < 0 )
    this->release();

  __ISL_CHECK_ERROR__;

}
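All of the examples on this page share one idiom: the legacy OpenCV error macros. A condensed sketch of that pattern (my own illustration using only the cxcore.h macros, not code taken from the ISL project): CV_FUNCNAME registers the function name for error reporting, __BEGIN__/__END__ bracket a block that CV_CALL and CV_ERROR can exit early, and a negative cvGetErrStatus() afterwards signals that any partially constructed state must be cleaned up.

#include <cxcore.h>

CvMat* make_identity_3x3()  // hypothetical helper, for illustration only
{
    CvMat* result = 0;

    CV_FUNCNAME( "make_identity_3x3" );
    __BEGIN__;

    CV_CALL( result = cvCreateMat( 3, 3, CV_32FC1 ) );  // exits the block on failure
    CV_CALL( cvSetIdentity( result ) );

    __END__;

    if( cvGetErrStatus() < 0 )    // an error escaped the block above
        cvReleaseMat( &result );  // undo partial construction; result becomes 0

    return result;                // 0 on failure (when running in CV_ErrModeParent)
}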
Code example #2
File: cvarray.cpp Project: mikanradojevic/sdkpub
// create IplImage header
CV_IMPL IplImage *
cvCreateImageHeader( CvSize size, int depth, int channels )
{
    IplImage *img = 0;

    CV_FUNCNAME( "cvCreateImageHeader" );

    __BEGIN__;

    if( !CvIPL.createHeader )
    {
        CV_CALL( img = (IplImage *)cvAlloc( sizeof( *img )));
        CV_CALL( cvInitImageHeader( img, size, depth, channels, IPL_ORIGIN_TL, 4, 1 ));
    }
    else
    {
        char *colorModel;
        char *channelSeq;

        icvGetColorModel( channels, &colorModel, &channelSeq );

        img = CvIPL.createHeader( channels, 0, depth, colorModel, channelSeq,
                                  IPL_DATA_ORDER_PIXEL, IPL_ORIGIN_TL, 4,
                                  size.width, size.height, 0, 0, 0, 0 );
    }

    __END__;

    if( cvGetErrStatus() < 0 && img )
        cvReleaseImageHeader( &img );

    return img;
}
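A hypothetical caller of cvCreateImageHeader (my sketch; it assumes a tightly packed 8-bit single-channel buffer): wrap existing pixels in a header without copying, then release only the header.

#include <cxcore.h>

void wrap_external_buffer( unsigned char* pixels, int width, int height )
{
    IplImage* hdr = cvCreateImageHeader( cvSize( width, height ), IPL_DEPTH_8U, 1 );
    if( hdr )
    {
        cvSetData( hdr, pixels, width );  // widthStep == width for packed 8u C1 data
        // ... pass hdr to any CvArr-based function here ...
        cvReleaseImageHeader( &hdr );     // frees the header only; 'pixels' stays valid
    }
}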
Code example #3
File: PrincipalAxis.cpp Project: srgblnch/ISL
PrincipalAxis::PrincipalAxis(double mx, double my, double cxx, double cxy, double cyy)
: major_(0)
, minor_(0)
{
  CV_FUNCNAME( "PrincipalAxis::PrincipalAxis" );
  __BEGIN__;

  CV_CALL( this->covariance_    = cvCreateMat( 2, 2, CV_64FC1 ) );
  CV_CALL( this->eigen_vectors_ = cvCreateMat( 2, 2, CV_64FC1 ) );
  CV_CALL( this->eigen_values_  = cvCreateMat( 2, 1, CV_64FC1 ) );

  CV_CALL( cvZero(this->covariance_   ) );
  CV_CALL( cvZero(this->eigen_vectors_) );
  CV_CALL( cvZero(this->eigen_values_ ) );

  this->compute(mx, my, cxx, cxy, cyy);

  __END__;

  if( cvGetErrStatus() < 0 )
  {
    __ISL_DISABLE_ERROR__;
    this->release();
    __ISL_RESTORE_ERROR__;
  }
  __ISL_CHECK_ERROR__;
}
Code example #4
File: cvmorph.cpp Project: 273k/OpenCV-Android
CV_IMPL IplConvKernel *
cvCreateStructuringElementEx( int cols, int rows,
                              int anchorX, int anchorY,
                              int shape, int *values )
{
    IplConvKernel *element = 0;
    int i, size = rows * cols;
    int element_size = sizeof(*element) + size*sizeof(element->values[0]);

    CV_FUNCNAME( "cvCreateStructuringElementEx" );

    __BEGIN__;

    if( !values && shape == CV_SHAPE_CUSTOM )
        CV_ERROR_FROM_STATUS( CV_NULLPTR_ERR );

    if( cols <= 0 || rows <= 0 ||
        (unsigned) anchorX >= (unsigned) cols ||
        (unsigned) anchorY >= (unsigned) rows )
        CV_ERROR_FROM_STATUS( CV_BADSIZE_ERR );

    CV_CALL( element = (IplConvKernel *)cvAlloc(element_size + 32));
    if( !element )
        CV_ERROR_FROM_STATUS( CV_OUTOFMEM_ERR );

    element->nCols = cols;
    element->nRows = rows;
    element->anchorX = anchorX;
    element->anchorY = anchorY;
    element->nShiftR = shape < CV_SHAPE_ELLIPSE ? shape : CV_SHAPE_CUSTOM;
    element->values = (int*)(element + 1);

    if( shape == CV_SHAPE_CUSTOM )
    {
        if( !values )
            CV_ERROR( CV_StsNullPtr, "Null pointer to the custom element mask" );
        for( i = 0; i < size; i++ )
            element->values[i] = values[i];
    }
    else
    {
        CvMat el_hdr = cvMat( rows, cols, CV_32SC1, element->values );
        CV_CALL( CvMorphology::init_binary_element(&el_hdr,
                        shape, cvPoint(anchorX,anchorY)));
    }

    __END__;

    if( cvGetErrStatus() < 0 )
        cvReleaseStructuringElement( &element );

    return element;
}
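A minimal usage sketch for the function above (the helper name and the pre-allocated src/dst images are my assumptions): build a 5x5 elliptical kernel, erode with it, then release it.

#include <cv.h>

void erode_with_ellipse( IplImage* src, IplImage* dst )
{
    IplConvKernel* se = cvCreateStructuringElementEx( 5, 5, 2, 2, CV_SHAPE_ELLIPSE, 0 );
    if( se )
    {
        cvErode( src, dst, se, 1 );          // one erosion pass with the custom kernel
        cvReleaseStructuringElement( &se );
    }
}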
Code example #5
/* Note: this has been copied verbatim from <opencv_root>/interfaces/python/cv.cpp */
static int convert_to_IplImage(PyObject *o, IplImage **dst)
{
    iplimage_t *ipl = (iplimage_t*)o;
    void *buffer;
    Py_ssize_t buffer_len;

    if (!is_iplimage(o)) {
	return -1; //failmsg("Argument must be IplImage");
    } else if (PyString_Check(ipl->data)) {
	cvSetData(ipl->a, PyString_AsString(ipl->data) + ipl->offset, ipl->a->widthStep);
	assert(cvGetErrStatus() == 0);
	*dst = ipl->a;
	return 1;
    } else if (ipl->data && PyObject_AsWriteBuffer(ipl->data, &buffer, &buffer_len) == 0) {
	cvSetData(ipl->a, (void*)((char*)buffer + ipl->offset), ipl->a->widthStep);
	assert(cvGetErrStatus() == 0);
	*dst = ipl->a;
	return 1; 
    } else {
	return -1;// failmsg("IplImage argument has no data");
    }
}
Code example #6
PHP_OPENCV_API void php_opencv_throw_exception(TSRMLS_D)
{
	char * error_message;
	int status = cvGetErrStatus();

	if (status >= 0) {
		return;
	}

	error_message = estrdup(cvErrorStr(status));
	zend_throw_exception(opencv_ce_cvexception, error_message, status TSRMLS_CC);
	efree(error_message);
	return;
}
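For comparison, a C++ analogue of this PHP helper (a sketch I am adding, not part of the binding). Unlike the PHP version it also clears the sticky status before throwing, so later calls start from a clean state.

#include <cxcore.h>
#include <stdexcept>

void throw_on_cv_error()
{
    int status = cvGetErrStatus();
    if( status >= 0 )
        return;                      // no pending error
    cvSetErrStatus( CV_StsOk );      // clear the sticky status (extra step vs. the PHP code)
    throw std::runtime_error( cvErrorStr( status ) );
}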
Code example #7
File: Blob.cpp Project: srgblnch/ISL
void
Blob::alloc()
{
  CV_FUNCNAME( "Blob::alloc" );
  __BEGIN__;

  CV_CALL( this->storage_ = cvCreateMemStorage(0) );
  CV_CALL( this->edges_ = cvCreateSeq(CV_SEQ_ELTYPE_POINT | CV_SEQ_KIND_CURVE, sizeof(CvContour), sizeof(CvPoint) , this->storage_) );

  __END__;
  
  if( cvGetErrStatus() < 0 )
    cvReleaseMemStorage(&this->storage_);

  __ISL_CHECK_ERROR__;
}
Code example #8
File: cvtexture_demo.cpp Project: WoodMath/CVMLAB
CV_IMPL IplImage*
cvCreateGLCMImage( CvGLCM* GLCM, int step )
{
    IplImage* dest = 0;

    CV_FUNCNAME( "cvCreateGLCMImage" );

    __BEGIN__;

    int sideLoop1, sideLoop2;

    if( !GLCM )
        CV_ERROR( CV_StsNullPtr, "" );

    if( !(GLCM->matrices) )
        CV_ERROR( CV_StsNullPtr, "Matrices are not allocated" );

    if( (unsigned)step >= (unsigned)(GLCM->numMatrices) )
        CV_ERROR( CV_StsOutOfRange, "The step index is out of range" );

    dest = cvCreateImage( cvSize( GLCM->matrixSideLength, GLCM->matrixSideLength ), IPL_DEPTH_32F, 1 );

    for( sideLoop1 = 0; sideLoop1 < GLCM->matrixSideLength; sideLoop1++ )
    {
        float* destRow = (float*)(dest->imageData + sideLoop1 * dest->widthStep);

        for( sideLoop2 = 0; sideLoop2 < GLCM->matrixSideLength; sideLoop2++ )
        {
            double matrixValue = GLCM->matrices[step][sideLoop1][sideLoop2];
            destRow[sideLoop2] = (float)matrixValue;
        }
    }

    __END__;

    if( cvGetErrStatus() < 0 )
        cvReleaseImage( &dest );

    return dest;
}
Code example #9
File: cvarray.cpp Project: mikanradojevic/sdkpub
// create CvMat and underlying data
CV_IMPL CvMat*
cvCreateMat( int height, int width, int type )
{
    CvMat* arr = 0;

    CV_FUNCNAME( "cvCreateMat" );
    
    __BEGIN__;

    CV_CALL( arr = cvCreateMatHeader( height, width, type ));
    CV_CALL( cvCreateData( arr ));

    __END__;

    if( cvGetErrStatus() < 0 )
        cvReleaseMat( &arr );

    return arr;
}
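One caveat: under the default CV_ErrModeLeaf mode the legacy error handler terminates the process inside cvError, so the cvGetErrStatus() < 0 cleanup branches in these examples only ever run once the application has switched to parent mode. A sketch of that setup (my assumption about typical use; it is not shown in the sources):

#include <cxcore.h>

void demo_parent_mode()
{
    cvSetErrMode( CV_ErrModeParent );   // report errors to the caller instead of exiting
    CvMat* m = cvCreateMat( 4, 4, CV_64FC1 );

    if( cvGetErrStatus() < 0 )
        cvSetErrStatus( CV_StsOk );     // recover: clear the status and carry on
    else
        cvReleaseMat( &m );
}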
Code example #10
File: cvgraphex.cpp Project: mikanradojevic/sdkpub
CV_IMPL void
icvStartScanGraph( CvGraph* graph, CvGraphScanner* scanner,
                   CvGraphVtx* vtx, int mask )
{
    CvMemStorage* child_storage = 0;

    CV_FUNCNAME("icvStartScanGraph");

    __BEGIN__;

    if( !graph || !scanner )
        CV_ERROR_FROM_STATUS( CV_NULLPTR_ERR );

    if( !(graph->storage ))
        CV_ERROR_FROM_STATUS( CV_NULLPTR_ERR );

    memset( scanner, 0, sizeof(*scanner));

    scanner->graph = graph;
    scanner->mask = mask;
    scanner->vtx = vtx;
    scanner->index = vtx == 0 ? 0 : -1;

    CV_CALL( child_storage = cvCreateChildMemStorage( graph->storage ));

    CV_CALL( scanner->stack = cvCreateSeq( 0, sizeof(CvSet),
                       sizeof(CvGraphItem), child_storage ));

    CV_CALL( icvSeqElemsClearMask( (CvSeq*)graph,
                                   CV_FIELD_OFFSET( flags, CvGraphVtx ),
                                   CV_GRAPH_ITEM_VISITED_FLAG|
                                   CV_GRAPH_SEARCH_TREE_NODE_FLAG ));

    CV_CALL( icvSeqElemsClearMask( (CvSeq*)(graph->edges),
                                   CV_FIELD_OFFSET( flags, CvGraphEdge ),
                                   CV_GRAPH_ITEM_VISITED_FLAG ));

    __END__;

    if( cvGetErrStatus() < 0 )
        cvReleaseMemStorage( &child_storage );
}
Code example #11
File: Blob.cpp Project: srgblnch/ISL
Blob::Blob(const CvSeq* _edges)
: perimeter_(*this),
  area_(*this),
  ellipse_(*this),
  bounding_rect_(*this)
{
  CV_FUNCNAME( "Blob::Blob(const CvSeq*)" );
  __BEGIN__;

  ISL_CALL( this->alloc() );
  ISL_CALL( this->copy_edges(_edges) );

  __END__;
  
  if( cvGetErrStatus() < 0 )
    this->release();

  __ISL_CHECK_ERROR__;

}
Code example #12
File: cvarray.cpp Project: mikanradojevic/sdkpub
// Create CvMat header only
CV_IMPL CvMat*
cvCreateMatHeader( int height, int width, int type )
{
    CvMat* arr = 0;
    
    CV_FUNCNAME( "cvCreateMatHeader" );

    __BEGIN__;

    CV_CALL( arr = (CvMat*)cvAlloc( sizeof(*arr)));
    CV_CALL( cvInitMatHeader( arr, height, width, type, 0,
                              icvAlign(width*cvPixSize(type),4) ));

    __END__;

    if( cvGetErrStatus() < 0 )
        cvReleaseMatHeader( &arr );

    return arr;
}
Code example #13
File: cvarray.cpp Project: mikanradojevic/sdkpub
// create IplImage header and allocate underlying data
CV_IMPL IplImage *
cvCreateImage( CvSize size, int depth, int channels )
{
    IplImage *img = 0;

    CV_FUNCNAME( "cvCreateImage" );

    __BEGIN__;

    CV_CALL( img = cvCreateImageHeader( size, depth, channels ));
    assert( img );
    CV_CALL( cvCreateData( img ));

    __END__;

    if( cvGetErrStatus() < 0 )
        cvReleaseImage( &img );

    return img;
}
Code example #14
File: cvtexture.cpp Project: douzsh/douzsh
 CvGLCM*
cvCreateGLCM( const IplImage* srcImage,
              int stepMagnitude,
              const int* srcStepDirections,/* should be a static array;
                                          otherwise the caller must handle deallocation */
              int numStepDirections,
              int optimizationType )
{
    static const int defaultStepDirections[] = { 0,1, -1,1, -1,0, -1,-1 };

    int* memorySteps = 0;
    CvGLCM* newGLCM = 0;
    int* stepDirections = 0;

    CV_FUNCNAME( "cvCreateGLCM" );

    __BEGIN__;

    uchar* srcImageData = 0;
    CvSize srcImageSize;
    int srcImageStep;
    int stepLoop;
    const int maxNumGreyLevels8u = CV_MAX_NUM_GREY_LEVELS_8U;

    if( !srcImage )
        CV_ERROR( CV_StsNullPtr, "" );

    if( srcImage->nChannels != 1 )
        CV_ERROR( CV_BadNumChannels, "Number of channels must be 1");

    if( srcImage->depth != IPL_DEPTH_8U )
        CV_ERROR( CV_BadDepth, "Depth must be equal IPL_DEPTH_8U");

    // no Directions provided, use the default ones - 0 deg, 45, 90, 135
    if( !srcStepDirections )
    {
        srcStepDirections = defaultStepDirections;
    }

    CV_CALL( stepDirections = (int*)cvAlloc( numStepDirections*2*sizeof(stepDirections[0])));
    memcpy( stepDirections, srcStepDirections, numStepDirections*2*sizeof(stepDirections[0]));

    cvGetRawData( srcImage, &srcImageData, &srcImageStep, &srcImageSize );

    // combine directions and magnitudes with knowledge of the image step
    CV_CALL( memorySteps = (int*)cvAlloc( numStepDirections*sizeof(memorySteps[0])));

    for( stepLoop = 0; stepLoop < numStepDirections; stepLoop++ )
    {
        stepDirections[stepLoop*2 + 0] *= stepMagnitude;
        stepDirections[stepLoop*2 + 1] *= stepMagnitude;

        memorySteps[stepLoop] = stepDirections[stepLoop*2 + 0]*srcImageStep +
                                stepDirections[stepLoop*2 + 1];
    }

    CV_CALL( newGLCM = (CvGLCM*)cvAlloc(sizeof(newGLCM[0])));
    memset( newGLCM, 0, sizeof(newGLCM[0]) );

    newGLCM->matrices = 0;
    newGLCM->numMatrices = numStepDirections;
    newGLCM->optimizationType = optimizationType;

    if( optimizationType <= CV_GLCM_OPTIMIZATION_LUT )
    {
        int lookupTableLoop, imageColLoop, imageRowLoop, lineOffset = 0;

        // if optimization type is set to lut, then make one for the image
        if( optimizationType == CV_GLCM_OPTIMIZATION_LUT )
        {
            for( imageRowLoop = 0; imageRowLoop < srcImageSize.height;
                                   imageRowLoop++, lineOffset += srcImageStep )
            {
                for( imageColLoop = 0; imageColLoop < srcImageSize.width; imageColLoop++ )
                {
                    newGLCM->forwardLookupTable[srcImageData[lineOffset+imageColLoop]]=1;
                }
            }

            newGLCM->numLookupTableElements = 0;

            for( lookupTableLoop = 0; lookupTableLoop < maxNumGreyLevels8u; lookupTableLoop++ )
            {
                if( newGLCM->forwardLookupTable[ lookupTableLoop ] != 0 )
                {
                    newGLCM->forwardLookupTable[ lookupTableLoop ] =
                        newGLCM->numLookupTableElements;
                    newGLCM->reverseLookupTable[ newGLCM->numLookupTableElements ] =
                        lookupTableLoop;

                    newGLCM->numLookupTableElements++;
                }
            }
        }
        // otherwise make a "LUT" which contains all the gray-levels (for code-reuse)
        else if( optimizationType == CV_GLCM_OPTIMIZATION_NONE )
        {
            for( lookupTableLoop = 0; lookupTableLoop <maxNumGreyLevels8u; lookupTableLoop++ )
            {
                newGLCM->forwardLookupTable[ lookupTableLoop ] = lookupTableLoop;
                newGLCM->reverseLookupTable[ lookupTableLoop ] = lookupTableLoop;
            }
            newGLCM->numLookupTableElements = maxNumGreyLevels8u;
        }

        newGLCM->matrixSideLength = newGLCM->numLookupTableElements;
        icvCreateGLCM_LookupTable_8u_C1R( srcImageData, srcImageStep, srcImageSize,
                                          newGLCM, stepDirections,
                                          numStepDirections, memorySteps );
    }
    else if( optimizationType == CV_GLCM_OPTIMIZATION_HISTOGRAM )
    {
        CV_ERROR( CV_StsBadFlag, "Histogram-based method is not implemented" );

    /*  newGLCM->numMatrices *= 2;
        newGLCM->matrixSideLength = maxNumGreyLevels8u*2;

        icvCreateGLCM_Histogram_8uC1R( srcImageStep, srcImageSize, srcImageData,
                                       newGLCM, numStepDirections,
                                       stepDirections, memorySteps );
    */
    }

    __END__;

    cvFree( &memorySteps );
    cvFree( &stepDirections );

    if( cvGetErrStatus() < 0 )
    {
        cvFree( &newGLCM );
    }

    return newGLCM;
}
Code example #15
/*
   Initializes the scanner structure.
   Prepares the image for scanning (clears borders and converts all pixels to 0-1).
*/
CV_IMPL CvContourScanner
cvStartFindContours( void* _img, CvMemStorage* storage,
                     int  header_size, int mode, 
                     int  method, CvPoint offset )
{
    int y;
    int step;
    CvSize size;
    uchar *img = 0;
    CvContourScanner scanner = 0;
    CvMat stub, *mat = (CvMat*)_img;

    CV_FUNCNAME( "cvStartFindContours" );

    __BEGIN__;

    if( !storage )
        CV_ERROR( CV_StsNullPtr, "" );

    CV_CALL( mat = cvGetMat( mat, &stub ));

    if( !CV_IS_MASK_ARR( mat ))
        CV_ERROR( CV_StsUnsupportedFormat, "[Start]FindContours supports only 8uC1 images" );

    size = cvSize( mat->width, mat->height );
    step = mat->step;
    img = (uchar*)(mat->data.ptr);

    if( method < 0 || method > CV_CHAIN_APPROX_TC89_KCOS )
        CV_ERROR_FROM_STATUS( CV_BADRANGE_ERR );

    if( header_size < (int) (method == CV_CHAIN_CODE ? sizeof( CvChain ) : sizeof( CvContour )))
        CV_ERROR_FROM_STATUS( CV_BADSIZE_ERR );

    scanner = (CvContourScanner)cvAlloc( sizeof( *scanner ));
    if( !scanner )
        CV_ERROR_FROM_STATUS( CV_OUTOFMEM_ERR );

    memset( scanner, 0, sizeof( *scanner ));

    scanner->storage1 = scanner->storage2 = storage;
    scanner->img0 = (char *) img;
    scanner->img = (char *) (img + step);
    scanner->img_step = step;
    scanner->img_size.width = size.width - 1;   /* exclude rightmost column */
    scanner->img_size.height = size.height - 1; /* exclude bottommost row */
    scanner->mode = mode;
    scanner->offset = offset;
    scanner->pt.x = scanner->pt.y = 1;
    scanner->lnbd.x = 0;
    scanner->lnbd.y = 1;
    scanner->nbd = 2;
    scanner->frame_info.contour = &(scanner->frame);
    scanner->frame_info.is_hole = 1;
    scanner->frame_info.next = 0;
    scanner->frame_info.parent = 0;
    scanner->frame_info.rect = cvRect( 0, 0, size.width, size.height );
    scanner->l_cinfo = 0;
    scanner->subst_flag = 0;

    scanner->frame.flags = CV_SEQ_FLAG_HOLE;

    scanner->approx_method2 = scanner->approx_method1 = method;

    if( method == CV_CHAIN_APPROX_TC89_L1 || method == CV_CHAIN_APPROX_TC89_KCOS )
        scanner->approx_method1 = CV_CHAIN_CODE;

    if( scanner->approx_method1 == CV_CHAIN_CODE )
    {
        scanner->seq_type1 = CV_SEQ_CHAIN_CONTOUR;
        scanner->header_size1 = scanner->approx_method1 == scanner->approx_method2 ?
            header_size : sizeof( CvChain );
        scanner->elem_size1 = sizeof( char );
    }
    else
    {
        scanner->seq_type1 = CV_SEQ_POLYGON;
        scanner->header_size1 = scanner->approx_method1 == scanner->approx_method2 ?
            header_size : sizeof( CvContour );
        scanner->elem_size1 = sizeof( CvPoint );
    }

    scanner->header_size2 = header_size;

    if( scanner->approx_method2 == CV_CHAIN_CODE )
    {
        scanner->seq_type2 = scanner->seq_type1;
        scanner->elem_size2 = scanner->elem_size1;
    }
    else
    {
        scanner->seq_type2 = CV_SEQ_POLYGON;
        scanner->elem_size2 = sizeof( CvPoint );
    }

    scanner->seq_type1 = scanner->approx_method1 == CV_CHAIN_CODE ?
        CV_SEQ_CHAIN_CONTOUR : CV_SEQ_POLYGON;

    scanner->seq_type2 = scanner->approx_method2 == CV_CHAIN_CODE ?
        CV_SEQ_CHAIN_CONTOUR : CV_SEQ_POLYGON;

    cvSaveMemStoragePos( storage, &(scanner->initial_pos) );

    if( method > CV_CHAIN_APPROX_SIMPLE )
    {
        scanner->storage1 = cvCreateChildMemStorage( scanner->storage2 );
    }

    if( mode > CV_RETR_LIST )
    {
        scanner->cinfo_storage = cvCreateChildMemStorage( scanner->storage2 );
        scanner->cinfo_set = cvCreateSet( 0, sizeof( CvSet ), sizeof( _CvContourInfo ),
                                          scanner->cinfo_storage );
        if( scanner->cinfo_storage == 0 || scanner->cinfo_set == 0 )
            CV_ERROR_FROM_STATUS( CV_OUTOFMEM_ERR );
    }

    /* make zero borders */
    memset( img, 0, size.width );
    memset( img + step * (size.height - 1), 0, size.width );

    for( y = 1, img += step; y < size.height - 1; y++, img += step )
    {
        img[0] = img[size.width - 1] = 0;
    }

    /* converts all pixels to 0 or 1 */
    cvThreshold( mat, mat, 0, 1, CV_THRESH_BINARY );
    CV_CHECK();

    __END__;

    if( cvGetErrStatus() < 0 )
        cvFree( &scanner );

    return scanner;
}
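cvStartFindContours is normally paired with cvFindNextContour and cvEndFindContours. A sketch of that loop (assumed caller; binary_img is an 8uC1 image, and note that the scan modifies it in place):

#include <cv.h>

void scan_contours( IplImage* binary_img )
{
    CvMemStorage* storage = cvCreateMemStorage( 0 );
    CvContourScanner scanner = cvStartFindContours( binary_img, storage,
                                                    sizeof( CvContour ), CV_RETR_LIST,
                                                    CV_CHAIN_APPROX_SIMPLE, cvPoint( 0, 0 ) );
    CvSeq* contour;
    while( (contour = cvFindNextContour( scanner )) != 0 )
    {
        // ... inspect or replace 'contour' here ...
    }
    cvEndFindContours( &scanner );    // storage still owns the retrieved contours
    cvReleaseMemStorage( &storage );
}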
Code example #16
File: cvtexture.cpp Project: douzsh/douzsh
static void
icvCreateGLCM_LookupTable_8u_C1R( const uchar* srcImageData,
                                  int srcImageStep,
                                  CvSize srcImageSize,
                                  CvGLCM* destGLCM,
                                  int* steps,
                                  int numSteps,
                                  int* memorySteps )
{
    int* stepIncrementsCounter = 0;

    CV_FUNCNAME( "icvCreateGLCM_LookupTable_8u_C1R" );

    __BEGIN__;

    int matrixSideLength = destGLCM->matrixSideLength;
    int stepLoop, sideLoop1, sideLoop2;
    int colLoop, rowLoop, lineOffset = 0;
    double*** matrices = 0;

    // allocate memory to the matrices
    CV_CALL( destGLCM->matrices = (double***)cvAlloc( sizeof(matrices[0])*numSteps ));
    matrices = destGLCM->matrices;

    for( stepLoop=0; stepLoop<numSteps; stepLoop++ )
    {
        CV_CALL( matrices[stepLoop] = (double**)cvAlloc( sizeof(matrices[0][0])*matrixSideLength ));
        CV_CALL( matrices[stepLoop][0] = (double*)cvAlloc( sizeof(matrices[0][0][0])*
                                                  matrixSideLength*matrixSideLength ));

        memset( matrices[stepLoop][0], 0, matrixSideLength*matrixSideLength*
                                          sizeof(matrices[0][0][0]) );

        for( sideLoop1 = 1; sideLoop1 < matrixSideLength; sideLoop1++ )
        {
            matrices[stepLoop][sideLoop1] = matrices[stepLoop][sideLoop1-1] + matrixSideLength;
        }
    }

    CV_CALL( stepIncrementsCounter = (int*)cvAlloc( numSteps*sizeof(stepIncrementsCounter[0])));
    memset( stepIncrementsCounter, 0, numSteps*sizeof(stepIncrementsCounter[0]) );

    // generate GLCM for each step
    for( rowLoop=0; rowLoop<srcImageSize.height; rowLoop++, lineOffset+=srcImageStep )
    {
        for( colLoop=0; colLoop<srcImageSize.width; colLoop++ )
        {
            int pixelValue1 = destGLCM->forwardLookupTable[srcImageData[lineOffset + colLoop]];

            for( stepLoop=0; stepLoop<numSteps; stepLoop++ )
            {
                int col2, row2;
                row2 = rowLoop + steps[stepLoop*2 + 0];
                col2 = colLoop + steps[stepLoop*2 + 1];

                if( col2>=0 && row2>=0 && col2<srcImageSize.width && row2<srcImageSize.height )
                {
                    int memoryStep = memorySteps[ stepLoop ];
                    int pixelValue2 = destGLCM->forwardLookupTable[ srcImageData[ lineOffset + colLoop + memoryStep ] ];

                    // maintain symmetry
                    matrices[stepLoop][pixelValue1][pixelValue2] ++;
                    matrices[stepLoop][pixelValue2][pixelValue1] ++;

                    // increment the counter of the total number of increments
                    stepIncrementsCounter[stepLoop] += 2;
                }
            }
        }
    }

    // normalize matrices. each element is a probability of gray value i,j adjacency in direction/magnitude k
    for( sideLoop1=0; sideLoop1<matrixSideLength; sideLoop1++ )
    {
        for( sideLoop2=0; sideLoop2<matrixSideLength; sideLoop2++ )
        {
            for( stepLoop=0; stepLoop<numSteps; stepLoop++ )
            {
                matrices[stepLoop][sideLoop1][sideLoop2] /= double(stepIncrementsCounter[stepLoop]);
            }
        }
    }

    destGLCM->matrices = matrices;

    __END__;

    cvFree( &stepIncrementsCounter );

    if( cvGetErrStatus() < 0 )
        cvReleaseGLCM( &destGLCM, CV_GLCM_GLCM );
}
Code example #17
File: cvtexture.cpp Project: douzsh/douzsh
 void
cvCreateGLCMDescriptors( CvGLCM* destGLCM, int descriptorOptimizationType )
{
    CV_FUNCNAME( "cvCreateGLCMDescriptors" );

    __BEGIN__;

    int matrixLoop;

    if( !destGLCM )
        CV_ERROR( CV_StsNullPtr, "" );

    if( !(destGLCM->matrices) )
        CV_ERROR( CV_StsNullPtr, "Matrices are not allocated" );

    CV_CALL( cvReleaseGLCM( &destGLCM, CV_GLCM_DESC ));

    if( destGLCM->optimizationType != CV_GLCM_OPTIMIZATION_HISTOGRAM )
    {
        destGLCM->descriptorOptimizationType = destGLCM->numDescriptors = descriptorOptimizationType;
    }
    else
    {
        CV_ERROR( CV_StsBadFlag, "Histogram-based method is not implemented" );
//      destGLCM->descriptorOptimizationType = destGLCM->numDescriptors = CV_GLCMDESC_OPTIMIZATION_HISTOGRAM;
    }

    CV_CALL( destGLCM->descriptors = (double**)
            cvAlloc( destGLCM->numMatrices*sizeof(destGLCM->descriptors[0])));

    for( matrixLoop = 0; matrixLoop < destGLCM->numMatrices; matrixLoop ++ )
    {
        CV_CALL( destGLCM->descriptors[ matrixLoop ] =
                (double*)cvAlloc( destGLCM->numDescriptors*sizeof(destGLCM->descriptors[0][0])));
        memset( destGLCM->descriptors[matrixLoop], 0, destGLCM->numDescriptors*sizeof(destGLCM->descriptors[0][0]) );

        switch( destGLCM->descriptorOptimizationType )
        {
            case CV_GLCMDESC_OPTIMIZATION_ALLOWDOUBLENEST:
                icvCreateGLCMDescriptors_AllowDoubleNest( destGLCM, matrixLoop );
                break;
            default:
                CV_ERROR( CV_StsBadFlag,
                "descriptorOptimizationType different from CV_GLCMDESC_OPTIMIZATION_ALLOWDOUBLENEST\n"
                "is not supported" );
            /*
            case CV_GLCMDESC_OPTIMIZATION_ALLOWTRIPLENEST:
                icvCreateGLCMDescriptors_AllowTripleNest( destGLCM, matrixLoop );
                break;
            case CV_GLCMDESC_OPTIMIZATION_HISTOGRAM:
                if(matrixLoop < destGLCM->numMatrices>>1)
                    icvCreateGLCMDescriptors_Histogram( destGLCM, matrixLoop);
                    break;
            */
        }
    }

    __END__;

    if( cvGetErrStatus() < 0 )
        cvReleaseGLCM( &destGLCM, CV_GLCM_DESC );
}
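Putting examples #8, #14, #16 and #17 together, the presumed end-to-end GLCM usage looks like this (a sketch against the legacy cvaux API; four default step directions with magnitude 1 are assumed):

#include <cvaux.h>

double glcm_entropy( IplImage* gray8u )
{
    // 0 for the directions selects the built-in 0/45/90/135 degree steps
    CvGLCM* glcm = cvCreateGLCM( gray8u, 1, 0, 4, CV_GLCM_OPTIMIZATION_LUT );
    cvCreateGLCMDescriptors( glcm, CV_GLCMDESC_OPTIMIZATION_ALLOWDOUBLENEST );
    double entropy = cvGetGLCMDescriptor( glcm, 0, CV_GLCMDESC_ENTROPY );
    cvReleaseGLCM( &glcm, CV_GLCM_ALL );  // frees both matrices and descriptors
    return entropy;
}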
Code example #18
static void*
icvLoadImage( const char* filename, int flags, bool load_as_matrix )
{
    GrFmtReader* reader = 0;
    IplImage* image = 0;
    CvMat hdr, *matrix = 0;
    int depth = 8;

    CV_FUNCNAME( "cvLoadImage" );

    __BEGIN__;

    CvSize size;
    int iscolor;
    int cn;

    if( !filename || strlen(filename) == 0 )
        CV_ERROR( CV_StsNullPtr, "null filename" );

    reader = g_Filters.FindReader( filename );
    if( !reader )
        EXIT;

    if( !reader->ReadHeader() )
        EXIT;

    size.width = reader->GetWidth();
    size.height = reader->GetHeight();

    if( flags == -1 )
        iscolor = reader->IsColor();
    else
    {
        if( (flags & CV_LOAD_IMAGE_COLOR) != 0 ||
           ((flags & CV_LOAD_IMAGE_ANYCOLOR) != 0 && reader->IsColor()) )
            iscolor = 1;
        else
            iscolor = 0;

        if( (flags & CV_LOAD_IMAGE_ANYDEPTH) != 0 )
        {
            reader->UseNativeDepth(true);
            depth = reader->GetDepth();
        }
    }

    cn = iscolor ? 3 : 1;

    if( load_as_matrix )
    {
        int type;
        if(reader->IsFloat() && depth != 8)
            type = CV_32F;
        else
            type = ( depth <= 8 ) ? CV_8U : ( depth <= 16 ) ? CV_16U : CV_32S;
        CV_CALL( matrix = cvCreateMat( size.height, size.width, CV_MAKETYPE(type, cn) ));
    }
    else
    {
        int type;
        if(reader->IsFloat() && depth != 8)
            type = IPL_DEPTH_32F;
        else
            type = ( depth <= 8 ) ? IPL_DEPTH_8U : ( depth <= 16 ) ? IPL_DEPTH_16U : IPL_DEPTH_32S;
        CV_CALL( image = cvCreateImage( size, type, cn ));
        matrix = cvGetMat( image, &hdr );
    }

    if( !reader->ReadData( matrix->data.ptr, matrix->step, iscolor ))
    {
        if( load_as_matrix )
            cvReleaseMat( &matrix );
        else
            cvReleaseImage( &image );
        EXIT;
    }

    __END__;

    delete reader;

    if( cvGetErrStatus() < 0 )
    {
        if( load_as_matrix )
            cvReleaseMat( &matrix );
        else
            cvReleaseImage( &image );
    }

    return load_as_matrix ? (void*)matrix : (void*)image;
}
Code example #19
CV_IMPL int
cvSaveImage( const char* filename, const CvArr* arr )
{
    int origin = 0;
    GrFmtWriter* writer = 0;
    CvMat *temp = 0, *temp2 = 0;

    CV_FUNCNAME( "cvSaveImage" );

    __BEGIN__;

    CvMat stub, *image;
    int channels, ipl_depth;

    if( !filename || strlen(filename) == 0 )
        CV_ERROR( CV_StsNullPtr, "null filename" );

    CV_CALL( image = cvGetMat( arr, &stub ));

    if( CV_IS_IMAGE( arr ))
        origin = ((IplImage*)arr)->origin;

    channels = CV_MAT_CN( image->type );
    if( channels != 1 && channels != 3 && channels != 4 )
        CV_ERROR( CV_BadNumChannels, "" );

    writer = g_Filters.FindWriter( filename );
    if( !writer )
        CV_ERROR( CV_StsError, "could not find a filter for the specified extension" );

    if( origin )
    {
        CV_CALL( temp = cvCreateMat(image->rows, image->cols, image->type) );
        CV_CALL( cvFlip( image, temp, 0 ));
        image = temp;
    }

    ipl_depth = cvCvToIplDepth(image->type);

    if( !writer->IsFormatSupported(ipl_depth) )
    {
        assert( writer->IsFormatSupported(IPL_DEPTH_8U) );
        CV_CALL( temp2 = cvCreateMat(image->rows,
            image->cols, CV_MAKETYPE(CV_8U,channels)) );
        CV_CALL( cvConvertImage( image, temp2 ));
        image = temp2;
        ipl_depth = IPL_DEPTH_8U;
    }

    if( !writer->WriteImage( image->data.ptr, image->step, image->width,
                             image->height, ipl_depth, channels ))
        CV_ERROR( CV_StsError, "could not save the image" );

    __END__;

    delete writer;
    cvReleaseMat( &temp );
    cvReleaseMat( &temp2 );

    return cvGetErrStatus() >= 0;
}
Code example #20
File: bgfg_gaussmix.cpp Project: 93sam/opencv
CV_IMPL CvBGStatModel*
cvCreateGaussianBGModel2( IplImage* first_frame, CvGaussBGStatModel2Params* parameters )
{
    CvGaussBGModel2* bg_model = 0;
    int w,h;

    CV_FUNCNAME( "cvCreateGaussianBGModel2" );

    __BEGIN__;

    CvGaussBGStatModel2Params params;

    if( !CV_IS_IMAGE(first_frame) )
        CV_ERROR( CV_StsBadArg, "Invalid or NULL first_frame parameter" );

    if( first_frame->nChannels>CV_BGFG_MOG2_NDMAX )
        CV_ERROR( CV_StsBadArg, "Maximum number of channels in the image is exceeded (change CV_BGFG_MOG2_MAXBANDS constant)!" );


    CV_CALL( bg_model = (CvGaussBGModel2*)cvAlloc( sizeof(*bg_model) ));
    memset( bg_model, 0, sizeof(*bg_model) );
    bg_model->type    = CV_BG_MODEL_MOG2;
    bg_model->release = (CvReleaseBGStatModel) icvReleaseGaussianBGModel2;
    bg_model->update  = (CvUpdateBGStatModel)  icvUpdateGaussianBGModel2;

    //init parameters
    if( parameters == NULL )
    {
        memset(&params, 0, sizeof(params));

        // These constants are defined in cvaux/include/cvaux.h
        params.bShadowDetection = 1;
        params.bPostFiltering=0;
        params.minArea=CV_BGFG_MOG2_MINAREA;

        //set parameters
        // K - max number of Gaussians per pixel
        params.nM = CV_BGFG_MOG2_NGAUSSIANS;//4;
        // Tb - the threshold - n var
        //pGMM->fTb = 4*4;
        params.fTb = CV_BGFG_MOG2_STD_THRESHOLD*CV_BGFG_MOG2_STD_THRESHOLD;
        // Tbf - the threshold
        //pGMM->fTB = 0.9f;//1-cf from the paper
        params.fTB = CV_BGFG_MOG2_BACKGROUND_THRESHOLD;
        // Tgenerate - the threshold
        params.fTg = CV_BGFG_MOG2_STD_THRESHOLD_GENERATE*CV_BGFG_MOG2_STD_THRESHOLD_GENERATE;//update the mode or generate new
        //pGMM->fSigma= 11.0f;//sigma for the new mode
        params.fVarInit = CV_BGFG_MOG2_VAR_INIT;
        params.fVarMax = CV_BGFG_MOG2_VAR_MAX;
        params.fVarMin = CV_BGFG_MOG2_VAR_MIN;
        // alpha - the learning factor
        params.fAlphaT = 1.0f/CV_BGFG_MOG2_WINDOW_SIZE;//0.003f;
        // complexity reduction prior constant
        params.fCT = CV_BGFG_MOG2_CT;//0.05f;

        //shadow
        // Shadow detection
        params.nShadowDetection = (unsigned char)CV_BGFG_MOG2_SHADOW_VALUE;//value 0 to turn off
        params.fTau = CV_BGFG_MOG2_SHADOW_TAU;//0.5f;// Tau - shadow threshold
    }
    else
    {
        params = *parameters;
    }

    bg_model->params = params;

    //image data
    w = first_frame->width;
    h = first_frame->height;

    bg_model->params.nWidth = w;
    bg_model->params.nHeight = h;

    bg_model->params.nND = first_frame->nChannels;


    //allocate GMM data

    //GMM for each pixel
    bg_model->data.rGMM = (CvPBGMMGaussian*) malloc(w*h * params.nM * sizeof(CvPBGMMGaussian));
    //used modes per pixel
    bg_model->data.rnUsedModes = (unsigned char* ) malloc(w*h);
    memset(bg_model->data.rnUsedModes,0,w*h);//no modes used

    //prepare storages
    CV_CALL( bg_model->background = cvCreateImage(cvSize(w,h), IPL_DEPTH_8U, first_frame->nChannels));
    CV_CALL( bg_model->foreground = cvCreateImage(cvSize(w,h), IPL_DEPTH_8U, 1));

    //for eventual filtering
    CV_CALL( bg_model->storage = cvCreateMemStorage());

    bg_model->countFrames = 0;

    __END__;

    if( cvGetErrStatus() < 0 )
    {
        CvBGStatModel* base_ptr = (CvBGStatModel*)bg_model;

        if( bg_model && bg_model->release )
            bg_model->release( &base_ptr );
        else
            cvFree( &bg_model );
        bg_model = 0;
    }

    return (CvBGStatModel*)bg_model;
}
Code example #21
CV_IMPL CvBGStatModel*
cvCreateGaussianBGModel( IplImage* first_frame, CvGaussBGStatModelParams* parameters )
{
    CvGaussBGModel* bg_model = 0;
    
    CV_FUNCNAME( "cvCreateGaussianBGModel" );
    
    __BEGIN__;
    
    double var_init;
    CvGaussBGStatModelParams params;
    int i, j, k, m, n;
    
    //init parameters
    if( parameters == NULL )
      {                        /* These constants are defined in cvaux/include/cvaux.h: */
        params.win_size      = CV_BGFG_MOG_WINDOW_SIZE;
        params.bg_threshold  = CV_BGFG_MOG_BACKGROUND_THRESHOLD;

        params.std_threshold = CV_BGFG_MOG_STD_THRESHOLD;
        params.weight_init   = CV_BGFG_MOG_WEIGHT_INIT;

        params.variance_init = CV_BGFG_MOG_SIGMA_INIT*CV_BGFG_MOG_SIGMA_INIT;
        params.minArea       = CV_BGFG_MOG_MINAREA;
        params.n_gauss       = CV_BGFG_MOG_NGAUSSIANS;
    }
    else
    {
        params = *parameters;
    }
    
    if( !CV_IS_IMAGE(first_frame) )
        CV_ERROR( CV_StsBadArg, "Invalid or NULL first_frame parameter" );
    
    CV_CALL( bg_model = (CvGaussBGModel*)cvAlloc( sizeof(*bg_model) ));
    memset( bg_model, 0, sizeof(*bg_model) );
    bg_model->type = CV_BG_MODEL_MOG;
    bg_model->release = (CvReleaseBGStatModel)icvReleaseGaussianBGModel;
    bg_model->update = (CvUpdateBGStatModel)icvUpdateGaussianBGModel;
    
    bg_model->params = params;
    
    //prepare storages
    CV_CALL( bg_model->g_point = (CvGaussBGPoint*)cvAlloc(sizeof(CvGaussBGPoint)*
        ((first_frame->width*first_frame->height) + 256)));
    
    CV_CALL( bg_model->background = cvCreateImage(cvSize(first_frame->width,
        first_frame->height), IPL_DEPTH_8U, first_frame->nChannels));
    CV_CALL( bg_model->foreground = cvCreateImage(cvSize(first_frame->width,
        first_frame->height), IPL_DEPTH_8U, 1));
    
    CV_CALL( bg_model->storage = cvCreateMemStorage());
    
    //initializing
    var_init = 2 * params.std_threshold * params.std_threshold;
    CV_CALL( bg_model->g_point[0].g_values =
        (CvGaussBGValues*)cvAlloc( sizeof(CvGaussBGValues)*params.n_gauss*
        (first_frame->width*first_frame->height + 128)));
    
    for( i = 0, n = 0; i < first_frame->height; i++ )
    {
        for( j = 0; j < first_frame->width; j++, n++ )
        {
            const int p = i*first_frame->widthStep+j*first_frame->nChannels;

            bg_model->g_point[n].g_values =
                bg_model->g_point[0].g_values + n*params.n_gauss;
            bg_model->g_point[n].g_values[0].weight = 1;    //the first value seen has weight one
            bg_model->g_point[n].g_values[0].match_sum = 1;
            for( m = 0; m < first_frame->nChannels; m++)
            {
                bg_model->g_point[n].g_values[0].variance[m] = var_init;
                bg_model->g_point[n].g_values[0].mean[m] = (unsigned char)first_frame->imageData[p + m];
            }
            for( k = 1; k < params.n_gauss; k++)
            {
                bg_model->g_point[n].g_values[k].weight = 0;
                bg_model->g_point[n].g_values[k].match_sum = 0;
                for( m = 0; m < first_frame->nChannels; m++){
                    bg_model->g_point[n].g_values[k].variance[m] = var_init;
                    bg_model->g_point[n].g_values[k].mean[m] = 0;
                }
            }
        }
    }
    
    bg_model->countFrames = 0;
    
    __END__;
    
    if( cvGetErrStatus() < 0 )
    {
        CvBGStatModel* base_ptr = (CvBGStatModel*)bg_model;
        
        if( bg_model && bg_model->release )
            bg_model->release( &base_ptr );
        else
            cvFree( &bg_model );
        bg_model = 0;
    }
    
    return (CvBGStatModel*)bg_model;
}
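The returned model is driven through its stored update/release function pointers, which cvaux wraps as cvUpdateBGStatModel and cvReleaseBGStatModel. A sketch of the presumed frame loop (camera capture via highgui is my assumption):

#include <cvaux.h>
#include <highgui.h>

void run_bg_model( CvCapture* cap )
{
    IplImage* frame = cvQueryFrame( cap );                    // first frame seeds the model
    CvBGStatModel* bg = cvCreateGaussianBGModel( frame, 0 );

    while( (frame = cvQueryFrame( cap )) != 0 )
    {
        cvUpdateBGStatModel( frame, bg );
        // bg->foreground now holds the 8u mask, bg->background the model image
    }
    cvReleaseBGStatModel( &bg );
}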
Code example #22
int testfaceLib_pThread ( const char* str_video, int trackerType, int multiviewType, int recognizerType, const char* str_facesetxml, int threads, 
						 bool blink, bool smile, bool gender, bool age, bool recog, bool quiet, bool saveface, const char* sfolder, bool bEnableAutoCluster )
{
    FILE* fp_imaginfo = fopen( "imaginfo.txt", "w" );

	bool bAutoFocus = false;
	IplImage *imgAutoFocus = NULL;

	int  sampleRate =1;
	
	if(str_facesetxml == NULL)
		str_facesetxml = "faceset_model.xml";

	int  prob_estimate[7];
	char sState[256];
	EnumViewAngle  viewAngle = (EnumViewAngle)multiviewType;
	//dynamic clustering for smooth ID registration
	//bEnableAutoCluster =  true;

	CxlibFaceAnalyzer faceAnalyzer(viewAngle, (EnumTrackerType)trackerType, blink, smile, gender, age, recog, sampleRate, str_facesetxml, recognizerType, bEnableAutoCluster); 

	/////////////////////////////////////////////////////////////////////////////////////
	//	init GUI window
	const char* str_title = "Face Tester";
	if( ! quiet )
		cvNamedWindow( str_title, CV_WINDOW_AUTOSIZE );

	char sCaptionInfo[256] = "";
	CvFont *pFont = new CvFont;
	cvInitFont(pFont, CV_FONT_HERSHEY_PLAIN, 0.85, 0.85, 0, 1);

	// load GUI smile icon images
	IplImage *pImgSmileBGR;
	IplImage *pImgSmileMask;
	if(age == 0)
	{   // smile icon
		pImgSmileBGR  = cvLoadImage( "smile.bmp" );
		pImgSmileMask = cvLoadImage( "smilemask.bmp", 0 );
	}
	else
	{   // gender/age/smile icons
		pImgSmileBGR  = cvLoadImage( "faceicon.bmp" );
		pImgSmileMask = cvLoadImage( "faceiconMask.bmp", 0 );
	}

	IplImage *pImgSmileBGRA = cvCreateImage( cvSize(pImgSmileBGR->width, pImgSmileBGR->height), IPL_DEPTH_8U, 4 );
	cvCvtColor(pImgSmileBGR, pImgSmileBGRA, CV_BGR2BGRA );

	// open video source
    size_t len = strlen( str_video );
    bool is_piclist = (0 == stricmp( str_video + len - 4, ".txt" ));
    CxImageSeqReader* vidcap = NULL;
    if( is_piclist )
        vidcap = new CxPicListReader( str_video );
    else
        vidcap = new CxVideoReader( str_video );
	if( cvGetErrStatus() < 0 )
	{   
		cvSetErrStatus( CV_StsOk );
		return -1;
	}

	// when using camera, set to 640x480, 30fps
	if( isdigit(str_video[0]) != 0 && str_video[1] == '\0' )
	{
		vidcap->width( 640 );
		vidcap->height( 480 );
		vidcap->fps( 30 );
	}

	// print beginning info
	printf( "tracker cascade:  '%s'\n", trackerType== TRA_HAAR ? "haar" : (recognizerType== TRA_SURF ? "surf" : "pf tracker SURF"));
	printf( "face recognizer:  '%s'\n", recognizerType == RECOGNIZER_BOOST_GB240 ? "boost gabor240" : "cascade gloh"  );
	printf( "video:    '%s', %dx%d, %2.1f fps\n", str_video, 
		vidcap->width(), vidcap->height(), vidcap->fps() );

	// set mouse event process
	CxMouseParam mouse_faceparam;
	mouse_faceparam.updated = false;
	mouse_faceparam.play    = true;
	mouse_faceparam.ret_online_collecting = 0;

	static const int MAX_FACES = 16; 
	if(! quiet)
	{
		mouse_faceparam.play    = true;
		mouse_faceparam.updated = false;
		mouse_faceparam.face_num  = faceAnalyzer.getMaxFaceNum();
		mouse_faceparam.rects     = faceAnalyzer.getFaceRects();
		mouse_faceparam.image     = NULL;
		mouse_faceparam.cut_big_face= faceAnalyzer.getBigCutFace();
		mouse_faceparam.typeRecognizer = 0;
		mouse_faceparam.faceRecognizer = &faceAnalyzer;
		mouse_faceparam.ret_online_collecting = 0;
		cvSetMouseCallback(	str_title, my_mouse_callback, (void*)&mouse_faceparam );
		faceAnalyzer.setMouseParam(&mouse_faceparam);
	}

	// init count ticks                   
	int64  ticks, start_ticks, total_ticks;
	int64  tracker_total_ticks;
	double tracker_fps, total_fps; 

	start_ticks         = total_ticks  = 0;
	tracker_total_ticks = 0;
		
	// loop for each frame of a video/camera
	int frames = 0;
	IplImage *pImg = NULL;

	while( ! vidcap->eof() )
	{   
		// capture a video frame
		if( mouse_faceparam.play == true)
			pImg = vidcap->query();
		else 
			continue;

		if ( pImg == NULL )
			break;

		// make a copy, flip if upside-down
		CvImage image( cvGetSize(pImg), pImg->depth, pImg->nChannels );
		if( pImg->origin == IPL_ORIGIN_BL ) //flip live camera's frame
			cvFlip( pImg, image );
		else
			cvCopy( pImg, image );

		// convert to gray_image for face analysis
		CvImage gray_image( image.size(), image.depth(), 1 );
		if( image.channels() == 3 )
			cvCvtColor( image, gray_image, CV_BGR2GRAY );
		else
			cvCopy( image, gray_image );

		///////////////////////////////////////////////////////////////////
		// do face tracking and face recognition
		start_ticks = ticks = cvGetTickCount();	

        if( is_piclist )
            faceAnalyzer.detect(gray_image, prob_estimate, sState);
        else
		    faceAnalyzer.track(gray_image, prob_estimate, sState, image);   // track face in each frame but recognize by pthread
		//faceAnalyzer.detect(gray_image, prob_estimate, sState);// track and recognizer face in each frame 

		int face_num = faceAnalyzer.getFaceNum();

		ticks       = cvGetTickCount() - ticks;
		tracker_fps = 1000.0 / ( 1e-3 * ticks / cvGetTickFrequency() );
		tracker_total_ticks += ticks;

		
		//set param for mouse event processing
		if(!quiet)
		{
			mouse_faceparam.face_num = face_num;
			mouse_faceparam.image    = image;
		}

        if( fp_imaginfo != NULL )
            fprintf( fp_imaginfo, "%s  %d", vidcap->filename(), face_num );

		// blink/smile/gender/age/face recognize section
		for( int i=0; i<face_num; i++ )
		{
			// get face rect and id from face tracker
			CvRectItem rectItem = faceAnalyzer.getFaceRect(i);
			CvRect rect = rectItem.rc;
			int    face_trackid = rectItem.fid;
			float  probSmile = faceAnalyzer.getFaceSmileProb(i);
			int    bBlink  = faceAnalyzer.getFaceBlink(i);
			int    bSmile  = faceAnalyzer.getFaceSmile(i);
			int    bGender = faceAnalyzer.getFaceGender(i);
			int    nAgeID  = faceAnalyzer.getFaceAge(i);
			int    nFaceID = faceAnalyzer.getFaceID(i);
			float  fFaceProb= faceAnalyzer.getFaceProb(i);
			
			char *sFaceCaption = NULL;
			char sFaceNameBuff[256];
			char *sFaceName = faceAnalyzer.getFaceName(i);
			if(sFaceName[0] != '\0')
			{
				sprintf(sFaceNameBuff, "%s %.2f", sFaceName, fFaceProb);
				sFaceCaption = sFaceNameBuff;  // name plus confidence score
			}

			if( ! quiet )
			{
				CvPoint2D32f *landmark6 = NULL;
				sprintf(sCaptionInfo, "FPS:%04d, %s", (int)tracker_fps, sState);

				int trackid = -1; //face_trackid , don't display trackid if -1
				cxlibDrawFaceBlob( image, pFont, trackid, rect, landmark6, probSmile, 
					bBlink, bSmile, bGender, nAgeID, sFaceCaption, NULL,
					pImgSmileBGR, pImgSmileBGRA, pImgSmileMask);
			}

            if( fp_imaginfo != NULL )
                fprintf( fp_imaginfo, "  %d %d %d %d", rect.x, rect.y, rect.width, rect.height );
		}
        if( fp_imaginfo != NULL )
            fprintf( fp_imaginfo, "\n" );

		///////////////////////////////////////////////////////////////////
		total_ticks += (cvGetTickCount() - start_ticks);
		
		// frame face_num
		frames++;

		//auto focus faces
		if(quiet == false && bAutoFocus)
		{
			if(imgAutoFocus)
				cvCopy(image, imgAutoFocus);
			else
				imgAutoFocus = cvCloneImage(image);

			CvRectItem *rects = faceAnalyzer.getFaceRects();
			cxlibAutoFocusFaceImage(imgAutoFocus, image, rects, face_num);
		}

		// next frame if quiet
		if( quiet )
			continue;
		else
		{
			// draw status info for custom interaction
			if(mouse_faceparam.ret_online_collecting == 1)
			{
				sprintf(sCaptionInfo, "Collecting faces for track_id = %d", mouse_faceparam.ret_facetrack_id);
				//draw face collecting region
				cvLine(image, cvPoint(image.width()/4, 0), cvPoint(image.width()/4, image.height()-1), CV_RGB(255,255,0), 2);
				cvLine(image, cvPoint(image.width()*3/4, 0), cvPoint(image.width()*3/4, image.height()-1), CV_RGB(255,255,0), 2);
			}
			else
				sprintf(sCaptionInfo, "FPS:%04d, %s", (int)tracker_fps, sState);

			cxlibDrawCaption( image, pFont, sCaptionInfo);
		}
		
		//show Image
		if (image.width() <= 800)
			cvShowImage( str_title, image );
		else
		{   // display a scaled-down image
			CvImage scale_image (cvSize(800, image.height()*800/image.width()), image.depth(), 3 );
			cvResize (image, scale_image);
			cvShowImage( str_title, scale_image );
		}

		// user interaction
		int key = cvWaitKey(1);
		//int key = cvWaitKey(0);
		if( key == ' ' )     // press space bar to pause the video play
			cvWaitKey( 0 );                           
		else if( key == 27 ) // press 'esc' to exit
			break;	                                   
		else if( key == 'a' )
		{  // add new face name
			if(face_num > 0)
			{   
				CvRect rect = faceAnalyzer.getFaceRect(0).rc;
				int x = rect.x+rect.width/2;
				int y = rect.y+rect.height/2;
				addFaceSet( x, y, &mouse_faceparam);
			}
		}
		else if( key == 'c' )
		{   //enable flag to collect face exemplars for the selected face name
			mouse_faceparam.ret_online_collecting = 1; //enable online face exemplar collecting
		}
		else if( key == 'z' )
			bAutoFocus = !bAutoFocus;
		else if(key >= 0)
		{
			if(mouse_faceparam.ret_online_collecting == 1)
			{   // stop collecting face exemplars
				mouse_faceparam.ret_online_collecting = 0; //disable online face exemplar collecting
				mouse_faceparam.ret_facetrack_id = -1;
			}

			if( key == 's')
			{
				// save faceset xml model
				faceAnalyzer.saveFaceModelXML("faceset_model.xml");
				sprintf(sCaptionInfo, "%s", "saved the face model");
				cxlibDrawCaption( pImg, pFont, sCaptionInfo);
				cvShowImage( str_title, pImg );
				cvWaitKey( 400 ); 
			}
		}
	}

	// print info about fps
	float temp    = 1e-6f / cvGetTickFrequency();
	tracker_fps   = 1.0f  / ( tracker_total_ticks * temp / frames );
	
	total_fps = 1.0f / (total_ticks * temp / frames);

	printf( "Total frames:%d  Speed:%.1f fps\n", frames, total_fps);
	printf( "FPS: %.1f ", tracker_fps);

	//save updated faceset model
	if(mouse_faceparam.updated == true)
	{
		sprintf(sCaptionInfo, "%s", "press key 's' to save updated face model or other keys to cancel");
		cxlibDrawCaption( pImg, pFont, sCaptionInfo);
		cvShowImage( str_title, pImg );

		int key = cvWaitKey();
		if( key == 's')
			faceAnalyzer.saveFaceModelXML("faceset_model.xml");
	}

	//save merged face model for dynamic clustering of smoothID
	vFaceSet vMergedFaceSet;
	int minWeight =10; 
	faceAnalyzer.getMergedFaceSet(vMergedFaceSet, minWeight);
	faceAnalyzer.saveFaceModelXML("faceset_modelMerged.xml", &vMergedFaceSet);

	//release global GUI data
	if( !quiet )
		cvDestroyWindow( str_title );

	cvReleaseImage(&pImgSmileBGR);
	cvReleaseImage(&pImgSmileBGRA);
	cvReleaseImage(&pImgSmileMask);
	delete pFont;

    delete vidcap;

    if( fp_imaginfo != NULL )
        fclose( fp_imaginfo );

    return 0;
}
Code example #23
int testfaceLib_sThread ( const char* str_video, int  trackerType, int multiviewType, int recognizerType, const char* str_facesetxml, int threads, 
						 bool blink, bool smile, bool gender, bool age, bool recog, bool quiet, bool saveface, const char* sfolder, bool bEnableAutoCluster)
{
	int  faceimgID = 0;
	char driver[8];
	char dir[1024];
	char fname[1024];
	char ext[8];
	char sImgPath[1024];

	if(sfolder)
	{
		char sysCommand[128];
		sprintf (sysCommand, "mkdir %s", sfolder);
		system (sysCommand);

		sprintf(sImgPath, "%s//%s", sfolder,  "imaginfo.txt");
		sprintf(fname,   "%s//%s", sfolder,  "faceinfo.txt");
	}
	else
	{
		sprintf(sImgPath, "%s", "imaginfo.txt");
		sprintf(fname,   "%s", "faceinfo.txt");
	}

	FILE* fp_imaginfo = fopen( sImgPath, "wt" );
    FILE* fp_faceinfo = fopen( fname, "wt" );

    bool bAutoFocus = false;
	IplImage *imgAutoFocus = NULL;

	/////////////////////////////////////////////////////////////////////////////////////
	//	init GUI window
	const char* str_title = "Face Tester";
	if( ! quiet )
		cvNamedWindow( str_title, CV_WINDOW_AUTOSIZE );

	char sCaptionInfo[256]="";
	CvFont *pFont = new CvFont;
	cvInitFont(pFont, CV_FONT_HERSHEY_PLAIN, 0.85, 0.85, 0, 1);
	
	// load GUI smile icon images
	IplImage *pImgSmileBGR;
	IplImage *pImgSmileMask;
	if(age == 0)
	{
		pImgSmileBGR  = cvLoadImage( "smile.bmp" );
		pImgSmileMask = cvLoadImage( "smilemask.bmp", 0 );
	}
	else
	{
		pImgSmileBGR  = cvLoadImage( "faceicon.bmp" );
		pImgSmileMask = cvLoadImage( "faceiconMask.bmp", 0 );
	}
	IplImage *pImgSmileBGRA = cvCreateImage( cvSize(pImgSmileBGR->width, pImgSmileBGR->height), IPL_DEPTH_8U, 4 );
	cvCvtColor(pImgSmileBGR, pImgSmileBGRA, CV_BGR2BGRA );

	// open video source
    size_t len = strlen( str_video );
    bool is_piclist = (0 == stricmp( str_video + len - 4, ".txt" ));
    CxImageSeqReader* vidcap = NULL;
    if( is_piclist )
        vidcap = new CxPicListReader( str_video );
    else
        vidcap = new CxVideoReader( str_video );

	if( cvGetErrStatus() < 0 )
	{   
		cvSetErrStatus( CV_StsOk );
		return -1;
	}

	// when using camera, set to 640x480, 30fps
	if( isdigit(str_video[0]) != 0 && str_video[1] == '\0' )
	{
		vidcap->width( 640 );
		vidcap->height( 480 );
		vidcap->fps( 30 );
	}

	// print beginning info
	printf( "tracker cascade:  '%s'\n", trackerType == TRA_HAAR ? "haar" : (trackerType== TRA_SURF ? "surf" : "pf tracker SURF"));
	printf( "face recognizer:  '%s'\n", recognizerType == RECOGNIZER_BOOST_GB240 ? "boost gabor240" : "cascade gloh"  );
	printf( "video:    '%s', %dx%d, %2.1f fps\n", str_video, 
		vidcap->width(), vidcap->height(), vidcap->fps() );

	// config face tracker
	const int  face_max = 16;
	CvRectItem rects[face_max];
	
	tagDetectConfig configParam;
	EnumViewAngle  viewAngle = (EnumViewAngle)multiviewType;

	CxlibFaceDetector detector;
	detector.init(viewAngle, (EnumFeaType)trackerType);
	detector.config( configParam );

	CxlibFaceTracker tracker;
	tracker.init(viewAngle, (EnumTrackerType)trackerType);
	tracker.config( configParam, TR_NLEVEL_3 );

	if( cvGetErrStatus() < 0 )
	{
		cvSetErrStatus( CV_StsOk );
		return -1;
	}

	// config landmark detector
	CvPoint2D32f   landmark6[6+1]; // consider both 6-pt and 7-pt
	float          parameters[16];
	bool      bLandmark = false;
	CxlibLandmarkDetector landmarkDetector(LDM_6PT);

	int size_smallface = 64;
	int size_bigface   = 128;
	CxlibAlignFace cutFace(size_smallface, size_bigface);
	
	// config blink/smile/gender detector
	int    bBlink = 0, bSmile = 0, bGender = 0, bAge = 0;  //+1, -1, otherwise 0: no process 
	float  probBlink = 0, probSmile = 0, probGender = 0, probAge[4];
	int    nAgeID = 0;

	CxlibBlinkDetector  blinkDetector(size_smallface);
	CxlibSmileDetector  smileDetector(size_smallface);
	CxlibGenderDetector genderDetector(size_smallface);
	CxlibAgeDetector    ageDetector(size_bigface);

	// config face recognizer
	float probFaceID = 0;
	if(str_facesetxml == NULL)
		str_facesetxml = "faceset_model.xml";

	CxlibFaceRecognizer faceRecognizer( size_bigface, recognizerType );
	if(recog) faceRecognizer.loadFaceModelXML(str_facesetxml);
	
	// set mouse event process
	CxMouseParam mouse_faceparam;
	mouse_faceparam.updated = false;
	mouse_faceparam.play = true;
	mouse_faceparam.ret_online_collecting = 0;
		
	if(! quiet)
	{
		mouse_faceparam.face_num  = face_max;
		mouse_faceparam.rects     = rects;
		mouse_faceparam.image     = NULL;
		mouse_faceparam.cut_big_face= cutFace.getBigCutFace();
		mouse_faceparam.typeRecognizer = 1;
		mouse_faceparam.faceRecognizer = &faceRecognizer;
		cvSetMouseCallback(	str_title, my_mouse_callback, (void*)&mouse_faceparam );
	}

	// init count ticks                   
	int64  ticks, start_ticks, total_ticks;
	int64  tracker_total_ticks, landmark_total_ticks, align_total_ticks,
		   blink_total_ticks, smile_total_ticks, gender_total_ticks, age_total_ticks, recg_total_ticks;
	double frame_fps, tracker_fps, landmark_fps, align_fps, blink_fps, smile_fps, gender_fps, age_fps, recg_fps, total_fps; 

	start_ticks         = total_ticks          = 0;
	tracker_total_ticks = landmark_total_ticks = align_total_ticks  = 0;
	blink_total_ticks   = smile_total_ticks    = gender_total_ticks = age_total_ticks = recg_total_ticks = 0;

	tracker_fps = landmark_fps = align_fps = blink_fps = smile_fps = gender_fps = age_fps = recg_fps = total_fps = 0.0;        

	// loop for each frame of a video/camera
	int frames = 0;
	IplImage *pImg = NULL;
	int   print_faceid=-1;
	float print_score = 0;
	std::string  print_facename;

	bool bRunLandmark = blink || smile|| gender|| age|| recog || saveface;
	IplImage *thumbnailImg   = cvCreateImage(cvSize(THUMBNAIL_WIDTH, THUMBNAIL_HEIGHT), IPL_DEPTH_8U, 3);   
	
	//dynamic clustering for smooth ID registration
	//bEnableAutoCluster = true;
	if( is_piclist ) bEnableAutoCluster = false;

	while( ! vidcap->eof() )
	{   
		// capture a video frame
		if( mouse_faceparam.play == true)
			pImg = vidcap->query();
		else 
			continue;

		if ( pImg == NULL )
			continue;

		// make a copy, flip if upside-down
		CvImage image( cvGetSize(pImg), pImg->depth, pImg->nChannels );
		if( pImg->origin == IPL_ORIGIN_BL ) //flip live camera's frame
			cvFlip( pImg, image );
		else
			cvCopy( pImg, image );

		// convert to gray_image for face analysis
		CvImage gray_image( image.size(), image.depth(), 1 );
		if( image.channels() == 3 )
			cvCvtColor( image, gray_image, CV_BGR2GRAY );
		else
			cvCopy( image, gray_image );

		// do face tracking
		start_ticks = ticks = cvGetTickCount();	
       
		int face_num = 0;
        if( is_piclist )
            face_num = detector.detect( gray_image, rects, face_max );
        else
            face_num = tracker.track( gray_image, rects, face_max, image ); // track in a video for faster speed
		  //face_num = tracker.detect( gray_image, rects, face_max ); // detect in an image

		//set param for mouse event processing
		if(!quiet)
		{
			mouse_faceparam.face_num = face_num;
			mouse_faceparam.image    = image;
		}

		ticks       = cvGetTickCount() - ticks;
		tracker_fps = 1000.0 / ( 1e-3 * ticks / cvGetTickFrequency() );
		tracker_total_ticks += ticks;

        if( fp_imaginfo != NULL )
            fprintf( fp_imaginfo, "%s  %d", vidcap->filename(), face_num );

        // blink/smile/gender/age/face recognize section
		for( int i=0; i<face_num; i++ )
		//for( int i=0; i< MIN(1,face_num); i++ )
		{
			// get face rect and id from face tracker
			CvRect rect = rects[i].rc;

            if( fp_imaginfo != NULL )
                fprintf( fp_imaginfo, "  %d %d %d %d %f", rect.x, rect.y, rect.width, rect.height, rects[i].prob );

			int    face_trackid = rects[i].fid;
			float  like = rects[i].prob;
			int    angle= rects[i].angle;

			// filter out outer faces
			if( rect.x+rect.width  > gray_image.width()   || rect.x < 0 ) continue;
			if( rect.y+rect.height > gray_image.height() || rect.y < 0 ) continue;

			//tracker.getThumbnail(image, rect, thumbnailImg);

			// detect landmark points 
			ticks = cvGetTickCount();	

			if(bRunLandmark)
			{
                if( is_piclist )
				    bLandmark = landmarkDetector.detect( gray_image, &rect, landmark6, parameters, angle ); //detect in an image
                else
				    bLandmark = landmarkDetector.track( gray_image, &rect, landmark6, parameters, angle ); // track in a video for faster speed

				ticks = cvGetTickCount() - ticks;
				landmark_fps = 1000.0 / ( 1e-3 * ticks / cvGetTickFrequency() );
				landmark_total_ticks += ticks;
			}
			else
				bLandmark = false;

	
			if(quiet == false && bLandmark == false) 
			{
				//DrawFaceRect
				cxlibDrawFaceRect(image, rect);
				continue;
			}

			// warped align face and hist eq to delighting
			ticks = cvGetTickCount();	

			cutFace.init(gray_image, rect, landmark6);

			ticks = cvGetTickCount() - ticks;
			if(ticks > 1)
				align_fps = 1000.0 / ( 1e-3 * ticks / cvGetTickFrequency() );
			else
			{	align_fps = 0;
				ticks = 0;
			}
			align_total_ticks += ticks;

			if(saveface)   //save face icon for training later
			{
				//save cutfaces
				if(sfolder)
				{
#ifdef WIN32
					_splitpath(vidcap->filename(),driver,dir,fname,ext);
					sprintf(sImgPath, "%s//%s%s", sfolder, fname,ext);
#else
					sprintf(sImgPath, "%s//%06d.jpg", sfolder, faceimgID++);
#endif
				}
				else
					sprintf(sImgPath, "%s#.jpg", vidcap->filename());
				
				cvSaveImage(sImgPath, cutFace.getBigCutFace());
			}

			// detect blink
			bBlink = 0;	
			probBlink = 0;
			if(blink && bLandmark)
			{
				ticks = cvGetTickCount();	
				float blink_threshold = blinkDetector.getDefThreshold();//0.5;
				int ret = blinkDetector.predict( &cutFace, &probBlink);
			
				if( probBlink > blink_threshold )
					bBlink = 1;  // eyes closed
				else 
					bBlink = -1; // eyes open

				ticks = cvGetTickCount() - ticks;
				blink_fps = 1000.0/(1e-3*ticks/cvGetTickFrequency());
				blink_total_ticks += ticks;

				print_score = probBlink;
			}
			else blink_fps = 0;

			// detect smile
			bSmile    = 0;	
			probSmile = 0;
			if ( smile && bLandmark )
			{	
				ticks = cvGetTickCount();
				float smile_threshold = smileDetector.getDefThreshold(); //0.48;  
				int ret = smileDetector.predict(&cutFace, &probSmile);

				if( probSmile > smile_threshold )
					bSmile = 1;  // smiling
				else 
					bSmile = -1; // not smiling

				ticks	  = cvGetTickCount() - ticks;
				smile_fps = 1000.0 /( 1e-3 * ticks / cvGetTickFrequency() );
				smile_total_ticks += ticks;

				print_score = probSmile;
			}
			else smile_fps = 0;

			//detect gender
			bGender    = 0;	
			probGender = 0;
			if(gender && bLandmark)
			{
				ticks = cvGetTickCount();	
				float gender_threshold = genderDetector.getDefThreshold(); // 0.42; 
				int ret = genderDetector.predict(&cutFace, &probGender);

				if(probGender > gender_threshold)
					bGender =  1; //female
				else
					bGender = -1; //male

				//bGender = -1:male, 1:female, 0: null
				// smooth prediction result
                if( ! is_piclist )
				    bGender = genderDetector.voteLabel(face_trackid, bGender);
				
				ticks = cvGetTickCount() - ticks;
				gender_fps = 1000.0/(1e-3*ticks/cvGetTickFrequency());
				gender_total_ticks += ticks;

				print_score = probGender; 
			}
			else gender_fps = 0;

			//detect age
			nAgeID  = -1;
			if(age && bLandmark && rect.width*rect.height > 40*40)
			{
				ticks = cvGetTickCount();	

				//nAgeID = 0:"Baby", 1:"Kid", 2:"Adult", 3:"Senior"
				nAgeID = ageDetector.predict(&cutFace, probAge);

				// smooth prediction result
                if( ! is_piclist )
				    nAgeID = ageDetector.voteLabel(face_trackid, nAgeID); 

				ticks = cvGetTickCount() - ticks;
				age_fps = 1000.0/(1e-3*ticks/cvGetTickFrequency());
				age_total_ticks += ticks;

				print_score = probAge[nAgeID]; 
				//if( ! quiet )	cxDrawAignFace2Image(image, pCutFace2);
			}
			else 
			{
				age_fps = 0;
			}

			// recognize the face id
			// (for speed, recognition could be throttled to run only every few frames)
			char  *sFaceCaption = NULL;
			char  sFaceCaptionBuff[256];
            int face_id = 0;
			probFaceID = 0;
			if ( recog && bLandmark )
			{
				ticks = cvGetTickCount();
				float face_threshold = faceRecognizer.getDefThreshold(); 
				/////////////////////////////////////////////////////////////////////////////////////////
				face_id = -1; // reuse the outer face_id so the log section below sees the result
				if(bEnableAutoCluster && !is_piclist)
				{
					bool bAutocluster = true;
					if(mouse_faceparam.ret_online_collecting) bAutocluster = false;
					//face clustering
					face_id  = faceRecognizer.predict(&cutFace, &probFaceID, bAutocluster, face_trackid, frames);
				}
				else//face recognition
					face_id  = faceRecognizer.predict(&cutFace, &probFaceID);
				/////////////////////////////////////////////////////////////////////////////////////////

				ticks    = cvGetTickCount() - ticks;
				recg_fps = 1000.0f / ( 1e-3 * ticks / cvGetTickFrequency() );
				recg_total_ticks += ticks;
				
				// smooth prediction result
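				// on video (not a picture list) without auto-clustering, vote over the
				// track's history: confident predictions vote their id, weak ones vote -1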
                if( ! is_piclist && !bEnableAutoCluster)
                {
				    if(probFaceID > face_threshold)
					    face_id = faceRecognizer.voteLabel(face_trackid, face_id); 
				    else
					    face_id = faceRecognizer.voteLabel(face_trackid, -1);
                }
				else if(probFaceID <= face_threshold)
				{
					face_id = -1;
				}

				//set face name caption
				if(face_id >= 0)
				{
					// recognized face name
					const char* sFaceName = faceRecognizer.getFaceName(face_id);
					sprintf(sFaceCaptionBuff, "%s %.2f", sFaceName, probFaceID);
					//sprintf(sFaceCaptionBuff, "%s", sFaceName); // display the name only, without the score
					sFaceCaption = sFaceCaptionBuff;
					
					print_score  = probFaceID;
					print_faceid = face_id;
				}
				else
				{   // failed to recognize 
					//sprintf(sFaceCaptionBuff, "N\A %.2f", probFaceID);
					//sFaceCaption = sFaceCaptionBuff;
				}

				// collect and save unknown face exemplars
				if(probFaceID < face_threshold*0.9 || face_id != mouse_faceparam.ret_faceset_id )
				{
					if(mouse_faceparam.ret_online_collecting && (face_num ==1 || face_trackid == mouse_faceparam.ret_facetrack_id))
					{
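						// only collect faces lying entirely within the middle half of the
						// frame (the region between the yellow guide lines drawn below)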
						if( rect.x > gray_image.width()/4 && rect.x+rect.width < gray_image.width()*3/4 ) 
						{
							mouse_faceparam.updated = true;
							int nFaceSetIdx = faceRecognizer.getFaceSetIdx(mouse_faceparam.ret_faceset_id);
							bool bflag = faceRecognizer.tryInsertFace(cutFace.getBigCutFace(), nFaceSetIdx);
							//printf("insert flag %d\n", bflag);
						}
					}
				}
			}
			else recg_fps = 0;

			if( ! quiet )
			{
				sprintf(sCaptionInfo, "FPS: %03d Fd:%04d Ld:%04d Fa:%04d Bl:%04d Sm:%04d Ge:%04d Ag:%03d Rc:%03d",
					(int)frame_fps, (int)tracker_fps, (int)landmark_fps, (int)align_fps, 
					(int)blink_fps,   (int)smile_fps,    (int)gender_fps, (int)age_fps, (int)recg_fps);

				//sprintf(sFaceCaptionBuff, "%.2f", print_score);
				//sFaceCaption = sFaceCaptionBuff;

				int trackid = -1; // pass -1 so the track id is not drawn; use face_trackid to display it
				cxlibDrawFaceBlob( image, pFont, trackid, rect, landmark6, probSmile, 
					bBlink, bSmile, bGender, nAgeID, sFaceCaption, NULL,
					pImgSmileBGR, pImgSmileBGRA, pImgSmileMask);
			}

            // log file
            if( fp_faceinfo != NULL )
            {
                // index,  rect,  landmark6,  bBlink, probBlink, bSmile, probSmile, bGender, probGender, nAgeID, probAge[nAgeID], face_id, probFaceID
				//fprintf( fp_faceinfo, "#%s# @%s@ ",    vidcap->filename(), sImgPath);
				fprintf( fp_faceinfo, "#%s# ",    vidcap->filename());
                fprintf( fp_faceinfo, "faceidx=( %06d %02d )", vidcap->index(), i+1 );
				fprintf( fp_faceinfo, "   rect=( %3d %3d %3d %3d )", rect.x, rect.y, rect.width, rect.height );
                fprintf( fp_faceinfo, "   landmark6=(" );
                int l;
                for( l = 0; l < 6; l++ )
                    fprintf( fp_faceinfo, " %3.0f %3.0f", landmark6[l].x, landmark6[l].y );
                fprintf( fp_faceinfo, " )");
                fprintf( fp_faceinfo, "   blink=( %+d %f )", bBlink, probBlink );
                fprintf( fp_faceinfo, "   smile=( %+d %f )", bSmile, probSmile );
                fprintf( fp_faceinfo, "   gender=( %+d %f )", bGender, probGender );
                fprintf( fp_faceinfo, "   agegroup=( %+d %f )", nAgeID, (nAgeID >= 0 && nAgeID < 4) ? probAge[nAgeID] : 1.0f );
                fprintf( fp_faceinfo, "   identity=( %+d %f )", face_id, probFaceID );
                fprintf( fp_faceinfo, "\n" );
            }
        }
        if( fp_imaginfo != NULL )
            fprintf( fp_imaginfo, "\n" );

		ticks    = cvGetTickCount() - start_ticks;
		total_ticks += (ticks);
		frame_fps = 1000.0f / ( 1e-3 * ticks / cvGetTickFrequency() );

		// advance the frame counter
		frames++;

		//auto focus faces
		if(quiet == false && bAutoFocus)
		{
			if(imgAutoFocus)
				cvCopy(image, imgAutoFocus);
			else
				imgAutoFocus = cvCloneImage(image);
			cxlibAutoFocusFaceImage(imgAutoFocus, image, rects, face_num);
		}

		// next frame if quiet
		if( quiet )
			continue;
		else
		{
			// draw status info for custom interaction
			if(mouse_faceparam.ret_online_collecting == 1)
			{
				sprintf(sCaptionInfo, "Collecting faces for track_id = %d", mouse_faceparam.ret_facetrack_id);
				//draw face collecting region
				cvLine(image, cvPoint(image.width()/4, 0), cvPoint(image.width()/4, image.height()-1), CV_RGB(255,255,0), 2);
				cvLine(image, cvPoint(image.width()*3/4, 0), cvPoint(image.width()*3/4, image.height()-1), CV_RGB(255,255,0), 2);
			}
			else
				sprintf(sCaptionInfo, "FPS: %03d Fd:%04d Ld:%04d Fa:%04d Bl:%04d Sm:%04d Ge:%04d Ag:%03d Rc:%03d",
					(int)frame_fps, (int)tracker_fps, (int)landmark_fps, (int)align_fps, 
					(int)blink_fps,   (int)smile_fps,    (int)gender_fps, (int)age_fps, (int)recg_fps);

			cxlibDrawCaption( image, pFont, sCaptionInfo);
		}
	
		//show Image
		if (image.width() <= 800)
		{
			//show image
			cvShowImage( str_title, image );
		}
		else
		{   // show scaled smaller image
			CvImage scale_image (cvSize(800, image.height()*800/image.width()), image.depth(), 3 );
			cvResize (image, scale_image);
			cvShowImage( str_title, scale_image );
		}

		// user interaction
		int key = cvWaitKey( 30 );
		//int key = cvWaitKey( );
		if( key == ' ' ) // press the spacebar to pause the video play 
			cvWaitKey( 0 );                           
		else if( key == 27 )
			break;	    // press 'esc' to exit
		else if( key == 'a' )
		{  // add new face name
			if(face_num > 0)
			{   
				CvRect rect = rects[0].rc;
				int x = rect.x+rect.width/2;
				int y = rect.y+rect.height/2;
				addFaceSet( x, y, &mouse_faceparam);
			}
		}
		else if( key == 'c' )
		{   // collect face exemplars for current selected facename
			mouse_faceparam.ret_online_collecting = 1; //enable online face exemplar collecting
		}
		else if( key == 'z' )
			// turn on/off the autofocus flag
			bAutoFocus = !bAutoFocus;
		else if(key >= 0)
		{
			if(mouse_faceparam.ret_online_collecting == 1)
			{   // stop collecting faces
				mouse_faceparam.ret_online_collecting = 0; //disable online face exemplar collecting
				mouse_faceparam.ret_facetrack_id = -1;
			}

			if( key == 's')
			{   // save face models
				faceRecognizer.saveFaceModelXML("faceset_model.xml");
				sprintf(sCaptionInfo, "%s", "saved the face model");
				cxlibDrawCaption( pImg, pFont, sCaptionInfo);
				cvShowImage( str_title, pImg );
				cvWaitKey( 400 ); 
			}
		}
	}

	// print speed info about fps
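	// temp = seconds per tick; x_total_ticks * temp / frames = average seconds per frame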
	float temp    = 1e-6f / cvGetTickFrequency();
	tracker_fps   = 1.0f  / ( tracker_total_ticks * temp / frames );

	if (landmark_total_ticks != 0.0)
		landmark_fps = 1.0f  / ( landmark_total_ticks * temp / frames );

	if (align_total_ticks != 0.0)
		align_fps    = 1.0f  / ( align_total_ticks * temp / frames );

	if (blink_total_ticks != 0.0)
		blink_fps  = 1.0f  / (blink_total_ticks * temp / frames);

	if (smile_total_ticks != 0.0)
		smile_fps  = 1.0f  / (smile_total_ticks * temp / frames);

	if (gender_total_ticks != 0.0)
		gender_fps = 1.0f  / (gender_total_ticks * temp / frames);

	if (age_total_ticks != 0.0)
		age_fps = 1.0f  / (age_total_ticks * temp / frames);

	if (recg_total_ticks != 0.0)
		recg_fps   = 1.0f  / (recg_total_ticks  * temp / frames);

	total_fps = 1.0f / (total_ticks * temp / frames);

	printf( "Total frames:%d  Speed:%.1f fps\n", frames, total_fps);
	printf( "FPS: Fd:%.1f Ld:%.1f Fa:%.1f Bl:%.1f Sm:%.1f Ge:%.1f Ag:%.1f Rc:%.1f",
		tracker_fps, landmark_fps, align_fps, 
		blink_fps,   smile_fps,    gender_fps, age_fps, recg_fps);

	//save updated face model
	if(mouse_faceparam.updated == true)
	{
		sprintf(sCaptionInfo, "%s", "press key 's' to save updated face model or other keys to cancel");
		cxlibDrawCaption( pImg, pFont, sCaptionInfo);
		cvShowImage( str_title, pImg );

		int key = cvWaitKey();
		if( key == 's')
			faceRecognizer.saveFaceModelXML("faceset_model.xml");
	}

	
	//save merged face model for dynamic clustering of smoothID
	vFaceSet vMergedFaceSet;
	int minWeight = 10;
	faceRecognizer.getMergedFaceSet(vMergedFaceSet, minWeight);
	faceRecognizer.saveFaceModelXML("faceset_modelMerged.xml", &vMergedFaceSet);
	//faceRecognizer.saveFaceModelXML("faceset_modelMerged#.xml");

	//release buffers and global GUI data
	if( !quiet )
		cvDestroyWindow( str_title );

	cvReleaseImage(&thumbnailImg);
	cvReleaseImage(&pImgSmileBGR);
	cvReleaseImage(&pImgSmileBGRA);
	cvReleaseImage(&pImgSmileMask);
	
	delete pFont;

    delete vidcap;

    if( fp_imaginfo != NULL )
        fclose( fp_imaginfo );
	
    if( fp_faceinfo != NULL )
        fclose( fp_faceinfo );

    return 0;
}
コード例 #24
0
ファイル: bgfg_mean.cpp プロジェクト: AeroCano/JdeRobot
// Function createBGMeanStatModel initializes foreground detection process
// parameters:
//      first_frame - frame from video sequence
//      parameters  - (optional) if NULL default parameters of the algorithm will be used
//      p_model     - pointer to the created BGMeanStatModel structure
CV_IMPL CvBGStatModel*
createBGMeanStatModel( IplImage* first_frame, BGMeanStatModelParams* parameters ){
  BGMeanStatModel* p_model = 0;
  
  CV_FUNCNAME( "createBGMeanStatModel" );
  
  __BEGIN__;
  
  int pixel_count, buf_size;
  BGMeanStatModelParams params;
  
  if( !CV_IS_IMAGE(first_frame) )
    CV_ERROR( CV_StsBadArg, "Invalid or NULL first_frame parameter" );
  
  if (first_frame->nChannels != 3)
    CV_ERROR( CV_StsBadArg, "first_frame must have 3 color channels" );
  
  // Initialize parameters:
  if( parameters == NULL ){
    params.n_frames = BGFG_MEAN_NFRAMES;
    params.bg_update_rate = BGFG_MEAN_BG_UPDATE_RATE;
    params.fg_update_rate = BGFG_MEAN_FG_UPDATE_RATE;
    params.sg_params.is_obj_without_holes = BGFG_SEG_OBJ_WITHOUT_HOLES;
    params.sg_params.perform_morphing = BGFG_SEG_PERFORM_MORPH;
    params.sg_params.minArea = BGFG_SEG_MINAREA;
    params.perform_segmentation = 1;
  }else{
    params = *parameters;
  }

  CV_CALL( p_model = (BGMeanStatModel*)cvAlloc( sizeof(*p_model) ));
  memset( p_model, 0, sizeof(*p_model) );
  p_model->type = BG_MODEL_MEAN;
  p_model->release = (CvReleaseBGStatModel)releaseBGMeanStatModel;
  p_model->update = (CvUpdateBGStatModel)updateBGMeanStatModel;;
  p_model->params = params;

  //init frame counters to their max value so the first call initializes bg & fg. FIXME: faster initialization??
  p_model->bg_frame_count = params.bg_update_rate;
  p_model->fg_frame_count = params.fg_update_rate;

  // Initialize storage pools:
  pixel_count = first_frame->width * first_frame->height;
  buf_size = pixel_count * first_frame->nChannels * params.n_frames * sizeof(uchar);

  CV_CALL( p_model->frame_cbuffer = (uchar*)cvAlloc(buf_size) );
  memset( p_model->frame_cbuffer, 0, buf_size );
  p_model->cbuffer_idx = 0;
  p_model->cbuffer_nentries_init = 0;

  buf_size = pixel_count * first_frame->nChannels * sizeof(double);
  CV_CALL( p_model->mean = (double*)cvAlloc(buf_size) );
  CV_CALL( p_model->std_dev = (double*)cvAlloc(buf_size) );
  

  // Init temporary images:
  CV_CALL( p_model->foreground = cvCreateImage(cvSize(first_frame->width, first_frame->height),
					       IPL_DEPTH_8U, 1));
  CV_CALL( p_model->background = cvCloneImage(first_frame));
  CV_CALL( p_model->storage = cvCreateMemStorage(0));


  __END__;
  
  if( cvGetErrStatus() < 0 ){
    CvBGStatModel* base_ptr = (CvBGStatModel*)p_model;
    
    if( p_model && p_model->release )
      p_model->release( &base_ptr );
    else
      cvFree( &p_model );
    p_model = 0;
  }
  
  return (CvBGStatModel*)p_model;
}
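
A minimal usage sketch for the model above (hedged: the capture handle is hypothetical, and the two-argument update signature is the one implied by the cast in the listing; error handling omitted):

IplImage* frame = cvQueryFrame( capture );                  // first frame seeds the model
CvBGStatModel* bg = createBGMeanStatModel( frame, NULL );   // NULL -> default parameters
while( (frame = cvQueryFrame( capture )) != NULL )
{
  bg->update( frame, bg );                  // refresh the running mean/std-dev statistics
  cvShowImage( "foreground", bg->foreground );
  if( cvWaitKey( 10 ) == 27 )               // ESC quits
    break;
}
bg->release( &bg );                         // frees all buffers via releaseBGMeanStatModel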
コード例 #25
0
ファイル: persistence_c.cpp プロジェクト: Achraf33/opencv
CV_IMPL CvFileStorage*
cvOpenFileStorage( const char* query, CvMemStorage* dststorage, int flags, const char* encoding )
{
    CvFileStorage* fs = 0;
    int default_block_size = 1 << 18;
    bool append = (flags & 3) == CV_STORAGE_APPEND;
    bool mem = (flags & CV_STORAGE_MEMORY) != 0;
    bool write_mode = (flags & 3) != 0;
    bool write_base64 = (write_mode || append) && (flags & CV_STORAGE_BASE64) != 0;
    bool isGZ = false;
    size_t fnamelen = 0;
    const char * filename = query;

    std::vector<std::string> params;
    if ( !mem )
    {
        params = analyze_file_name( query );
        if ( !params.empty() )
            filename = params.begin()->c_str();

        if ( write_base64 == false && is_param_exist( params, "base64" ) )
            write_base64 = (write_mode || append);
    }

    if( !filename || filename[0] == '\0' )
    {
        if( !write_mode )
            CV_Error( CV_StsNullPtr, mem ? "NULL or empty filename" : "NULL or empty buffer" );
        mem = true;
    }
    else
        fnamelen = strlen(filename);

    if( mem && append )
        CV_Error( CV_StsBadFlag, "CV_STORAGE_APPEND and CV_STORAGE_MEMORY are not currently compatible" );

    fs = (CvFileStorage*)cvAlloc( sizeof(*fs) );
    CV_Assert(fs);
    memset( fs, 0, sizeof(*fs));

    fs->memstorage = cvCreateMemStorage( default_block_size );
    fs->dststorage = dststorage ? dststorage : fs->memstorage;

    fs->flags = CV_FILE_STORAGE;
    fs->write_mode = write_mode;

    if( !mem )
    {
        fs->filename = (char*)cvMemStorageAlloc( fs->memstorage, fnamelen+1 );
        strcpy( fs->filename, filename );

        char* dot_pos = strrchr(fs->filename, '.');
        char compression = '\0';

        if( dot_pos && dot_pos[1] == 'g' && dot_pos[2] == 'z' &&
            (dot_pos[3] == '\0' || (cv_isdigit(dot_pos[3]) && dot_pos[4] == '\0')) )
        {
            if( append )
            {
                cvReleaseFileStorage( &fs );
                CV_Error(CV_StsNotImplemented, "Appending data to compressed file is not implemented" );
            }
            isGZ = true;
            compression = dot_pos[3];
            if( compression )
                dot_pos[3] = '\0', fnamelen--;
        }

        if( !isGZ )
        {
            fs->file = fopen(fs->filename, !fs->write_mode ? "rt" : !append ? "wt" : "a+t" );
            if( !fs->file )
                goto _exit_;
        }
        else
        {
            #if USE_ZLIB
            char mode[] = { fs->write_mode ? 'w' : 'r', 'b', compression ? compression : '3', '\0' };
            fs->gzfile = gzopen(fs->filename, mode);
            if( !fs->gzfile )
                goto _exit_;
            #else
            cvReleaseFileStorage( &fs );
            CV_Error(CV_StsNotImplemented, "There is no compressed file storage support in this configuration");
            #endif
        }
    }

    fs->roots = 0;
    fs->struct_indent = 0;
    fs->struct_flags = 0;
    fs->wrap_margin = 71;

    if( fs->write_mode )
    {
        int fmt = flags & CV_STORAGE_FORMAT_MASK;

        if( mem )
            fs->outbuf = new std::deque<char>;

        if( fmt == CV_STORAGE_FORMAT_AUTO && filename )
        {
            const char* dot_pos = NULL;
            const char* dot_pos2 = NULL;
            // like strrchr() implementation, but save two last positions simultaneously
            for (const char* pos = filename; pos[0] != 0; pos++)
            {
                if (pos[0] == '.')
                {
                    dot_pos2 = dot_pos;
                    dot_pos = pos;
                }
            }
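            // e.g. for "model.xml.gz": dot_pos -> ".gz" and dot_pos2 -> ".xml.gz",
            // so the check below rewinds dot_pos to the double extension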
            if (cv_strcasecmp(dot_pos, ".gz") && dot_pos2 != NULL)
            {
                dot_pos = dot_pos2;
            }
            fs->fmt
                = (cv_strcasecmp(dot_pos, ".xml") || cv_strcasecmp(dot_pos, ".xml.gz"))
                ? CV_STORAGE_FORMAT_XML
                : (cv_strcasecmp(dot_pos, ".json") || cv_strcasecmp(dot_pos, ".json.gz"))
                ? CV_STORAGE_FORMAT_JSON
                : CV_STORAGE_FORMAT_YAML
                ;
        }
        else if ( fmt != CV_STORAGE_FORMAT_AUTO )
        {
            fs->fmt = fmt;
        }
        else
        {
            fs->fmt = CV_STORAGE_FORMAT_XML;
        }

        // we use factor=6 for XML (the longest characters, ' and ", are encoded with 6 bytes: &apos; and &quot;)
        // and factor=4 for YAML (non-ASCII characters are encoded with 4 bytes, e.g. \xAB)
        int buf_size = CV_FS_MAX_LEN*(fs->fmt == CV_STORAGE_FORMAT_XML ? 6 : 4) + 1024;

        if (append)
        {
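            // appending to an empty file degenerates into a fresh write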
            fseek( fs->file, 0, SEEK_END );
            if (ftell(fs->file) == 0)
                append = false;
        }

        fs->write_stack = cvCreateSeq( 0, sizeof(CvSeq), fs->fmt == CV_STORAGE_FORMAT_XML ?
                sizeof(CvXMLStackRecord) : sizeof(int), fs->memstorage );
        fs->is_first = 1;
        fs->struct_indent = 0;
        fs->struct_flags = CV_NODE_EMPTY;
        fs->buffer_start = fs->buffer = (char*)cvAlloc( buf_size + 1024 );
        fs->buffer_end = fs->buffer_start + buf_size;

        fs->base64_writer           = 0;
        fs->is_default_using_base64 = write_base64;
        fs->state_of_writing_base64 = base64::fs::Uncertain;

        fs->is_write_struct_delayed = false;
        fs->delayed_struct_key      = 0;
        fs->delayed_struct_flags    = 0;
        fs->delayed_type_name       = 0;

        if( fs->fmt == CV_STORAGE_FORMAT_XML )
        {
            size_t file_size = fs->file ? (size_t)ftell( fs->file ) : (size_t)0;
            fs->strstorage = cvCreateChildMemStorage( fs->memstorage );
            if( !append || file_size == 0 )
            {
                if( encoding )
                {
                    if( strcmp( encoding, "UTF-16" ) == 0 ||
                        strcmp( encoding, "utf-16" ) == 0 ||
                        strcmp( encoding, "Utf-16" ) == 0 )
                    {
                        cvReleaseFileStorage( &fs );
                        CV_Error( CV_StsBadArg, "UTF-16 XML encoding is not supported! Use 8-bit encoding\n");
                    }

                    CV_Assert( strlen(encoding) < 1000 );
                    char buf[1100];
                    sprintf(buf, "<?xml version=\"1.0\" encoding=\"%s\"?>\n", encoding);
                    icvPuts( fs, buf );
                }
                else
                    icvPuts( fs, "<?xml version=\"1.0\"?>\n" );
                icvPuts( fs, "<opencv_storage>\n" );
            }
            else
            {
                int xml_buf_size = 1 << 10;
                char substr[] = "</opencv_storage>";
                int last_occurence = -1;
                xml_buf_size = MIN(xml_buf_size, int(file_size));
                fseek( fs->file, -xml_buf_size, SEEK_END );
                char* xml_buf = (char*)cvAlloc( xml_buf_size+2 );
                // find the last occurrence of </opencv_storage>
                for(;;)
                {
                    int line_offset = (int)ftell( fs->file );
                    char* ptr0 = icvGets( fs, xml_buf, xml_buf_size ), *ptr;
                    if( !ptr0 )
                        break;
                    ptr = ptr0;
                    for(;;)
                    {
                        ptr = strstr( ptr, substr );
                        if( !ptr )
                            break;
                        last_occurence = line_offset + (int)(ptr - ptr0);
                        ptr += strlen(substr);
                    }
                }
                cvFree( &xml_buf );
                if( last_occurence < 0 )
                {
                    cvReleaseFileStorage( &fs );
                    CV_Error( CV_StsError, "Could not find </opencv_storage> in the end of file.\n" );
                }
                icvCloseFile( fs );
                fs->file = fopen( fs->filename, "r+t" );
                CV_Assert(fs->file);
                fseek( fs->file, last_occurence, SEEK_SET );
                // replace the last "</opencv_storage>" with " <!-- resumed -->", which has the same length
                icvPuts( fs, " <!-- resumed -->" );
                fseek( fs->file, 0, SEEK_END );
                icvPuts( fs, "\n" );
            }
            fs->start_write_struct = icvXMLStartWriteStruct;
            fs->end_write_struct = icvXMLEndWriteStruct;
            fs->write_int = icvXMLWriteInt;
            fs->write_real = icvXMLWriteReal;
            fs->write_string = icvXMLWriteString;
            fs->write_comment = icvXMLWriteComment;
            fs->start_next_stream = icvXMLStartNextStream;
        }
        else if( fs->fmt == CV_STORAGE_FORMAT_YAML )
        {
            if( !append)
                icvPuts( fs, "%YAML:1.0\n---\n" );
            else
                icvPuts( fs, "...\n---\n" );
            fs->start_write_struct = icvYMLStartWriteStruct;
            fs->end_write_struct = icvYMLEndWriteStruct;
            fs->write_int = icvYMLWriteInt;
            fs->write_real = icvYMLWriteReal;
            fs->write_string = icvYMLWriteString;
            fs->write_comment = icvYMLWriteComment;
            fs->start_next_stream = icvYMLStartNextStream;
        }
        else
        {
            if( !append )
                icvPuts( fs, "{\n" );
            else
            {
                bool valid = false;
                long roffset = 0;
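                // scan backwards from EOF for the root object's closing '}',
                // so the new entry can be spliced in just before it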
                for ( ;
                      fseek( fs->file, roffset, SEEK_END ) == 0;
                      roffset -= 1 )
                {
                    const char end_mark = '}';
                    if ( fgetc( fs->file ) == end_mark )
                    {
                        fseek( fs->file, roffset, SEEK_END );
                        valid = true;
                        break;
                    }
                }

                if ( valid )
                {
                    icvCloseFile( fs );
                    fs->file = fopen( fs->filename, "r+t" );
                    CV_Assert(fs->file);
                    fseek( fs->file, roffset, SEEK_END );
                    fputs( ",", fs->file );
                }
                else
                {
                    CV_Error( CV_StsError, "Could not find '}' in the end of file.\n" );
                }
            }
            fs->struct_indent = 4;
            fs->start_write_struct = icvJSONStartWriteStruct;
            fs->end_write_struct = icvJSONEndWriteStruct;
            fs->write_int = icvJSONWriteInt;
            fs->write_real = icvJSONWriteReal;
            fs->write_string = icvJSONWriteString;
            fs->write_comment = icvJSONWriteComment;
            fs->start_next_stream = icvJSONStartNextStream;
        }
    }
    else
    {
        if( mem )
        {
            fs->strbuf = filename;
            fs->strbufsize = fnamelen;
        }

        size_t buf_size = 1 << 20;
        const char* yaml_signature = "%YAML";
        const char* json_signature = "{";
        const char* xml_signature  = "<?xml";
        char buf[16];
        icvGets( fs, buf, sizeof(buf)-2 );
        char* bufPtr = cv_skip_BOM(buf);
        size_t bufOffset = bufPtr - buf;

        if(strncmp( bufPtr, yaml_signature, strlen(yaml_signature) ) == 0)
            fs->fmt = CV_STORAGE_FORMAT_YAML;
        else if(strncmp( bufPtr, json_signature, strlen(json_signature) ) == 0)
            fs->fmt = CV_STORAGE_FORMAT_JSON;
        else if(strncmp( bufPtr, xml_signature, strlen(xml_signature) ) == 0)
            fs->fmt = CV_STORAGE_FORMAT_XML;
        else if(fs->strbufsize  == bufOffset)
            CV_Error(CV_BADARG_ERR, "Input file is empty");
        else
            CV_Error(CV_BADARG_ERR, "Unsupported file storage format");

        if( !isGZ )
        {
            if( !mem )
            {
                fseek( fs->file, 0, SEEK_END );
                buf_size = ftell( fs->file );
            }
            else
                buf_size = fs->strbufsize;
            buf_size = MIN( buf_size, (size_t)(1 << 20) );
            buf_size = MAX( buf_size, (size_t)(CV_FS_MAX_LEN*2 + 1024) );
        }
        icvRewind(fs);
        fs->strbufpos = bufOffset;

        fs->str_hash = cvCreateMap( 0, sizeof(CvStringHash),
                        sizeof(CvStringHashNode), fs->memstorage, 256 );

        fs->roots = cvCreateSeq( 0, sizeof(CvSeq),
                        sizeof(CvFileNode), fs->memstorage );

        fs->buffer = fs->buffer_start = (char*)cvAlloc( buf_size + 256 );
        fs->buffer_end = fs->buffer_start + buf_size;
        fs->buffer[0] = '\n';
        fs->buffer[1] = '\0';

        //mode = cvGetErrMode();
        //cvSetErrMode( CV_ErrModeSilent );
        CV_TRY
        {
            switch (fs->fmt)
            {
            case CV_STORAGE_FORMAT_XML : { icvXMLParse ( fs ); break; }
            case CV_STORAGE_FORMAT_YAML: { icvYMLParse ( fs ); break; }
            case CV_STORAGE_FORMAT_JSON: { icvJSONParse( fs ); break; }
            default: break;
            }
        }
        CV_CATCH_ALL
        {
            fs->is_opened = true;
            cvReleaseFileStorage( &fs );
            CV_RETHROW();
        }
        //cvSetErrMode( mode );

        // release resources that we do not need anymore
        cvFree( &fs->buffer_start );
        fs->buffer = fs->buffer_end = 0;
    }
    fs->is_opened = true;

_exit_:
    if( fs )
    {
        if( cvGetErrStatus() < 0 || (!fs->file && !fs->gzfile && !fs->outbuf && !fs->strbuf) )
        {
            cvReleaseFileStorage( &fs );
        }
        else if( !fs->write_mode )
        {
            icvCloseFile(fs);
            // we close the file since it's not needed anymore, but icvCloseFile() resets is_opened,
            // which would be misleading, so we restore the value of is_opened.
            fs->is_opened = true;
        }
    }

    return  fs;
}
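
A minimal round-trip sketch for cvOpenFileStorage (hedged: "test.yml" is a hypothetical file name; only documented legacy C persistence calls are used):

// write a named integer
CvFileStorage* fs = cvOpenFileStorage( "test.yml", 0, CV_STORAGE_WRITE, 0 );
cvWriteInt( fs, "frame_count", 10 );
cvReleaseFileStorage( &fs );

// read it back; -1 is the default returned if the node is missing
fs = cvOpenFileStorage( "test.yml", 0, CV_STORAGE_READ, 0 );
int frame_count = cvReadIntByName( fs, 0, "frame_count", -1 );
cvReleaseFileStorage( &fs );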
コード例 #26
0
ファイル: core_c.cpp プロジェクト: neutmute/emgucv
int cveGetErrStatus()
{
   return cvGetErrStatus();
}
コード例 #27
0
ファイル: bgfg_acmmm2003.cpp プロジェクト: 09beezahmad/opencv
// Function cvCreateFGDStatModel initializes foreground detection process
// parameters:
//      first_frame - frame from video sequence
//      parameters  - (optional) if NULL default parameters of the algorithm will be used
//      p_model     - pointer to CvFGDStatModel structure
CV_IMPL CvBGStatModel*
cvCreateFGDStatModel( IplImage* first_frame, CvFGDStatModelParams* parameters )
{
    CvFGDStatModel* p_model = 0;
    
    CV_FUNCNAME( "cvCreateFGDStatModel" );

    __BEGIN__;
    
    int i, j, k, pixel_count, buf_size;
    CvFGDStatModelParams params;

    if( !CV_IS_IMAGE(first_frame) )
        CV_ERROR( CV_StsBadArg, "Invalid or NULL first_frame parameter" );

    if (first_frame->nChannels != 3)
        CV_ERROR( CV_StsBadArg, "first_frame must have 3 color channels" );

    // Initialize parameters:
    if( parameters == NULL )
    {
        params.Lc      = CV_BGFG_FGD_LC;
        params.N1c     = CV_BGFG_FGD_N1C;
        params.N2c     = CV_BGFG_FGD_N2C;

        params.Lcc     = CV_BGFG_FGD_LCC;
        params.N1cc    = CV_BGFG_FGD_N1CC;
        params.N2cc    = CV_BGFG_FGD_N2CC;

        params.delta   = CV_BGFG_FGD_DELTA;

        params.alpha1  = CV_BGFG_FGD_ALPHA_1;
        params.alpha2  = CV_BGFG_FGD_ALPHA_2;
        params.alpha3  = CV_BGFG_FGD_ALPHA_3;

        params.T       = CV_BGFG_FGD_T;
        params.minArea = CV_BGFG_FGD_MINAREA;

        params.is_obj_without_holes = 1;
        params.perform_morphing     = 1;
    }
    else
    {
        params = *parameters;
    }

    CV_CALL( p_model = (CvFGDStatModel*)cvAlloc( sizeof(*p_model) ));
    memset( p_model, 0, sizeof(*p_model) );
    p_model->type = CV_BG_MODEL_FGD;
    p_model->release = (CvReleaseBGStatModel)icvReleaseFGDStatModel;
    p_model->update = (CvUpdateBGStatModel)icvUpdateFGDStatModel;;
    p_model->params = params;

    // Initialize storage pools:
    pixel_count = first_frame->width * first_frame->height;
    
    buf_size = pixel_count*sizeof(p_model->pixel_stat[0]);
    CV_CALL( p_model->pixel_stat = (CvBGPixelStat*)cvAlloc(buf_size) );
    memset( p_model->pixel_stat, 0, buf_size );
    
    buf_size = pixel_count*params.N2c*sizeof(p_model->pixel_stat[0].ctable[0]);
    CV_CALL( p_model->pixel_stat[0].ctable = (CvBGPixelCStatTable*)cvAlloc(buf_size) );
    memset( p_model->pixel_stat[0].ctable, 0, buf_size );

    buf_size = pixel_count*params.N2cc*sizeof(p_model->pixel_stat[0].cctable[0]);
    CV_CALL( p_model->pixel_stat[0].cctable = (CvBGPixelCCStatTable*)cvAlloc(buf_size) );
    memset( p_model->pixel_stat[0].cctable, 0, buf_size );

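    // both tables are allocated as single contiguous pools (attached to pixel 0);
    // the loop below just points each pixel's ctable/cctable into its own slice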
    for(     i = 0, k = 0; i < first_frame->height; i++ ) {
        for( j = 0;        j < first_frame->width;  j++, k++ )
        {
            p_model->pixel_stat[k].ctable = p_model->pixel_stat[0].ctable + k*params.N2c;
            p_model->pixel_stat[k].cctable = p_model->pixel_stat[0].cctable + k*params.N2cc;
        }
    }

    // Init temporary images:
    CV_CALL( p_model->Ftd = cvCreateImage(cvSize(first_frame->width, first_frame->height), IPL_DEPTH_8U, 1));
    CV_CALL( p_model->Fbd = cvCreateImage(cvSize(first_frame->width, first_frame->height), IPL_DEPTH_8U, 1));
    CV_CALL( p_model->foreground = cvCreateImage(cvSize(first_frame->width, first_frame->height), IPL_DEPTH_8U, 1));

    CV_CALL( p_model->background = cvCloneImage(first_frame));
    CV_CALL( p_model->prev_frame = cvCloneImage(first_frame));
    CV_CALL( p_model->storage = cvCreateMemStorage());

    __END__;

    if( cvGetErrStatus() < 0 )
    {
        CvBGStatModel* base_ptr = (CvBGStatModel*)p_model;

        if( p_model && p_model->release )
            p_model->release( &base_ptr );
        else
            cvFree( &p_model );
        p_model = 0;
    }

    return (CvBGStatModel*)p_model;
}
コード例 #28
0
ファイル: persistence_c.cpp プロジェクト: Achraf33/opencv
CV_IMPL void*
cvLoad( const char* filename, CvMemStorage* memstorage,
        const char* name, const char** _real_name )
{
    void* ptr = 0;
    const char* real_name = 0;
    cv::FileStorage fs(cvOpenFileStorage(filename, memstorage, CV_STORAGE_READ));

    CvFileNode* node = 0;

    if( !fs.isOpened() )
        return 0;

    if( name )
    {
        node = cvGetFileNodeByName( *fs, 0, name );
    }
    else
    {
        int i, k;
        for( k = 0; k < (*fs)->roots->total; k++ )
        {
            CvSeq* seq;
            CvSeqReader reader;

            node = (CvFileNode*)cvGetSeqElem( (*fs)->roots, k );
            CV_Assert(node != NULL);
            if( !CV_NODE_IS_MAP( node->tag ))
                return 0;
            seq = node->data.seq;
            node = 0;

            cvStartReadSeq( seq, &reader, 0 );

            // find the first element in the map
            for( i = 0; i < seq->total; i++ )
            {
                if( CV_IS_SET_ELEM( reader.ptr ))
                {
                    node = (CvFileNode*)reader.ptr;
                    goto stop_search;
                }
                CV_NEXT_SEQ_ELEM( seq->elem_size, reader );
            }
        }

stop_search:
        ;
    }

    if( !node )
        CV_Error( CV_StsObjectNotFound, "Could not find the/an object in file storage" );

    real_name = cvGetFileNodeName( node );
    ptr = cvRead( *fs, node, 0 );

    // sanity check
    if( !memstorage && (CV_IS_SEQ( ptr ) || CV_IS_SET( ptr )) )
        CV_Error( CV_StsNullPtr,
        "NULL memory storage is passed - the loaded dynamic structure can not be stored" );

    if( cvGetErrStatus() < 0 )
    {
        cvRelease( (void**)&ptr );
        real_name = 0;
    }

    if( _real_name )
    {
        if( real_name )
        {
            // allocate one extra byte and copy the terminating '\0' as well
            *_real_name = (const char*)cvAlloc( strlen(real_name)+1 );
            memcpy( (void*)*_real_name, real_name, strlen(real_name)+1 );
        }
        else
            *_real_name = 0;
    }

    return ptr;
}
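
A minimal usage sketch for cvLoad (hedged: "camera_matrix.xml" is a hypothetical file previously written with cvSave):

CvMat* M = (CvMat*)cvLoad( "camera_matrix.xml", 0, 0, 0 );  // cast to the stored type
if( M )
{
    printf( "loaded %d x %d matrix\n", M->rows, M->cols );
    cvReleaseMat( &M );  // a plain CvMat does not need the memstorage argument
}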