Example No. 1
CV_IMPL void
cvConDensInitSampleSet( CvConDensation * conDens, CvMat * lowerBound, CvMat * upperBound )
{
    int i, j;
    float *LBound;
    float *UBound;
    float Prob = 1.f / conDens->SamplesNum;

    CV_FUNCNAME( "cvConDensInitSampleSet" );
    __BEGIN__;
    
    if( !conDens || !lowerBound || !upperBound )
        CV_ERROR( CV_StsNullPtr, "" );

    if( CV_MAT_TYPE(lowerBound->type) != CV_32FC1 ||
        !CV_ARE_TYPES_EQ(lowerBound,upperBound) )
        CV_ERROR( CV_StsBadArg, "source does not have the appropriate format" );

    if( (lowerBound->cols != 1) || (upperBound->cols != 1) )
        CV_ERROR( CV_StsBadArg, "source does not have the appropriate size" );

    if( (lowerBound->rows != conDens->DP) || (upperBound->rows != conDens->DP) )
        CV_ERROR( CV_StsBadArg, "source does not have the appropriate size" );

    LBound = lowerBound->data.fl;
    UBound = upperBound->data.fl;
    /* Initializing the structures to create initial Sample set */
    // Allocate a random-number generator for each system state, based on the dynamic range given by the input bounds
    for( i = 0; i < conDens->DP; i++ )
    {
        cvRandInit( &(conDens->RandS[i]),
                    LBound[i],
                    UBound[i],
                    i );
    }
    /* Generating the samples */
    // Using the generated random numbers, assign an initial value to every system state of each particle, and set every particle's confidence to the same value 1/n
    for( j = 0; j < conDens->SamplesNum; j++ )
    {
        for( i = 0; i < conDens->DP; i++ )
        {
            cvbRand( conDens->RandS + i, conDens->flSamples[j] + i, 1 );
        }
        conDens->flConfidence[j] = Prob;
    }
    /* Reinitializes the structures to update samples randomly */
    // Re-create the random structures used later to perturb the particle states; the sampling range becomes -1/5 to 1/5 of the original initial range
    for( i = 0; i < conDens->DP; i++ )
    {
        cvRandInit( &(conDens->RandS[i]),
                    (LBound[i] - UBound[i]) / 5,
                    (UBound[i] - LBound[i]) / 5,
                    i);
    }

    __END__;
}
Example No. 2
CV_IMPL void
cvConDensInitSampleSet( CvConDensation * conDens, CvMat * lowerBound, CvMat * upperBound )
{
    int i, j;
    float *LBound;
    float *UBound;
    float Prob = 1.f / conDens->SamplesNum;

    if( !conDens || !lowerBound || !upperBound )
        CV_Error( CV_StsNullPtr, "" );

    if( CV_MAT_TYPE(lowerBound->type) != CV_32FC1 ||
        !CV_ARE_TYPES_EQ(lowerBound,upperBound) )
        CV_Error( CV_StsBadArg, "source does not have the appropriate format" );

    if( (lowerBound->cols != 1) || (upperBound->cols != 1) )
        CV_Error( CV_StsBadArg, "source does not have the appropriate size" );

    if( (lowerBound->rows != conDens->DP) || (upperBound->rows != conDens->DP) )
        CV_Error( CV_StsBadArg, "source does not have the appropriate size" );

    LBound = lowerBound->data.fl;
    UBound = upperBound->data.fl;
    /* Initializing the structures to create initial Sample set */
    for( i = 0; i < conDens->DP; i++ )
    {
        cvRandInit( &(conDens->RandS[i]),
                    LBound[i],
                    UBound[i],
                    i );
    }
    /* Generating the samples */
    for( j = 0; j < conDens->SamplesNum; j++ )
    {
        for( i = 0; i < conDens->DP; i++ )
        {
            cvbRand( conDens->RandS + i, conDens->flSamples[j] + i, 1 );
        }
        conDens->flConfidence[j] = Prob;
    }
    /* Reinitializes the structures to update samples randomly */
    for( i = 0; i < conDens->DP; i++ )
    {
        cvRandInit( &(conDens->RandS[i]),
                    (LBound[i] - UBound[i]) / 5,
                    (UBound[i] - LBound[i]) / 5,
                    i);
    }
}
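For context, a minimal usage sketch of cvConDensInitSampleSet follows; the state dimensions, sample count, and bound values here are illustrative assumptions, not taken from the examples above.

/* Illustrative sketch: a 2-state (x, y) tracker with 100 samples. */
CvConDensation* cd = cvCreateConDensation( 2 /* DP */, 2 /* MP */, 100 /* SamplesNum */ );
CvMat* lb = cvCreateMat( 2, 1, CV_32FC1 );  /* per-dimension lower bounds */
CvMat* ub = cvCreateMat( 2, 1, CV_32FC1 );  /* per-dimension upper bounds */

cvmSet( lb, 0, 0, 0.0 );  cvmSet( ub, 0, 0, 640.0 );  /* x in [0, 640] */
cvmSet( lb, 1, 0, 0.0 );  cvmSet( ub, 1, 0, 480.0 );  /* y in [0, 480] */

/* draws SamplesNum uniform samples and sets every confidence to 1/SamplesNum */
cvConDensInitSampleSet( cd, lb, ub );

cvReleaseMat( &lb );
cvReleaseMat( &ub );
cvReleaseConDensation( &cd );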
Example No. 3
KalmanFilter::KalmanFilter()
{

	int dynam_params = 8; // x,y,width,height,dx,dy,dw,dh
	int measure_params = 4;//x,y,width,height

	m_pKalmanFilter = cvCreateKalman(dynam_params, measure_params,0);
	cvRandInit( &rng, 0, 1, -1, CV_RAND_UNI );
	cvRandSetRange( &rng, 0, 1, 0 );
	rng.disttype = CV_RAND_NORMAL;
	measurement = cvCreateMat( measure_params, 1, CV_32FC1 ); 
	cvZero(measurement);

	// F matrix data
	// F is transition matrix. It relates how the states interact
	const float F[] = {
	1, 0, 0, 0, 1, 0, 0, 0, //x + dx
	0, 1, 0, 0, 0, 1, 0, 0,//y + dy
	0, 0, 1, 0, 0, 0, 1, 0,//width + dw
	0, 0, 0, 1, 0, 0, 0, 1,//height + dh
	0, 0, 0, 0, 1, 0, 0, 0,//dx
	0, 0, 0, 0, 0, 1, 0, 0,//dy
	0, 0, 0, 0, 0, 0, 1, 0,//dw
	0, 0, 0, 0, 0, 0, 0, 1 //dh
	};

	memcpy( m_pKalmanFilter->transition_matrix->data.fl, F, sizeof(F));
	cvSetIdentity( m_pKalmanFilter->measurement_matrix, cvRealScalar(1) ); // (H)
	cvSetIdentity( m_pKalmanFilter->process_noise_cov, cvRealScalar(1e-5) ); // (Q)
	cvSetIdentity( m_pKalmanFilter->measurement_noise_cov, cvRealScalar(1e-1) ); //(R)
	cvSetIdentity( m_pKalmanFilter->error_cov_post, cvRealScalar(1));

	// choose random initial state
	cvRand( &rng, m_pKalmanFilter->state_post );

}
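A hypothetical per-frame update for this class might look like the sketch below; the method name update, its CvRect interface, and the assumption that m_pKalmanFilter and measurement are accessible members (as the constructor suggests) are all assumptions, not part of the original listing.

// Hypothetical sketch: feed one observed bounding box into the filter and
// return the corrected (posterior) box estimate. Not part of the original class.
CvRect KalmanFilter::update( const CvRect& box )
{
	// predict the next state (x, y, w, h, dx, dy, dw, dh); updates state_pre internally
	cvKalmanPredict( m_pKalmanFilter, 0 );

	// fill the 4x1 measurement vector with the observed box
	measurement->data.fl[0] = (float)box.x;
	measurement->data.fl[1] = (float)box.y;
	measurement->data.fl[2] = (float)box.width;
	measurement->data.fl[3] = (float)box.height;

	// correct the filter with the measurement and read back the posterior estimate
	const CvMat* corrected = cvKalmanCorrect( m_pKalmanFilter, measurement );
	return cvRect( cvRound(corrected->data.fl[0]), cvRound(corrected->data.fl[1]),
	               cvRound(corrected->data.fl[2]), cvRound(corrected->data.fl[3]) );
}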
int main( int argc, char *argv[] ) {

    int webcamRun = 0;
    
    CvCapture *capture = 0;
    CvCapture *stereoCapture = 0;
    
    CvCapture **cptPtr = &capture;
    CvCapture **scptPtr = &stereoCapture;
    
    initCaptureFiles( argc, argv, cptPtr, scptPtr, &webcamRun, &enable3D );
  
  
  
  char locationMean[300];
  char locationCov[300];
  strncpy( locationMean, argv[1], 300 );
  strncpy(  locationCov, argv[2], 300 );
  
  
  
  /* check for stereo file */  
  
  
  
  if( !enable3D ) {
    displayStereoFrame   = 0;
    displayPositionFrame = 0;
    depthScaling         = 0;
  }
  
    
  /* Fast-forwards through the video to the action */
  int kl;
  for( kl = 0; kl < 100 && webcamRun == 0; kl++ ) {
    cvQueryFrame( capture );
    if( enable3D ) cvQueryFrame( stereoCapture );
  }
    
    
  /*
      Housekeeping
  */
  
  int maxBox[10];
  // this should be in the relevant file
  //! Create matrix files to hold intermediate calculations for likelihood
  
  
  
  


  /* this should have its own matrix setup function and not be hard-coded */
  //! Create N-dimensional matrices to hold mean and cov data
  int backMeanSizes[] = {_sizeY,_sizeX,4,_numResponses};
  int backCovSizes[]  = {_sizeY,_sizeX,4,_numResponses};

  CvMatND *backMean = cvCreateMatND( 4, backMeanSizes, CV_32F );
  CvMatND *backCov  = cvCreateMatND( 4, backCovSizes,  CV_32F );

  /* Fix this */
  if( webcamRun ) {
    backMean = (CvMatND*) cvLoad( "D:/Will/Dropbox/My Dropbox/Project/Matlab/backMean.xml", NULL, NULL, NULL );
    backCov  = (CvMatND*) cvLoad( "D:/Will/Dropbox/My Dropbox/Project/Matlab/backCov.xml", NULL, NULL, NULL );
  } else {
    backMean = (CvMatND*) cvLoad( locationMean, NULL, NULL, NULL );
    backCov  = (CvMatND*) cvLoad( locationCov, NULL, NULL, NULL );
  }
  
  /* end here */
  
  
  CvMat *imgLikelihood = cvCreateMat( 48, 64, CV_32F );
  


  img  = cvLoadImage( "foreground8/image (1).jpg", CV_LOAD_IMAGE_COLOR );
  yImg = cvLoadImage( "foreground8/image (1).jpg", CV_LOAD_IMAGE_COLOR );
  cvNamedWindow( "Tracker", CV_WINDOW_AUTOSIZE );
  
  
  /* Condensation stuff */
  ConDens = cvCreateConDensation( DP, MP, nSamples );
  
  bx = 320; by = 240;
	//hm = 0; //vm = 0;

  /* Initialize the random number generator */
	rng_state = cvRNG(0xffffffff);
 
  initializeCondensation();
  
  /* This allows us to change the probability with which Condensation alters each variable */
  cvRandInit( &(ConDens->RandS[0]), -75, 75, 0, CV_RAND_UNI);
  cvRandInit( &(ConDens->RandS[1]),  -5,  5, 1, CV_RAND_UNI);
  cvRandInit( &(ConDens->RandS[2]),  -5,  5, 2, CV_RAND_UNI);
  cvRandInit( &(ConDens->RandS[3]),  -2,  2, 3, CV_RAND_UNI);
  
  cvRandInit( &(ConDens->RandS[4]), -75, 75, 4, CV_RAND_UNI);
  cvRandInit( &(ConDens->RandS[5]),  -5,  5, 5, CV_RAND_UNI);
  cvRandInit( &(ConDens->RandS[6]),  -5,  5, 6, CV_RAND_UNI);
  cvRandInit( &(ConDens->RandS[7]),  -2,  2, 7, CV_RAND_UNI);
  
  /*
    If we have depth scaling, the depth controls the width & height of the box
    So we don't want any randomness
  */
  if( depthScaling ) {
    cvRandInit( &(ConDens->RandS[3]),  0,  0, 3, CV_RAND_UNI);
    cvRandInit( &(ConDens->RandS[7]),  0,  0, 7, CV_RAND_UNI);
  }

  
  IplImage* heatFrame = NULL;
  heatFrame = cvQueryFrame( capture );
  if(enable3D) cvQueryFrame( stereoCapture ); // making sure they stay in sync (and init positionFrame)
  
  
  int frameNumb = 0;
  // int mjk;
  // for( mjk = 0; mjk < 320; mjk++ ) {
    // cvQueryFrame( capture );
    // cvQueryFrame( stereoCapture );
    // frameNumb++;
  // }
  

  
  positionFrame = cvCreateImage(cvSize(640, 510), 8, 3);
  
  
  int trailLength = 20;
  int planX1Pos[20] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
  int planX2Pos[20] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
  int planZ1Pos[20] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
  int planZ2Pos[20] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
  

  int key = 0;
  double totalTime = 0;
  int totalFrames = 0;
  
  
  
  while( key != 'q' ) {
  
    // stopCount++;
    // strcpy(frameNameTemp, frameName);
    // itoa(stopCount, frameCount, 10);
    // strcat( frameNameTemp, frameCount);
    // strcat(frameNameTemp, ".jpg");
    // cvSaveImage(frameNameTemp, positionFrame, 0);
    
    
    /* Start timing */
    clock_t start = clock();
        
        frameNumb++;
        printf("Frame %d\n", frameNumb);
        if( frameNumb == 210 ) {
          for( ; frameNumb < 290; frameNumb++ ) {
            cvQueryFrame( capture );
            cvQueryFrame( stereoCapture );
          }
        }
        
        if( frameNumb == 350 ) {
          return(0);
        }
        
        /* Get the first video frame, and maybe stereo frame */
        frame = cvQueryFrame( capture );
        if(enable3D) stereoFrame = cvQueryFrame( stereoCapture );
        
        
        /* Double-check that we haven't reached the end */
        if( !frame ) break;
        if( enable3D ) if( !stereoFrame ) break;
        
        
        /* Compute likelihoods for new frame, using mean and cov */
        likelihood( frame, backMean, backCov, imgLikelihood );
        
        /* Update the Condensation model using new likelihoods */
        updateCondensation( 0, 0, imgLikelihood, maxBox );

        /* Maybe display the Condensation particles */
        if( displayParticles ) 
          drawParticles();

        /* Draw tracking boxes onto the video and stereo video frame */
        drawTrackingBoxes( 0, maxBox, frame, stereoFrame );
        drawTrackingBoxes( 1, maxBox, frame, stereoFrame );

        /* Show the latest video frame (with boxes and particles) */
        cvShowImage( "Tracker", frame );

        /* Maybe show latest stereo depth frame */
        if( displayStereoFrame )
          cvShowImage( "Stereo", stereoFrame );
      
        /* Maybe show latest position map */
        if( displayPositionFrame )
          drawPositionTrail( maxBox, trailLength, 
                             depth, 
                             planX1Pos, planX2Pos, planZ1Pos, 
                             planZ2Pos, positionFrame );
        
        /* Maybe show the heat map of the input image (the likelihood for each patch) */
        if( displayHeatFrame ) {
          drawHeatmap( maxBox, imgLikelihood, heatFrame );
          cvShowImage( "Heat", heatFrame );
        }
        
        /* Update previous x, y positions */
        prevX[0] = maxBox[0]; prevY[0] = maxBox[1];
        prevX[1] = maxBox[4]; prevY[1] = maxBox[5];
        
        
        /* Calculate fps and average fps */
        printf("fps: %3.1f, ", (double)1/(((double)clock() - start) / CLOCKS_PER_SEC));
        totalTime = totalTime + (double)1/(((double)clock() - start) / CLOCKS_PER_SEC);
        totalFrames++;
        printf(" average fps %3.1f", totalTime/totalFrames);
        
        /* Check for key presses */
        key = cvWaitKey( 50 );
        
        /* Enable debugging mode */
        if( key == 'd' ) {
          debug = 1;
          printf("\n*********\n\nDebug: ON\n\n*********\n");
        }
        
        /* Toggle particle drawing */
        if( key == 'p' ) {
          if( stateDraw )
            stateDraw = 0;
          else
            stateDraw = 1;
          printf("\n*********\n\nParticle Drawing Toggled\n\n*********\n");
        }

    }
  
  printf("\n\nquitting");

//cvReleaseVideoWriter( &writerTracker );
cvReleaseImage( &img );
cvReleaseImage( &frame );
cvDestroyWindow( "Tracker" );
cvDestroyWindow( "Heat" );
cvDestroyWindow( "Stereo" );
cvDestroyWindow( "Position" );

return(0);

}
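The helper initializeCondensation() called in main() is not shown in this listing. A plausible sketch, assuming the global ConDens uses DP = 8 state variables and a 640x480 frame, is given below; the bounds are illustrative only, the original presumably also sets ConDens->DynamMatr (omitted here), and main() re-seeds ConDens->RandS explicitly right after this call anyway.

/* Hypothetical sketch of initializeCondensation(); not the original implementation. */
void initializeCondensation( void )
{
    CvMat* lower = cvCreateMat( 8, 1, CV_32FC1 );
    CvMat* upper = cvCreateMat( 8, 1, CV_32FC1 );
    int i;

    for( i = 0; i < 8; i++ )
    {
        /* illustrative bounds: states 0 and 4 look like x positions, 1 and 5 like
           y positions, the remaining ones like box sizes or velocities */
        cvmSet( lower, i, 0, 0.0 );
        cvmSet( upper, i, 0, (i % 4 == 0) ? 640.0 : (i % 4 == 1) ? 480.0 : 100.0 );
    }

    /* spread the initial samples uniformly and give each a weight of 1/nSamples */
    cvConDensInitSampleSet( ConDens, lower, upper );

    cvReleaseMat( &lower );
    cvReleaseMat( &upper );
}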
Example No. 5
static CvTestSeqElem* icvTestSeqReadElemOne(CvTestSeq_* pTS, CvFileStorage* fs, CvFileNode* node)
{
    int             noise_type = CV_NOISE_NONE;
    CvTestSeqElem*  pElem = NULL;
    const char*     pVideoName = cvReadStringByName( fs, node,"Video", NULL);
    const char*     pVideoObjName = cvReadStringByName( fs, node,"VideoObj", NULL);

    if(pVideoName)
    {   /* Check the noise flag: */
        if( cv_stricmp(pVideoName,"noise_gaussian") == 0 ||
            cv_stricmp(pVideoName,"noise_normal") == 0) noise_type = CV_NOISE_GAUSSIAN;
        if( cv_stricmp(pVideoName,"noise_uniform") == 0) noise_type = CV_NOISE_UNIFORM;
        if( cv_stricmp(pVideoName,"noise_speckle") == 0) noise_type = CV_NOISE_SPECKLE;
        if( cv_stricmp(pVideoName,"noise_salt_and_pepper") == 0) noise_type = CV_NOISE_SALT_AND_PEPPER;
    }

    if((pVideoName || pVideoObjName ) && noise_type == CV_NOISE_NONE)
    {   /* Read other elements: */
        if(pVideoName) pElem = icvTestSeqReadElemAll(pTS, fs, pVideoName);
        if(pVideoObjName)
        {
            CvTestSeqElem* pE;
            pElem = icvTestSeqReadElemAll(pTS, fs, pVideoObjName);
            for(pE=pElem;pE;pE=pE->next)
            {
                pE->ObjID = pTS->ObjNum;
                pE->pObjName = pVideoObjName;
            }
            pTS->ObjNum++;
        }
    }   /* Read other elements. */
    else
    {   /* Create new element: */
        CvFileNode* pPosNode = cvGetFileNodeByName( fs, node,"Pos");
        CvFileNode* pSizeNode = cvGetFileNodeByName( fs, node,"Size");
        int AutoSize = (pSizeNode && CV_NODE_IS_STRING(pSizeNode->tag) && cv_stricmp("auto",cvReadString(pSizeNode,""))==0);
        int AutoPos = (pPosNode && CV_NODE_IS_STRING(pPosNode->tag) && cv_stricmp("auto",cvReadString(pPosNode,""))==0);
        const char* pFileName = cvReadStringByName( fs, node,"File", NULL);
        pElem = (CvTestSeqElem*)cvAlloc(sizeof(CvTestSeqElem));
        memset(pElem,0,sizeof(CvTestSeqElem));

        pElem->ObjID = -1;
        pElem->noise_type = noise_type;
        cvRandInit( &pElem->rnd_state, 1, 0, 0,CV_RAND_NORMAL);

        if(pFileName && pElem->noise_type == CV_NOISE_NONE)
        {   /* If AVI or BMP: */
            size_t  l = strlen(pFileName);
            pElem->pFileName = pFileName;

            pElem->type = SRC_TYPE_IMAGE;
            if(cv_stricmp(".avi",pFileName+l-4) == 0)pElem->type = SRC_TYPE_AVI;

            if(pElem->type == SRC_TYPE_IMAGE)
            {
                //pElem->pImg = cvLoadImage(pFileName);
                if(pElem->pImg)
                {
                    pElem->FrameNum = 1;
                    if(pElem->pImgMask)cvReleaseImage(&(pElem->pImgMask));

                    pElem->pImgMask = cvCreateImage(
                        cvSize(pElem->pImg->width,pElem->pImg->height),
                        IPL_DEPTH_8U,1);
                    icvTestSeqCreateMask(pElem->pImg,pElem->pImgMask,FG_BG_THRESHOLD);
                }
            }

            if(pElem->type == SRC_TYPE_AVI && pFileName)
            {
                //pElem->pAVI = cvCaptureFromFile(pFileName);

                if(pElem->pAVI)
                {
                    IplImage* pImg = 0;//cvQueryFrame(pElem->pAVI);
                    pElem->pImg = cvCloneImage(pImg);
                    pElem->pImg->origin = 0;
                    //cvSetCaptureProperty(pElem->pAVI,CV_CAP_PROP_POS_FRAMES,0);
                    pElem->FrameBegin = 0;
                    pElem->AVILen = pElem->FrameNum = 0;//(int)cvGetCaptureProperty(pElem->pAVI, CV_CAP_PROP_FRAME_COUNT);
                    //cvReleaseCapture(&pElem->pAVI);
                    pElem->pAVI = NULL;
                }
                else
                {
                    printf("WARNING!!! Cannot open avi file %s\n",pFileName);
                }
            }

        }   /* If AVI or BMP. */

        if(pPosNode)
        {   /* Read positions: */
            if(CV_NODE_IS_SEQ(pPosNode->tag))
            {
                int num = pPosNode->data.seq->total;
                pElem->pPos = (CvPoint2D32f*)cvAlloc(sizeof(float)*num);
                cvReadRawData( fs, pPosNode, pElem->pPos, "f" );
                pElem->PosNum = num/2;
                if(pElem->FrameNum == 0) pElem->FrameNum = pElem->PosNum;
            }
        }

        if(pSizeNode)
        {   /* Read sizes: */
            if(CV_NODE_IS_SEQ(pSizeNode->tag))
            {
                int num = pSizeNode->data.seq->total;
                pElem->pSize = (CvPoint2D32f*)cvAlloc(sizeof(float)*num);
                cvReadRawData( fs, pSizeNode, pElem->pSize, "f" );
                pElem->SizeNum = num/2;
            }
        }

        if(AutoPos || AutoSize)
        {   /* Auto size and pos: */
            int     i;
            int     num = (pElem->type == SRC_TYPE_AVI)?pElem->AVILen:1;
            if(AutoSize)
            {
                pElem->pSize = (CvPoint2D32f*)cvAlloc(sizeof(CvPoint2D32f)*num);
                pElem->SizeNum = num;
            }
            if(AutoPos)
            {
                pElem->pPos = (CvPoint2D32f*)cvAlloc(sizeof(CvPoint2D32f)*num);
                pElem->PosNum = num;
            }

            for(i=0; i<num; ++i)
            {
                IplImage* pFG = NULL;
                CvPoint2D32f* pPos = AutoPos?(pElem->pPos + i):NULL;
                CvPoint2D32f* pSize = AutoSize?(pElem->pSize + i):NULL;

                icvTestSeqQureyFrameElem(pElem,i);
                pFG = pElem->pImgMask;

                if(pPos)
                {
                    pPos->x = 0.5f;
                    pPos->y = 0.5f;
                }
                if(pSize)
                {
                    pSize->x = 0;
                    pSize->y = 0;
                }

                if(pFG)
                {
                    double      M00;
                    CvMoments   m;
                    cvMoments( pElem->pImgMask, &m, 0 );
                    M00 = cvGetSpatialMoment( &m, 0, 0 );

                    if(M00 > 0 && pSize )
                    {
                        double X = cvGetSpatialMoment( &m, 1, 0 )/M00;
                        double Y = cvGetSpatialMoment( &m, 0, 1 )/M00;
                        double XX = (cvGetSpatialMoment( &m, 2, 0 )/M00) - X*X;
                        double YY = (cvGetSpatialMoment( &m, 0, 2 )/M00) - Y*Y;
                        pSize->x = (float)(4*sqrt(XX))/(pElem->pImgMask->width-1);
                        pSize->y = (float)(4*sqrt(YY))/(pElem->pImgMask->height-1);
                    }

                    if(M00 > 0 && pPos)
                    {
                        pPos->x = (float)(cvGetSpatialMoment( &m, 1, 0 )/(M00*(pElem->pImgMask->width-1)));
                        pPos->y = (float)(cvGetSpatialMoment( &m, 0, 1 )/(M00*(pElem->pImgMask->height-1)));
                    }

                    if(pPos)
                    {   /* Another way to calculate y pos
                         * using object median:
                         */
                        int y0=0, y1=pFG->height-1;
                        for(y0=0; y0<pFG->height; ++y0)
                        {
                            CvMat       m;
                            CvScalar    s = cvSum(cvGetRow(pFG, &m, y0));
                            if(s.val[0] > 255*7) break;
                        }

                        for(y1=pFG->height-1; y1>0; --y1)
                        {
                            CvMat m;
                            CvScalar s = cvSum(cvGetRow(pFG, &m, y1));
                            if(s.val[0] > 255*7) break;
                        }

                        pPos->y = (y0+y1)*0.5f/(pFG->height-1);
                    }
                }   /* pFG */
            }   /* Next frame. */

            //if(pElem->pAVI) cvReleaseCapture(&pElem->pAVI);

            pElem->pAVI = NULL;

        }   /* End auto position creation. */
    }   /*  Create new element. */

    if(pElem)
    {   /* Read transforms and: */
        int             FirstFrame, LastFrame;
        CvTestSeqElem*  p=pElem;
        CvFileNode*     pTransNode = NULL;
        CvFileNode*     pS = NULL;
        int             ShiftByPos = 0;
        int             KeyFrames[1024];
        CvSeq*          pTransSeq = NULL;
        int             KeyFrameNum = 0;

        pTransNode = cvGetFileNodeByName( fs, node,"Trans");

        while( pTransNode &&
               CV_NODE_IS_STRING(pTransNode->tag) &&
               cv_stricmp("auto",cvReadString(pTransNode,""))!=0)
        {   /* Trans is reference: */
            pTransNode = cvGetFileNodeByName( fs, NULL,cvReadString(pTransNode,""));
        }

        pS = cvGetFileNodeByName( fs, node,"Shift");
        ShiftByPos = 0;
        pTransSeq = pTransNode?(CV_NODE_IS_SEQ(pTransNode->tag)?pTransNode->data.seq:NULL):NULL;
        KeyFrameNum = pTransSeq?pTransSeq->total:1;

        if(   (pS && CV_NODE_IS_STRING(pS->tag) && cv_stricmp("auto",cvReadString(pS,""))==0)
            ||(pTransNode && CV_NODE_IS_STRING(pTransNode->tag) && cv_stricmp("auto",cvReadString(pTransNode,""))==0))
        {
            ShiftByPos = 1;
        }

        FirstFrame = pElem->FrameBegin;
        LastFrame = pElem->FrameBegin+pElem->FrameNum-1;

        /* Calculate length of video and reallocate
         * transformation array:
         */
        for(p=pElem; p; p=p->next)
        {
            int v;
            v = cvReadIntByName( fs, node, "BG", -1 );
            if(v!=-1)p->BG = v;
            v = cvReadIntByName( fs, node, "Mask", -1 );
            if(v!=-1)p->Mask = v;

            p->FrameBegin += cvReadIntByName( fs, node, "FrameBegin", 0 );
            p->FrameNum = cvReadIntByName( fs, node, "FrameNum", p->FrameNum );
            p->FrameNum = cvReadIntByName( fs, node, "Dur", p->FrameNum );
            {
                int LastFrame = cvReadIntByName( fs, node, "LastFrame", p->FrameBegin+p->FrameNum-1 );
                p->FrameNum = MIN(p->FrameNum,LastFrame - p->FrameBegin+1);
            }

            icvTestSeqAllocTrans(p);

            {   /* New range estimation: */
                int LF = p->FrameBegin+p->FrameNum-1;
                if(p==pElem || FirstFrame > p->FrameBegin)FirstFrame = p->FrameBegin;
                if(p==pElem || LastFrame < LF)LastFrame = LF;
            }   /* New range estimation. */
        }   /*  End allocate new transform array. */

        if(ShiftByPos)
        {
            for(p=pElem;p;p=p->next)
            {   /* Modify transformation to make autoshift: */
                int         i;
                int         num = p->FrameNum;
                assert(num <= p->TransNum);
                p->TransNum = MAX(1,num);

                for(i=0; i<num; ++i)
                {
                    CvTSTrans*  pT = p->pTrans+i;
                    //float   t = (num>1)?((float)i/(num-1)):0.0f;
                    float newx = p->pPos[i%p->PosNum].x;
                    float newy = p->pPos[i%p->PosNum].y;
                    pT->Shift.x = -newx*pT->Scale.x;
                    pT->Shift.y = -newy*pT->Scale.y;

                    if(p->pImg)
                    {
                        newx *= p->pImg->width-1;
                        newy *= p->pImg->height-1;
                    }

                    pT->T[2] = -(pT->T[0]*newx+pT->T[1]*newy);
                    pT->T[5] = -(pT->T[3]*newx+pT->T[4]*newy);
                }
            }   /* Modify transformation old. */
        }   /*  Next record. */

        /* Initialize frame number array: */
        KeyFrames[0] = FirstFrame;

        if(pTransSeq&&KeyFrameNum>1)
        {
            int i0,i1,i;
            for(i=0; i<KeyFrameNum; ++i)
            {
                CvFileNode* pTN = (CvFileNode*)cvGetSeqElem(pTransSeq,i);
                KeyFrames[i] = cvReadIntByName(fs,pTN,"frame",-1);
            }

            if(KeyFrames[0]<0)KeyFrames[0]=FirstFrame;
            if(KeyFrames[KeyFrameNum-1]<0)KeyFrames[KeyFrameNum-1]=LastFrame;

            for(i0=0, i1=1; i1<KeyFrameNum;)
            {
                int i;

                for(i1=i0+1; i1<KeyFrameNum && KeyFrames[i1]<0; i1++);

                assert(i1<KeyFrameNum);
                assert(i1>i0);

                for(i=i0+1; i<i1; ++i)
                {
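                    /* Linearly interpolate the key frame numbers that were left
                       unspecified (negative), e.g. {0, -1, -1, 30} becomes {0, 10, 20, 30}. */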
                    KeyFrames[i] = cvRound(KeyFrames[i0] + (float)(i-i0)*(float)(KeyFrames[i1] - KeyFrames[i0])/(float)(i1-i0));
                }
                i0 = i1;
                i1++;
            }   /* Next key run. */
        }   /*  Initialize frame number array. */

        if(pTransNode || pTransSeq)
        {   /* More complex transform. */
            int     param;
            CvFileNode* pTN = pTransSeq?(CvFileNode*)cvGetSeqElem(pTransSeq,0):pTransNode;

            for(p=pElem; p; p=p->next)
            {
                //int trans_num = p->TransNum;
                for(param=0; param_name[param]; ++param)
                {
                    const char*   name = param_name[param];
                    float   defv = param_defval[param];
                    if(KeyFrameNum==1)
                    {   /* Only one transform record: */
                        int     i;
                        double  val;
                        CvFileNode* node = cvGetFileNodeByName( fs, pTN,name);
                        if(node == NULL) continue;
                        val = cvReadReal(node,defv);

                        for(i=0; i<p->TransNum; ++i)
                        {
                            icvUpdateTrans(
                                p->pTrans+i, param, val,
                                p->pImg?(float)(p->pImg->width-1):1.0f,
                                p->pImg?(float)(p->pImg->height-1):1.0f);
                        }
                    }   /* Next record. */
                    else
                    {   /* Several transforms: */
                        int         i0,i1;
                        double      v0;
                        double      v1;

                        CvFileNode* pTN = (CvFileNode*)cvGetSeqElem(pTransSeq,0);
                        v0 = cvReadRealByName(fs, pTN,name,defv);

                        for(i1=1,i0=0; i1<KeyFrameNum; ++i1)
                        {
                            int         f0,f1;
                            int         i;
                            CvFileNode* pTN = (CvFileNode*)cvGetSeqElem(pTransSeq,i1);
                            CvFileNode* pVN = cvGetFileNodeByName(fs,pTN,name);

                            if(pVN)v1 = cvReadReal(pVN,defv);
                            else if(pVN == NULL && i1 == KeyFrameNum-1) v1 = defv;
                            else continue;

                            f0 = KeyFrames[i0];
                            f1 = KeyFrames[i1];

                            if(i1==(KeyFrameNum-1)) f1++;

                            for(i=f0; i<f1; ++i)
                            {
                                double   val;
                                double   t = (float)(i-f0);
                                int      li = i - p->FrameBegin;
                                if(li<0) continue;
                                if(li>= p->TransNum) break;
                                if(KeyFrames[i1]>KeyFrames[i0]) t /=(float)(KeyFrames[i1]-KeyFrames[i0]);
                                val = t*(v1-v0)+v0;

                                icvUpdateTrans(
                                    p->pTrans+li, param, val,
                                    p->pImg?(float)(p->pImg->width-1):1.0f,
                                    p->pImg?(float)(p->pImg->height-1):1.0f);

                            }   /* Next transform. */
                            i0 = i1;
                            v0 = v1;

                        }   /* Next value run. */
                    }   /*  Several transforms. */
                }   /*  Next parameter. */
            }   /*  Next record. */
        }   /*  More complex transform. */
    }   /*  Read transforms. */

    return pElem;

}   /* icvTestSeqReadElemOne */
Example No. 6
int main(int argc, char **argv)
{

	// Initialize, create Kalman Filter object, window, random number
	// generator etc.
	//
	cvNamedWindow("Kalman", 1);
	CvRandState rng;
	cvRandInit(&rng, 0, 1, -1, CV_RAND_UNI);

	IplImage *img = cvCreateImage(cvSize(500, 500), 8, 3);
	CvKalman *kalman = cvCreateKalman(2, 1, 0);
	// state is (phi, delta_phi) - angle and angular velocity
	// Initialize with random guess.
	//
	CvMat *x_k = cvCreateMat(2, 1, CV_32FC1);
	cvRandSetRange(&rng, 0, 0.1, 0);
	rng.disttype = CV_RAND_NORMAL;
	cvRand(&rng, x_k);

	// process noise
	//
	CvMat *w_k = cvCreateMat(2, 1, CV_32FC1);

	// measurements, only one parameter for angle
	//
	CvMat *z_k = cvCreateMat(1, 1, CV_32FC1);
	cvZero(z_k);

	// Transition matrix 'F' describes relationship between
	// model parameters at step k and at step k+1 (this is 
	// the "dynamics" in our model.
	//
	const float F[] = { 1, 1, 0, 1 };
	memcpy(kalman->transition_matrix->data.fl, F, sizeof(F));
	// Initialize other Kalman filter parameters.
	//
	cvSetIdentity(kalman->measurement_matrix, cvRealScalar(1));
	cvSetIdentity(kalman->process_noise_cov, cvRealScalar(1e-5));
	cvSetIdentity(kalman->measurement_noise_cov, cvRealScalar(1e-1));
	cvSetIdentity(kalman->error_cov_post, cvRealScalar(1));

	// choose random initial state
	//
	cvRand(&rng, kalman->state_post);

	while (1) {
		// predict point position
		const CvMat *y_k = cvKalmanPredict(kalman, 0);

		// generate measurement (z_k)
		//
		cvRandSetRange(&rng,
					   0, sqrt(kalman->measurement_noise_cov->data.fl[0]), 0);
		cvRand(&rng, z_k);
		cvMatMulAdd(kalman->measurement_matrix, x_k, z_k, z_k);
		// plot points (e.g., convert to planar coordinates and draw)
		//
		cvZero(img);
		cvCircle(img, phi2xy(z_k), 4, CVX_YELLOW);	// observed state
		cvCircle(img, phi2xy(y_k), 4, CVX_WHITE, 2);	// "predicted" state
		cvCircle(img, phi2xy(x_k), 4, CVX_RED);	// real state
		cvShowImage("Kalman", img);
		// adjust Kalman filter state
		//
		cvKalmanCorrect(kalman, z_k);

		// Apply the transition matrix 'F' (e.g., step time forward)
		// and also apply the "process" noise w_k.
		//
		cvRandSetRange(&rng, 0, sqrt(kalman->process_noise_cov->data.fl[0]), 0);
		cvRand(&rng, w_k);
		cvMatMulAdd(kalman->transition_matrix, x_k, w_k, x_k);

		// exit if user hits 'Esc'
		if (cvWaitKey(100) == 27)
			break;
	}

	return 0;
}
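The helper phi2xy() used above is not shown. A plausible sketch, mirroring the calc_point macro of Example No. 10, maps the angle stored in the first element of a 1x1 matrix to a point on a circle around the image centre; the 500x500 image size matches the img created in this main(), and <math.h> is assumed to be included.

/* Hypothetical sketch of phi2xy(); not the original helper. */
static CvPoint phi2xy( const CvMat* mat )
{
	double phi = mat->data.fl[0];
	return cvPoint( cvRound( 250 + 500/3.0 * cos(phi) ),
	                cvRound( 250 - 500/3.0 * sin(phi) ) );
}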
Example No. 7
static int aMatchContourTrees(void)
{
    CvSeqBlock contour_blk1, contour_blk2;
    CvContour contour_h1, contour_h2;
    CvContourTree *tree1, *tree2;
    CvMemStorage *storage;   /*   storage for contour and tree writing */
    int block_size = 10000;

    CvRandState state;
    double lower, upper;
    int seed;
    float fr;
    int type_seq;
    int method;
    int nPoints1 = 12, nPoints2 = 12;
    int xc,yc,a1 = 10, b1 = 20, a2 = 10, b2 =20, fi = 0;
    int xmin,ymin,xmax,ymax;
    double error_test,rezult, eps_rez = 0.8;
    double pi = 3.1415926;
    double threshold = 1.e-7;
    double threshold2 = 5.;
    int i;
    int code = TRS_OK;

    int width=256,height=256;
    CvPoint *cp1,*cp2;

    /* read tests params */

    if (!trsiRead(&nPoints1,"20","Number of points first contour"))
        return TRS_UNDEF;
    if (!trsiRead(&nPoints2,"20","Number of points second contour"))
        return TRS_UNDEF;

    if(nPoints1>0&&nPoints2>0)
    {
        if (!trsiRead(&a1,"10","first radius of the first elipse"))
            return TRS_UNDEF;
        if (!trsiRead(&b1,"20","second radius of the first elipse"))
            return TRS_UNDEF;
        if (!trsiRead(&a2,"15","first radius of the second elipse"))
            return TRS_UNDEF;
        if (!trsiRead(&b2,"30","second radius of the second elipse"))
            return TRS_UNDEF;
        if (!trsiRead(&fi,"0","second radius of the second elipse"))
            return TRS_UNDEF;

        if (!trsdRead(&upper,"3","noise amplidude"))
            return TRS_UNDEF;

        xc = (int)(width/2.);
        yc = (int)(height/2.);
        xmin = width;
        ymin = height;
        xmax = 0;
        ymax = 0;

        cp1 = (CvPoint*) trsmAlloc(nPoints1*sizeof(CvPoint));
        cp2 = (CvPoint*) trsmAlloc(nPoints2*sizeof(CvPoint));

        for(i=0; i<nPoints1; i++)
        {
            cp1[i].x = (int)(a1*cos(2*pi*i/nPoints1))+xc;
            cp1[i].y = (int)(b1*sin(2*pi*i/nPoints1))+yc;
            if(xmin> cp1[i].x) xmin = cp1[i].x;
            if(xmax< cp1[i].x) xmax = cp1[i].x;
            if(ymin> cp1[i].y) ymin = cp1[i].y;
            if(ymax< cp1[i].y) ymax = cp1[i].y;
        }

        if(xmax>width||xmin<0||ymax>height||ymin<0) return TRS_FAIL;

        lower = -upper;
        /*     upper = 3;*/
        seed = 345753;
        cvRandInit(&state, (float)lower,(float)upper, seed );
        for(i=0; i<nPoints2; i++)
        {
            cvbRand( &state, &fr, 1 );
            cp2[i].x =(int)fr+(int)(a2*cos(2*pi*i/nPoints2)*cos(2*pi*fi/360.))-
                      (int)(b2*sin(2*pi*i/nPoints2)*sin(2*pi*fi/360.))+xc;
            cvbRand( &state, &fr, 1 );
            cp2[i].y =(int)fr+(int)(a2*cos(2*pi*i/nPoints2)*sin(2*pi*fi/360.))+
                      (int)(b2*sin(2*pi*i/nPoints2)*cos(2*pi*fi/360.))+yc;

            if(xmin> cp2[i].x) xmin = cp2[i].x;
            if(xmax< cp2[i].x) xmax = cp2[i].x;
            if(ymin> cp2[i].y) ymin = cp2[i].y;
            if(ymax< cp2[i].y) ymax = cp2[i].y;
        }
        if(xmax>width||xmin<0||ymax>height||ymin<0) return TRS_FAIL;

        /*   contour initialization */
        type_seq = CV_SEQ_POLYGON;
        cvMakeSeqHeaderForArray( type_seq, sizeof(CvContour), sizeof(CvPoint),
                                 (char*)cp1, nPoints1, (CvSeq*)&contour_h1, &contour_blk1);

        cvMakeSeqHeaderForArray( type_seq, sizeof(CvContour), sizeof(CvPoint),
                                 (char*)cp2, nPoints2, (CvSeq*)&contour_h2, &contour_blk2);

        /*  contour trees created*/
        storage = cvCreateMemStorage( block_size );

        tree1 = cvCreateContourTree ((CvSeq*)&contour_h1, storage, threshold);
        tree2 = cvCreateContourTree ((CvSeq*)&contour_h2, storage, threshold);


        /*  contour matching */
        error_test = 0.;
        method = 1;

        rezult = cvMatchContourTrees (tree1, tree2, (CvContourTreesMatchMethod)method,threshold2);
        error_test+=rezult;

        if(error_test > eps_rez ) code = TRS_FAIL;
        else code = TRS_OK;

        trsWrite( ATS_CON | ATS_LST | ATS_SUM, "contours matching error_test =%f \n",
                  error_test);

        cvReleaseMemStorage ( &storage );

        trsFree (cp2);
        trsFree (cp1);

    }


    /*    _getch();     */
    return code;
}
// Particle filter
void particleFilter()
{
	int i, c;
	double w = 0.0, h = 0.0;
	cv::VideoCapture capture(0);
	//CvCapture *capture = 0;
	//capture = cvCreateCameraCapture (0);

	  int n_stat = 4;
	  int n_particle = 4000;
	  CvConDensation *cond = 0;
	  CvMat *lowerBound = 0;
	  CvMat *upperBound = 0;
	  int xx, yy;

	  capture >> capframe;

	// Capture one frame and get the capture size.
	//frame = cvQueryFrame (capture);
	//redimage=cvCreateImage(cvGetSize(frame),IPL_DEPTH_8U,1);
	//greenimage=cvCreateImage(cvGetSize(frame),IPL_DEPTH_8U,1);
	//blueimage=cvCreateImage(cvGetSize(frame),IPL_DEPTH_8U,1);
	w = capframe.cols;
	h = capframe.rows;
	//w = frame->width;
    //h = frame->height;
	cv::namedWindow("Condensation", CV_WINDOW_AUTOSIZE);
	cv::setMouseCallback("Condensation", on_mouse, 0);
	//cvNamedWindow ("Condensation", CV_WINDOW_AUTOSIZE);
	//cvSetMouseCallback("Condensation",on_mouse,0);
 
 	// Font settings
	//CvFont dfont;
	//float hscale      = 0.7f;
	//float vscale      = 0.7f;
	//float italicscale = 0.0f;
	//int  thickness    = 1;
	//char text[255] = "";
	//cvInitFont (&dfont, CV_FONT_HERSHEY_SIMPLEX , hscale, vscale, italicscale, thickness, CV_AA); 

	// Create the Condensation structure.
	cond = cvCreateConDensation (n_stat, 0, n_particle);

	// Specify the minimum and maximum values each dimension of the state vector can take.
	// Here the state is 4-dimensional: position (x, y) and velocity (x pixels/frame, y pixels/frame).
	lowerBound = cvCreateMat (4, 1, CV_32FC1);
	upperBound = cvCreateMat (4, 1, CV_32FC1);
	cvmSet (lowerBound, 0, 0, 0.0);
	cvmSet (lowerBound, 1, 0, 0.0);
	cvmSet (lowerBound, 2, 0, -20.0);
	cvmSet (lowerBound, 3, 0, -20.0);
	cvmSet (upperBound, 0, 0, w);
	cvmSet (upperBound, 1, 0, h);
	cvmSet (upperBound, 2, 0, 20.0);
	cvmSet (upperBound, 3, 0, 20.0);
  
	// Initialize the Condensation structure
	cvConDensInitSampleSet (cond, lowerBound, upperBound);

	// Specify the state-vector dynamics used by the ConDensation algorithm
	cond->DynamMatr[0] = 1.0;
	cond->DynamMatr[1] = 0.0;
	cond->DynamMatr[2] = 1.0;
	cond->DynamMatr[3] = 0.0;
	cond->DynamMatr[4] = 0.0;
	cond->DynamMatr[5] = 1.0;
	cond->DynamMatr[6] = 0.0;
	cond->DynamMatr[7] = 1.0;
	cond->DynamMatr[8] = 0.0;
	cond->DynamMatr[9] = 0.0;
	cond->DynamMatr[10] = 1.0;
	cond->DynamMatr[11] = 0.0;
	cond->DynamMatr[12] = 0.0;
	cond->DynamMatr[13] = 0.0;
	cond->DynamMatr[14] = 0.0;
	cond->DynamMatr[15] = 1.0;
  
	// Reset the noise parameters.
	cvRandInit (&(cond->RandS[0]), -25, 25, (int) cvGetTickCount ());
	cvRandInit (&(cond->RandS[1]), -25, 25, (int) cvGetTickCount ());
	cvRandInit (&(cond->RandS[2]), -5, 5, (int) cvGetTickCount ());
	cvRandInit (&(cond->RandS[3]), -5, 5, (int) cvGetTickCount ());
	
	while (1) 
	{
		capture >> capframe;
		//frame = cvQueryFrame (capture);


		// Compute the likelihood of each particle.
		for (i = 0; i < n_particle; i++)
		{ 
			xx = (int) (cond->flSamples[i][0]);
			yy = (int) (cond->flSamples[i][1]);
			 if (xx < 0 || xx >= w || yy < 0 || yy >= h) 
				{  
					cond->flConfidence[i] = 0.0;
				}
				else
				{  
					cond->flConfidence[i] = calc_likelihood (capframe, xx, yy);
					//cond->flConfidence[i] = calc_likelihood (frame, xx, yy);
				
					cv::circle(capframe, cv::Point(xx, yy), 1, CV_RGB(0, 255, 200));
					//cvCircle (frame, cvPoint (xx, yy), 1, CV_RGB (0, 255, 200), -1);
				}	 
		}

		// Compute the total weight and the weighted centroid
		double wx = 0, wy = 0;
		double sumWeight = 0;
		for (i = 0; i < n_particle; i++)
		{
			sumWeight += cond->flConfidence[i];
		}
		for (i = 0; i < n_particle; i++)
		{
			wx += (int) (cond->flSamples[i][0]) * (cond->flConfidence[i] / sumWeight);
			wy += (int) (cond->flSamples[i][1]) * (cond->flConfidence[i] / sumWeight);
		}

		// Display the centroid
		cv::circle(capframe, cv::Point((int)wx, (int)wy), 10, cv::Scalar(0, 0, 255));
		cv::circle(capframe, cv::Point(20, 20), 10, CV_RGB(red, green, blue), 6);
		cv::putText(capframe, "target", cv::Point(0, 50), cv::FONT_HERSHEY_SIMPLEX, 0.7, CV_RGB(red, green, blue));
		cv::imshow("Condensation", capframe);

		//cvCircle(frame,cvPoint(20,20),10,CV_RGB(red,green,blue),-1);
		//cvPutText(frame,"target",cvPoint(0,50),&dfont,CV_RGB(red,green,blue));
		//cvShowImage ("Condensation", frame);
		
		c = cv::waitKey(30);
		//c = cvWaitKey (30);
		if (c == 27)      break;
  
		// Estimate the state of the model at the next time step
		cvConDensUpdateByTime (cond);

	}

	cv::destroyWindow("Condensation");
	//cvDestroyWindow ("Condensation");
	//cvReleaseCapture (&capture);

	//cvReleaseImage(&redimage);
	//cvReleaseImage(&greenimage);
	//cvReleaseImage(&blueimage);
	cvReleaseConDensation (&cond);

	cvReleaseMat (&lowerBound);
	cvReleaseMat (&upperBound);
}
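The functions calc_likelihood() and on_mouse() referenced above are not included in this listing. A plausible sketch of the likelihood, assuming on_mouse stores the clicked reference colour in the globals red, green and blue, scores each particle by how close the pixel colour at (x, y) is to that reference; the exact weighting function is an assumption.

// Hypothetical sketch of calc_likelihood(); not the original implementation.
// Assumes the globals red, green, blue hold the target colour picked with the mouse.
double calc_likelihood( const cv::Mat& img, int x, int y )
{
	cv::Vec3b p = img.at<cv::Vec3b>( y, x );        // BGR pixel at the particle position
	double db = p[0] - blue, dg = p[1] - green, dr = p[2] - red;
	double dist2 = db * db + dg * dg + dr * dr;     // squared colour distance
	return 1.0 / ( 1.0 + dist2 );                   // closer colour -> higher weight
}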
Example No. 9
static int arithm_test( void* arg )
{
    double success_error_level = 0;

    int   param = (int)arg;
    int   func = param / 256;
    int   depth = (param % 256) % 8;
    int   channels = (param % 256) / 8;
    int   mattype;
    int   seed = -1;//atsGetSeed();

    int   btpix, max_img_bytes;

    int     merr_i = 0, i;
    double  max_err = 0.;

    uchar *src1data, *src2data, *dstdata, *dstdbdata, *maskdata;
    CvRandState rng_state;
    AtsBinArithmMaskFunc bin_func = 0;
    AtsUnArithmMaskFunc un_func = 0;
    AtsBinArithmFunc mul_func = 0;

    CvScalar alpha, beta, gamma;
    CvMat gammaarr;

    alpha = beta = gamma = cvScalarAll(0);

    read_arithm_params();

    if( !(ATS_RANGE( depth, dt_l, dt_h+1 ) &&
          ATS_RANGE( channels, ch_l, ch_h+1 ))) return TRS_UNDEF;

    cvInitMatHeader( &gammaarr, 1, 1, CV_64FC4, gamma.val );

    switch( func )
    {
    case 0:
        bin_func = cvAdd;
        alpha = beta = cvScalarAll(1);
        break;
    case 1:
        bin_func = cvSub;
        alpha = cvScalarAll(1);
        beta = cvScalarAll(-1);
        break;
    case 2:
        mul_func = cvMul;
        break;
    case 3:
        un_func = cvAddS;
        alpha = cvScalarAll(1);
        break;
    case 4:
        un_func = cvSubRS;
        alpha = cvScalarAll(-1);
        break;
    default:
        assert(0);
        return TRS_FAIL;
    }

    mattype = depth + channels*8;
    depth = depth == 0 ? IPL_DEPTH_8U : depth == 1 ? IPL_DEPTH_8S :
            depth == 2 ? IPL_DEPTH_16S : depth == 3 ? IPL_DEPTH_32S :
            depth == 4 ? IPL_DEPTH_32F : IPL_DEPTH_64F;

    channels = channels + 1;

    cvRandInit( &rng_state, 0, 1, seed );

    max_img_bytes = (max_img_size + 32) * (max_img_size + 2) * cvPixSize(mattype);

    src1data = (uchar*)cvAlloc( max_img_bytes );
    src2data = (uchar*)cvAlloc( max_img_bytes );
    dstdata = (uchar*)cvAlloc( max_img_bytes );
    dstdbdata = (uchar*)cvAlloc( max_img_bytes );
    maskdata = (uchar*)cvAlloc( max_img_bytes / cvPixSize(mattype));

    btpix = ((depth & 255)/8)*channels;
    
    if( depth == IPL_DEPTH_32F )
        success_error_level = FLT_EPSILON * img32f_range * (mul_func ? img32f_range : 2.f);
    else if( depth == IPL_DEPTH_64F )
        success_error_level = DBL_EPSILON * img32f_range * (mul_func ? img32f_range : 2.f);

    for( i = 0; i < base_iters; i++ )
    {
        int continuous = (cvRandNext( &rng_state ) % 3) == 0;
        int is_mask_op = mul_func ? 0 : ((cvRandNext( &rng_state ) % 3) == 0);
        int step1, step2, step, mstep;
        CvMat  src1, src2, dst1, dst2, mask, dst;
        double err;
        int w, h;
                
        w = cvRandNext( &rng_state ) % (max_img_size - min_img_size) + min_img_size;
        h = cvRandNext( &rng_state ) % (max_img_size - min_img_size) + min_img_size;

        step1 = step2 = step = w*btpix;
        mstep = w;

        if( !continuous )
        {
            step1 += (cvRandNext( &rng_state ) % 4)*(btpix/channels);
            step2 += (cvRandNext( &rng_state ) % 4)*(btpix/channels);
            step += (cvRandNext( &rng_state ) % 4)*(btpix/channels);
            mstep += (cvRandNext( &rng_state ) % 4);
        }

        switch( depth )
        {
        case IPL_DEPTH_8U:
            cvRandSetRange( &rng_state, 0, img8u_range );
            break;
        case IPL_DEPTH_8S:
            cvRandSetRange( &rng_state, -img8s_range, img8s_range );
            break;
        case IPL_DEPTH_16S:
            cvRandSetRange( &rng_state, -img16s_range, img16s_range );
            break;
        case IPL_DEPTH_32S:
            cvRandSetRange( &rng_state, -img32s_range, img32s_range );
            break;
        case IPL_DEPTH_32F:
        case IPL_DEPTH_64F:
            cvRandSetRange( &rng_state, -img32f_range, img32f_range );
            break;
        }

        cvInitMatHeader( &src1, h, w, mattype, src1data, step1 );
        cvInitMatHeader( &src2, h, w, mattype, src2data, step2 );
        cvInitMatHeader( &dst1, h, w, mattype, dstdata, step );
        cvInitMatHeader( &dst2, h, w, mattype, dstdbdata, step );

        cvInitMatHeader( &mask, h, w, CV_8UC1, maskdata, mstep );

        cvRand( &rng_state, &src1 );

        switch( cvRandNext(&rng_state) % 3 )
        {
        case 0:
            memcpy( &dst, &src1, sizeof(dst));
            break;
        case 1:
            if( un_func )
                memcpy( &dst, &src1, sizeof(dst));
            else
                memcpy( &dst, &src2, sizeof(dst));
            break;
        default:
            memcpy( &dst, &dst1, sizeof(dst));
            break;
        }

        if( un_func )
        {
            if( depth == IPL_DEPTH_8U )
                cvRandSetRange( &rng_state, -img8u_range, img8u_range );
            
            cvRand( &rng_state, &gammaarr );
        }
        else
        {
            cvRand( &rng_state, &src2 );
        }

        if( is_mask_op )
        {
            const int upper = 4;
            
            if( dst.data.ptr == dst1.data.ptr )
                cvRand( &rng_state, &dst );

            cvRandSetRange( &rng_state, 0, upper );
            cvRand( &rng_state, &mask );
            atsLinearFunc( &mask, cvScalarAll(1), 0, cvScalarAll(0),
                           cvScalarAll(2-upper), &mask );
        }

        if( !mul_func )
        {
            atsLinearFunc( &src1, alpha, un_func ? 0 : &src2, beta, gamma, &dst2 );
            if( is_mask_op )
            {
                cvXorS( &mask, cvScalarAll(1), &mask );
                cvCopy( &dst, &dst2, &mask );
                cvXorS( &mask, cvScalarAll(1), &mask );
            }

            if( un_func )
                un_func( &src1, gamma, &dst, is_mask_op ? &mask : 0 );
            else
                bin_func( &src1, &src2, &dst, is_mask_op ? &mask : 0 );
        }
        else
        {
            atsMul( &src1, &src2, &dst2 );
            mul_func( &src1, &src2, &dst );
        }

        /*if( i == 9 )
        {
            putchar('.');
        }*/

        //cvXor( &dst2, &dst, &dst2 );
        err = cvNorm( &dst2, &dst, CV_C );

        if( err > max_err )
        {
            max_err = err;
            merr_i = i;

            if( max_err > success_error_level )
                goto test_exit;
        }
    }

test_exit:
    cvFree( (void**)&src1data );
    cvFree( (void**)&src2data );
    cvFree( (void**)&dstdata );
    cvFree( (void**)&dstdbdata );
    cvFree( (void**)&maskdata );

    trsWrite( ATS_LST, "Max err is %g at iter = %d, seed = %08x",
                       max_err, merr_i, seed );

    return max_err <= success_error_level ?
        trsResult( TRS_OK, "No errors" ) :
        trsResult( TRS_FAIL, "Bad accuracy" );
}
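The ats* helpers used above belong to the old test framework and are not shown. Judging by how atsLinearFunc is called, it computes an element-wise linear combination used as the reference result; the sketch below illustrates that assumed semantics for single-channel float matrices only (the real helper also handles all depths, channel counts and saturation), and its name is hypothetical.

/* Sketch of the assumed reference computation:
   dst(i,j) = alpha*src1(i,j) + beta*src2(i,j) + gamma, with src2 optional. */
static void ref_linear_func_32f( const CvMat* src1, double alpha,
                                 const CvMat* src2, double beta,
                                 double gamma, CvMat* dst )
{
    int i, j;
    for( i = 0; i < src1->rows; i++ )
        for( j = 0; j < src1->cols; j++ )
        {
            double v = alpha * cvmGet( src1, i, j ) + gamma;
            if( src2 )
                v += beta * cvmGet( src2, i, j );
            cvmSet( dst, i, j, v );
        }
}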
Example No. 10
int main(int argc, char** argv)
{
    /* A matrix data */
    const float A[] = { 1, 1, 0, 1 };

    IplImage* img = cvCreateImage( cvSize(500,500), 8, 3 );
    CvKalman* kalman = cvCreateKalman( 2, 1, 0 );
    /* state is (phi, delta_phi) - angle and angle increment */
    CvMat* state = cvCreateMat( 2, 1, CV_32FC1 );
    CvMat* process_noise = cvCreateMat( 2, 1, CV_32FC1 );
    /* only phi (angle) is measured */
    CvMat* measurement = cvCreateMat( 1, 1, CV_32FC1 );
    CvRandState rng;
    int code = -1;

    cvRandInit( &rng, 0, 1, -1, CV_RAND_UNI );

    cvZero( measurement );
    cvNamedWindow( "Kalman", 1 );

    for(;;)
    {
        cvRandSetRange( &rng, 0, 0.1, 0 );
        rng.disttype = CV_RAND_NORMAL;

        cvRand( &rng, state );

        memcpy( kalman->transition_matrix->data.fl, A, sizeof(A));
        cvSetIdentity( kalman->measurement_matrix, cvRealScalar(1) );
        cvSetIdentity( kalman->process_noise_cov, cvRealScalar(1e-5) );
        cvSetIdentity( kalman->measurement_noise_cov, cvRealScalar(1e-1) );
        cvSetIdentity( kalman->error_cov_post, cvRealScalar(1));
        /* choose random initial state */
        cvRand( &rng, kalman->state_post );

        rng.disttype = CV_RAND_NORMAL;

        for(;;)
        {
            #define calc_point(angle)                                      \
                cvPoint( cvRound(img->width/2 + img->width/3*cos(angle)),  \
                         cvRound(img->height/2 - img->width/3*sin(angle)))

            float state_angle = state->data.fl[0];
            CvPoint state_pt = calc_point(state_angle);

            /* predict point position */
            const CvMat* prediction = cvKalmanPredict( kalman, 0 );
            float predict_angle = prediction->data.fl[0];
            CvPoint predict_pt = calc_point(predict_angle);
            float measurement_angle;
            CvPoint measurement_pt;

            cvRandSetRange( &rng,
                            0,
                            sqrt(kalman->measurement_noise_cov->data.fl[0]),
                            0 );
            cvRand( &rng, measurement );

            /* generate measurement */
            cvMatMulAdd( kalman->measurement_matrix, state, measurement, measurement );

            measurement_angle = measurement->data.fl[0];
            measurement_pt = calc_point(measurement_angle);

            /* plot points */
            #define draw_cross( center, color, d )                        \
                cvLine( img, cvPoint( center.x - d, center.y - d ),       \
                             cvPoint( center.x + d, center.y + d ),       \
                             color, 1, 0 );                               \
                cvLine( img, cvPoint( center.x + d, center.y - d ),       \
                             cvPoint( center.x - d, center.y + d ),       \
                             color, 1, 0 )

            cvZero( img );
            draw_cross( state_pt, CV_RGB(255,255,255), 3 );
            draw_cross( measurement_pt, CV_RGB(255,0,0), 3 );
            draw_cross( predict_pt, CV_RGB(0,255,0), 3 );
            cvLine( img, state_pt, predict_pt, CV_RGB(255,255,0), 3, 0 );

            /* adjust Kalman filter state */
            cvKalmanCorrect( kalman, measurement );

            cvRandSetRange( &rng,
                            0,
                            sqrt(kalman->process_noise_cov->data.fl[0]),
                            0 );
            cvRand( &rng, process_noise );
            cvMatMulAdd( kalman->transition_matrix,
                         state,
                         process_noise,
                         state );

            cvShowImage( "Kalman", img );
            code = cvWaitKey( 100 );

            if( code > 0 ) /* break current simulation by pressing a key */
                break;
        }
        if( code == 27 ) /* exit by ESCAPE */
            break;
    }

    return 0;
}