Example #1
0
/**
 * Invert the illumination, so white becomes black and vice-versa.
 *
 * Both exp->IlluminationFrame and exp->forDLP are inverted in place (via
 * LoadFrameWithImage). The scratch image is sized from IlluminationFrame;
 * this assumes forDLP has the same dimensions -- TODO confirm against the
 * Experiment definition.
 */
void InvertIllumination(Experiment* exp){
	IplImage* temp= cvCreateImage( cvSize(exp->IlluminationFrame->iplimg->width,exp->IlluminationFrame->iplimg->height),
			IPL_DEPTH_8U, 1);

	/** Invert Illumination Frame: XOR with 255 flips every bit of an 8-bit pixel. **/
	cvXorS(exp->IlluminationFrame->iplimg,cvScalar(255,255,255),temp);
	LoadFrameWithImage(temp,exp->IlluminationFrame);

	/** Invert DLP Frame **/
	cvXorS(exp->forDLP->iplimg,cvScalar(255,255,255),temp);
	LoadFrameWithImage(temp,exp->forDLP);

	/* Release the scratch buffer; the original code leaked it on every call. */
	cvReleaseImage(&temp);
}
/**
 * Invert the current binary image (white <-> black) and publish the result
 * as the new binary image in the data structure.
 */
void THISCLASS::OnStep()
{
	IplImage *inputimage = mCore->mDataStructureImageBinary.mImage;
	if (! inputimage) {
		return;
	}

	// Only single-channel (binary) images are supported; the original code
	// reported the error but then processed the invalid image anyway.
	if (inputimage->nChannels != 1)
	{
		AddError(wxT("This function requires a binary input image"));
		return;
	}

	// Lazily allocate the output buffer with the input geometry.
	if (!mOutputImage)
		mOutputImage = cvCreateImage(cvSize(inputimage->width, inputimage->height), inputimage->depth, 1);

	// XOR with 255 inverts every 8-bit pixel.
	cvXorS(inputimage, cvScalar(255), mOutputImage);

	mCore->mDataStructureImageBinary.mImage = mOutputImage;

	// Let the Display know about our image
	DisplayEditor de(&mDisplayOutput);
	if (de.IsActive()) {
		de.SetMainImage(mCore->mDataStructureImageBinary.mImage);
	}
}
//=========================================
// One tracking step: Kalman-predict the target position, run CAMSHIFT on the
// hue back-projection inside a search window around the prediction, then feed
// the measured position/velocity back into the Kalman filter.
// Returns the track window in image coordinates (empty rect on null input).
//=========================================
CvRect camKalTrack(IplImage* frame, camshift_kalman_tracker& camKalTrk) {
//=========================================
	if (!frame) {
		// The original code only printed a warning and then dereferenced the
		// null frame below (frame->width); bail out instead.
		printf("Input frame empty!\n");
		return cvRect(0, 0, 0, 0);
	}

	cvCopy(frame, camKalTrk.image, 0);
	cvCvtColor(camKalTrk.image, camKalTrk.hsv, CV_BGR2HSV); // BGR to HSV

	if (camKalTrk.trackObject) {
		int _vmin = vmin, _vmax = vmax;
		// Mask out pixels whose saturation/value is too low to carry hue info.
		cvInRangeS(camKalTrk.hsv, cvScalar(0, smin, MIN(_vmin,_vmax), 0), cvScalar(180, 256, MAX(_vmin,_vmax), 0), camKalTrk.mask); // MASK
		cvSplit(camKalTrk.hsv, camKalTrk.hue, 0, 0, 0); //  HUE
		if (camKalTrk.trackObject < 0) {
			// (Re-)initialization: build the hue histogram from the origin box.
			float max_val = 0.f;
			boundaryCheck(camKalTrk.originBox, frame->width, frame->height);
			cvSetImageROI(camKalTrk.hue, camKalTrk.originBox); // for ROI
			cvSetImageROI(camKalTrk.mask, camKalTrk.originBox); // for camKalTrk.mask
			cvCalcHist(&camKalTrk.hue, camKalTrk.hist, 0, camKalTrk.mask); //
			cvGetMinMaxHistValue(camKalTrk.hist, 0, &max_val, 0, 0);
			// Normalize histogram bins to [0,255].
			cvConvertScale(camKalTrk.hist->bins, camKalTrk.hist->bins, max_val ? 255. / max_val : 0., 0); //  bin  [0,255]
			cvResetImageROI(camKalTrk.hue); // remove ROI
			cvResetImageROI(camKalTrk.mask);
			camKalTrk.trackWindow = camKalTrk.originBox;
			camKalTrk.trackObject = 1;
			camKalTrk.lastpoint = camKalTrk.predictpoint = cvPoint(camKalTrk.trackWindow.x + camKalTrk.trackWindow.width / 2,
					camKalTrk.trackWindow.y + camKalTrk.trackWindow.height / 2);
			getCurrState(camKalTrk.kalman, camKalTrk.lastpoint, camKalTrk.predictpoint);//input current state
		}
		// Kalman state is (x, y, vx, vy).
		camKalTrk.prediction = cvKalmanPredict(camKalTrk.kalman, 0);//prediction=kalman->state_post

		camKalTrk.predictpoint = cvPoint(cvRound(camKalTrk.prediction->data.fl[0]), cvRound(camKalTrk.prediction->data.fl[1]));

		// Center the track window on the predicted point, clamped to the frame.
		camKalTrk.trackWindow = cvRect(camKalTrk.predictpoint.x - camKalTrk.trackWindow.width / 2, camKalTrk.predictpoint.y
				- camKalTrk.trackWindow.height / 2, camKalTrk.trackWindow.width, camKalTrk.trackWindow.height);

		camKalTrk.trackWindow = checkRectBoundary(cvRect(0, 0, frame->width, frame->height), camKalTrk.trackWindow);

		// Search window = track window padded by 'region' pixels on each side.
		camKalTrk.searchWindow = cvRect(camKalTrk.trackWindow.x - region, camKalTrk.trackWindow.y - region, camKalTrk.trackWindow.width + 2
				* region, camKalTrk.trackWindow.height + 2 * region);

		camKalTrk.searchWindow = checkRectBoundary(cvRect(0, 0, frame->width, frame->height), camKalTrk.searchWindow);

		cvSetImageROI(camKalTrk.hue, camKalTrk.searchWindow);
		cvSetImageROI(camKalTrk.mask, camKalTrk.searchWindow);
		cvSetImageROI(camKalTrk.backproject, camKalTrk.searchWindow);

		cvCalcBackProject( &camKalTrk.hue, camKalTrk.backproject, camKalTrk.hist ); // back project

		cvAnd(camKalTrk.backproject, camKalTrk.mask, camKalTrk.backproject, 0);

		// Track window expressed in search-window (ROI-local) coordinates.
		camKalTrk.trackWindow = cvRect(region, region, camKalTrk.trackWindow.width, camKalTrk.trackWindow.height);

		if (camKalTrk.trackWindow.height > 5 && camKalTrk.trackWindow.width > 5) {
			// calling CAMSHIFT
			cvCamShift(camKalTrk.backproject, camKalTrk.trackWindow, cvTermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1),
					&camKalTrk.trackComp, &camKalTrk.trackBox);

			/*cvMeanShift( camKalTrk.backproject, camKalTrk.trackWindow,
			 cvTermCriteria( CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1 ),
			 &camKalTrk.trackComp);*/
		}
		else {
			// Window degenerated: report an empty component.
			camKalTrk.trackComp.rect.x = 0;
			camKalTrk.trackComp.rect.y = 0;
			camKalTrk.trackComp.rect.width = 0;
			camKalTrk.trackComp.rect.height = 0;
		}

		cvResetImageROI(camKalTrk.hue);
		cvResetImageROI(camKalTrk.mask);
		cvResetImageROI(camKalTrk.backproject);

		// Convert the CAMSHIFT result back to full-image coordinates.
		camKalTrk.trackWindow = camKalTrk.trackComp.rect;
		camKalTrk.trackWindow = cvRect(camKalTrk.trackWindow.x + camKalTrk.searchWindow.x, camKalTrk.trackWindow.y
				+ camKalTrk.searchWindow.y, camKalTrk.trackWindow.width, camKalTrk.trackWindow.height);

		// Measurement is the window center; velocity = delta from last frame.
		camKalTrk.measurepoint = cvPoint(camKalTrk.trackWindow.x + camKalTrk.trackWindow.width / 2, camKalTrk.trackWindow.y
				+ camKalTrk.trackWindow.height / 2);
		camKalTrk.realposition->data.fl[0] = camKalTrk.measurepoint.x;
		camKalTrk.realposition->data.fl[1] = camKalTrk.measurepoint.y;
		camKalTrk.realposition->data.fl[2] = camKalTrk.measurepoint.x - camKalTrk.lastpoint.x;
		camKalTrk.realposition->data.fl[3] = camKalTrk.measurepoint.y - camKalTrk.lastpoint.y;
		camKalTrk.lastpoint = camKalTrk.measurepoint;//keep the current real position

		//measurement x,y
		cvMatMulAdd( camKalTrk.kalman->measurement_matrix/*2x4*/, camKalTrk.realposition/*4x1*/,/*measurementstate*/0, camKalTrk.measurement );
		cvKalmanCorrect(camKalTrk.kalman, camKalTrk.measurement);

		// Draw the current track window on the caller's frame.
		cvRectangle(frame, cvPoint(camKalTrk.trackWindow.x, camKalTrk.trackWindow.y), cvPoint(camKalTrk.trackWindow.x
				+ camKalTrk.trackWindow.width, camKalTrk.trackWindow.y + camKalTrk.trackWindow.height), CV_RGB(255,128,0), 4, 8, 0);
	}
	// set new selection if it exists: highlight it by inverting the pixels
	if (camKalTrk.selectObject && camKalTrk.selection.width > 0 && camKalTrk.selection.height > 0) {
		cvSetImageROI(camKalTrk.image, camKalTrk.selection);
		cvXorS(camKalTrk.image, cvScalarAll(255), camKalTrk.image, 0);
		cvResetImageROI(camKalTrk.image);
	}

	return camKalTrk.trackWindow;
}
/**
 * Interactively collect the arena regions (ramp, platform, pits, robot and
 * robot patch) by letting the user draw selections in an "Arena" window.
 *
 * Relies on the mouse callback (OnMouse) advancing the global SelectionNumber
 * and filling the global Selection rectangle -- TODO confirm against OnMouse.
 * Any output pointer may be NULL, in which case that object is skipped.
 *
 * NOTE(fix): the loop previously ran while SelectionNumber < 6, so case 6
 * (the robot patch) was unreachable and pRobot->Patch was never stored; the
 * condition is now <= 6 so the final selection is processed and the
 * "Press Escape to Continue..." prompt is actually shown.
 */
void FindArenaObjects(IplImage* Image, CvFont Font, _ArenaObject *pRamp, _ArenaObject* pPlatform, _ArenaObject* pRightPit, _ArenaObject* pLeftPit, _Robot* pRobot)
{
	IplImage* ImageCopy = cvCloneImage(Image);
	IplImage* ImageCopy2 = cvCloneImage(Image);
	SelectionNumber = 0;
	Select_Object = 0;
	int PrevSelectionNumber = -1;

	cvNamedWindow("Arena");
	cvShowImage("Arena", ImageCopy);
	cvSetMouseCallback("Arena", OnMouse);	

	// Keep going until all selections (0..6) are made or Escape is pressed.
	while(SelectionNumber <= 6 && cvWaitKey(10) != 27)
	{
		// Only redraw when the mouse callback advanced to a new selection.
		if(SelectionNumber - PrevSelectionNumber > 0)
		{
			PrevSelectionNumber = SelectionNumber;
			cvCopyImage(Image, ImageCopy);
			// Case N stores the selection completed in step N-1 and prompts
			// for the next one.
			switch(SelectionNumber)
			{
			case 0:
				cvPutText(ImageCopy, "Select Temp Ramp", cvPoint(0, 20), &Font, cvScalarAll(255));
				break;
			case 1:
				if(pRamp)
				{
					pRamp->BoundingRect = Selection;
					pRamp->Center = cvPoint(pRamp->BoundingRect.x + pRamp->BoundingRect.width/2, pRamp->BoundingRect.y + pRamp->BoundingRect.height/2);
				}
				cvPutText(ImageCopy, "Select Temp Platform", cvPoint(0, 20), &Font, cvScalarAll(255));
				break;
			case 2:
				if(pPlatform)
				{
					pPlatform->BoundingRect = Selection;
					pPlatform->Center = cvPoint(pPlatform->BoundingRect.x + pPlatform->BoundingRect.width/2, pPlatform->BoundingRect.y + pPlatform->BoundingRect.height/2);
				}
				cvPutText(ImageCopy, "Select Right Pit", cvPoint(0, 20), &Font, cvScalarAll(255));
				break;
			case 3:
				if(pRightPit)
				{
					pRightPit->BoundingRect = Selection;
					pRightPit->Center = cvPoint(pRightPit->BoundingRect.x + pRightPit->BoundingRect.width/2, pRightPit->BoundingRect.y + pRightPit->BoundingRect.height/2);
				}
				cvPutText(ImageCopy, "Select Left Pit", cvPoint(0, 20), &Font, cvScalarAll(255));
				break;
			case 4:
				if(pLeftPit)
				{
					pLeftPit->BoundingRect = Selection;
					pLeftPit->Center = cvPoint(pLeftPit->BoundingRect.x + pLeftPit->BoundingRect.width/2, pLeftPit->BoundingRect.y + pLeftPit->BoundingRect.height/2);
				}
				cvPutText(ImageCopy, "Select Robot", cvPoint(0, 20), &Font, cvScalarAll(255));
				break;
			case 5:
				if(pRobot)
				{
					pRobot->BoundingRect = Selection;
				}
				cvPutText(ImageCopy, "Select Robot Patch", cvPoint(0, 20), &Font, cvScalarAll(255));
				break;
			case 6:
				if(pRobot)
				{
					pRobot->Patch = Selection;
					pRobot->PatchCenter = cvPoint(pRobot->Patch.x + pRobot->Patch.width/2, pRobot->Patch.y + pRobot->Patch.height/2);
					pRobot->Updated = 1;
				}
				cvPutText(ImageCopy, "Press Escape to Continue...", cvPoint(0, 20), &Font, cvScalarAll(255));
				break;
			default:
				break;
			}
			cvShowImage("Arena", ImageCopy);
		}
		// While a selection is being dragged, show it highlighted (inverted).
		if(Select_Object && Selection.width > 0 && Selection.height > 0 )
        {
			cvCopyImage(ImageCopy, ImageCopy2);
            cvSetImageROI(ImageCopy2, Selection);
            
			cvXorS(ImageCopy2, cvScalarAll(255), ImageCopy2);
            
			cvResetImageROI(ImageCopy2);
			cvShowImage("Arena", ImageCopy2);
        }
	}
	cvReleaseImage(&ImageCopy);
	cvReleaseImage(&ImageCopy2);
	cvDestroyWindow("Arena");
}
// Process one BGR frame: handle user selection / tracker (re)initialization
// for this instance (only when the global g_selId matches m_id), then run one
// adaptive-histogram camshift step and render diagnostics into *out.
//
// @param img  input frame (read-only); presumably BGR, 8-bit -- the
//             CV_BGR2HSV conversion below assumes so; confirm with callers.
// @param out  in/out pointer to the visualization image; reallocated here
//             whenever its geometry/type no longer matches img.
void AdaptiveHistogramCamshift::ProcessFrame(const IplImage* img, IplImage** out)
{
  // Check image size, type matches.
  const bool outMatches = (NULL != *out) &&
                          (img->width == (*out)->width) &&
                          (img->height == (*out)->height) &&
                          (img->depth == (*out)->depth) &&
                          (img->nChannels == (*out)->nChannels);
  if (!outMatches)
  {
    // cvReleaseImage is a no-op on NULL, so this is safe on first call.
    cvReleaseImage(out);
    *out = cvCreateImage(cvSize(img->width, img->height), img->depth, img->nChannels);
  }
  cvCopy(img, *out);

  // Check for selection (globals shared with the mouse-handling code).
  if (g_selId == m_id)
  {
    // DEBUG selection
    //printf("AdaptiveHistCamshift %d is g_selId.\n", m_id);

    // Init already done, so m_frameSize should be set
    g_selRect.width = std::min(g_selRect.width, m_frameSize.width);
    g_selRect.height = std::min(g_selRect.height, m_frameSize.height);

    // Check if selecting
    if (g_selectObject)
    {
      // DEBUG selection
      //printf("AdaptiveHistCamshift %d detects in progress selection.\n", m_id);

      // Draw selection box (highlight by inverting the selected pixels)
      if ((g_selRect.width > 0) && (g_selRect.height > 0))
      {
        cvSetImageROI(*out, g_selRect);
        cvXorS(*out, cvScalarAll(255), *out, 0);
        cvResetImageROI(*out);
      }
    }
    // Check if time to init
    else if (g_initTracking)
    {
      // DEBUG selection
      //printf("AdaptiveHistCamshift %d detects time to init.\n", m_id);

      // Build the initial histogram from the selected rect, then release the
      // global selection so other instances can be selected.
      InitTrackWindow(img, g_selRect);
      g_initTracking = false;
      g_selId = -1;
    }
  }

  // Check if not initialized
  if (!m_tracking)
  {
    // Show input image and return
    PresentOutput(*out);
  }
  else
  {
    // Get hue and mask (reject pixels with too little saturation/value)
    cvCvtColor(img, m_imgHSV, CV_BGR2HSV);
    cvInRangeS(m_imgHSV,
               cvScalar(m_histRanges[0], m_sMin, std::min(m_vMin, m_vMax), 0),
               cvScalar(m_histRanges[1], 255, std::max(m_vMin, m_vMax), 0), m_imgMask);
    cvSplit(m_imgHSV, m_imgHue, 0, 0, 0);

    m_trackWindow = m_trackCompRect;
//    // Draw initial search window
//    cvRectangle( img,
//    cvPoint(m_trackWindow.x, m_trackWindow.y),
//    cvPoint(m_trackWindow.x + m_trackWindow.width, m_trackWindow.y + m_trackWindow.height),
//    colors[ORANGE], 1 );

    // Grow track window in direction of velocity
    // Compute velocity (last frame minus two frames back)
    const float trackBoxArea = m_trackBox.size.width * m_trackBox.size.height;
    m_velocity = cvScalar(m_trackBox.center.x - m_trackPosTwoFramesBack.x,
                          m_trackBox.center.y - m_trackPosTwoFramesBack.y,
                          trackBoxArea - m_trackAreaTwoFramesBack);

    // DEBUG velocity
    //printf("wnd vcty: (%f, %f, %f)\n", m_velocity.val[0], m_velocity.val[1], m_velocity.val[2]);

    // Draw velocity
    CvPoint vPt1 = cvPoint(static_cast<int>(m_trackBox.center.x),
                           static_cast<int>(m_trackBox.center.y));
    CvPoint vPt2 = cvPoint(static_cast<int>(vPt1.x + m_velocity.val[0]),
                           static_cast<int>(vPt1.y + m_velocity.val[1]));
    cvLine(*out, vPt1, vPt2, colors[PURPLE], 4);
    cvCircle(*out, vPt2, 3, colors[RED], CV_FILLED);
    // Widen the window horizontally by the (weighted) x-velocity, clamped to
    // the frame; dx < 0 extends left, dx > 0 extends right.
    {
      const int dx = static_cast<int>(m_velocity.val[0] * VELOCITY_WEIGHT);
      const int l = dx > 0 ? m_trackWindow.x : m_trackWindow.x + dx;
      const int r = l + m_trackWindow.width + std::abs(dx);
      m_trackWindow.x = std::max(l, 0);
      m_trackWindow.width = std::min(r - l, m_frameSize.width - m_trackWindow.x);
    }
    // Same vertically with the y-velocity.
    {
      const int dy = static_cast<int>(m_velocity.val[1] * VELOCITY_WEIGHT);
      const int t = dy > 0 ? m_trackWindow.y : m_trackWindow.y + dy;
      const int b = t + m_trackWindow.height + std::abs(dy);
      m_trackWindow.y = std::max(t, 0);
      m_trackWindow.height = std::min(b - t, m_frameSize.height - m_trackWindow.y);
    }

    // DEBUG enhanced window point
    //printf("wnd init: (%d, %d, %d, %d)\n",
    //m_trackWindow.x, m_trackWindow.y,
    //m_trackWindow.width, m_trackWindow.height);

    // Draw enhanced search window
    cvRectangle(*out,
                cvPoint(m_trackWindow.x, m_trackWindow.y),
                cvPoint(m_trackWindow.x + m_trackWindow.width,
                        m_trackWindow.y + m_trackWindow.height),
                colors[PURPLE], 3);

    // Now compute camshift and adapt histogram
    const bool camShiftRes = ComputeCamshift(m_imgHue, m_imgMask);
    if (camShiftRes)
    {
      AdaptHistogram(m_imgHue, m_imgMask, *out);
    }
    else
    {
      // Camshift lost the target; stop tracking until re-initialized.
      m_tracking = false;
    }
    // Show output
    PresentOutput(*out);
  }
}
Example #6
0
/**
 * Thin C wrapper around cvXorS taking the scalar operand by pointer:
 * computes dst = src XOR (*value), honoring the optional mask.
 */
void cv_XorS(CvArr* src, CvScalar* value, CvArr* dst, const CvArr* mask) {
  const CvScalar operand = *value;
  cvXorS(src, operand, dst, mask);
}
Example #7
0
int main222( int argc,   char** argv )
{
    CvCapture* capture = 0;

    if( argc == 1 || (argc == 2 && strlen(argv[1]) == 1 && isdigit(argv[1][0])))
        capture = cvCaptureFromCAM( argc == 2 ? argv[1][0] - '0' : 0 );
    else if( argc == 2 )
        capture = cvCaptureFromAVI( argv[1] );

    if( !capture )
    {
        fprintf(stderr,"Could not initialize capturing...\n");
        return -1;
    }

    printf( "Hot keys: \n"
        "\tESC - quit the program\n"
        "\tc - stop the tracking\n"
        "\tb - switch to/from backprojection view\n"
        "\th - show/hide object histogram\n"
        "To initialize tracking, select the object with mouse\n" );

    cvNamedWindow( "Histogram", 1 );
    cvNamedWindow( "CamShiftDemo", 1 );
    cvSetMouseCallback( "CamShiftDemo", on_mouse, 0 );
    cvCreateTrackbar( "Vmin", "CamShiftDemo", &vmin, 256, 0 );
    cvCreateTrackbar( "Vmax", "CamShiftDemo", &vmax, 256, 0 );
    cvCreateTrackbar( "Smin", "CamShiftDemo", &smin, 256, 0 );

    for(;;)
    {
        IplImage* frame = 0;
        int i, bin_w, c;


        if( !frame )
            break;

        if( !image )
        {
            /* allocate all the buffers */
            image = cvCreateImage( cvGetSize(frame), 8, 3 );
            image->origin = frame->origin;
            hsv = cvCreateImage( cvGetSize(frame), 8, 3 );
            hue = cvCreateImage( cvGetSize(frame), 8, 1 );
            mask = cvCreateImage( cvGetSize(frame), 8, 1 );
            backproject = cvCreateImage( cvGetSize(frame), 8, 1 );
            hist = cvCreateHist( 1, &hdims, CV_HIST_ARRAY, &hranges, 1 );
            histimg = cvCreateImage( cvSize(320,200), 8, 3 );
            cvZero( histimg );
        }

        cvCopy( frame, image, 0 );
        cvCvtColor( image, hsv, CV_BGR2HSV );

        if( track_object )
        {
            int _vmin = vmin, _vmax = vmax;

            cvInRangeS( hsv, cvScalar(0,smin,MIN(_vmin,_vmax),0),
                        cvScalar(180,256,MAX(_vmin,_vmax),0), mask );
            cvSplit( hsv, hue, 0, 0, 0 );

            if( track_object < 0 )
            {
                float max_val = 0.f;
                cvSetImageROI( hue, selection );
                cvSetImageROI( mask, selection );
                cvCalcHist( &hue, hist, 0, mask );
                cvGetMinMaxHistValue( hist, 0, &max_val, 0, 0 );
                cvConvertScale( hist->bins, hist->bins, max_val ? 255. / max_val : 0., 0 );
                cvResetImageROI( hue );
                cvResetImageROI( mask );
                track_window = selection;
                track_object = 1;

                cvZero( histimg );
                bin_w = histimg->width / hdims;
                for( i = 0; i < hdims; i++ )
                {
                    int val = cvRound( cvGetReal1D(hist->bins,i)*histimg->height/255 );
                    CvScalar color = hsv2rgb(i*180.f/hdims);
                    cvRectangle( histimg, cvPoint(i*bin_w,histimg->height),
                                 cvPoint((i+1)*bin_w,histimg->height - val),
                                 color, -1, 8, 0 );
                }
            }

            cvCalcBackProject( &hue, backproject, hist );
            cvAnd( backproject, mask, backproject, 0 );
            cvCamShift( backproject, track_window,
                        cvTermCriteria( CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1 ),
                        &track_comp, &track_box );
            track_window = track_comp.rect;

            if( backproject_mode )
                cvCvtColor( backproject, image, CV_GRAY2BGR );
            if( !image->origin )
                track_box.angle = -track_box.angle;
            cvEllipseBox( image, track_box, CV_RGB(255,0,0), 3, CV_AA, 0 );
        }

        if( select_object && selection.width > 0 && selection.height > 0 )
        {
            cvSetImageROI( image, selection );
            cvXorS( image, cvScalarAll(255), image, 0 );
            cvResetImageROI( image );
        }

        cvShowImage( "CamShiftDemo", image );
        cvShowImage( "Histogram", histimg );

        c = cvWaitKey(10);
        if( (char) c == 27 )
            break;
        switch( (char) c )
        {
        case 'b':
            backproject_mode ^= 1;
            break;
        case 'c':
            track_object = 0;
            cvZero( histimg );
            break;
        case 'h':
            show_hist ^= 1;
            if( !show_hist )
                cvDestroyWindow( "Histogram" );
            else
                cvNamedWindow( "Histogram", 1 );
            break;
        default:
            ;
        }
    }

    cvReleaseCapture( &capture );
    cvDestroyWindow("CamShiftDemo");

    return 0;
}
Example #8
0
// Randomized accuracy test for the element-wise arithmetic functions
// (cvAdd, cvSub, cvMul, cvAddS, cvSubRS). 'arg' packs the configuration:
//   func     = param / 256        -- which operation to test (0..4)
//   depth    = (param % 256) % 8  -- pixel depth index (8U..64F)
//   channels = (param % 256) / 8  -- channel count index (0-based)
// Each iteration builds random matrices (optionally non-continuous, with an
// optional random mask), computes a reference result with the ats* helpers,
// runs the tested function, and compares with cvNorm(..., CV_C).
//
// NOTE(review): the (int)arg cast truncates a pointer on LP64 platforms --
// presumably this predates 64-bit builds; confirm before reusing.
static int arithm_test( void* arg )
{
    double success_error_level = 0;

    int   param = (int)arg;
    int   func = param / 256;
    int   depth = (param % 256) % 8;
    int   channels = (param % 256) / 8;
    int   mattype;
    int   seed = -1;//atsGetSeed();

    int   btpix, max_img_bytes;

    int     merr_i = 0, i;
    double  max_err = 0.;

    uchar *src1data, *src2data, *dstdata, *dstdbdata, *maskdata;
    CvRandState rng_state;
    AtsBinArithmMaskFunc bin_func = 0;
    AtsUnArithmMaskFunc un_func = 0;
    AtsBinArithmFunc mul_func = 0;

    CvScalar alpha, beta, gamma;
    CvMat gammaarr;

    alpha = beta = gamma = cvScalarAll(0);

    read_arithm_params();

    // Skip configurations outside the requested depth/channel ranges.
    if( !(ATS_RANGE( depth, dt_l, dt_h+1 ) &&
          ATS_RANGE( channels, ch_l, ch_h+1 ))) return TRS_UNDEF;

    // gamma is used as a 1x1 4-channel matrix for the scalar operand.
    cvInitMatHeader( &gammaarr, 1, 1, CV_64FC4, gamma.val );

    // Select the function under test and the alpha/beta weights used by the
    // reference implementation (atsLinearFunc computes alpha*src1 + beta*src2 + gamma).
    switch( func )
    {
    case 0:
        bin_func = cvAdd;
        alpha = beta = cvScalarAll(1);
        break;
    case 1:
        bin_func = cvSub;
        alpha = cvScalarAll(1);
        beta = cvScalarAll(-1);
        break;
    case 2:
        mul_func = cvMul;
        break;
    case 3:
        un_func = cvAddS;
        alpha = cvScalarAll(1);
        break;
    case 4:
        un_func = cvSubRS;
        alpha = cvScalarAll(-1);
        break;
    default:
        assert(0);
        return TRS_FAIL;
    }

    // Decode the matrix type, then map the depth index to an IPL depth code.
    mattype = depth + channels*8;
    depth = depth == 0 ? IPL_DEPTH_8U : depth == 1 ? IPL_DEPTH_8S :
            depth == 2 ? IPL_DEPTH_16S : depth == 3 ? IPL_DEPTH_32S :
            depth == 4 ? IPL_DEPTH_32F : IPL_DEPTH_64F;

    channels = channels + 1;

    cvRandInit( &rng_state, 0, 1, seed );

    // Worst-case buffer size, with slack for the random step padding below.
    max_img_bytes = (max_img_size + 32) * (max_img_size + 2) * cvPixSize(mattype);

    src1data = (uchar*)cvAlloc( max_img_bytes );
    src2data = (uchar*)cvAlloc( max_img_bytes );
    dstdata = (uchar*)cvAlloc( max_img_bytes );
    dstdbdata = (uchar*)cvAlloc( max_img_bytes );
    maskdata = (uchar*)cvAlloc( max_img_bytes / cvPixSize(mattype));

    // Bytes per pixel (depth bits / 8 * channels).
    btpix = ((depth & 255)/8)*channels;
    
    // Floating-point results are compared with a depth-dependent tolerance;
    // integer results must match exactly (success_error_level stays 0).
    if( depth == IPL_DEPTH_32F )
        success_error_level = FLT_EPSILON * img32f_range * (mul_func ? img32f_range : 2.f);
    else if( depth == IPL_DEPTH_64F )
        success_error_level = DBL_EPSILON * img32f_range * (mul_func ? img32f_range : 2.f);

    for( i = 0; i < base_iters; i++ )
    {
        int continuous = (cvRandNext( &rng_state ) % 3) == 0;
        // cvMul has no masked variant, so masking only applies otherwise.
        int is_mask_op = mul_func ? 0 : ((cvRandNext( &rng_state ) % 3) == 0);
        int step1, step2, step, mstep;
        CvMat  src1, src2, dst1, dst2, mask, dst;
        double err;
        int w, h;
                
        w = cvRandNext( &rng_state ) % (max_img_size - min_img_size) + min_img_size;
        h = cvRandNext( &rng_state ) % (max_img_size - min_img_size) + min_img_size;

        step1 = step2 = step = w*btpix;
        mstep = w;

        // Non-continuous case: add random row padding to exercise strides.
        if( !continuous )
        {
            step1 += (cvRandNext( &rng_state ) % 4)*(btpix/channels);
            step2 += (cvRandNext( &rng_state ) % 4)*(btpix/channels);
            step += (cvRandNext( &rng_state ) % 4)*(btpix/channels);
            mstep += (cvRandNext( &rng_state ) % 4);
        }

        // Pick a value range appropriate for the pixel depth.
        switch( depth )
        {
        case IPL_DEPTH_8U:
            cvRandSetRange( &rng_state, 0, img8u_range );
            break;
        case IPL_DEPTH_8S:
            cvRandSetRange( &rng_state, -img8s_range, img8s_range );
            break;
        case IPL_DEPTH_16S:
            cvRandSetRange( &rng_state, -img16s_range, img16s_range );
            break;
        case IPL_DEPTH_32S:
            cvRandSetRange( &rng_state, -img32s_range, img32s_range );
            break;
        case IPL_DEPTH_32F:
        case IPL_DEPTH_64F:
            cvRandSetRange( &rng_state, -img32f_range, img32f_range );
            break;
        }

        cvInitMatHeader( &src1, h, w, mattype, src1data, step1 );
        cvInitMatHeader( &src2, h, w, mattype, src2data, step2 );
        cvInitMatHeader( &dst1, h, w, mattype, dstdata, step );
        cvInitMatHeader( &dst2, h, w, mattype, dstdbdata, step );

        cvInitMatHeader( &mask, h, w, CV_8UC1, maskdata, mstep );

        cvRand( &rng_state, &src1 );

        // Randomly choose the destination: in-place over src1, in-place over
        // src2 (binary ops only), or the separate dst1 buffer.
        switch( cvRandNext(&rng_state) % 3 )
        {
        case 0:
            memcpy( &dst, &src1, sizeof(dst));
            break;
        case 1:
            if( un_func )
                memcpy( &dst, &src1, sizeof(dst));
            else
                memcpy( &dst, &src2, sizeof(dst));
            break;
        default:
            memcpy( &dst, &dst1, sizeof(dst));
            break;
        }

        if( un_func )
        {
            // Unary op: randomize the scalar operand instead of src2.
            if( depth == IPL_DEPTH_8U )
                cvRandSetRange( &rng_state, -img8u_range, img8u_range );
            
            cvRand( &rng_state, &gammaarr );
        }
        else
        {
            cvRand( &rng_state, &src2 );
        }

        if( is_mask_op )
        {
            const int upper = 4;
            
            // Ensure masked-out destination pixels hold defined random data.
            if( dst.data.ptr == dst1.data.ptr )
                cvRand( &rng_state, &dst );

            // Build a random 0/1 mask (values in [0,upper) squashed to {0,1}).
            cvRandSetRange( &rng_state, 0, upper );
            cvRand( &rng_state, &mask );
            atsLinearFunc( &mask, cvScalarAll(1), 0, cvScalarAll(0),
                           cvScalarAll(2-upper), &mask );
        }

        if( !mul_func )
        {
            // Reference result over the full matrix...
            atsLinearFunc( &src1, alpha, un_func ? 0 : &src2, beta, gamma, &dst2 );
            if( is_mask_op )
            {
                // ...then copy the untouched destination pixels through the
                // inverted mask so masked-out elements compare equal.
                cvXorS( &mask, cvScalarAll(1), &mask );
                cvCopy( &dst, &dst2, &mask );
                cvXorS( &mask, cvScalarAll(1), &mask );
            }

            if( un_func )
                un_func( &src1, gamma, &dst, is_mask_op ? &mask : 0 );
            else
                bin_func( &src1, &src2, &dst, is_mask_op ? &mask : 0 );
        }
        else
        {
            atsMul( &src1, &src2, &dst2 );
            mul_func( &src1, &src2, &dst );
        }

        /*if( i == 9 )
        {
            putchar('.');
        }*/

        //cvXor( &dst2, &dst, &dst2 );
        // Max absolute difference between tested and reference results.
        err = cvNorm( &dst2, &dst, CV_C );

        if( err > max_err )
        {
            max_err = err;
            merr_i = i;

            if( max_err > success_error_level )
                goto test_exit;
        }
    }

test_exit:
    cvFree( (void**)&src1data );
    cvFree( (void**)&src2data );
    cvFree( (void**)&dstdata );
    cvFree( (void**)&dstdbdata );
    cvFree( (void**)&maskdata );

    trsWrite( ATS_LST, "Max err is %g at iter = %d, seed = %08x",
                       max_err, merr_i, seed );

    return max_err <= success_error_level ?
        trsResult( TRS_OK, "No errors" ) :
        trsResult( TRS_FAIL, "Bad accuracy" );
}
Example #9
0
// chain function - this function does the actual processing
//
// For each buffer: update the ACMMM2003 foreground/background model, then
// optionally emit the foreground mask as a custom downstream event, emit one
// ROI event per (merged) foreground bounding box, and/or draw an overlay on
// the frame before pushing it downstream.
static GstFlowReturn
gst_bgfg_acmmm2003_chain(GstPad *pad, GstBuffer *buf)
{
    GstBgFgACMMM2003 *filter;

    // sanity checks
    g_return_val_if_fail(pad != NULL, GST_FLOW_ERROR);
    g_return_val_if_fail(buf != NULL, GST_FLOW_ERROR);

    filter = GST_BGFG_ACMMM2003(GST_OBJECT_PARENT(pad));

    // Wrap the buffer's pixels in the pre-allocated IplImage header
    // (no copy; the image borrows the buffer's data).
    filter->image->imageData = (gchar*) GST_BUFFER_DATA(buf);

    // the bg model must be initialized with a valid image; thus we delay its
    // creation until the chain function
    if (filter->model == NULL) {
        filter->model = cvCreateFGDStatModel(filter->image, NULL);

        ((CvFGDStatModel*)filter->model)->params.minArea           = filter->min_area;
        ((CvFGDStatModel*)filter->model)->params.erode_iterations  = filter->n_erode_iterations;
        ((CvFGDStatModel*)filter->model)->params.dilate_iterations = filter->n_dilate_iterations;

        return gst_pad_push(filter->srcpad, buf);
    }

    cvUpdateBGStatModel(filter->image, filter->model, -1);

    // send mask event, if requested
    if (filter->send_mask_events) {
        GstStructure *structure;
        GstEvent     *event;
        GArray       *data_array;
        IplImage     *mask;

        // prepare and send custom event with the mask surface
        mask = filter->model->foreground;
        data_array = g_array_sized_new(FALSE, FALSE, sizeof(mask->imageData[0]), mask->imageSize);
        g_array_append_vals(data_array, mask->imageData, mask->imageSize);

        structure = gst_structure_new("bgfg-mask",
                                      "data",      G_TYPE_POINTER, data_array,
                                      "width",     G_TYPE_UINT,    mask->width,
                                      "height",    G_TYPE_UINT,    mask->height,
                                      "depth",     G_TYPE_UINT,    mask->depth,
                                      "channels",  G_TYPE_UINT,    mask->nChannels,
                                      "timestamp", G_TYPE_UINT64,  GST_BUFFER_TIMESTAMP(buf),
                                      NULL);

        event = gst_event_new_custom(GST_EVENT_CUSTOM_DOWNSTREAM, structure);
        gst_pad_push_event(filter->srcpad, event);
        g_array_unref(data_array);

        if (filter->display) {
            // shade the regions not selected by the acmmm2003 algorithm
            // (invert the mask, darken the background, invert back)
            cvXorS(mask,          CV_RGB(255, 255, 255), mask,          NULL);
            cvSubS(filter->image, CV_RGB(191, 191, 191), filter->image, mask);
            cvXorS(mask,          CV_RGB(255, 255, 255), mask,          NULL);
        }
    }

    if (filter->send_roi_events) {
        CvSeq        *contour;
        CvRect       *bounding_rects;
        guint         i, j, n_rects;

        // count # of contours, allocate array to store the bounding rectangles
        for (contour = filter->model->foreground_regions, n_rects = 0;
             contour != NULL;
             contour = contour->h_next, ++n_rects);

        bounding_rects = g_new(CvRect, n_rects);

        for (contour = filter->model->foreground_regions, i = 0; contour != NULL; contour = contour->h_next, ++i)
            bounding_rects[i] = cvBoundingRect(contour, 0);

        // Merge overlapping rectangles pairwise; the absorbed rectangle is
        // collapsed to NULL_RECT (zero-sized) and skipped afterwards.
        for (i = 0; i < n_rects; ++i) {
            // skip collapsed rectangles
            if ((bounding_rects[i].width == 0) || (bounding_rects[i].height == 0)) continue;

            for (j = (i + 1); j < n_rects; ++j) {
                // skip collapsed rectangles
                if ((bounding_rects[j].width == 0) || (bounding_rects[j].height == 0)) continue;

                if (rect_overlap(bounding_rects[i], bounding_rects[j])) {
                    bounding_rects[i] = rect_collapse(bounding_rects[i], bounding_rects[j]);
                    bounding_rects[j] = NULL_RECT;
                }
            }
        }

        for (i = 0; i < n_rects; ++i) {
            GstEvent     *event;
            GstStructure *structure;
            CvRect        r;

            // skip collapsed rectangles
            r = bounding_rects[i];
            if ((r.width == 0) || (r.height == 0)) continue;

            structure = gst_structure_new("bgfg-roi",
                                          "x",         G_TYPE_UINT,   r.x,
                                          "y",         G_TYPE_UINT,   r.y,
                                          "width",     G_TYPE_UINT,   r.width,
                                          "height",    G_TYPE_UINT,   r.height,
                                          "timestamp", G_TYPE_UINT64, GST_BUFFER_TIMESTAMP(buf),
                                          NULL);

            // ROI events are sent upstream via the sink pad, unlike the mask
            // event above -- presumably intentional; confirm with the element
            // design docs.
            event = gst_event_new_custom(GST_EVENT_CUSTOM_DOWNSTREAM, structure);
            gst_pad_send_event(filter->sinkpad, event);

            if (filter->verbose)
                GST_INFO("[roi] x: %d, y: %d, width: %d, height: %d\n",
                         r.x, r.y, r.width, r.height);

            if (filter->display)
                cvRectangle(filter->image, cvPoint(r.x, r.y), cvPoint(r.x + r.width, r.y + r.height),
                            CV_RGB(0, 0, 255), 1, 0, 0);
        }

        g_free(bounding_rects);
    }

    // If we drew overlays, point the buffer at the annotated image data.
    if (filter->display)
        gst_buffer_set_data(buf, (guchar*) filter->image->imageData, filter->image->imageSize);

    return gst_pad_push(filter->srcpad, buf);
}
Example #10
0
// One camshift tracking step over a single frame (Android/JNI variant with
// LOGE tracing). When flag == 0 the tracker is (re)initialized from the
// circle (Cx, Cy, R) -- the histogram is rebuilt from the square selection
// circumscribing that circle; otherwise tracking continues with the existing
// global histogram/track window. Relies on the file-scope globals (image,
// hsv, hue, mask, backproject, hist, track_window, ...). Always returns 0.
int track( IplImage* frame, int flag,int Cx,int Cy,int R )
{

    {

        int i, bin_w, c;

        LOGE("#######################Check1############################");

        if( !image )
        {
            /* allocate all the buffers */
            image = cvCreateImage( cvGetSize(frame), 8, 3 );
            image->origin = frame->origin;
            hsv = cvCreateImage( cvGetSize(frame), 8, 3 );
            hue = cvCreateImage( cvGetSize(frame), 8, 1 );
            mask = cvCreateImage( cvGetSize(frame), 8, 1 );
            backproject = cvCreateImage( cvGetSize(frame), 8, 1 );
            hist = cvCreateHist( 1, &hdims, CV_HIST_ARRAY, &hranges, 1 );
            histimg = cvCreateImage( cvSize(320,200), 8, 3 );
            cvZero( histimg );
            LOGE("######################Check2###########################");
        }

        cvCopy( frame, image, 0 );
        cvCvtColor( image, hsv, CV_BGR2HSV );


        {
            int _vmin = vmin, _vmax = vmax;

            // Mask out pixels with too little saturation/value, split out hue.
            cvInRangeS( hsv, cvScalar(0,smin,MIN(_vmin,_vmax),0),
                        cvScalar(180,256,MAX(_vmin,_vmax),0), mask );
            cvSplit( hsv, hue, 0, 0, 0 );
            LOGE("###########################Check3######################");
            if(flag==0)
            {
            	LOGE("###############Initialized#############################");
				// Selection = axis-aligned square around the (Cx,Cy,R) circle.
				selection.x=Cx-R;
				selection.y=Cy-R;
				selection.height=2*R;
				selection.width=2*R;
                float max_val = 0.f;
                cvSetImageROI( hue, selection );
                cvSetImageROI( mask, selection );
                cvCalcHist( &hue, hist, 0, mask );
                cvGetMinMaxHistValue( hist, 0, &max_val, 0, 0 );
                // Normalize histogram bins to [0,255].
                cvConvertScale( hist->bins, hist->bins, max_val ? 255. / max_val : 0., 0 );
                cvResetImageROI( hue );
                cvResetImageROI( mask );
                track_window = selection;
                track_object = 1;

                // Render the histogram as colored bars (debug visualization).
                cvZero( histimg );
                bin_w = histimg->width / hdims;
                for( i = 0; i < hdims; i++ )
                {
                    int val = cvRound( cvGetReal1D(hist->bins,i)*histimg->height/255 );
                    CvScalar color = hsv2rgb(i*180.f/hdims);
                    cvRectangle( histimg, cvPoint(i*bin_w,histimg->height),
                                 cvPoint((i+1)*bin_w,histimg->height - val),
                                 color, -1, 8, 0 );
                }
                LOGE("##############Check4#########################");
            }
            LOGE("##############Check5#########################");
            // Back-project the histogram and run one camshift iteration.
            cvCalcBackProject( &hue, backproject, hist );
            cvAnd( backproject, mask, backproject, 0 );
            cvCamShift( backproject, track_window,
                        cvTermCriteria( CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1 ),
                        &track_comp, &track_box );
            track_window = track_comp.rect;
            char buffer[50];
            sprintf(buffer,"vals= %d %d and %d",track_window.x,track_window.y,track_window.width);
            LOGE(buffer);
            if( backproject_mode )
                cvCvtColor( backproject, image, CV_GRAY2BGR );
            // NOTE(review): this flips the angle when origin is NON-zero,
            // the opposite of the desktop demo's `if(!image->origin)` --
            // presumably deliberate for the Android frame orientation; confirm.
            if( image->origin )
                track_box.angle = -track_box.angle;
            cvEllipseBox( image, track_box, CV_RGB(255,0,0), 3, CV_AA, 0 );
        }

        // Highlight an in-progress selection by inverting it.
        if( select_object && selection.width > 0 && selection.height > 0 )
        {
            cvSetImageROI( image, selection );
            cvXorS( image, cvScalarAll(255), image, 0 );
            cvResetImageROI( image );
        }

        LOGE("!!!!!!!!!!!!!!!!!!Done Tracking!!!!!!!!!!!!!!!!!!!!!!!!!!!!");


    }



    return 0;
}