Ejemplo n.º 1
0
void VelocityDetector::processImage(Image* input, Image* output)
{
    // Resize the scratch images and data structures if the incoming frame
    // size differs from the buffered one.
    //
    // Bug fix: this originally used '&&', so the buffers were only
    // reallocated when *both* dimensions changed.  A frame whose width OR
    // height alone differed would be processed against mismatched buffers,
    // causing the OpenCV calls below to read/write out of bounds.
    if ((m_lastFrame->getWidth() != input->getWidth()) ||
        (m_lastFrame->getHeight() != input->getHeight()))
    {
        // Release all the old images
        deleteImages();
        // Allocate the images at the new size
        allocateImages(input->getWidth(), input->getHeight());
    }

    // Copy the current frame locally
    m_currentFrame->copyFrom(input);
    if (m_first)
    {
        // Very first frame: seed the "last frame" (and its grey-scale
        // version) with the current frame so the flow algorithms have a
        // valid previous image to compare against.
        m_lastFrame->copyFrom(input);
        cvCvtColor(m_lastFrame->asIplImage(), m_lastGreyScale, CV_BGR2GRAY);
        m_first = false;
    }

    if (0 != output)
    {
        // Start the debug output as a copy of the input frame
        output->copyFrom(input);
    }

    // Run the selected optical flow algorithm (phase correlation takes
    // precedence if both flags are enabled)
    if (m_usePhaseCorrelation)
        phaseCorrelation(output);
    else if (m_useLKFlow)
        LKFlow(output);

    // Draw the velocity vector from the image center onto the debug output
    // (Y is negated because image rows grow downward)
    if (output)
    {
        CvPoint start;
        start.x = output->getWidth() / 2;
        start.y = output->getHeight() / 2;
        CvPoint end;
        end.x = start.x + ((int)(m_velocity.x*m_phaseLineScale));
        end.y = start.y - ((int)(m_velocity.y*m_phaseLineScale));
        cvLine(output->asIplImage(), start, end, CV_RGB(255,0,0), 1, CV_AA, 0);
    }

    // We are done with our work; save the input for use as the next
    // iteration's "last frame"
    m_lastFrame->copyFrom(input);

    // Publish the estimated velocity to any listeners
    math::Vector2EventPtr event(new math::Vector2Event());
    event->vector2 = getVelocity();
    publish(EventType::VELOCITY_UPDATE, event);
}
Ejemplo n.º 2
0
	//--------------------------------------------------------------------------------
// Allocates (or reallocates) every image buffer and per-pixel codebook used
// by the background-subtraction pipeline for a w x h input.  Safe to call
// again on resize: an existing allocation is cleared first.
void ofxBackground::allocate( int w, int h ) {
	if (bAllocated == true){
		ofLog(OF_LOG_WARNING, "in allocate, reallocating a ofxCvImage, within OfxBackground");
		clear();
	}
	
	// Working copies of the incoming frame (BGR plus a YUV conversion target)
	inputCopy.allocate(w, h);
	yuvImage.allocate(w, h);
	
	// Result images for the two background models (raw + connected components)
	backgroundAverage.allocate(w, h);
	backgroundAverageConnectedComponents.allocate(w, h);
    backgroundCodebook.allocate(w, h);
	backgroundCodeBookConnectedComponents.allocate(w, h);
	
		//AVG METHOD ALLOCATION
	allocateImages(w,h); //redo everything if you change the size! and this will be triggered first time round
	scaleHigh(scalehigh);
	scaleLow(scalelow);
	// Single-channel (8-bit) foreground masks for the averaging method
	ImaskAVG = cvCreateImage( cvGetSize(inputCopy.getCvImage()), IPL_DEPTH_8U, 1 );
	ImaskAVGCC = cvCreateImage( cvGetSize(inputCopy.getCvImage()), IPL_DEPTH_8U, 1 );
	cvSet(ImaskAVG,cvScalar(255)); // start fully "foreground"; NOTE(review): ImaskAVGCC is left uninitialized — confirm intended
		//CODEBOOK METHOD ALLOCATION:
	yuvImage = cvCloneImage(inputCopy.getCvImage()); // NOTE(review): overwrites the buffer allocated above with a clone of inputCopy — confirm intended
	ImaskCodeBook = cvCreateImage( cvGetSize(inputCopy.getCvImage()), IPL_DEPTH_8U, 1 );
	ImaskCodeBookCC = cvCreateImage( cvGetSize(inputCopy.getCvImage()), IPL_DEPTH_8U, 1 );
	cvSet(ImaskCodeBook,cvScalar(255)); // NOTE(review): ImaskCodeBookCC is left uninitialized, mirroring the AVG case above
	// One codebook per pixel, all starting empty
	imageLen = inputCopy.getCvImage()->width*inputCopy.getCvImage()->height;
	cB = new codeBook [imageLen];
	for(int f = 0; f<imageLen; f++)
	{
		cB[f].numEntries = 0;
	}
	for(int nc=0; nc<nChannels;nc++)
	{
		cbBounds[nc] = 10; //Learning bounds factor
	}
	ch[0] = true; //Allow threshold setting simultaneously for all channels
	ch[1] = true;
	ch[2] = true;		
	
	// Record the allocated size and mark the object ready for use
	_width = w;
	_height = h;
	bAllocated = true;
	
	timeStartedLearning = ofGetElapsedTimeMillis(); // restart the learning timer whenever we (re)allocate
	bStatsDone = false;
}
Ejemplo n.º 3
0
// Registers every configurable parameter of the detector with the property
// set (each value is loaded from `config` when present, otherwise the default
// shown is used) and allocates the scratch images at the default 640x480 size.
void VelocityDetector::init(core::ConfigNode config)
{
    // Detection variables
    // NOTE: The property set automatically loads the value from the given
    //       config if its present, if not it uses the default value presented.
    core::PropertySetPtr propSet(getPropertySet());

    // Phase-correlation algorithm: enable flag plus the scale factor applied
    // to the red debug line drawn in processImage (range 1.0 - 50.0)
    propSet->addProperty(config, false, "usePhaseCorrelation",
        "Run the phase correlation algorithim", false, &m_usePhaseCorrelation);
    
    propSet->addProperty(config, false, "phaseLineScale",
        "Scale red line draw by the phase correlation",
        1.0, &m_phaseLineScale, 1.0, 50.0);
    
    // Parameters for LK Flow (pyramidal Lucas-Kanade optical flow)
    propSet->addProperty(config, false, "useLKFlow",
                         "Run the phase pyramidal Lucas-Kanade algorithm", 
                         false, &m_useLKFlow);
    // Feature-selection tuning: how many corners to track and how picky to be
    propSet->addProperty(config, false, "lkMaxNumberFeatures", 
                         "maximum number of features to track", 400,
                         &m_lkMaxNumberFeatures);
    propSet->addProperty(config, false, "lkMinQualityFeatures", 
                         "minimum quality of the features to track", .01,
                         &m_lkMinQualityFeatures);
    propSet->addProperty(config, false, "lkMinEucDistance", 
                         "minimum Euclidean distance between features", .01,
                         &m_lkMinEucDistance);
    // Iterative-search termination criteria (max iterations / min error delta)
    propSet->addProperty(config, false, "lkIterations", 
                         "termination criteria (iterations)", 20, 
                         &m_lkIterations);
    propSet->addProperty(config, false, "lkEpsilon", 
                         "termination criteria (better than error)", .3, 
                         &m_lkEpsilon);
    // Debug-rendering and outlier-filtering controls for the flow field
    propSet->addProperty(config, false, "lkFlowFieldScale",
                         "length of field lines", 3.0, &m_lkFlowFieldScale,
                         1.0, 10.0);
    propSet->addProperty(config, false, "lkLengthMaxError",
                         "Filter flow field vectors", 5.0,
                         &m_lkLengthMaxError, 0.0, 10.0);
    
    // Allocate all scratch images (processImage reallocates on size change)
    allocateImages(640, 480);
}
Ejemplo n.º 4
0
// DCT-based high-pass filter over the global grey-scale image cvImGray:
// forward DCT, zero the coefficients whose row AND column fractions both
// exceed `rad` (removing the highest frequencies), inverse DCT to get a
// low-pass image, then subtract that from the input and add a constant bias.
// The global StdDCT_output.curitem selects which intermediate stage is
// written back into cvImGray for display.
void StdDCT()
{
	allocateImages();

	// Lazily create the 32-bit float scratch images on first use.
	// NOTE(review): only cvIm32F is tested; cvIm32Fin and cvImDCT are assumed
	// to share its lifetime — confirm against finishImages()/deallocation.
	if(!cvIm32F) {
		cvIm32Fin = cvCreateImage(cvGetSize(cvImGray), IPL_DEPTH_32F, 1);
		cvIm32F = cvCreateImage(cvGetSize(cvImGray), IPL_DEPTH_32F, 1);
		cvImDCT = cvCreateImage(cvGetSize(cvImGray), IPL_DEPTH_32F, 1);
	}


	// Convert the 8-bit input to float and keep a pristine copy in cvIm32Fin
	cvConvertScale(cvImGray, cvIm32Fin);
	cvCopy(cvIm32Fin, cvIm32F);

	cvDCT(cvIm32F, cvImDCT, CV_DXT_FORWARD);
	/* OUTPUTS
	 *
	 * NOTE(review): this list has five names (indices 0-4) while the code
	 * below handles curitem values 0-5 and its inline labels differ — the
	 * list and/or labels appear out of date relative to the UI definition.
	 *
	 char * LogDCT_outputnames_list[] = {
		"LogDCT",
		"Log DCT Cropped",
		"Log DCT Inv",
		"DCT Inv",
		"Final"};
		*/
	if(StdDCT_output.curitem == 0) // stage 0: raw forward DCT
	{
		cvConvertScale(cvImDCT, cvImGray, 1.);
	}

	// Zero the high-frequency DCT coefficients: a coefficient is cleared
	// only when BOTH its row fraction and column fraction exceed `rad`
	// (StdDCT_radius is a percentage, converted to [0,1])
	float rad = (float)StdDCT_radius / 100.f;
	for(int r  = 0; r<cvImDCT->height; r++)
	{
		float fr = (float)r / (float)cvImDCT->height;
		if(fr > rad)
		{
			float * line = (float *)(cvImDCT->imageData + r*cvImDCT->widthStep);
			for(int c  = 0; c<cvImDCT->width; c++)
			{
				float fc = (float)c / (float)cvImDCT->width;
	//			float dc = fc*fc;
	//			float dr = fr*fr;
				if(fc > rad)
				{
					line[c] = 0;
				}
			}
		}
	}
	if(StdDCT_output.curitem == 1) // stage 1: DCT after cropping
	{
		cvConvertScale(cvImDCT, cvImGray, 1.);
	}



	// Back to the spatial domain: cvIm32F now holds the low-pass image
	cvDCT(cvImDCT, cvIm32F, CV_DXT_INVERSE);
	if(StdDCT_output.curitem == 2) // stage 2: inverse DCT (low-pass image)
	{
		cvConvertScale(cvIm32F, cvImGray, 1.);
	}


	// Scale the low-pass image by LogDCT_coef before the subtraction
	cvConvertScale(cvIm32F, cvImDCT, LogDCT_coef);
	if(StdDCT_output.curitem == 3) // stage 3: scaled inverse DCT
	{
		cvConvertScale(cvImDCT, cvImGray, 1.);
	}

	// Subtract the low-pass image from the input image (high-pass result)
	cvSub(cvIm32Fin, cvImDCT, cvIm32F);
	if(StdDCT_output.curitem == 4) // stage 4: high-pass difference
	{
		cvConvertScale(cvIm32F, cvImGray, 1.);
	}

	// Add a constant brightness bias to the high-pass result
	cvAddS(cvIm32F, cvScalarAll(LogDCT_add), cvImDCT);
	if(StdDCT_output.curitem == 5) // stage 5: final output
	{
		cvConvertScale(cvImDCT, cvImGray, 1.);
	}

	finishImages();
}