SHModel* ShapeModel( CvCapture* g_capture, StaticBGModel* BGModel, BGModelParams* BGParams,
					SHParams* ShParams ){ // NOTE: ShParams is used and freed below but was missing from the original signature; a SHParams* parameter is assumed here

	int num_frames = 0;
	int total_blobs=0;
	float Sumatorio = 0;
	float SumatorioDes = 0;
	IplImage* frame = NULL;

	STFrame* frameData = NULL;
	SHModel* Shape = NULL;

	CBlobResult blobs;
	CBlob *currentBlob;

	IplImage* ImGris = cvCreateImage(cvGetSize( BGModel->Imed ), 8, 1 );
	IplImage* Imblob = cvCreateImage(cvGetSize( BGModel->Imed ), 8, 3 );
	IplImage* lastBG = cvCreateImage( cvGetSize( BGModel->Imed ),8, 1 );
	IplImage* lastIdes = cvCreateImage( cvGetSize( BGModel->Imed ), IPL_DEPTH_32F, 1);
	cvZero(Imblob);
	// Initialise the structure for the shape model

	Shape = ( SHModel *) malloc( sizeof( SHModel));
	if ( !Shape ) {error(4);return 0;}
	Shape->FlyAreaDes = 0;
	Shape->FlyAreaMedia=0;
	//Zero the values of the areas vector


	//EXTRACT THE BLOBS AND COMPUTE MEDIAN/MEAN AND STANDARD DEVIATION OVER ALL FRAMES
	cvSetCaptureProperty( g_capture, CV_CAP_PROP_POS_FRAMES, BGParams->initDelay ); // set the capture position
	while( num_frames < ShParams->FramesTraining ){
		frame = cvQueryFrame( g_capture );
		if ( !frame ) {
			error(2);
			break;
		}
		if ( (cvWaitKey(10) & 255) == 27 ) break;

		ImPreProcess( frame, ImGris, BGModel->ImFMask, 0, BGModel->DataFROI);

		// Load the background data
		if(!frameData ) { // on the first iteration, initialise the dynamic model from the static one
			// Initialise the data structure for the new frame
			frameData = InitNewFrameData( frame );
			cvCopy(  BGModel->Imed,frameData->BGModel);
			cvSet(frameData->IDesvf, cvScalar(1));
			cvCopy(  BGModel->Imed,lastBG);
		}
		else{	// load the latest background parameters.
			cvCopy( lastBG, frameData->BGModel);
			cvCopy( lastIdes,frameData->IDesvf );
		}
	//	obtain the FG mask and the list with its blobs' data.
		//// BACKGROUND UPDATE
		// Update the background
		// set the parameters

		UpdateBGModel( ImGris,frameData->BGModel,frameData->IDesvf, BGParams, BGModel->DataFROI, BGModel->ImFMask );
		/////// BACKGROUND DIFFERENCE. Obtain the foreground mask
		BackgroundDifference( ImGris, frameData->BGModel,frameData->IDesvf, frameData->FG ,BGParams, BGModel->DataFROI);

		// save the images to initialise the next frame
		cvCopy( frameData->BGModel, lastBG);
		cvCopy(  frameData->IDesvf,lastIdes);

		//Extract the blobs and exclude those whose size is not of interest
//		cvSetImageROI(  frameData->FG , BGModel->DataFROI);

		blobs = CBlobResult( frameData->FG, NULL, 100, true );
		blobs.Filter( blobs, B_EXCLUDE, CBlobGetArea(),B_GREATER,100);
		blobs.Filter( blobs, B_EXCLUDE, CBlobGetPerimeter(),B_GREATER,1000);

		int j = blobs.GetNumBlobs();// number of blobs found in the frame

		total_blobs=total_blobs+j; // accumulate the blobs found across all frames

		//Walk blob by blob and collect the AREA statistics of each one

		for (int i = 0; i < blobs.GetNumBlobs(); i++ ){ //for 1

			currentBlob = blobs.GetBlob(i);

			if(ShParams->SHOW_DATA_AREAS) {
				//printf("Area blob %d = %f ",i,currentBlob->area);
			}
			//Estimate the mean of the areas

			Sumatorio = Sumatorio + currentBlob->area;
			SumatorioDes = SumatorioDes + currentBlob->area*currentBlob->area;

			muestrearAreas( currentBlob->area);
			currentBlob->FillBlob( Imblob, CV_RGB(255,0,0));

		}//End of for 1

		Shape->FlyAreaMedia = Sumatorio / total_blobs;	// running mean of the blob areas (assumes total_blobs > 0)
		Shape->FlyAreaDes = (SumatorioDes / total_blobs) - Shape->FlyAreaMedia*Shape->FlyAreaMedia;	// variance: E[A²] − mean²

		num_frames += 1;
//		cvResetImageROI(frameData->FG);
		DraWWindow(Imblob, frameData, BGModel, SHOW_SHAPE_MODELING, COMPLETO);
		DraWWindow(Imblob, frameData, BGModel, SHAPE,SIMPLE );

	}
	desvanecer( NULL, 20);
	Shape->FlyAreaDes = sqrt( fabs(Shape->FlyAreaDes) );	// fabs, not abs: the operand is a float

	//Show the mean and deviation over all frames

	if(ShParams->SHOW_DATA_AREAS )
		printf("\n AREA MEAN: %f \t AREA DEVIATION: %f",Shape->FlyAreaMedia,Shape->FlyAreaDes);

	free( ShParams);
	liberarSTFrame( frameData );
	cvReleaseImage( &ImGris);
	cvReleaseImage( &Imblob);
	cvReleaseImage( &lastIdes);
	cvReleaseImage( &lastBG);

	return Shape;

}//End of function ShapeModel
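For reference, the statistics accumulated in ShapeModel are the usual running moments: mean = ΣA/N and variance = ΣA²/N − mean², with the deviation taken as a square root at the end. A minimal self-contained sketch of the same computation (illustrative only, not part of the original source):

#include <cmath>
#include <cstdio>

int main() {
	const double areas[] = { 120, 95, 143, 110 }; // hypothetical blob areas
	const int n = sizeof(areas) / sizeof(areas[0]);
	double sum = 0, sumSq = 0;
	for (int i = 0; i < n; i++) { sum += areas[i]; sumSq += areas[i] * areas[i]; }
	double mean = sum / n;
	double sigma = std::sqrt(std::fabs(sumSq / n - mean * mean)); // same fabs guard as ShapeModel
	std::printf("mean=%f sigma=%f\n", mean, sigma);
	return 0;
}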
Example #2
/**
- FUNCTION: ComponentLabeling
- FUNCTIONALITY: Computes the binary components (blobs) of an image using 8-connectivity
- PARAMETERS:
	- inputImage: image to segment (pixel values different than blobColor are treated as background)
	- maskImage: if not NULL, all the pixels equal to 0 in mask are skipped in input image
	- backgroundColor: color of background (ignored pixels)
	- blobs: blob vector destination
- RESULT:
	- 
- RESTRICTIONS:
	- 
- AUTHOR: rborras
- CREATION DATE: 2008/04/21
- MODIFICATION: Date. Author. Description.
- NOTE: Algorithm based on "A linear-time component labeling algorithm using contour tracing technique", 
		F. Chang et al.
*/
bool ComponentLabeling(	IplImage* inputImage,
						IplImage* maskImage,
						unsigned char backgroundColor,
						Blob_vector &blobs )
{
	int i,j;
	// row major vector with visited points 
	bool *visitedPoints, *pVisitedPoints, internalContour, externalContour;
	unsigned char *pInputImage, *pMask, *pAboveInputImage, *pBelowInputImage,
				  *pAboveMask, *pBelowMask;
	int imageWidth, imageHeight, currentLabel, contourLabel = 0;	// contourLabel initialised: it could otherwise be read before assignment when i == 0
	// row major vector with labelled image 
	t_labelType *labelledImage, *pLabels;
	//! current blob pointer
	CBlob *currentBlob;
	CvSize imageSizes;
	CvPoint currentPoint;

	// verify input image
	if( !CV_IS_IMAGE( inputImage ) )
		return false;

	// verify that the input image and the mask image have the same size
	if( maskImage )
	{
		if( !CV_IS_IMAGE(maskImage) || 
			maskImage->width != inputImage->width || 
			maskImage->height != inputImage->height )
		return false;
	}
	else
	{
		pMask = NULL;
		pAboveMask = NULL;
		pBelowMask = NULL;
	}

	imageSizes = cvSize(inputImage->width,inputImage->height);
	
	imageWidth = inputImage->width;
	imageHeight = inputImage->height;

	// create auxiliary buffers
	labelledImage = (t_labelType*) malloc( inputImage->width * inputImage->height * sizeof(t_labelType) );
	visitedPoints = (bool*) malloc( inputImage->width * inputImage->height * sizeof(bool) );

	// initialize it to 0
	memset(labelledImage, 0, inputImage->width * inputImage->height * sizeof(t_labelType) ) ;
	memset(visitedPoints, false, inputImage->width * inputImage->height * sizeof(bool) ) ;

	// initialize pointers and label counter
	pLabels = labelledImage;
	pVisitedPoints = visitedPoints;
	currentLabel = 1;

	for (j = 0; j < imageHeight; j++ )
	{
		// don't check whether we are on the first or last row here; these pointers are only dereferenced after the j > 0 / j < imageHeight-1 checks below
		pAboveInputImage = (unsigned char*) inputImage->imageData + (j-1) * inputImage->widthStep;
		pBelowInputImage = (unsigned char*) inputImage->imageData + (j+1) * inputImage->widthStep;
	
		pInputImage = (unsigned char*) inputImage->imageData + j * inputImage->widthStep;

		if( maskImage )
		{
			pMask = (unsigned char*) maskImage->imageData + j * maskImage->widthStep;
			// as above, no bounds check here; the row pointers are only dereferenced after the boundary checks
			pAboveMask = (unsigned char*) maskImage->imageData + (j-1) * maskImage->widthStep;
			pBelowMask = (unsigned char*) maskImage->imageData + (j+1) * maskImage->widthStep;

		}
		
		for (i = 0; i < imageWidth; i++, pInputImage++, pMask++, pAboveInputImage++, pBelowInputImage++,
										 pAboveMask++, pBelowMask++ )
		{
			// ignore background pixels or 0 pixels in mask
			if ( (*pInputImage == backgroundColor) || (maskImage && *pMask == 0 ))
			{
				pLabels++;
				pVisitedPoints++;
				continue;
			}
			
			// new external contour: current label == 0 and above pixel is background
			if( j > 0 )
			{
				externalContour = ((*pAboveInputImage == backgroundColor) || 
								  (maskImage && *pAboveMask == 0)) && 
								  (*pLabels == 0);
			}
			else
				externalContour = (*pLabels == 0);

			// new internal contour: below pixel is background and not visited
			if( !externalContour && j < imageHeight - 1 )
			{
				internalContour = *pBelowInputImage == backgroundColor &&
								  !GET_BELOW_VISITEDPIXEL( pVisitedPoints, imageWidth);
			}
			else
			{
				internalContour = false;
			}
			
			
			if( externalContour )
			{
				currentPoint = cvPoint(i,j);
				// assign label to labelled image
				*pLabels = currentLabel;
				
				// create new blob
				currentBlob = new CBlob(currentLabel, currentPoint, imageSizes );

				// contour tracing with currentLabel
				contourTracing( inputImage, maskImage, currentPoint, 
								labelledImage, visitedPoints, 
								currentLabel, false, backgroundColor, currentBlob->GetExternalContour() );

				// add new created blob
				blobs.push_back(currentBlob);

				currentLabel++;
			}
			else 
			{
				if( internalContour )
				{
					currentPoint = cvPoint(i,j);

					if( *pLabels == 0 )
					{
						// take the left neighbour's value as current
						if( i > 0 )
							contourLabel = *(pLabels - 1);
					}
					else
					{
						contourLabel = *pLabels;
					}

					if(contourLabel>0)
					{
						currentBlob = blobs[contourLabel-1];
						CBlobContour newContour(currentPoint, currentBlob->GetStorage());
						

						// contour tracing with contourLabel
						contourTracing( inputImage, maskImage, currentPoint, labelledImage, visitedPoints,
										contourLabel, true, backgroundColor, &newContour ); 

						currentBlob->AddInternalContour( newContour );
					}
				}
				// neither internal nor external contour
				else
				{
					// take the left neighbour's value as current if it is not labelled
					if( i > 0 && *pLabels == 0 )
						*pLabels = *(pLabels - 1);
				}

			}
			
			pLabels++;
			pVisitedPoints++;

		}
	}


	// free auxiliary buffers
	free( labelledImage );
	free( visitedPoints );

	return true;
}
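A minimal call-site sketch for ComponentLabeling, assuming Blob_vector is the cvBlobsLib vector of CBlob* owned by the caller; the file name is a placeholder:

void labelExample()
{
	IplImage* bin = cvLoadImage( "mask.png", CV_LOAD_IMAGE_GRAYSCALE );
	Blob_vector blobs;
	if( bin && ComponentLabeling( bin, NULL, 0, blobs ) )
	{
		printf( "%d blobs\n", (int) blobs.size() );
		for( size_t n = 0; n < blobs.size(); n++ )
			delete blobs[n]; // the vector stores new'd CBlob pointers; the caller frees them
	}
	cvReleaseImage( &bin );
}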
Example #3
void Blobs::blobify()
{
	//mm not entirely sure yet, but I think this function might just "draw" the blobs, based on the color model of each pixel.
	//mm The color model is already determined in the "packed" data (see unpack(); the color-model information is extracted there).
	
	
    uint32_t i, j, k;
    CBlob *blob;
    uint16_t *blobsStart;
    uint16_t numBlobsStart, invalid, invalid2;
    uint16_t left, top, right, bottom;
    //uint32_t timer, timer2=0;

    unpack(); //mm as is clear in unpack(), at this point, we already know the model to which each blob belongs.

    // copy blobs into memory //mm does this refer to the unpack() above??
    invalid = 0;
	
    // mutex keeps interrupt routine from stepping on us
    m_mutex = true;
	
	//mm iterate through models:
    for (i=0, m_numBlobs=0; i<NUM_MODELS; i++)
    {
		//mm iterate through blobs in this model:
        for (j=m_numBlobs*5, k=0, blobsStart=m_blobs+j, numBlobsStart=m_numBlobs, blob=m_assembler[i].finishedBlobs;
             blob && m_numBlobs<m_maxBlobs && k<m_maxBlobsPerModel; blob=blob->next, k++)
        {
            if (blob->GetArea()<(int)m_minArea)
                continue;
            blob->getBBox((short &)left, (short &)top, (short &)right, (short &)bottom);
            m_blobs[j + 0] = i+1;
            m_blobs[j + 1] = left;
            m_blobs[j + 2] = right;
            m_blobs[j + 3] = top;
            m_blobs[j + 4] = bottom;
            m_numBlobs++;
            j += 5;

        }
        //setTimer(&timer);
        if (true) // combining is unconditional in this variant; in Example #4 this is if (!colorCode)
        {
            while(1)
            {
                invalid2 = combine2(blobsStart, m_numBlobs-numBlobsStart);
                if (invalid2==0)
                    break;
                invalid += invalid2;
            }
        }
        //timer2 += getTimer(timer);
    }
    //setTimer(&timer);
    invalid += combine(m_blobs, m_numBlobs);
    if (false) // coded-blob processing is disabled in this variant; in Example #4 this is if (m_codedMode)
    {
        m_codedBlobs = (BlobB *)(m_blobs + m_numBlobs*5);
        processCoded();
    }
    if (invalid)
    {
        invalid2 = compress(m_blobs, m_numBlobs);
        m_numBlobs -= invalid2;
        if (invalid2!=invalid)
            cprintf("**** %d %d\n", invalid2, invalid);

    }
    //timer2 += getTimer(timer);
    //cprintf("time=%d\n", timer2); // never seen this greater than 200us.  or 1% of frame period

    // reset read index-- new frame
    m_blobReadIndex = 0;
    m_mutex = false;

    // free memory
    for (i=0; i<NUM_MODELS; i++)
        m_assembler[i].Reset();

#if 0
    static int frame = 0;
    if (m_numBlobs>0)
        cprintf("%d: blobs %d %d %d %d %d\n", frame, m_numBlobs, m_blobs[1], m_blobs[2], m_blobs[3], m_blobs[4]);
    else
        cprintf("%d: blobs 0\n", frame);
    frame++;
#endif
}
Example #4
void Blobs::blobify()
{
    uint32_t i, j, k;
    bool colorCode;
    CBlob *blob;
    uint16_t *blobsStart;
    uint16_t numBlobsStart, invalid, invalid2;
    uint16_t left, top, right, bottom;
    //uint32_t timer, timer2=0;

    unpack();

    // copy blobs into memory
    invalid = 0;
    // mutex keeps interrupt routine from stepping on us
    m_mutex = true;
    for (i=0, m_numBlobs=0; i<NUM_MODELS; i++)
    {
        colorCode = m_clut->getType(i+1)==CL_MODEL_TYPE_COLORCODE;

        for (j=m_numBlobs*5, k=0, blobsStart=m_blobs+j, numBlobsStart=m_numBlobs, blob=m_assembler[i].finishedBlobs;
             blob && m_numBlobs<m_maxBlobs && k<m_maxBlobsPerModel; blob=blob->next, k++)
        {
            if ((colorCode && blob->GetArea()<MIN_COLOR_CODE_AREA) ||
                    (!colorCode && blob->GetArea()<(int)m_minArea))
                continue;
            blob->getBBox((short &)left, (short &)top, (short &)right, (short &)bottom);
            m_blobs[j + 0] = i+1;
            m_blobs[j + 1] = left;
            m_blobs[j + 2] = right;
            m_blobs[j + 3] = top;
            m_blobs[j + 4] = bottom;
            m_numBlobs++;
            j += 5;

        }
        //setTimer(&timer);
        if (!colorCode) // do not combine color code models
        {
            while(1)
            {
                invalid2 = combine2(blobsStart, m_numBlobs-numBlobsStart);
                if (invalid2==0)
                    break;
                invalid += invalid2;
            }
        }
        //timer2 += getTimer(timer);
    }
    //setTimer(&timer);
    invalid += combine(m_blobs, m_numBlobs);
    if (m_codedMode)
    {
        m_codedBlobs = (BlobB *)(m_blobs + m_numBlobs*5);
        // calculate number of codedblobs left
        processCoded();
    }
    if (invalid || m_codedMode)
    {
        invalid2 = compress(m_blobs, m_numBlobs);
        m_numBlobs -= invalid2;
    }
    //timer2 += getTimer(timer);
    //cprintf("time=%d\n", timer2); // never seen this greater than 200us.  or 1% of frame period

    // reset read index-- new frame
    m_blobReadIndex = 0;
    m_mutex = false;

    // free memory
    for (i=0; i<NUM_MODELS; i++)
        m_assembler[i].Reset();

#if 0
    static int frame = 0;
    if (m_numBlobs>0)
        cprintf("%d: blobs %d %d %d %d %d\n", frame, m_numBlobs, m_blobs[1], m_blobs[2], m_blobs[3], m_blobs[4]);
    else
        cprintf("%d: blobs 0\n", frame);
    frame++;
#endif
}
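Each accepted blob above is written as five consecutive uint16_t words: model index (+1), left, right, top, bottom. A hypothetical reader for that packed layout:

#include <stdint.h>
#include <stdio.h>

void printBlobRecords(const uint16_t *blobs, uint16_t numBlobs)
{
    // walk the records in the same 5-word stride blobify() uses
    for (uint16_t n = 0; n < numBlobs; n++)
    {
        const uint16_t *b = blobs + n * 5;
        printf("model=%u bbox x:[%u..%u] y:[%u..%u]\n", b[0], b[1], b[2], b[3], b[4]);
    }
}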
	bool findBiggestBlobImage(IplImage* img, int color, IplImage* &output)
	{
		CBlobResult blobs;
		CBlob *currentBlob;

		blobs = CBlobResult( img, NULL, 0 );
		blobs.Filter( blobs, B_EXCLUDE, CBlobGetArea(), B_LESS, m_minBlobSize );

		double biggestArea = m_minBlobSize;
		int biggestBlob = -1;

		for (int i = 0; i < blobs.GetNumBlobs(); i++ )
		{
			currentBlob = blobs.GetBlob(i);
			double blobArea = currentBlob->Area();
			if(blobArea > biggestArea) 
			{
				biggestBlob = i;
				biggestArea = blobArea;
			}
		}

		if(biggestBlob >= 0)
		{
			int x = (int) blobs.GetBlob(biggestBlob)->MinX();
			int y = (int) blobs.GetBlob(biggestBlob)->MinY();
			int width= (int) blobs.GetBlob(biggestBlob)->MaxX()-x;
			int height= (int) blobs.GetBlob(biggestBlob)->MaxY()-y;

			IplImage* temp = cvCreateImage(cvGetSize(img),IPL_DEPTH_8U, 1);
			cvZero(temp); // cvCreateImage leaves the pixel data uninitialised
			IplImage* temp2 = cvCreateImage(cvSize(width, height),IPL_DEPTH_8U, 1);
			IplImage* result = cvCreateImage(cvSize(width, height),IPL_DEPTH_8U, 1);

			blobs.GetBlob(biggestBlob)->FillBlob(temp,cvScalar(255),x,y); // the biggestBlob >= 0 check above already guarantees a valid index

			cvSetImageROI(temp, cvRect(x, y, width, height));

			cvCopy(temp,temp2);

			uchar* tempData;

			uchar* resultData;

			tempData = (uchar *)(temp2->imageData);
			resultData = (uchar *) (result->imageData);

			for (int j = 0; j < width*height; j++)
			{
				if (tempData[j]==255) resultData[j] = color;
				else	resultData[j] = 0;
			}

			cvResize(result, output);

			cvReleaseImage(&temp);
			cvReleaseImage(&temp2);
			cvReleaseImage(&result);

			return true;
		}
		else
			return false;
	}
/*
arg1: Width of each frame
arg2: Height of each frame
arg3: Target frames per second of the program
arg4: Maximum number of blobs to track. Each blob MAY correspond to a person in front of the camera
*/
int main(int argc, char* argv[])
{
    if (argc < 5)
    {
        cout << "Too few arguments to the program. Exiting...\n";
        return 0;
    }

    int width, height, fps, numberOfBlobs;
    try
    {
        //Read the arguments
        width = atoi(argv[1]);
        height = atoi(argv[2]);
        fps = atoi(argv[3]);
        numberOfBlobs = atoi(argv[4]);
        //Done reading arguments
    }
    catch(...)
    {
        cout << "One or more arguments are invalid!. Exiting...\n";
        return 0;
    }


    /*
    int width = 320;
    int height = 240;
    int fps = 10;
    int numberOfBlobs = 2;
    */

    tempImageV4L = cvCreateImage(cvSize(width, height), 8, 3);
    frameNumber = 0;

    //Beginning initialising cameras
    rightCamera = new Camera("/dev/video0", width, height, fps);
    leftCamera = new Camera("/dev/video1", width, height, fps);
	//leftCamera = rightCamera; //If only one camera is available, uncomment this line and comment the line above this.
    //Done initialising cameras

    //Discard a few frames so the cameras reach a steady state
    WasteNFrames(10);

    //Beginning capturing background
    backImageRight = GetNextCameraShot(rightCamera);
    backImageLeft = GetNextCameraShot(leftCamera);
    frameNumber++;
    cvtColor(backImageRight, backImageRight, CV_BGR2HSV);
    cvtColor(backImageLeft, backImageLeft, CV_BGR2HSV);
    //Done capturing background

    //General Stuff
    Mat motionImageRight(backImageRight.rows, backImageRight.cols, CV_8UC1);
    Mat motionImageLeft(backImageLeft.rows, backImageLeft.cols, CV_8UC1);
    Mat HSVImageRight, HSVImageLeft;
    Mat displayImageRight, displayImageLeft;
    //End of General Stuff


    while (1) //The infinite loop
    {
        //Beginning getting camera shots
        rightImage = GetNextCameraShot(rightCamera);
        leftImage = GetNextCameraShot(leftCamera);
        frameNumber++;
        //Done getting camera shots


        //Beginning getting motion images
        HSVImageRight = rightImage.clone();
        cvtColor(HSVImageRight, HSVImageRight, CV_BGR2HSV);
        CompareWithBackground(HSVImageRight, backImageRight, motionImageRight);
        medianBlur(motionImageRight, motionImageRight, 3);

        HSVImageLeft = leftImage.clone();
        cvtColor(HSVImageLeft, HSVImageLeft, CV_BGR2HSV);
        CompareWithBackground(HSVImageLeft, backImageLeft, motionImageLeft);
        medianBlur(motionImageLeft, motionImageLeft, 3);
        //Ended getting motion images

        cout << "\nFor frame #" << frameNumber << " :\n";

        //Beginning Getting Blobs
        IplImage  imageblobPixels = motionImageRight;
        CBlobResult blobs;
        blobs = CBlobResult(&imageblobPixels, NULL, 0);	// Use a black background color.
        int minArea = 100 / ((640 / width) * (640 / width));
        blobs.Filter(blobs, B_EXCLUDE, CBlobGetArea(), B_LESS, minArea);
        int foundBlobs = blobs.GetNumBlobs();
        //Ended Getting Blobs

        cout << "Found " << foundBlobs << " motion blobs\n";

        //Creating copies of original images for modifying and displaying
        displayImageRight = rightImage.clone();
        displayImageLeft = leftImage.clone();
        //Done creating copies

        //Cycling through the blobs
        for (int blobIndex = 0; blobIndex < blobs.GetNumBlobs() && blobIndex < numberOfBlobs; blobIndex++)
        {
            cout << "Blob #" << blobIndex << " : ";

            //Getting blob details
            CBlob * blob = blobs.GetBlob(blobIndex);
            int x = blob->GetBoundingBox().x;
            int y = blob->GetBoundingBox().y;
            int w = blob->GetBoundingBox().width;
            int h = blob->GetBoundingBox().height;
            //Done getting blob details

            int sep = 0;

            //The point for which we want to find depth
            PixPoint inP = {x + w/2, y + h/2}, oP = {0, 0};
            cout << "inPoint = {" << inP.x << ", " << inP.y << "} ";

            //Initialising the rectangle in which the corresponding point is likely to lie
            Rectangle rect;
            rect.location.x = -1;
            rect.location.y = inP.y - 5;
            rect.size.x = rightImage.cols;
            rect.size.y = 11;
            //Done initialising the target rectangle

            //Find the corresponding point and calculate the separation
            oP = PointCorresponder::correspondPoint(rightImage, leftImage, inP, rect, motionImageLeft);
            sep = inP.x - oP.x;
            cout << "foundPoint = {" << oP.x << ", " << oP.y << "} ";

            //Just for visual presentation
            DrawRect(displayImageRight, x, y, w, h);
            cv::circle(displayImageRight, Point(inP.x, inP.y), 10, Scalar(0), 3);
            cv::circle(displayImageLeft, Point(oP.x, oP.y), 10, Scalar(0), 3);
            //Done decoration

            //The thing we were looking for... how can we forget to print this? :P
            cout << "seperation = " << sep << "\n";
        }

        //Show the windows
        cv::namedWindow("RIGHT");
        cv::namedWindow("thresh");
        cv::namedWindow("LEFT");
        imshow("LEFT", displayImageLeft);
        imshow("RIGHT", displayImageRight);
        imshow("thresh", motionImageRight);
        //End of code for showing windows

        //The loop terminating condition
        if (waitKey(27) >= 0) break;
    }

    //Mission Successful!! :D :)
    return 0;
}
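The separation printed above is the stereo disparity. For a rectified camera pair it maps to depth as Z = f·B/d, where f is the focal length in pixels and B the baseline; a sketch with placeholder calibration values (f and B are assumptions, not taken from this program):

// depth in meters from disparity in pixels
double depthFromDisparity(double d, double fPx = 500.0, double baselineM = 0.06)
{
    return (d > 0) ? fPx * baselineM / d : -1.0; // -1 marks an invalid disparity
}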
Example #7
void
Auvsi_Recognize::extractLetter( void )
{
	typedef cv::Vec<unsigned char, 1> VT_binary;
	#ifdef TWO_CHANNEL
		typedef cv::Vec<T, 2> VT;
	#else
		typedef cv::Vec<T, 3> VT;
	#endif
	typedef cv::Vec<int, 1> IT;

	// Erode input slightly
	cv::Mat input;
	cv::erode( _shape, input, cv::Mat() );

	// Remove any small white blobs left over
	CBlobResult blobs;
	CBlob * currentBlob;
	CBlob biggestBlob;
	IplImage binaryIpl = input;
	
	blobs = CBlobResult( &binaryIpl, NULL, 0 );
	blobs.GetNthBlob( CBlobGetArea(), 0, biggestBlob );
	
	blobs.Filter( blobs, B_EXCLUDE, CBlobGetArea(), B_GREATER_OR_EQUAL, biggestBlob.Area() );
	
	for (int i = 0; i < blobs.GetNumBlobs(); i++ )
	{
    	currentBlob = blobs.GetBlob(i);
		currentBlob->FillBlob( &binaryIpl, cvScalar(0));
	}
	
	// Perform k-means on this region only
	int areaLetter = (int)biggestBlob.Area();
	cv::Mat kMeansInput = cv::Mat( areaLetter, 1, _image.type() );

	// Discard if we couldn't extract a letter
	if( areaLetter <= 0 )
	{
		_letter = cv::Mat( _shape );
		_letter = cv::Scalar(0);
		return;
	}

	cv::MatIterator_<VT_binary> binaryIterator = input.begin<VT_binary>();
	cv::MatIterator_<VT_binary> binaryEnd = input.end<VT_binary>();
	cv::MatIterator_<VT> kMeansIterator = kMeansInput.begin<VT>();

	for( ; binaryIterator != binaryEnd; ++binaryIterator )
	{
		if( (*binaryIterator)[0] > 0 )
		{
			(*kMeansIterator) = _image.at<VT>( binaryIterator.pos() );
			++kMeansIterator;
		}
	}

	// Get k-means labels
	cv::Mat labels = doClustering<T>( kMeansInput, 2, false );	
	int numZeros = areaLetter - cv::countNonZero( labels );
	bool useZeros = numZeros < cv::countNonZero( labels );

	// Reshape into original form
	_letter = cv::Mat( _shape.size(), _shape.type() );
	_letter = cv::Scalar(0);

	binaryIterator = input.begin<VT_binary>();
	binaryEnd = input.end<VT_binary>();
	cv::MatIterator_<IT> labelsIterator = labels.begin<IT>();

	for( int index = 0; binaryIterator != binaryEnd; ++binaryIterator )
	{
		if( (*binaryIterator)[0] > 0 )
		{
			// Whichever label was the minority, we make that value white and all other values black
			unsigned char value = (*labelsIterator)[0];

			if( useZeros )
				if( value )
					value = 0;
				else
					value = 255;
			else
				if( value )
					value = 255;
				else
					value = 0;

			_letter.at<VT_binary>( binaryIterator.pos() ) = VT_binary( value );
			++labelsIterator;
		}
	}
	
	// Attempt to deal with any spurious locations that are left
	//	If we can be fairly confident that one of the blobs left is not a letter, remove it
	double confidence = 0.50; // how much we trust that the biggest blob is the letter
	binaryIpl = _letter;
	
	blobs = CBlobResult( &binaryIpl, NULL, 0 );
	blobs.GetNthBlob( CBlobGetArea(), 0, biggestBlob );
	
	blobs.Filter( blobs, B_EXCLUDE, CBlobGetArea(), B_GREATER_OR_EQUAL, biggestBlob.Area() * confidence );
	
	for (int i = 0; i < blobs.GetNumBlobs(); i++ )
	{
		currentBlob = blobs.GetBlob(i);
		currentBlob->FillBlob( &binaryIpl, cvScalar(0));
	}
}
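The nested if/else above simply inverts the k-means labels when the zero cluster is the majority. A compact equivalent of that rule, as a sketch under the same assumption that the letter is the minority cluster:

// returns 255 exactly when the label belongs to the minority cluster
static unsigned char minorityToWhite( int label, bool zerosAreMinority )
{
	return ( (label != 0) != zerosAreMinority ) ? 255 : 0;
}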
Example #8
/*
 * thread for displaying the opencv content
 */
void *cv_threadfunc (void *ptr) {
	IplImage* timg = cvCloneImage(rgbimg); // Image we do our processing on
	IplImage* dimg = cvCloneImage(rgbimg); // Image we draw on
	CvSize sz = cvSize( timg->width & -2, timg->height & -2);
	IplImage* outimg = cvCreateImage(sz, 8, 3);

	CvMemStorage* storage = cvCreateMemStorage(0);
	CvSeq* squares; // Sequence for squares - sets of 4 points
	CvSeq* contours; // Raw contours list
	CvSeq* result; // Single contour being processed

	CBlobResult blobs;
	CBlob *currentBlob;

	IplImage *pyr = cvCreateImage(cvSize(sz.width/2, sz.height/2), 8, 1);

	// Set region of interest
	cvSetImageROI(timg, cvRect(0, 0, sz.width, sz.height));
	cvSetImageROI(dimg, cvRect(0, 0, sz.width, sz.height));

	// Processing and contours
	while (1) {
		squares = cvCreateSeq(0, sizeof(CvSeq), sizeof(CvPoint), storage);

		pthread_mutex_lock( &mutex_rgb );
		cvCopy(rgbimg, dimg, 0);
		cvCopy(rgbimg, timg, 0);
		pthread_mutex_unlock( &mutex_rgb );

		// BLUR TEST
		// cvPyrDown(dimg, pyr, 7);
		// cvPyrUp(pyr, timg, 7);

		// DILATE TEST
		IplConvKernel* element = cvCreateStructuringElementEx(5, 5, 2, 2, 0);
		IplConvKernel* element2 = cvCreateStructuringElementEx(3, 3, 1, 1, 0);
		cvDilate(timg, timg, element, 2);
		cvErode(timg, timg, element2, 3);

		// THRESHOLD TEST 
		cvThreshold(timg, timg, 200, 255, CV_THRESH_BINARY);

		// Output processed or raw image.
		cvCvtColor(timg, outimg, CV_GRAY2BGR);

		// BLOB TEST
		blobs = CBlobResult( timg, (IplImage*)NULL, 0, true );
		// blobs.Filter( blobs, B_EXCLUDE, CBlobGetArea(), B_LESS, 50 );
		
		printf("Blobs: %d\n", blobs.GetNumBlobs());

		CBlob biggestBlob;
		blobs.GetNthBlob( CBlobGetArea(), 1, biggestBlob );
		biggestBlob.FillBlob( outimg, CV_RGB(255, 0, 0) );
		CvSeq* dest;
		biggestBlob.GetConvexHull(dest);
		
		// for (int i = 0; i < blobs.GetNumBlobs(); i++ )
		// {
		// 	currentBlob = blobs.GetBlob(i);
		// 	currentBlob->FillBlob( outimg, CV_RGB(255,0,0) );
		// }
		

//		// CONTOUR FINDING
//		cvFindContours(timg, storage, &contours, sizeof(CvContour), CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0));
//
//		while (contours)
//		{
//			// Approximate contour, accuracy proportional to perimeter of contour; may want to tune accuracy.
//			result = cvApproxPoly(contours, sizeof(CvContour), storage, CV_POLY_APPROX_DP, cvContourPerimeter(contours) * 0.02, 0);
//			// Filter small contours and contours w/o 4 vertices (filters noise, finds rectangles)
//			if (result->total == 4 && 
//				fabs(cvContourArea(result, CV_WHOLE_SEQ)) > 600 && 
//				cvCheckContourConvexity(result))
//			{
//				// Skipped checking whether angles were close to 90 degrees here; may want to implement.
//				// Probably also want to check if it's square enough to filter out ex. long windows.
//
//				for (int i = 0; i < 4; i++)
//				{
//					// Write vertices to output sequence
//					cvSeqPush(squares, (CvPoint*)cvGetSeqElem(result, i));
//				}
//			}
//
//			// Take next contour
//			contours = contours->h_next;
//		}
//
//
//		// DRAW RECTANGLES
//		CvSeqReader reader;
//		cvStartReadSeq(squares, &reader, 0);
//
//		// Read 4 points at a time
//		CvPoint pt[4];
//		CvPoint *rect = pt;
//		CvRect out[4];
//		CvRect *outrect = out;
//		for (int i = 0; i < squares->total; i += 4)
//		{
//			int count = 4;
//			
//			// Which point is which corner is unpredictable.
//			CV_READ_SEQ_ELEM(pt[0], reader); 
//			CV_READ_SEQ_ELEM(pt[1], reader);
//			CV_READ_SEQ_ELEM(pt[2], reader);
//			CV_READ_SEQ_ELEM(pt[3], reader);
//			// Draw rectangle on output
//			cvPolyLine(outimg, &rect, &count, 1, 1, CV_RGB(0,255,0), 1, CV_AA, 0);
//			// Make rectangles
//			// Print (temporary)
//			printf("Rect[0]: %d, %d\n", pt[0].x, pt[0].y);
//			printf("Rect[1]: %d, %d\n", pt[1].x, pt[1].y);
//			printf("Rect[2]: %d, %d\n", pt[2].x, pt[2].y);
//			printf("Rect[3]: %d, %d\n\n", pt[3].x, pt[3].y);
//			fflush(stdout);
//
//		}
//
		// Poll for a key press (15 ms); ESC is currently a no-op here
		if( cvWaitKey( 15 )==27 )
		{
		}

		cvShowImage (FREENECTOPENCV_WINDOW_N,outimg);
		cvClearMemStorage(storage);
	}
	pthread_exit(NULL);
}
Example #9
int32_t cc_getMaxBlob(uint32_t *qvals, uint32_t numRls, int16_t *bdata)
{
	
	int16_t* c_components = new int16_t[MAX_BLOBS*4];
	
	uint32_t result = 0;//, prebuf;	// initialised to 0: it was previously returned without being assigned
	
	CBlobAssembler blobber;
	
	int32_t row;
	uint32_t i, startCol, length;
	uint8_t model;

	for (i=0, row=-1; i<numRls; i++)
	{
			if (qvals[i]==0)
			{
					row++;
					continue;
			}
			model = qvals[i]&0x03;
			qvals[i] >>= 3;
			startCol = qvals[i]&0x1ff;
			qvals[i] >>= 9;
			length = qvals[i]&0x1ff;
			if(!handleRL(&blobber, model, row, startCol, length))
				break;
	}
	
	blobber.EndFrame();
	blobber.SortFinished();

	int16_t top, right, bottom, left;
	CBlob *blob;
	blob = blobber.finishedBlobs;
	if (blob && blob->GetArea()>MIN_AREA)	// null check added: finishedBlobs may be empty
	{
		blob->getBBox(left, top, right, bottom);
		bdata[0] = left;
		bdata[1] = right;
		bdata[2] = top;
		bdata[3] = bottom;
	}
	else 
		bdata[0] = -1;

#if 0	
	//
	// Take Finished blobs and return with chirp
	//
	CBlob *blob, *temp;
	blob = blobber.finishedBlobs;
	
	uint32_t cc_num = 0;
	temp = blob;
	while (temp)
	{
		int16_t top, right, bottom, left;
		temp->getBBox(left, top, right, bottom);
		
		// Don't want objects with area less than 9...
		if ((right-left)*(bottom-top) < 9)
			break;
		
		temp = temp->next;
		cc_num++;
	}
	
	// Remove the rest that we don't care about
	/*while(temp)
	{
		CBlob *next = temp->next;
		temp->~CBlob();
		temp = NULL;
		temp = next;
	}*/
	
	cc_num = (cc_num < 15) ? cc_num : MAX_BLOBS;

	// Populate return w/ result
	//void* mem = malloc(sizeof(int16_t)*cc_num*4);
	//if (mem == NULL)
	//	int i = 0;
	//free(mem);
	//int16_t* c_components = new int16_t[cc_num*4];
	//g_mem += sizeof(int16_t)*cc_num*4;
	memset((void *)c_components, 0, sizeof(uint16_t)*cc_num*4);

	for (int i = 0; i < cc_num; i++)
	{
		int16_t top, right, bottom, left;
		blob->getBBox(left, top, right, bottom);
		c_components[(i*4)+0] = top;
		c_components[(i*4)+1] = right;
		c_components[(i*4)+2] = bottom;
		c_components[(i*4)+3] = left;

		blob = blob->next;
	}
	
	//CRP_RETURN(chirp, USE_BUFFER(SRAM0_SIZE, SRAM0_LOC), HTYPE(0), UINT16(0), UINT16(0), UINTS8(0, 0), END);
	//prebuf = chirp->getPreBufLen();
#endif
	
	blobber.Reset();
		
	delete[] c_components;
	//g_mem -= sizeof(int16_t)*cc_num*4;

	return result;
}
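The decode loop above implies a run-length word layout of model in the low 3 bits (a word of 0 marking end-of-row), then 9 bits of start column, then 9 bits of run length. A hypothetical encoder matching that unpacking:

#include <stdint.h>

uint32_t packRL(uint8_t model, uint16_t startCol, uint16_t length)
{
    return (uint32_t)(model & 0x07)              // low 3 bits: model
         | ((uint32_t)(startCol & 0x1ff) << 3)   // next 9 bits: start column
         | ((uint32_t)(length & 0x1ff) << 12);   // next 9 bits: run length
}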
Example #10
 int main()  
 {  
     CBlobResult blobs;    
     CBlob *currentBlob;   
     CvPoint pt1, pt2;  
     CvRect cvRect;  
     int key = 0;  
     IplImage* frame = 0;  
   
     // Initialize capturing live feed from video file or camera  
     CvCapture* capture = cvCaptureFromFile( "MOV.MPG" );  
   
     // Can't get device? Complain and quit  
     if( !capture )  
     {  
         printf( "Could not initialize capturing...\n" );  
         return -1;  
     }  
   
     // Get the frames per second (only after the capture is known to be valid)  
     int fps = ( int )cvGetCaptureProperty( capture,  
                                            CV_CAP_PROP_FPS );  
   
     // Windows used to display input video with bounding rectangles  
     // and the thresholded video  
     cvNamedWindow( "video" );  
     cvNamedWindow( "thresh" );        
   
     // An infinite loop  
     while( key != 'x' ) 
     { 
         // If we couldn't grab a frame... quit  
         if( !( frame = cvQueryFrame( capture ) ) )  
             break;        
   
         // Get object's thresholded image (blue = white, rest = black)  
         IplImage* imgThresh = GetThresholdedImageHSV( frame );        
   
         // Detect the white blobs from the black background  
         blobs = CBlobResult( imgThresh, NULL, 0 );    
   
         // Exclude white blobs smaller than the given value (10)    
         // The bigger the last parameter, the bigger the blobs need    
         // to be for inclusion    
         blobs.Filter( blobs,  
                       B_EXCLUDE,  
                       CBlobGetArea(),  
                       B_LESS,  
                       10 );           
   
         // Attach a bounding rectangle for each blob discovered  
         int num_blobs = blobs.GetNumBlobs();  
   
         for ( int i = 0; i < num_blobs; i++ )    
         {                 
             currentBlob = blobs.GetBlob( i );               
             cvRect = currentBlob->GetBoundingBox();  
   
             pt1.x = cvRect.x;  
             pt1.y = cvRect.y;  
             pt2.x = cvRect.x + cvRect.width;  
             pt2.y = cvRect.y + cvRect.height;  
   
             // Attach bounding rect to blob in original video input  
             cvRectangle( frame,  
                          pt1,   
                          pt2,  
                          cvScalar(0, 0, 0, 0),  
                          1,  
                          8,  
                          0 );  
         }  
   
         // Add the black and white and original images  
         cvShowImage( "thresh", imgThresh );  
         cvShowImage( "video", frame );  
   
         // Optional - used to slow down the display of frames  
         key = cvWaitKey( 2000 / fps );  
   
         // Prevent memory leaks by releasing thresholded image  
         cvReleaseImage( &imgThresh );        
     }  
   
     // We're through with using camera.   
     cvReleaseCapture( &capture );  
   
     return 0;  
 }  
Example #11
 int main() {
  CvPoint pt1,pt2;
  CvRect regt;
   CvCapture* capture = cvCaptureFromCAM( CV_CAP_ANY );
   if ( !capture ) {
     fprintf(stderr, "ERROR: capture is NULL \n" );
     getchar();
     return -1;
   }
  cvSetCaptureProperty(capture,CV_CAP_PROP_FRAME_HEIGHT,144);
  cvSetCaptureProperty(capture,CV_CAP_PROP_FRAME_WIDTH,216);	 
  // Create a window in which the captured images will be presented
   cvNamedWindow( "mywindow", CV_WINDOW_AUTOSIZE );
   // Show the image captured from the camera in the window and repeat
   while ( 1 ) {
     // Get one frame
     IplImage* frame = cvQueryFrame( capture );
     if ( !frame ) {
       fprintf( stderr, "ERROR: frame is null...\n" );
       getchar();
       break;
     }
     int modfheight, modfwidth;

     modfheight = frame->height;
     modfwidth = frame->width;
     // create modified frame with 1/4th the original size
     IplImage* modframe = cvCreateImage(cvSize((int)(modfwidth/4),(int)(modfheight/4)),frame->depth,frame->nChannels); //cvCreateImage(size of frame, depth, noofchannels)
     cvResize(frame, modframe,CV_INTER_LINEAR);
     // create HSV(Hue, Saturation, Value) frame
     IplImage* hsvframe = cvCreateImage(cvGetSize(modframe),8, 3);
     cvCvtColor(modframe, hsvframe, CV_BGR2HSV); //cvCvtColor(input frame,outputframe,method)
     // create a frame within threshold. 
     IplImage* threshframe = cvCreateImage(cvGetSize(hsvframe),8,1);
     cvInRangeS(hsvframe,cvScalar(30, 25, 150),cvScalar(60, 60, 220),threshframe); //cvInRangeS(input frame, cvScalar(min range),cvScalar(max range),output frame)
     // created dilated image
     IplImage* dilframe = cvCreateImage(cvGetSize(threshframe),8,1);
     cvDilate(threshframe,dilframe,NULL,2); //cvDilate(input frame, output frame, mask, number of times to dilate)

     CBlobResult blobs; 
     blobs = CBlobResult(dilframe,NULL,0); // CBlobresult(inputframe, mask, threshold) Will filter all white parts of image
     blobs.Filter(blobs,B_EXCLUDE,CBlobGetArea(),B_LESS,50);//blobs.Filter(input, cond, criteria, cond, const) Filter all images whose area is less than 50 pixels
     CBlob biggestblob;
     blobs.GetNthBlob(CBlobGetArea(),0,biggestblob); //GetNthBlob(criteria, number, output) Get only  the largest blob based on CblobGetArea()
     // get the two corner points defining the rectangle; the ×4 maps the quarter-size coordinates back to the full frame
     pt1.x = biggestblob.MinX()*4;
     pt1.y = biggestblob.MinY()*4;
     pt2.x = biggestblob.MaxX()*4;
     pt2.y = biggestblob.MaxY()*4;
     cvRectangle(frame,pt1,pt2,cvScalar(255,0,0),1,8,0); // draw rectangle around the biggest blob

     cvShowImage( "mywindow", frame); // show output image
     // Do not release the frame!
     //If ESC key pressed, Key=0x10001B under OpenCV 0.9.7(linux version),
     //remove higher bits using AND operator
     if ( (cvWaitKey(10) & 255) == 27 ) break;
   }
   // Release the capture device housekeeping
   cvReleaseCapture( &capture );
   cvDestroyWindow( "mywindow" );
   return 0;
 }
double findShadow(IplImage *l_img, int hue,int sat,int val,int threshold, double blobLowLimit,double blobHighLimit){
	// Input: the HSV value of the color blob being sought, an acceptable threshold for that color,
	// and the min and max blob sizes of interest.
	// Output: the number of blobs found; the per-blob centers and labels are written to the
	// xCent/yCent/valCent arrays (the original comment described an older array-based interface).
	// Image variables
	IplImage* local_copy = cvCloneImage(l_img);
	IplImage* imageSmooth = cvCreateImage( cvGetSize(l_img),8,3);//Gausian Filtered image
	IplImage* imageSuperSmooth = cvCreateImage( cvGetSize(l_img),8,3);//Gausian Filtered image
	IplImage* imageHSV = cvCreateImage( cvGetSize(l_img),8,3); //HSV image
	IplImage* i1 = cvCreateImage( cvGetSize(l_img),8,1);//desired color filtered image
	IplImage* i2 = cvCreateImage( cvGetSize(l_img),8,1);//desired color filtered image
	IplImage* i_ts = cvCreateImage( cvGetSize(l_img),8,1);//desired color filtered image
	IplImage* planeH = cvCreateImage(cvGetSize(l_img),8,1); //Hue
	IplImage* planeS = cvCreateImage(cvGetSize(l_img),8,1); //Saturation
	IplImage* planeV = cvCreateImage(cvGetSize(l_img),8,1); //Brightness
	IplImage* planeSmoothV = cvCreateImage(cvGetSize(l_img),8,1); //Brightness
	IplImage* imageSmoothHSV = cvCreateImage( cvGetSize(l_img),8,3); //HSV image
	IplImage* obsdetmask = cvCreateImage( cvGetSize(l_img),8,1); //Obs det mask
	IplImage* obsdetmask_dil = cvCreateImage( cvGetSize(l_img),8,1); //Obs det mask
	IplImage* obsdetmask_b = cvCreateImage( cvGetSize(l_img),8,1); //Obs det mask
	IplImage* obsdetmask_bdil = cvCreateImage( cvGetSize(l_img),8,1); //Obs det mask
	//Blob variables
	CBlobResult mask_bls;
	CBlob	mask_bl;
	CBlobResult blobs;
	CBlob blob;
	CBlobResult blobs1;
	CBlob blob1;
	CBlobGetXCenter getXCenter;
	CBlobGetYCenter getYCenter;
	//Output Variable
	//Gausian Filter
	cvSmooth(l_img,imageSmooth,CV_GAUSSIAN,13,13,0,0);
	cvSmooth(l_img,imageSuperSmooth,CV_GAUSSIAN,41,41,0,0);
	//cvShowImage("View2a",imageSmooth);
	
	
	
	//Covert RGB to HSV
	cvCvtColor(imageSmooth,imageHSV,CV_BGR2HSV);
	cvCvtColor(imageSuperSmooth,imageSmoothHSV,CV_BGR2HSV);
	cvCvtPixToPlane(imageSuperSmooth,NULL,NULL,planeSmoothV,0);
	cvCvtPixToPlane(imageHSV, planeH,planeS,planeV,0);//Extract the 3 color components
	cvSetImageROI(imageHSV,cvRect(0,imageHSV->height/3,imageHSV->width,imageHSV->height*2/3));
	IplImage* planeH1 = cvCreateImage(cvGetSize(imageHSV),8,1); //Hue
	IplImage* planeS1 = cvCreateImage(cvGetSize(imageHSV),8,1); //Saturation
	IplImage* planeV1 = cvCreateImage(cvGetSize(imageHSV),8,1); //Brightness
	cvCvtPixToPlane(imageHSV, planeH1,planeS1,planeV1,0);//Extract the 3 color components
	cvResetImageROI(imageHSV);
	
	
	cvShowImage("Dark_Value",planeV);
	cvShowImage("Dark_Sat",planeS);
	cvShowImage("Dark_Hue",planeH);
	cvSet(obsdetmask, cvScalar(0,0,0));
	cv::waitKey(3);
	
	
	int maxDark = 0;
	int minDark = 255;
	int minDarknessValue=0;
	int maxDarknessValue = 0;
	int midDarknessValue = 0;
	//Filter image for desired Color, output image with only desired color highlighted remaining
	for( int y = 0; y < planeH1->height; y++ ){
		unsigned char* h = &CV_IMAGE_ELEM( planeH1, unsigned char, y, 0 );
		unsigned char* s = &CV_IMAGE_ELEM( planeS1, unsigned char, y, 0 );
		unsigned char* v = &CV_IMAGE_ELEM( planeV1, unsigned char, y, 0 );
		for( int x = 0; x < planeH1->width*planeH1->nChannels; x += planeH1->nChannels ){
		  //if(x<5){ROS_INFO("hsv[x] is %d,%d,%d",h[x],v[x],x]);}
			//int f= HSV_filter(h[x],s[x],v[x],threshold,minDarknessValue,maxDarknessValue,midDarknessValue,hue,sat,val);
			int diff = abs((h[x]-hue));
			if(((diff < threshold)||(v[x]<MIN_BRIGHT)||(s[x]<MIN_SAT)))
			{ 
			  ((uchar *)(obsdetmask->imageData + (y+planeH->height-planeH1->height)*obsdetmask->widthStep))[x]=255;
			   if(v[x]<minDark)
			   {minDark=v[x];}
			    if(v[x]>maxDark)
			    {maxDark=v[x];}
			}
			else
			{
			  ((uchar *)(obsdetmask->imageData + (y+planeH->height-planeH1->height)*obsdetmask->widthStep))[x]=0;
			}
		}
	}//debug
	cvDilate(obsdetmask,obsdetmask_dil,NULL,1);
	cvShowImage("Dark_ObsDetPre",obsdetmask_dil);
	mask_bls = CBlobResult(obsdetmask_dil,NULL,0);
	mask_bls.Filter(mask_bls,B_EXCLUDE,CBlobGetArea(),B_LESS,MASK_MIN_BLOB); // Filter Blobs with min and max size
	mask_bls.GetNthBlob( CBlobGetArea(), 0, mask_bl );
	cvSet(obsdetmask_b, cvScalar(0,0,0));
	mask_bl.FillBlob(obsdetmask_b,CV_RGB(255,255,255));
	cvDilate(obsdetmask_b,obsdetmask_bdil,NULL,5);
	cvShowImage("Dark_ObsDet",obsdetmask_bdil);
	cvWaitKey(3);
	minDarknessValue=((maxDark-minDark)*LOW_PERCENT)+minDark;
	if(minDarknessValue<VALUE_LOW_LIM){minDarknessValue=VALUE_LOW_LIM;}
	maxDarknessValue=(maxDark)-((maxDark-minDark)*HIGH_PERCENT);
	midDarknessValue = .5*(minDarknessValue+maxDarknessValue);
	ROS_INFO("minDark = %d, maxDark = %d, minDV = %d, maxDV = %d",minDark,maxDark,minDarknessValue,maxDarknessValue);
	for( int y = 0; y < planeH->height; y++ ){
		unsigned char* h = &CV_IMAGE_ELEM( planeH, unsigned char, y, 0 );
		unsigned char* s = &CV_IMAGE_ELEM( planeS, unsigned char, y, 0 );
		unsigned char* v = &CV_IMAGE_ELEM( planeV, unsigned char, y, 0 );
		unsigned char* m = &CV_IMAGE_ELEM( obsdetmask_bdil, unsigned char, y, 0 );
		for( int x = 0; x < planeH->width*planeH->nChannels; x += planeH->nChannels ){
		  //if(x<5){ROS_INFO("hsv[x] is %d,%d,%d",h[x],v[x],x]);}
			 int f = HSV_filter(h[x],s[x],v[x],m[x],threshold,minDarknessValue,maxDarknessValue,midDarknessValue,hue,sat,val);
			 if((f==0))//Non-floor
			 {
				 ((uchar *)(i1->imageData + y*i1->widthStep))[x]=0;
				 ((uchar *)(i_ts->imageData + y*i_ts->widthStep))[x]=0;
				 ((uchar *)(i2->imageData + y*i2->widthStep))[x]=0;
			 }
			 else if(f==1)	//dark
			 {
				((uchar *)(i1->imageData + y*i1->widthStep))[x]=255;
				((uchar *)(i_ts->imageData + y*i_ts->widthStep))[x]=64;
				((uchar *)(i2->imageData + y*i2->widthStep))[x]=0;
			 }
			 else if(f==2)
			 {
				((uchar *)(i_ts->imageData + y*i_ts->widthStep))[x]=128;
				((uchar *)(i1->imageData + y*i1->widthStep))[x]=0;
				((uchar *)(i2->imageData + y*i2->widthStep))[x]=0;
			 }
			 else if(f==3)
			 {
				((uchar *)(i_ts->imageData + y*i_ts->widthStep))[x]=196;
				((uchar *)(i1->imageData + y*i1->widthStep))[x]=0;
				((uchar *)(i2->imageData + y*i2->widthStep))[x]=0;
			   
			 }
			 else if(f==4)	//bright
			 {
				 ((uchar *)(i_ts->imageData + y*i_ts->widthStep))[x]=255;
				 ((uchar *)(i1->imageData + y*i1->widthStep))[x]=0;
				 ((uchar *)(i2->imageData + y*i2->widthStep))[x]=255;
			 }else{	
			   
			 }
		}
	}

	
	cvShowImage("Dark_Triscale",i_ts);
	cvWaitKey(3);
	//Blob stuff
	blobs = CBlobResult(i1,NULL,0);   //Get blobs of image
	blobs1 =CBlobResult(i2,NULL,0);
	blobs.Filter(blobs,B_INCLUDE,CBlobGetArea(),B_INSIDE,blobLowLimit,blobHighLimit);  // Filter Blobs with min and max size
	blobs1.Filter(blobs1,B_INCLUDE,CBlobGetArea(),B_INSIDE,blobLowLimit,blobHighLimit);
	//Set up data array
	xCent = new int[blobs.GetNumBlobs()+blobs1.GetNumBlobs()];
	yCent = new int[blobs.GetNumBlobs()+blobs1.GetNumBlobs()];
	valCent = new int[blobs.GetNumBlobs()+blobs1.GetNumBlobs()];
	
	ROS_INFO("size:%d  ",blobs.GetNumBlobs()+blobs1.GetNumBlobs());
	double data;
	if(maxDark>190)
	{
	 data=blobs.GetNumBlobs()+blobs1.GetNumBlobs();// Set first data value to total number of blobs
	//cout<<data[0]<<"  ";
	int k=0;
	//ROS_INFO("Blobs gotten.");
	cvWaitKey(3);
	for (int i = 0; i < blobs.GetNumBlobs(); i++ )
	{ // Get Blob Data 
	    blob = blobs.GetBlob(i);//cycle through each blob
		//data[i*3+1]=blob.area;//blob area
		xCent[i]= getXCenter(blob); // X center
		yCent[i]= getYCenter(blob); // Y center
		valCent[i]= 1; // label: dark blob
		//debug
		blob.FillBlob(local_copy, cvScalar(255, 0, 0)); // visual marker on the image for this blob, useful for testing
	}
      //ROS_INFO("loop 1 done.");
      cvWaitKey(3);
	for (int i = 0; i < blobs1.GetNumBlobs(); i++ )
	{ // Get Blob Data 
		blob = blobs1.GetBlob(i);//cycle through each blob
		//data[i*3+1]=blob.area;//blob area
		xCent[blobs.GetNumBlobs()+i]= getXCenter(blob); // X center
		yCent[blobs.GetNumBlobs()+i]= getYCenter(blob); // Y center
		valCent[blobs.GetNumBlobs()+i]= -1; // label: bright blob
		//debug
		blob.FillBlob(local_copy, cvScalar(0, 255, 0)); // visual marker on the image for this blob, useful for testing
	}
	  
	}else{
    //
	data=blobs.GetNumBlobs();// Set first data value to total number of blobs
	//cout<<data[0]<<"  ";
	int k=0;
	//ROS_INFO("Blobs gotten.");
	cvWaitKey(3);
	for (int i = 0; i < blobs.GetNumBlobs(); i++ )
	{ // Get Blob Data 
	    blob = blobs.GetBlob(i);//cycle through each blob
		//data[i*3+1]=blob.area;//blob area
		xCent[i]= getXCenter(blob); // X center
		yCent[i]= getYCenter(blob); // Y center
		valCent[i]= 1; // label: dark blob
		//debug
		blob.FillBlob(local_copy, cvScalar(255, 0, 0)); // visual marker on the image for this blob, useful for testing
	}
   
   
      }
   cvShowImage("Dark_Detected",local_copy);
    //cv::imshow("View",cv_ptr->image);
    cv::waitKey(3);
    
    
      cvReleaseImage(&local_copy);
      cvReleaseImage(&imageSmooth);
      cvReleaseImage(&imageSuperSmooth);
      cvReleaseImage(&imageHSV);
      cvReleaseImage(&i1);
      cvReleaseImage(&i2);
      cvReleaseImage(&planeSmoothV);
      cvReleaseImage(&imageSmoothHSV);
      cvReleaseImage(&i_ts);
      cvReleaseImage(&planeH);
      cvReleaseImage(&planeS);
      cvReleaseImage(&planeV);
      cvReleaseImage(&planeH1);
      cvReleaseImage(&planeS1);
      cvReleaseImage(&planeV1);
      cvReleaseImage(&obsdetmask);
      cvReleaseImage(&obsdetmask_dil);
      cvReleaseImage(&obsdetmask_b);
      cvReleaseImage(&obsdetmask_bdil);
      return data; // return the number of blobs found (see the note in the header comment)
}
Example #13
	void locator()
	{
		namedWindow("Tracking");
		int hMin, hMax, sMin, sMax, vMin, vMax,area_min;
		hMin = 0;
		//hMax = 124; // night values/???
		hMax = 255;
		//sMin = 95;
		sMin = 126;
		sMax = 255;
		//vMin = 139;
		vMin = 173;
		vMax = 255;
		area_min = 100;
		Mat smoothed, hsvImg, t_img;
		createTrackbar("blob min area","Tracking" ,&area_min ,1000);
		createTrackbar("Hue Min", "Tracking", &hMin, 255);
		createTrackbar("Hue Max", "Tracking", &hMax, 255);
		createTrackbar("Sat Min", "Tracking", &sMin, 255);
		createTrackbar("Sat Max", "Tracking", &sMax, 255);
		createTrackbar("Val Min", "Tracking", &vMin, 255);
		createTrackbar("Val MaX", "Tracking", &vMax, 255);
		while(ros::ok())
		{
			Mat source = imageB;
			Mat copy = imageB.clone();
			GaussianBlur(source, smoothed, Size(9,9), 4);
			cvtColor(smoothed, hsvImg, CV_BGR2HSV);
			inRange(hsvImg, Scalar(hMin, sMin, vMin), Scalar(hMax, sMax, vMax), t_img);

			CBlobResult blob;
			IplImage i_img = t_img;
			blob = CBlobResult(&i_img,NULL,0);
			int num_blobs = blob.GetNumBlobs();

			blob.Filter(blob, B_INCLUDE, CBlobGetArea(), B_INSIDE, area_min, blob_area_absolute_max_);
			num_blobs = blob.GetNumBlobs();

			std::string reference_frame = "/virtual_table"; // Table frame at ball_radius above the actual table plane

			tf::StampedTransform transform;
			tf_.waitForTransform(reference_frame, model.tfFrame(), ros::Time(0), ros::Duration(0.5));
			tf_.lookupTransform(reference_frame, model.tfFrame(), ros::Time(0), transform);

			for(int i =0;i<num_blobs;i++)
			{
				CBlob* bl = blob.GetBlob(i);
				Point2d uv(CBlobGetXCenter()(*bl), CBlobGetYCenter()(*bl));
				//Use the width as the height
				uv.y = bl->MinY() + (bl->MaxX() - bl->MinX()) * 0.5;
				circle(copy,uv,50,Scalar(255,0,0),5);

				cv::Point3d xyz;
				model.projectPixelTo3dRay(uv, xyz);
		
				// Intersect ray with plane in virtual table frame
				//Origin of camera frame wrt virtual table frame
				tf::Point P0 = transform.getOrigin();
				//Point at end of unit ray wrt virtual table frame
				tf::Point P1 = transform * tf::Point(xyz.x, xyz.y, xyz.z);
				// Origin of virtual table frame
				tf::Point V0 = tf::Point(0.0,0.0,0.0);
				// normal to the table plane
				tf::Vector3 n(0, 0, 1);
				// finding scaling value
				double scale = (n.dot(V0-P0))/(n.dot(P1-P0));
				tf::Point ball_pos = P0 + (P1-P0)*scale;
				cout <<ball_pos.x() << " " << ball_pos.y() << " " << ball_pos.z() <<endl;
			}
			imshow(WINDOW, copy);
			waitKey(3);

			imshow("edited", t_img);
			waitKey(3);

			ros::spinOnce();
		}
	}
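The scaling step in locator() is the standard ray-plane intersection: a point on the ray is P = P0 + t(P1 − P0), and requiring n·(P − V0) = 0 gives t = n·(V0 − P0) / n·(P1 − P0). A standalone sketch of the same formula (the small vector type is illustrative, not from the original):

struct Vec3 { double x, y, z; };

static double dot(const Vec3 &a, const Vec3 &b) { return a.x*b.x + a.y*b.y + a.z*b.z; }
static Vec3 sub(const Vec3 &a, const Vec3 &b) { Vec3 r = { a.x - b.x, a.y - b.y, a.z - b.z }; return r; }

// intersection of the ray P0->P1 with the plane through V0 with normal n;
// the caller must ensure the ray is not parallel to the plane (denominator != 0)
Vec3 intersectRayPlane(const Vec3 &P0, const Vec3 &P1, const Vec3 &V0, const Vec3 &n)
{
    Vec3 d = sub(P1, P0);
    double t = dot(n, sub(V0, P0)) / dot(n, d);
    Vec3 p = { P0.x + d.x * t, P0.y + d.y * t, P0.z + d.z * t };
    return p;
}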
IplImage* blobDetection2(IplImage* imgThreshRed, IplImage* imgThreshGreen) {
    // get blobs and filter them using their area
    int i, j;
    int areaBlob = 100; // minimum blob area; restored, since it is used below
    float distMark = 10;
    CBlobResult blobsRed, blobsGreen, whiteRedBlobs, whiteGreenBlobs;
    CBlob *currentBlob;
    double px, py;

    // Create Image
    IplImage* displayedImage = cvCreateImage(cvGetSize(imgThreshRed), IPL_DEPTH_8U, 3);

    // find all the RED related blobs in the image
    blobsRed = CBlobResult(imgThreshRed, NULL, 0);
    // find all the GREEN related blobs in the image
    blobsGreen = CBlobResult(imgThreshGreen, NULL, 0);

    // keep only blobs with area >= 1 (discard degenerate specks) and put
    // them in the whiteRedBlobs / whiteGreenBlobs variables
    blobsRed.Filter(whiteRedBlobs, B_EXCLUDE, CBlobGetArea(), B_LESS, 1.0);
    blobsGreen.Filter(whiteGreenBlobs, B_EXCLUDE, CBlobGetArea(), B_LESS, 1.0);

#ifdef DEBUG_PRINT    
    printf("White Blobs: %d\n", whiteBlobs.GetNumBlobs());
#endif

    // display filtered blobs
    cvMerge(imgThreshRed, imgThreshRed, imgThreshRed, NULL, displayedImage);

    // RED
    CvPoint2D32f redCenter[whiteRedBlobs.GetNumBlobs()];

    for (i = 0; i < whiteRedBlobs.GetNumBlobs(); i++) {
        currentBlob = whiteRedBlobs.GetBlob(i);
        px = (currentBlob->MaxX() + currentBlob->MinX()) / 2.0;
        py = (currentBlob->MaxY() + currentBlob->MinY()) / 2.0;
        redCenter[i] = cvPoint2D32f(px, py);

#ifdef DEBUG_PRINT    
        printf("%2.2f\t%2.2f\n", px, py);
#endif

        if (currentBlob->Area() > areaBlob) {
            // Add Cross to the image
            currentBlob->FillBlob(displayedImage, CV_RGB(255, 0, 0));
            cvCircle(displayedImage, cvPointFrom32f(redCenter[i]), 2, cvScalar(255, 0, 0), 10, 8, 0);
        }
    }

    // GREEN
    CvPoint2D32f greenCenter[whiteGreenBlobs.GetNumBlobs()];

    for (i = 0; i < whiteGreenBlobs.GetNumBlobs(); i++) {
        currentBlob = whiteGreenBlobs.GetBlob(i);
        px = (currentBlob->MaxX() + currentBlob->MinX()) / 2.0;
        py = (currentBlob->MaxY() + currentBlob->MinY()) / 2.0;
        greenCenter[i] = cvPoint2D32f(px, py);

#ifdef DEBUG_PRINT    
        printf("%2.2f\t%2.2f\n", px, py);
#endif

        if (currentBlob->Area() > areaBlob) {
            // Add Cross to the image
            currentBlob->FillBlob(displayedImage, CV_RGB(255, 0, 0));
            cvCircle(displayedImage, cvPointFrom32f(greenCenter[i]), 2, cvScalar(0, 255, 0), 10, 8, 0);
        }
    }

    // Populating the list of potential robots
    RobotList potRobList;
    potRobList.robNum = 0;

    for (i = 0; i < robMax; i++)
        potRobList.robList[i].active = 0;

    int redUsage[whiteRedBlobs.GetNumBlobs()];
    int greenUsage[whiteGreenBlobs.GetNumBlobs()];

    for (i = 0; i < whiteRedBlobs.GetNumBlobs(); i++)
        redUsage[i] = 0;

    for (j = 0; j < whiteGreenBlobs.GetNumBlobs(); j++)
        greenUsage[j] = 0;



    // Detect Robots
    float distCenter[whiteRedBlobs.GetNumBlobs()][whiteGreenBlobs.GetNumBlobs()];
    for (i = 0; i < min(whiteRedBlobs.GetNumBlobs(), robMax); i++) {
        currentBlob = whiteRedBlobs.GetBlob(i);
        if (currentBlob->Area() > areaBlob) {
            for (j = 0; j < min(whiteGreenBlobs.GetNumBlobs(), robMax); j++) {
                currentBlob = whiteGreenBlobs.GetBlob(j);
                if (currentBlob->Area() > areaBlob) {
                    distCenter[i][j] = computeDist(redCenter[i], greenCenter[j]);
                    //printf("[%d] - [%d]: %2.2f\n", i, j, distCenter[i][j]);
                    //printf("[%d] - [%d]: %2.2f\n", i, j, distCenter[i][j]);
                    // Print a connection line if this could be a robot
                    if (redUsage[i] == 0 && greenUsage[j] == 0 && checkDistMarker(distCenter[i][j], distMark)) {
                        cvLine(displayedImage, cvPointFrom32f(redCenter[i]), cvPointFrom32f(greenCenter[j]), cvScalar(0, 255, 255), 2, 8, 0);
                        // Check Robot
                        potRobList.robList[potRobList.robNum] = createRobot(redCenter[i], greenCenter[j]);

                        potRobList.robNum++;
                        redUsage[i] = 1;
                        greenUsage[j] = 1;
                        //                        printRobot(potRobList.robList[potRobList.robNum - 1]);


                        CvBox2D tmp;
                        tmp.angle = potRobList.robList[potRobList.robNum - 1].orientation;
                        tmp.center = potRobList.robList[potRobList.robNum - 1].center;
                        tmp.size = cvSize2D32f(30, 50);
                        cvEllipseBox(displayedImage, tmp, cvScalar(255, 255, 0), 4, 3, 0);
                        //			printRobot(potRobList.robList[potRobList.robNum-1]);

                    }

                }
            }
        }
    }


    // Matching The List of Potential Robots with previous List of Robots
    //    updateRobotListAndrea(&avRobList, potRobList);
    updateRobotList(&avRobList, potRobList);
    makelistRobot();
    for (int i = 0; i < robMax; i++)
        printf("Robot %d : (%f, %f, %d)\n", i, avRobList.robList[i].coord.x, avRobList.robList[i].coord.y, avRobList.robList[i].lost);

    /*
        // Print robots
        for (i = 0; i < robMax; i++) {
            if (avRobList.robList[i].active == 1) {
                CvBox2D tmp;
                tmp.angle = avRobList.robList[i].orientation;
                tmp.center = avRobList.robList[i].center;
                tmp.size = cvSize2D32f(50, 30);
                cvEllipseBox(displayedImage, tmp, cvScalar(255, 255, 0), 4, 3, 0);
                printRobot(avRobList.robList[i]);
            }
        }
     */

    /* Control Law */

    return displayedImage;
}
Example #15
/* Find the center of a given blob. */
CvPoint MarkerCapture::blob_center(CBlob blob){
    CvPoint point;
    point.x = blob.GetBoundingBox().x + (blob.GetBoundingBox().width / 2);
    point.y = blob.GetBoundingBox().y + (blob.GetBoundingBox().height / 2);
    return point;
}
Example #16
float thresholdSegmentation(Rect r, ntk::RGBDImage* current_frame, Mat& dst){
	Mat depth = current_frame->depth();
	Rect& rr = r;
	Mat depthROI = depth(rr), maskROI;
	Mat& rDepthROI = depthROI, &rMaskROI = maskROI;
	double var = 0.3;

	// maskROI for nonZero values in the Face Region
	inRange(depthROI, Scalar::all(0.001), Scalar::all(255), maskROI);
	// Mean depth of Face Region
	Scalar mFace = cv::mean(rDepthROI, rMaskROI);
	//mFace[0]  = mFace[0] - mFace[0] * var;
	inRange(depthROI, Scalar::all(0.001), mFace, maskROI);
	mFace = cv::mean(rDepthROI, rMaskROI);
	//inRange(depthROI, Scalar::all(0.001), mFace, maskROI);
	//mFace = cv::mean(depthROI, maskROI);

	// Build the full-frame mask of everything nearer than the face's mean depth
	inRange(depth, Scalar::all(0.001), mFace, dst);
	Mat rgbImage = current_frame->rgb();
	Mat outFrame = Mat::zeros(rgbImage.size(), rgbImage.type());
	rgbImage.copyTo(outFrame, dst);
	Mat outFrameROI = outFrame(r);
	
	imshow("ROI", outFrameROI);
	//imshow("thresholdSeg", dst);

	// For debugging cvblobslib, display the masked color image
	//imshow("faceROI", maskROI);
	imshow("faceROI", outFrameROI);
	// Save the ROI so cvblobslib can reload it as an 8-bit IplImage below
	bool iswrite = imwrite("faceROI.png", outFrameROI);
	if (!iswrite)
		printf("Failed to write faceROI.png\n");

	// ---- Blob segmentation on the saved mask using cvblobslib ----
	//visualizeBlobs("faceROI.png", "faceROI");

	// First attempt (not successful): threshold the mask in place
	//Mat maskROIThr = cvCreateMat(maskROI.rows, maskROI.cols, CV_8UC1);
	//maskROIThr = maskROI;
	//IplImage imgMaskROIThr = maskROIThr;
	//IplImage* pImgMaskROIThr = &imgMaskROIThr;
	//cvThreshold(pImgMaskROIThr, pImgMaskROIThr, 0.1, 255, CV_THRESH_BINARY_INV);

	// Second attempt: reload the saved image and threshold it
	IplImage* original = cvLoadImage("faceROI.png", 0);
	IplImage* originalThr = cvCreateImage(cvGetSize(original), IPL_DEPTH_8U, 1);
	IplImage* displayBiggestBlob = cvCreateImage(cvGetSize(original), IPL_DEPTH_8U, 3);
	CBlobResult blobs;
	CBlob biggestBlob;
	cvThreshold(original, originalThr, 0.1, 255, CV_THRESH_BINARY_INV);
	blobs =  CBlobResult( originalThr, NULL, 255);
	printf("%d blobs \n", blobs.GetNumBlobs());
	blobs.GetNthBlob(CBlobGetArea(), 0, biggestBlob);
	biggestBlob.FillBlob(displayBiggestBlob, CV_RGB(255, 0, 0));

	// Draw the ellipse and bounding rectangle of the blob
	Mat mat(displayBiggestBlob);

	cv::RotatedRect blobEllipseContour = biggestBlob.GetEllipse();
	cv::Rect blobRectContour = biggestBlob.GetBoundingBox();
	cv::ellipse(mat, blobEllipseContour, cv::Scalar(0, 255, 0), 3, CV_AA);
	cv::rectangle(mat, blobRectContour, cv::Scalar(255, 0, 0), 3, CV_AA);
	// Convert the ellipse angle into a head orientation in degrees
	float headOrientation = blobEllipseContour.angle;
	if (headOrientation <= 180)
		headOrientation = headOrientation - 90;
	else
		headOrientation = headOrientation - 270;
	cv::putText(mat,
			cv::format("%f degree", headOrientation),
			Point(10, 20), 0, 0.5, Scalar(255, 0, 0, 255));

	cv::imshow("faceROI", mat);

	// Release the images created for the cvblobslib pass
	cvReleaseImage(&original);
	cvReleaseImage(&originalThr);
	cvReleaseImage(&displayBiggestBlob);

	return headOrientation;
}
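The segmentation above is a two-pass mean-depth cut: mask the valid depths, take their mean over the face ROI, re-threshold to everything nearer than that mean, and repeat once to tighten the estimate. A minimal standalone sketch of that idea, assuming OpenCV 3+ headers and a CV_32F depth map where 0 marks invalid pixels (the function name is illustrative):

#include <cfloat>
#include <opencv2/core.hpp>

// Iterative "nearer than the mean" depth mask.
cv::Mat nearerThanMeanMask(const cv::Mat& depth, int iterations = 2)
{
    cv::Mat mask;
    cv::inRange(depth, cv::Scalar::all(0.001), cv::Scalar::all(FLT_MAX), mask);
    for (int k = 0; k < iterations; k++) {
        cv::Scalar m = cv::mean(depth, mask);                 // mean over the current mask
        cv::inRange(depth, cv::Scalar::all(0.001), m, mask);  // keep only the nearer pixels
    }
    return mask;
}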
Example #17
/**
- FUNCTION: Moment
- FUNCTIONALITY: Calculates the pq moment of the blob
- PARAMETERS:
- RESULT:
	- Returns the pq moment, or 0 if the moment is not implemented
- RESTRICTIONS:
	- Currently, moments up to order 3 are implemented
- AUTHOR: Ricard Borràs
- CREATION DATE: 20-07-2004.
- MODIFICATION: Date. Author. Description.
*/
double CBlobGetMoment::operator()(CBlob &blob)
{
	return blob.Moment(m_p, m_q);
}
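CBlobGetMoment is a functor, so it can be applied directly to a blob; the (p, q) constructor is assumed here from the m_p/m_q members used above, and the blobs result is assumed context:

// Sketch: the (1,1) moment of the largest blob in a CBlobResult.
CBlob biggest;
blobs.GetNthBlob(CBlobGetArea(), 0, biggest);
double m11 = CBlobGetMoment(1, 1)(biggest);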
Example #18
void iptask::markerDetect(void)
{
     IplImage * frame, *img_hsv, *img_proc;
     CvMemStorage * storage = cvCreateMemStorage(0);
     ros::NodeHandle n;
     ros::Publisher marker = n.advertise<ikat_ip_data::ip_marker_data>("marker_data",3);
     ros::Rate looprate(5);
     int count = 0;
     CvSeq * contours,*final_contour;
     int total_con;
     double maxarea;
     marker_data * Data =(marker_data *)malloc(sizeof(marker_data));
     CBlobResult blobs;
     CBlob * currentblob;
     CvPoint2D32f vertices[4];
     //CvCapture * img_video=cvCaptureFromAVI("downward-pipe-15_56_17.avi");
     frame=cvQueryFrame(img);
     cvNamedWindow("Image Actual");
     cvNamedWindow("final Image");
     img_hsv=cvCreateImage(cvGetSize(frame),8,3);
     img_proc=cvCreateImage(cvGetSize(frame),8,1);
     while(ros::ok())
     {
         ikat_ip_data::ip_marker_data msg;
         frame=cvQueryFrame(img);
         if(!frame)
                 break;
         cvShowImage("Image Actual",frame);
         cvCvtColor(frame,img_hsv,CV_RGB2HSV); // cvQueryFrame yields BGR, so CV_RGB2HSV swaps R and B before thresholding
         cvInRangeS(img_hsv,cvScalar(100,100,100),cvScalar(120,170,255),img_proc);
         cvSmooth(img_proc,img_proc,CV_GAUSSIAN,11,11);
         cvErode(img_proc,img_proc);
         blobs=CBlobResult(img_proc,NULL,0);
         blobs.Filter(blobs,B_EXCLUDE,CBlobGetArea(),B_LESS,75);
         for (int i = 0; i < blobs.GetNumBlobs(); i++ )
         {
                 currentblob = blobs.GetBlob(i);
                 currentblob->FillBlob(img_proc,cvScalar(255));
         }
         cvCanny(img_proc,img_proc,10,200);
         total_con=cvFindContours(img_proc,storage,&contours,sizeof(CvContour),CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
         if(!contours || contours->total==0)
             continue;
         final_contour=cvApproxPoly(contours,sizeof(CvContour),storage,CV_POLY_APPROX_DP,1,1);
         maxarea=0;
         CvBox2D rect;
         while(final_contour)
         {
              rect=cvMinAreaRect2(final_contour, storage);
              if(rect.size.height*rect.size.width>maxarea)
              {
                  Data->center.x=rect.center.x;
                  Data->center.y=rect.center.y;
                  Data->size.x=rect.size.width;
                  Data->size.y=rect.size.height;
                  Data->angle=rect.angle;
                  maxarea=rect.size.height*rect.size.width;
                  msg.Marker_data[0]=Data->center.x;
                  msg.Marker_data[1]=Data->center.y;
                  msg.Marker_data[2]=Data->angle;
              }
              final_contour=final_contour->h_next;
         }
         if(maxarea>0)
         {
             cvBoxPoints(rect,vertices);
             cvLine(frame,cvPointFrom32f(vertices[0]),cvPointFrom32f(vertices[1]),cvScalarAll(255),2);
             cvLine(frame,cvPointFrom32f(vertices[1]),cvPointFrom32f(vertices[2]),cvScalarAll(255),2);
             cvLine(frame,cvPointFrom32f(vertices[2]),cvPointFrom32f(vertices[3]),cvScalarAll(255),2);
             cvLine(frame,cvPointFrom32f(vertices[3]),cvPointFrom32f(vertices[0]),cvScalarAll(255),2);
         }
         ROS_INFO("center x :[%f]",msg.Marker_data[0]);
         ROS_INFO("center y :[%f]",msg.Marker_data[1]);
         ROS_INFO("angle : [%f]",msg.Marker_data[2]);
         marker.publish(msg);
         cvShowImage("final Image",frame);
         char c=cvWaitKey(33);
         if (c==27)
             break;
         ros::spinOnce();
         ++count;
         looprate.sleep();
     }
     cvDestroyWindow("Image Actual");
     cvDestroyWindow("final Image");
     cvReleaseImage(&img_hsv);
     cvReleaseImage(&img_proc);
     cvReleaseMemStorage(&storage);
     free(Data);
}
Example #19
int main() {
  CvPoint pt1,pt2;
  CvPoint cir_center;
  CvPoint frame_center;
  CvPoint A,B,C,D;
  CvPoint temp;
  double angle,spinsize;
  int cir_radius=1;
  int frame_width=160, frame_height=120;
   CvCapture* capture = cvCaptureFromCAM( CV_CAP_ANY );
   if ( !capture ) {
     fprintf(stderr, "ERROR: capture is NULL \n" );
     getchar();
     return -1;
   }
  cvSetCaptureProperty(capture,CV_CAP_PROP_FRAME_WIDTH,frame_width);// 120x160 
  cvSetCaptureProperty(capture,CV_CAP_PROP_FRAME_HEIGHT,frame_height);
  //cvSetCaptureProperty(capture, CV_CAP_PROP_FPS,10);
//  cvSetCaptureProperty(capture,CV_CAP_PROP_POS_FRAMES,5);  
 // Create a window in which the captured images will be presented
   cvNamedWindow( "mywindow", CV_WINDOW_AUTOSIZE );
   // Show the image captured from the camera in the window and repeat
   while ( 1 ) {
     // Get one frame
     IplImage* frame = cvQueryFrame( capture );
     if ( !frame ) {
       fprintf( stderr, "ERROR: frame is null...\n" );
       getchar();
       break;
     }
     int modfheight, modfwidth;

     modfheight = frame->height;
     modfwidth = frame->width;
     // create modified frame with 1/4th the original size
     IplImage* modframe = cvCreateImage(cvSize((int)(modfwidth/4),(int)(modfheight/4)),frame->depth,frame->nChannels); //cvCreateImage(size of frame, depth, noofchannels)
     cvResize(frame, modframe,CV_INTER_LINEAR);
     // create HSV(Hue, Saturation, Value) frame
     IplImage* hsvframe = cvCreateImage(cvGetSize(modframe),8, 3);
     cvCvtColor(modframe, hsvframe, CV_BGR2HSV); //cvCvtColor(input frame,outputframe,method)
     // create a frame within threshold.
     IplImage* threshframe = cvCreateImage(cvGetSize(hsvframe),8,1);
     cvInRangeS(hsvframe,cvScalar(15, 100, 100),cvScalar(60, 220, 220),threshframe); //cvInRangeS(input frame, cvScalar(min range),cvScalar(max range),output frame)
     // created dilated image
     IplImage* dilframe = cvCreateImage(cvGetSize(threshframe),8,1);
     cvDilate(threshframe,dilframe,NULL,2); //cvDilate(input frame, output frame, mask, number of times to dilate)

     CBlobResult blobs;
     blobs = CBlobResult(dilframe,NULL,0); // CBlobresult(inputframe, mask, threshold) Will filter all white parts of image
     blobs.Filter(blobs,B_EXCLUDE,CBlobGetArea(),B_LESS,50);//blobs.Filter(input, cond, criteria, cond, const) Filter all images whose area is less than 50 pixels
     CBlob biggestblob;
     blobs.GetNthBlob(CBlobGetArea(),0,biggestblob); //GetNthBlob(criteria, number, output) Get only the largest blob based on CblobGetArea()
     // get 4 points to define the rectangle
     pt1.x = biggestblob.MinX()*4;
     pt1.y = biggestblob.MinY()*4;
     pt2.x = biggestblob.MaxX()*4;
     pt2.y = biggestblob.MaxY()*4;
     cir_center.x=(pt1.x+pt2.x)/2;
     cir_center.y=(pt1.y+pt2.y)/2;
     frame_center.x=frame_width/2;
     frame_center.y=frame_height/2;
     A.x=frame_center.x-4;
     A.y=frame_center.y;
     B.x=frame_center.x+4;
     B.y=frame_center.y;
     C.y=frame_center.y-4;
     C.x=frame_center.x;
     D.y=frame_center.y+4;
     D.x=frame_center.x;
     cvRectangle(frame,pt1,pt2,cvScalar(255,0,0),1,8,0); // draw rectangle around the biggest blob
     cvCircle( frame, cir_center, cir_radius, cvScalar(0,255,255), 1, 8, 0 ); // center point of the rectangle
     cvLine(frame, A, B,cvScalar(255,0,255),2,8,0);
     cvLine(frame, C, D,cvScalar(255,0,255),2,8,0);
     if (cir_center.x!=0&&cir_center.y!=0){
       // Vector from the frame center to the blob center: length and direction
       spinsize=sqrt((cir_center.x-frame_center.x)*(cir_center.x-frame_center.x) +(cir_center.y-frame_center.y)*(cir_center.y-frame_center.y));
       angle = atan2((double)cir_center.y-frame_center.y,(double)cir_center.x-frame_center.x);
       // Arrowhead: two short segments at +/-45 degrees from the vector direction
       temp.x=(int)(frame_center.x+spinsize/5*cos(angle+CV_PI/4));
       temp.y=(int)(frame_center.y+spinsize/5*sin(angle+CV_PI/4));
       cvLine(frame, temp, frame_center,cvScalar(0,255,0),1,8,0);

       temp.x=(int)(frame_center.x+spinsize/5*cos(angle-CV_PI/4));
       temp.y=(int)(frame_center.y+spinsize/5*sin(angle-CV_PI/4));
       cvLine(frame, temp, frame_center,cvScalar(0,255,0),1,8,0);

       cvLine(frame, cir_center, frame_center,cvScalar(0,255,0),1,8,0);
       //cvCircle( frame, frame_center, cir_radius, cvScalar(0,255,255), 2, 8, 0 );
     }
     cvShowImage( "mywindow", frame); // show output image
     // Release the per-frame buffers; `frame` itself is owned by the capture and must not be released
     cvReleaseImage( &modframe );
     cvReleaseImage( &hsvframe );
     cvReleaseImage( &threshframe );
     cvReleaseImage( &dilframe );
     //If ESC key pressed, Key=0x10001B under OpenCV 0.9.7(linux version),
     //remove higher bits using AND operator
     if ( (cvWaitKey(10) & 255) == 27 ) break;
   }
   // Release the capture device housekeeping
   cvReleaseCapture( &capture );
   cvDestroyWindow( "mywindow" );
   return 0;
 }
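The three cvLine calls in the loop above hand-roll an arrow from the frame center to the blob center. With the C++ drawing API (OpenCV 2.4.10+), cv::arrowedLine produces the same overlay in one call; a brief sketch, wrapping the existing IplImage without a copy:

// Equivalent arrow overlay with the C++ API.
cv::Mat view = cv::cvarrToMat(frame);  // shares the IplImage data
cv::arrowedLine(view,
                cv::Point(frame_center.x, frame_center.y),
                cv::Point(cir_center.x, cir_center.y),
                cv::Scalar(0, 255, 0), 1, 8, 0, 0.2);  // tipLength = 0.2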
Example #20
void
Auvsi_Recognize::extractShape( void )
{
	typedef cv::Vec<T, 1> VT;

	// Reduce input to two colors
	cv::Mat reducedColors = doClustering<T>( _image, 2 );	
	cv::Mat grayScaled, binary;

	// Make output grayscale
	grayScaled = convertToGray( reducedColors );
	//cv::cvtColor( reducedColors, grayScaled, CV_RGB2GRAY );

	// Make binary
	double min, max;
	cv::minMaxLoc( grayScaled, &min, &max );
	cv::threshold( grayScaled, binary, min, 1.0, cv::THRESH_BINARY );	

	// Ensure the background is black and the shape white
	if( binary.at<VT>(0, 0)[0] > 0.0f )
		cv::threshold( grayScaled, binary, min, 1.0, cv::THRESH_BINARY_INV );

	binary.convertTo( binary, CV_8U, 255.0f );

	// Fill in all black regions smaller than largest black region with white
	CBlobResult blobs;
	CBlob * currentBlob;
	IplImage binaryIpl = binary;
	blobs = CBlobResult( &binaryIpl, NULL, 255 );
	
	// Get area of biggest blob
	CBlob biggestBlob;
	blobs.GetNthBlob( CBlobGetArea(), 0, biggestBlob );

	// Remove all blobs of smaller area
	blobs.Filter( blobs, B_EXCLUDE, CBlobGetArea(), B_GREATER_OR_EQUAL, biggestBlob.Area() );

	for (int i = 0; i < blobs.GetNumBlobs(); i++ )
	{
		currentBlob = blobs.GetBlob(i);
		currentBlob->FillBlob( &binaryIpl, cvScalar(255));
	}

	// Fill in all small white regions black 
	blobs = CBlobResult( &binaryIpl, NULL, 0 );
	blobs.GetNthBlob( CBlobGetArea(), 0, biggestBlob );

	blobs.Filter( blobs, B_EXCLUDE, CBlobGetArea(), B_GREATER_OR_EQUAL, biggestBlob.Area() );

	for (int i = 0; i < blobs.GetNumBlobs(); i++ )
	{
		currentBlob = blobs.GetBlob(i);
		currentBlob->FillBlob( &binaryIpl, cvScalar(0));
	}

	binary = cv::Scalar(0);
	biggestBlob.FillBlob( &binaryIpl, cvScalar(255));

	_shape = binary;
}
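The two passes above fill holes by blob size: small black regions are painted white, then small white regions black, and finally only the largest component is kept. On OpenCV 3+ the keep-the-largest step can be done without cvblobslib via cv::connectedComponentsWithStats; a minimal sketch (the function name is illustrative, and at least one foreground blob is assumed):

#include <opencv2/imgproc.hpp>

// Keep only the largest white component of a CV_8U binary image.
cv::Mat keepLargestComponent(const cv::Mat& binary)
{
    cv::Mat labels, stats, centroids;
    int n = cv::connectedComponentsWithStats(binary, labels, stats, centroids, 8, CV_32S);
    int best = 1, bestArea = 0;
    for (int i = 1; i < n; i++) {  // label 0 is the background
        int area = stats.at<int>(i, cv::CC_STAT_AREA);
        if (area > bestArea) { bestArea = area; best = i; }
    }
    return (labels == best);  // 255 where the largest blob is, 0 elsewhere
}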