Example #1
/*!
  Initialize any OpenCV parameters.
*/
void vpKeyPointSurf::init()
{
#if (VISP_HAVE_OPENCV_VERSION >= 0x020400) // Require opencv >= 2.4.0
  cv::initModule_nonfree();
#endif

  storage = cvCreateMemStorage(0);
  params = cvSURFParams(hessianThreshold, descriptorType);
}
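A hedged companion sketch: on OpenCV 2.4.x the same registration can be verified through the Algorithm factory, which is a quick way to detect a missing nonfree module at runtime (the function name here is illustrative, not part of ViSP):

#include <opencv2/features2d/features2d.hpp>
#include <opencv2/nonfree/nonfree.hpp>

void checkSurfAvailable()
{
  cv::initModule_nonfree(); // registers SURF/SIFT with the Algorithm factory
  cv::Ptr<cv::FeatureDetector> detector = cv::FeatureDetector::create("SURF");
  CV_Assert(!detector.empty()); // empty when nonfree was not linked in
}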
Example #2
// Headers for the OpenCV C API; in OpenCV 2.4.x cvExtractSURF moved to the
// nonfree/legacy modules, so the exact include can vary by version.
#include <cstdio>
#include <opencv/cv.h>
#include <opencv/highgui.h>

int main(int argc, char** argv)
{
    if (argc < 5) {
        fprintf(stderr, "usage: %s <image> <x> <y> <size>\n", argv[0]);
        return 1;
    }
    const char* filename = argv[1];
    int x;
    int y;
    int size;
    sscanf(argv[2], "%d", &x);
    sscanf(argv[3], "%d", &y);
    sscanf(argv[4], "%d", &size);
    
    int SURF_EXTENDED = 0;
    int SURF_HESSIAN_THRESHOLD = 500;
    int SURF_NOCTAVES = 2;
    int SURF_NOCTAVELAYERS = 2; 
    int MATCH_THRESHOLD = 20;  

    CvSURFParams params = cvSURFParams(SURF_HESSIAN_THRESHOLD, SURF_EXTENDED);
    params.nOctaves=SURF_NOCTAVES;
    params.nOctaveLayers=SURF_NOCTAVELAYERS;
    CvMemStorage* storage = cvCreateMemStorage(0);
    IplImage* image = cvLoadImage(filename, CV_LOAD_IMAGE_GRAYSCALE );
    CvSeq *objectKeypoints = 0, *objectDescriptors = 0;

    // set up the keypoint
    int useProvidedKeyPoints = 1;
    CvMemStorage* kp_storage = cvCreateMemStorage(0);
    CvSeq* surf_kp = cvCreateSeq(0, sizeof(CvSeq), sizeof(CvSURFPoint), kp_storage);
    int laplacian = 1; 
    int direction = 0; 
    int hessian = SURF_HESSIAN_THRESHOLD+1; 
    CvSURFPoint point = cvSURFPoint(cvPoint2D32f(x, y), laplacian, size, direction, hessian);
    cvSeqPush(surf_kp, &point);

    // extract descriptor
    cvExtractSURF(image, 0, &surf_kp, &objectDescriptors, storage, params, useProvidedKeyPoints);

    // print to stdout
    /*
    // if keypoint info also wanted
    CvSeqReader kp_reader;
    cvStartReadSeq(surf_kp, &kp_reader);
    const CvSURFPoint* kp = (const CvSURFPoint*)kp_reader.ptr;
    printf("%.2f %.2f %.2f %d %d",kp->pt.x,kp->pt.y,kp->hessian,kp->laplacian,kp->size);
    */
    
    const float* des = (const float*)cvGetSeqElem(objectDescriptors, 0);
    for (int i=0;i<64;i++){
        printf("%.5f ",des[i]);
    } 
    cvReleaseImage( &image );
    cvReleaseMemStorage( &storage );    // frees objectDescriptors
    cvReleaseMemStorage( &kp_storage ); // frees the provided keypoint seq
    return 0;
}
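One fragile spot above: the hardcoded 64 in the print loop is only correct because SURF_EXTENDED is 0 (it would be 128 otherwise). A small sketch that derives the length from the sequence itself, as the classic find_obj sample does:

/* elem_size is the byte size of one descriptor in the CvSeq, so this yields
   64 for plain SURF and 128 when the extended flag is set. */
int dims = objectDescriptors->elem_size / (int)sizeof(float);
const float* des = (const float*)cvGetSeqElem(objectDescriptors, 0);
for (int i = 0; i < dims; i++)
    printf("%.5f ", des[i]);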
Example #3
void SurfFD::extractKeypoints(cv::Mat* m)
{
  //memcpy( output_image_, input_image_, sizeof(input_image_) );//Replace by opencv copying

  //CvSURFParams params = cvSURFParams(500, 1);
  //CvMemStorage* storage = cvCreateMemStorage(0);
  //(*outputEK).extractKeypoints();

  CvSURFParams params = cvSURFParams(500, 0);
  CvMemStorage* storage = cvCreateMemStorage(0);

  IplImage _img(*m);
  cvExtractSURF(&_img, 0, &keypoints_, &descriptors_, storage, params);
}
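cvExtractSURF requires an 8-bit, single-channel input, which the code above assumes the caller guarantees. A defensive sketch of the same body that tolerates 3-channel input (BGR channel order assumed; `gray` must outlive `_img`, and <opencv2/imgproc/imgproc.hpp> is assumed available):

  cv::Mat gray;
  if (m->channels() == 3)
    cv::cvtColor(*m, gray, CV_BGR2GRAY); // channel order assumed BGR
  else
    gray = *m;
  IplImage _img(gray); // header only; shares gray's pixel buffer
  cvExtractSURF(&_img, 0, &keypoints_, &descriptors_, storage, params);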
Example #4
void rspfOpenCVSURFFeatures::runUcharTransformation(rspfImageData* tile)
{   

	IplImage *input;
	IplImage *output;
	IplImage *temp;

	char* bSrc;
	char* bDst;

	//int nChannels = tile->getNumberOfBands();

	//for(int k=0; k<nChannels; k++) {
	input = cvCreateImageHeader(cvSize(tile->getWidth(),tile->getHeight()),8,1);
	output = cvCreateImageHeader(cvSize(tile->getWidth(),tile->getHeight()),8,1);
	temp = cvCreateImage(cvGetSize(input),32,1);

	CvMemStorage* storage = cvCreateMemStorage(0);
	
	bSrc = static_cast<char*>(tile->getBuf(0));
	input->imageData=bSrc;
	bDst = static_cast<char*>(theTile->getBuf());
	output->imageData=bDst;
    
	CvSeq *imageKeypoints = NULL;
	cvCopy(input,output);
	
	CvSURFParams params = cvSURFParams(theHessianThreshold, 1);

	cvExtractSURF(input,NULL,&imageKeypoints,NULL,storage,params);

	int numKeyPoints = imageKeypoints->total;

	for (int i=0;i<numKeyPoints;i++){
		CvSURFPoint* corner = (CvSURFPoint*)cvGetSeqElem(imageKeypoints,i);
		theKeyPoints.push_back(rspfDpt(corner->pt.x,corner->pt.y)+tile->getOrigin());         
		cvCircle(output,cvPointFrom32f(corner->pt),1,cvScalar(0),1);
	}
	cvReleaseImageHeader(&input);
	cvReleaseImageHeader(&output);
	cvReleaseImage(&temp);
	cvReleaseMemStorage(&storage); // imageKeypoints lives here; free it per tile
	//}

	theTile->validate(); 
}
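Since runUcharTransformation runs once per tile, a possible refinement is to keep a single storage on the filter and recycle it, rather than creating and releasing one per call; `theStorage` below is a hypothetical member variable, not part of the original class:

	// Hypothetical member reuse; cvClearMemStorage recycles the blocks cheaply.
	if (theStorage == NULL)
		theStorage = cvCreateMemStorage(0);
	else
		cvClearMemStorage(theStorage);
	cvExtractSURF(input, NULL, &imageKeypoints, NULL, theStorage, params);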
Example #5
void ObjectDefinition::Init(IplImage* image)
{
	m_params = cvSURFParams(hessianThreshold, 1);
	m_corners[0] = cvPoint(0,0);
	m_corners[1] = cvPoint(image->width,0);
	m_corners[2] = cvPoint(image->width,image->height);
	m_corners[3] = cvPoint(0,image->height);
	if(image->nChannels == 3)
	{
		IplImage* grayImage = cvCreateImage(cvSize(image->width, image->height), 8, 1); 
		cvCvtColor(image, grayImage, CV_RGB2GRAY);
		cvExtractSURF(grayImage, 0, &m_keypoints, &m_descriptor, m_storage, m_params);
		cvReleaseImage(&grayImage);
	}
	else{
		cvExtractSURF(image, 0, &m_keypoints, &m_descriptor, m_storage, m_params);
	}
	cvReleaseImage(&image);
}
Example #6
void ControlWidget::SURFImage()
{
    // surf_image is overwritten by cvCloneImage() below, so creating a
    // 1-channel image here would only leak; just manage the SURF storage.
    if(this->m_storage_SURF == NULL) {
        this->m_storage_SURF = cvCreateMemStorage(0);
    }
    else {
        cvClearMemStorage(this->m_storage_SURF);
    }

    CvSURFParams params;
    params = cvSURFParams(this->surf_Hessian, 1);

    cvExtractSURF(this->gray_image, 0, &image_Keypoints, &image_Descriptors, this->m_storage_SURF,
                  params, 0);

    this->surf_image = cvCloneImage(this->imagerd);

    for(int i = 0; i < image_Keypoints->total; ++i)
    {
        CvSURFPoint* point = (CvSURFPoint*)cvGetSeqElem(image_Keypoints, i);
        CvPoint center;
        int radius;

        center.x = cvRound(point->pt.x);
        center.y = cvRound(point->pt.y);
//        radius = cvRound(point->size * 1.2 / 9.0 * 2.0);
//        cvCircle(this->surf_image, center, radius, cvScalar(0, 0, 255), 1, 8, 0);

        cvCircle(this->surf_image, center, 2, cvScalar(0, 0, 255), -1, 8, 0);
    }

    QImage SURF_Image = QImage((const unsigned char*)(this->surf_image->imageData),
                              this->surf_image->width, this->surf_image->height,
                              QImage::Format_RGB888).rgbSwapped();

    this->bufferSurfImage = new QPixmap();
    *bufferSurfImage = QPixmap::fromImage(SURF_Image);
    *bufferSurfImage = bufferSurfImage->scaled(250, 200);

    cvZero(this->surf_image);
}
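A caveat in the QImage construction above: that constructor assumes tightly packed rows, while IplImage rows are padded to widthStep bytes. A stride-aware sketch (hypothetical helper, same Qt4-era API as the snippet):

QImage wrapIplBGR(const IplImage* img)
{
    // Pass widthStep explicitly; IplImage rows may be padded beyond width*3.
    return QImage((const unsigned char*)img->imageData,
                  img->width, img->height,
                  img->widthStep,                      // bytes per row
                  QImage::Format_RGB888).rgbSwapped(); // BGR -> RGB deep copy
}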
Example #7
vector<VisionRecognitionResult> KitechSurfObjectRecognitionComp::Recognize(vector<unsigned char> image,int width,int height,int pixelBytes)
{
	vector<VisionRecognitionResult> _recognitionResult(0);

	IplImage *cvImage = cvCreateImageHeader( cvSize(width, height), 8, pixelBytes );
	cvImage->imageData = (char *)&image[0];

	IplImage *grayImage = cvCreateImage( cvGetSize(cvImage), 8, 1 );
	cvCvtColor( cvImage, grayImage, CV_BGR2GRAY );

    CvMemStorage *imageStorage = cvCreateMemStorage(0);
    CvSeq *imageKeypoints = 0, *imageDescriptors = 0;

	cvExtractSURF( grayImage, 0, &imageKeypoints, &imageDescriptors, imageStorage, cvSURFParams(500,1) );

	CvPoint src_corners[4] = {{0,0}, {_orgWidth,0}, {_orgWidth, _orgHeight}, {0, _orgHeight}};
	CvPoint dst_corners[4];

	if( LocatePlanarObject( _objectKeypoints, _objectDescriptors, imageKeypoints, imageDescriptors, src_corners, dst_corners ) ) {
		_recognitionResult.resize(1);

		_recognitionResult[0].name = _objName;
		_recognitionResult[0].point1X = dst_corners[0].x;
		_recognitionResult[0].point1Y = dst_corners[0].y;
		_recognitionResult[0].point2X = dst_corners[1].x;
		_recognitionResult[0].point2Y = dst_corners[1].y;
		_recognitionResult[0].point3X = dst_corners[2].x;
		_recognitionResult[0].point3Y = dst_corners[2].y;
		_recognitionResult[0].point4X = dst_corners[3].x;
		_recognitionResult[0].point4Y = dst_corners[3].y;
		//PrintMessage("KitechSurfObjectRecognitionComp::recognize() -> I found data.(%s)\n", _recognitionResult[0].name.c_str());
	}

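	// Releasing imageStorage also frees imageKeypoints/imageDescriptors, so it
	// must come only after LocatePlanarObject has consumed them.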
	cvReleaseMemStorage( &imageStorage );

	cvReleaseImage( &grayImage );
	cvReleaseImageHeader( &cvImage );

	return _recognitionResult;
}
Example #8
JNIEXPORT void JNICALL Java_com_userinterface_OpenCV_extractSURFFeature(
		JNIEnv* env, jobject thiz) {
	IplImage *pWorkImage=cvCreateImage(cvGetSize(pImage),IPL_DEPTH_8U,1);
	cvCvtColor(pImage,pWorkImage,CV_BGR2GRAY);
	CvMemStorage* storage = cvCreateMemStorage(0);
	CvSeq *imageKeypoints = 0, *imageDescriptors = 0;
	CvSURFParams params = cvSURFParams(2000, 0);
	cvExtractSURF( pWorkImage, 0, &imageKeypoints, &imageDescriptors, storage, params );
	// show features
	for( int i = 0; i < imageKeypoints->total; i++ )
	{
		CvSURFPoint* r = (CvSURFPoint*)cvGetSeqElem( imageKeypoints, i );
		CvPoint center;
		int radius;
		center.x = cvRound(r->pt.x);
		center.y = cvRound(r->pt.y);
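		// From the OpenCV find_obj sample: SURF's 9x9 base filter corresponds
		// to a Gaussian of sigma 1.2, so size*1.2/9 recovers the feature's
		// sigma, doubled here for a visible radius.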
		radius = cvRound(r->size*1.2/9.*2);
		cvCircle( pImage, center, radius, CV_RGB(255,0,0), 1, CV_AA, 0 );
	}
	cvReleaseImage(&pWorkImage);
	cvReleaseMemStorage(&storage);
}
Example #9
// initialize the new element
// instantiate pads and add them to element
// set pad callback functions
// initialize instance structure
static void
gst_surf_tracker_init(GstSURFTracker *filter, GstSURFTrackerClass *gclass) {
    filter->sinkpad = gst_pad_new_from_static_template(&sink_factory, "sink");
    gst_pad_set_setcaps_function(filter->sinkpad, GST_DEBUG_FUNCPTR(gst_surf_tracker_set_caps));
    gst_pad_set_getcaps_function(filter->sinkpad, GST_DEBUG_FUNCPTR(gst_pad_proxy_getcaps));
    gst_pad_set_chain_function(filter->sinkpad, GST_DEBUG_FUNCPTR(gst_surf_tracker_chain));

    filter->srcpad = gst_pad_new_from_static_template(&src_factory, "src");
    gst_pad_set_getcaps_function(filter->srcpad, GST_DEBUG_FUNCPTR(gst_pad_proxy_getcaps));

    gst_element_add_pad(GST_ELEMENT(filter), filter->sinkpad);
    gst_element_add_pad(GST_ELEMENT(filter), filter->srcpad);

    filter->verbose              = FALSE;
    filter->display              = FALSE;
    filter->display_features     = FALSE;
    filter->params               = cvSURFParams(100, 1);
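    // Hessian threshold 100 is permissive and the second argument enables the
    // extended 128-dim descriptors: tracking favors many keypoints over speed.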
    filter->static_count_objects = 0;
    filter->frames_processed     = 0;
    filter->rect_timestamp       = 0;
    filter->rect_array           = g_array_new(FALSE, FALSE, sizeof(CvRect));
    filter->stored_objects       = g_array_new(FALSE, FALSE, sizeof(InstanceObject));
}
Example #10
bool Features::featuresBasedTransform(IplImage* object, IplImage* image, IplImage* img1, IplImage* img2, QTransform &transform)
{    
    CvMemStorage* storage = cvCreateMemStorage(0);

    //Feature search for both images.
    CvSeq *objectKeypoints = 0, *objectDescriptors = 0;
    CvSeq *imageKeypoints = 0, *imageDescriptors = 0;
    CvSURFParams params = cvSURFParams(500, 1);
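    // cvGetTickFrequency() returns ticks per microsecond in the C API, so
    // tt/(cvGetTickFrequency()*1000.) below is elapsed milliseconds.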
    double tt = (double)cvGetTickCount();
    cvExtractSURF( object, 0, &objectKeypoints, &objectDescriptors, storage, params );
    cvExtractSURF( image, 0, &imageKeypoints, &imageDescriptors, storage, params );    
    tt = (double)cvGetTickCount() - tt;
    if(VERBOSE){
        qDebug() << "Features found in image 1:" << objectDescriptors->total;
        qDebug() << "Features found in image 2:" << imageDescriptors->total;
        qDebug() << "Extraction time:" << tt/(cvGetTickFrequency()*1000.) << "ms." << endl;
    }

    //If either image yields too few features, return with no result.
    if(objectKeypoints->total < MINPAIRS || imageKeypoints->total < MINPAIRS){
        if(VERBOSE)
            qDebug() << "Not enough features were found to compute a transformation." << endl;
        cvReleaseMemStorage(&storage);
        return false;
    }

    //Search for correspondences between feature points.
    vector<int> ptpairs;
    #ifdef USE_FLANN
        Features::flannFindPairs( objectKeypoints, objectDescriptors, imageKeypoints, imageDescriptors, ptpairs );
    #else
        Features::findPairs( objectKeypoints, objectDescriptors, imageKeypoints, imageDescriptors, ptpairs );
    #endif

    //Utils::printPairsInfo(objectKeypoints, imageKeypoints, ptpairs);
    /**************************************************************************************************************/
    Features::filterByDirection(objectKeypoints, imageKeypoints, ptpairs);
    /**************************************************************************************************************/

    //If there are not enough corresponding points, return with no result.
    if(VERBOSE)
        qDebug() << "Number of corresponding points:" << (int)(ptpairs.size()/2) << endl;
    if(ptpairs.size()/2 < MINPAIRS){
        if(VERBOSE)
            qDebug() << "Not enough corresponding points between the images to compute a transformation." << endl;
        cvReleaseMemStorage(&storage);
        return false;
    }    

    //Find the triangles that will determine the transformation to apply.
    vector<int> objTri, imgTri, objSize;
    objTri.reserve(3);
    imgTri.reserve(3);
    objSize.push_back(object->width);
    objSize.push_back(object->height);

    bool goodTriangle = Features::findGoodTriangles(objectKeypoints, imageKeypoints, ptpairs, objSize, objTri, imgTri);
    //If the triangles could not be found satisfactorily, return with no result.
    if(!goodTriangle){
        if(VERBOSE)
            qDebug() << "The matching triangles found do not allow a correct transformation." << endl;
        cvReleaseMemStorage(&storage);
        return false;
    }

    //Compute the transformation.
    transform = getTransformation(objectKeypoints, imageKeypoints, objTri, imgTri);

    //EIGENVALUES**************************************************************************************************************
    //Features::checkEigenvalues(QTransform(0.0, 1.5, -1.0, 1.5, 1.0, 0.0, -1.0, 0.0, 1.0));
    if(!Features::checkEigenvalues(transform)){
        if(VERBOSE)
            qDebug() << "The eigenvalues of the computed transformation indicate it is not a valid one." << endl;
        cvReleaseMemStorage(&storage);
        return false;
    }

    if(GRAPHIC){
        IplImage* result = Utils::drawResultImage(img1, img2, objectKeypoints, imageKeypoints, objTri, imgTri, ptpairs);
        cvNamedWindow("Correspondence image", 1);
        cvMoveWindow("Correspondence image", 100, 100);
        cvShowImage("Correspondence image", result);
        cvWaitKey(0);        
        cvDestroyAllWindows();
        cvReleaseImage(&result);
    }

    cvReleaseMemStorage(&storage);

    return true;
}
Example #11
void FindObjectMain::process_surf()
{

	if(!object_image)
	{
// Only does greyscale
		object_image = cvCreateImage( 
			cvSize(object_image_w, object_image_h), 
			8, 
			1);
	}

	if(!scene_image)
	{
// Only does greyscale
		scene_image = cvCreateImage( 
			cvSize(scene_image_w, scene_image_h), 
			8, 
			1);
	}

// Select only region with image size
// Does this do anything?
	cvSetImageROI( object_image, cvRect( 0, 0, object_w, object_h ) );
	cvSetImageROI( scene_image, cvRect( 0, 0, scene_w, scene_h ) );

	if(!prev_object) prev_object = new unsigned char[object_image_w * object_image_h];
	memcpy(prev_object, object_image->imageData, object_image_w * object_image_h);
	grey_crop((unsigned char*)scene_image->imageData, 
		get_input(scene_layer), 
		scene_x1, 
		scene_y1, 
		scene_x2, 
		scene_y2,
		scene_image_w,
		scene_image_h);


	grey_crop((unsigned char*)object_image->imageData, 
		get_input(object_layer), 
		object_x1, 
		object_y1, 
		object_x2, 
		object_y2,
		object_image_w,
		object_image_h);


	if(!storage) storage = cvCreateMemStorage(0);
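// NOTE: cvClearSeq (used below) empties a sequence but does not return its
// blocks to the storage; a long-running session would also want
// cvClearMemStorage between frames.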
	CvSURFParams params = cvSURFParams(500, 1);


//printf("FindObjectMain::process_surf %d\n", __LINE__);

// Only compute keypoints if the image changed
	if(memcmp(prev_object, object_image->imageData, object_image_w * object_image_h))
	{
		if(object_keypoints) cvClearSeq(object_keypoints);
		if(object_descriptors) cvClearSeq(object_descriptors);
		cvExtractSURF(object_image, 
			0, 
			&object_keypoints, 
			&object_descriptors, 
			storage, 
			params,
			0);
	}

//printf("FindObjectMain::process_surf %d object keypoints=%d\n", __LINE__, object_keypoints->total);
// Draw the keypoints
// 		for(int i = 0; i < object_keypoints->total; i++)
// 		{
//         	CvSURFPoint* r1 = (CvSURFPoint*)cvGetSeqElem( object_keypoints, i );
// 			int size = r1->size / 4;
// 			draw_rect(frame[object_layer], 
//   				r1->pt.x + object_x1 - size, 
//   				r1->pt.y + object_y1 - size, 
//   				r1->pt.x + object_x1 + size, 
//  				r1->pt.y + object_y1 + size);
// 		}


//printf("FindObjectMain::process_surf %d\n", __LINE__);

// TODO: make the surf data persistent & check for image changes instead
	if(scene_keypoints) cvClearSeq(scene_keypoints);
	if(scene_descriptors) cvClearSeq(scene_descriptors);
	cvExtractSURF(scene_image, 
		0, 
		&scene_keypoints, 
		&scene_descriptors, 
		storage, 
		params,
		0);

// Draw the keypoints
// 		for(int i = 0; i < scene_keypoints->total; i++)
// 		{
//         	CvSURFPoint* r1 = (CvSURFPoint*)cvGetSeqElem( scene_keypoints, i );
// 			int size = r1->size / 4;
// 			draw_rect(frame[scene_layer], 
//   				r1->pt.x + scene_x1 - size, 
//   				r1->pt.y + scene_y1 - size, 
//   				r1->pt.x + scene_x1 + size, 
//  				r1->pt.y + scene_y1 + size);
// 		}

// printf("FindObjectMain::process_surf %d %d %d scene keypoints=%d\n", 
// __LINE__, 
// scene_w,
// scene_h,
// scene_keypoints->total);

	int *point_pairs = 0;
	int total_pairs = 0;
	CvPoint src_corners[4] = 
	{
		{ 0, 0 }, 
		{ object_w, 0 }, 
		{ object_w, object_h }, 
		{ 0, object_h }
	};

	CvPoint dst_corners[4] = 
	{
		{ 0, 0 },
		{ 0, 0 },
		{ 0, 0 },
		{ 0, 0 }
	};

//printf("FindObjectMain::process_surf %d\n", __LINE__);
	if(scene_keypoints->total &&
		object_keypoints->total &&
		locatePlanarObject(object_keypoints, 
		object_descriptors, 
		scene_keypoints, 
		scene_descriptors, 
		src_corners, 
		dst_corners,
		&point_pairs,
		&total_pairs))
	{





// Draw keypoints in the scene & object layer
		if(config.draw_keypoints)
		{
//printf("FindObjectMain::process_surf %d total pairs=%d\n", __LINE__, total_pairs);
			for(int i = 0; i < total_pairs; i++)
			{
        		CvSURFPoint* r1 = (CvSURFPoint*)cvGetSeqElem( object_keypoints, point_pairs[i * 2] );
        		CvSURFPoint* r2 = (CvSURFPoint*)cvGetSeqElem( scene_keypoints, point_pairs[i * 2 + 1] );


				int size = r2->size * 1.2 / 9 * 2;
				draw_rect(get_input(scene_layer), 
  					r2->pt.x + scene_x1 - size, 
  					r2->pt.y + scene_y1 - size, 
  					r2->pt.x + scene_x1 + size, 
 					r2->pt.y + scene_y1 + size);
				draw_rect(get_input(object_layer), 
  					r1->pt.x + object_x1 - size, 
  					r1->pt.y + object_y1 - size, 
  					r1->pt.x + object_x1 + size, 
 					r1->pt.y + object_y1 + size);
			}
		}


//printf("FindObjectMain::process_surf %d\n", __LINE__);
// Get object outline in the scene layer
		border_x1 = dst_corners[0].x + scene_x1;
		border_y1 = dst_corners[0].y + scene_y1;
		border_x2 = dst_corners[1].x + scene_x1;
		border_y2 = dst_corners[1].y + scene_y1;
		border_x3 = dst_corners[2].x + scene_x1;
		border_y3 = dst_corners[2].y + scene_y1;
		border_x4 = dst_corners[3].x + scene_x1;
		border_y4 = dst_corners[3].y + scene_y1;
//printf("FindObjectMain::process_surf %d\n", __LINE__);


		
	}
//printf("FindObjectMain::process_surf %d\n", __LINE__);



// for(int i = 0; i < object_y2 - object_y1; i++)
// {
// 	unsigned char *dst = get_input(object_layer)->get_rows()[i];
// 	unsigned char *src = (unsigned char*)object_image->imageData + i * (object_x2 - object_x1);
// 	for(int j = 0; j < object_x2 - object_x1; j++)
// 	{
// 		*dst++ = *src;
// 		*dst++ = 0x80;
// 		*dst++ = 0x80;
// 		src++;
// 	}
// }


// Free the point-pair array allocated by locatePlanarObject
	if(point_pairs) free(point_pairs);
}
Example #12
void ControlWidget::MatchingImage()
{
    // matching_image is overwritten by cvCloneImage() below, so creating a
    // 1-channel image here would only leak; just manage the matching storage.
    if(this->m_storage_Matching == NULL) {
        this->m_storage_Matching = cvCreateMemStorage(0);
    }
    else {
        cvClearMemStorage(this->m_storage_Matching);
    }

    for(int i = 0; i < 12; ++i)
    {
        this->True_Point[i] = 0;
    }

    CvSeq* keypoints2 = 0;
    CvSeq* descriptors2 = 0;

    CvSURFParams params;
    params = cvSURFParams(this->surf_Hessian, 1);

    cvExtractSURF(this->gray_list_image, 0, &keypoints2, &descriptors2, this->m_storage_Matching,
                  params);

    cv::Vector<int> ptpairs;
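    // (cv::Vector is the legacy OpenCV 2.x container; later releases drop it
    // in favor of std::vector.)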

    this->findPairs(this->image_Keypoints, this->image_Descriptors, keypoints2, descriptors2,
                    ptpairs);

    this->matching_image = cvCloneImage(this->list_imagerd);

    for(int i = 0; i < (int)ptpairs.size(); i += 2)
    {
        CvSURFPoint* pt1 = (CvSURFPoint*)cvGetSeqElem(this->image_Keypoints, ptpairs[i]);
        CvSURFPoint* pt2 = (CvSURFPoint*)cvGetSeqElem(keypoints2, ptpairs[i + 1]);

        CvPoint center;
        int radius;

        center.x = cvRound(pt2->pt.x);
        center.y = cvRound(pt2->pt.y);
//        radius = cvRound(pt2->size * 1.2 / 9.0 * 2.0);
//        cvCircle(this->matching_image, center, radius, cvScalar(0, 0, 255), 1, 8, 0);
        cvCircle(this->matching_image, center, 2, cvScalar(0, 0, 255), -1, 8, 0);

        this->JudgePairs(center.x, center.y);
    }

    QImage Matching_Image = QImage((const unsigned char*)(this->matching_image->imageData),
                              this->matching_image->width, this->matching_image->height,
                              QImage::Format_RGB888).rgbSwapped();

    this->bufferMatchingImage = new QPixmap();
    *bufferMatchingImage = QPixmap::fromImage(Matching_Image);
    *bufferMatchingImage = bufferMatchingImage->scaled(800, 300);

    cvZero(this->matching_image);
    cvClearSeq(keypoints2);
    cvClearSeq(descriptors2);
}
Example #13
int openCV_SURFDetector(struct Image * pattern,struct Image * img)
{
   StartTimer(FIND_OBJECTS_DELAY);

    monochrome(img);
    IplImage  * image = cvCreateImage( cvSize(img->width,img->height), IPL_DEPTH_8U, img->depth);
    char * opencvImagePointerRetainer = image->imageData; // UGLY HACK
    image->imageData = (char*) img->pixels; // UGLY HACK
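    // The swap avoids copying pixels, but imageData must be restored before
    // cvReleaseImage below or the allocator frees the wrong buffer.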
    //cvCvtColor( image, image, CV_RGB2GRAY);

    monochrome(pattern);
    IplImage  * object = cvCreateImage( cvSize(pattern->width,pattern->height), IPL_DEPTH_8U, pattern->depth);
    char * opencvObjectPointerRetainer = object->imageData; // UGLY HACK
    object->imageData = (char*) pattern->pixels; // UGLY HACK
    //cvCvtColor( object, object, CV_RGB2GRAY);


    CvMemStorage* storage = cvCreateMemStorage(0);
    static CvScalar colors[] = { {{0,0,255}}, {{0,128,255}}, {{0,255,255}}, {{0,255,0}}, {{255,128,0}}, {{255,255,0}}, {{255,0,0}}, {{255,0,255}}, {{255,255,255}} };

    IplImage* object_color = cvCreateImage(cvGetSize(object), 8, 3);
    cvCvtColor( object, object_color, CV_GRAY2BGR );

    CvSeq* objectKeypoints = 0, *objectDescriptors = 0;
    CvSeq* imageKeypoints = 0, *imageDescriptors = 0;
    int i;
    CvSURFParams params = cvSURFParams(500, 1);

    double tt = (double)cvGetTickCount();

    cvExtractSURF( object, 0, &objectKeypoints, &objectDescriptors, storage, params , 0 );
    //printf("Object Descriptors: %d\n", objectDescriptors->total);

    cvExtractSURF( image, 0, &imageKeypoints, &imageDescriptors, storage, params , 0 );
    //printf("Image Descriptors: %d\n", imageDescriptors->total);
    tt = (double)cvGetTickCount() - tt;

    //printf( "Extraction time = %gms\n", tt/(cvGetTickFrequency()*1000.));

    CvPoint src_corners[4] = {{0,0}, {object->width,0}, {object->width, object->height}, {0, object->height}};
    CvPoint dst_corners[4];

    //IplImage* correspond = cvCreateImage( cvSize(image->width, object->height+image->height), 8, 1 );
    //cvSetImageROI( correspond, cvRect( 0, 0, object->width, object->height ) );
    //cvCopy( object, correspond , 0 );
    //cvSetImageROI( correspond, cvRect( 0, object->height, correspond->width, correspond->height ) );
    //cvCopy( image, correspond , 0 );
    //cvResetImageROI( correspond );



    if( locatePlanarObject( objectKeypoints, objectDescriptors, imageKeypoints, imageDescriptors, src_corners, dst_corners ))
    {
        for( i = 0; i < 4; i++ )
        {
            CvPoint r1 = dst_corners[i%4];
            CvPoint r2 = dst_corners[(i+1)%4];
            //cvLine( correspond, cvPoint(r1.x, r1.y+object->height ), cvPoint(r2.x, r2.y+object->height ), colors[8] , 1 ,8 ,0  );
        }
    }

    struct pairList * ptpairs = 0;
    findPairs( objectKeypoints, objectDescriptors, imageKeypoints, imageDescriptors, ptpairs );


    printf(" Found %u pairs \n",(int) ptpairs->currentItems);



    image->imageData = opencvImagePointerRetainer; // UGLY HACK
    cvReleaseImage( &image );

    object->imageData = opencvObjectPointerRetainer; // UGLY HACK
    cvReleaseImage( &object );

    cvReleaseImage( &object_color );  // was never released before
    cvReleaseMemStorage( &storage );  // frees the keypoint/descriptor seqs

    EndTimer(FIND_OBJECTS_DELAY);
/*

    for( i = 0; i < (int)ptpairs->currentItems; i++ )
    {
        CvSURFPoint* r1 = (CvSURFPoint*)cvGetSeqElem( objectKeypoints, ptpairs->item[i].p1 );
        CvSURFPoint* r2 = (CvSURFPoint*)cvGetSeqElem( imageKeypoints,  ptpairs->item[i].p2 );
        cvLine( correspond, cvPointFrom32f(r1->pt),
            cvPoint(cvRound(r2->pt.x), cvRound(r2->pt.y+object->height)), colors[8] );
    }
    cvShowImage( "Object Correspond", correspond );
    for( i = 0; i < objectKeypoints->total; i++ )
    {
        CvSURFPoint* r = (CvSURFPoint*)cvGetSeqElem( objectKeypoints, i );
        CvPoint center;
        int radius;
        center.x = cvRound(r->pt.x);
        center.y = cvRound(r->pt.y);
        radius = cvRound(r->size*1.2/9.*2);
        cvCircle( object_color, center, radius, colors[0], 1, 8, 0 );
    }
    cvShowImage( "Object", object_color );

    cvWaitKey(0);

    cvDestroyWindow("Object");
    cvDestroyWindow("Object Correspond");
*/
    return 1;
}
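A gentler variant of the pointer-swap hack above, under the same struct Image assumption: wrap the existing pixel buffer in a standalone header so nothing ever has to be restored (hypothetical helper):

IplImage* wrapImage(struct Image* img)
{
    IplImage* hdr = cvCreateImageHeader(cvSize(img->width, img->height),
                                        IPL_DEPTH_8U, img->depth);
    hdr->imageData = (char*)img->pixels; // no copy; caller keeps ownership
    return hdr;                          // free with cvReleaseImageHeader
}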
Example #14
ReturnType KitechSurfObjectRecognitionComp::onStart()
{
	PrintMessage ("SUCCESS : KitechSurfObjectRecognitionComp::onStart()\n");
	//	Check the DB file name; return an error if it is missing
	if( !parameter.FindName("DBName") ) {
		PrintMessage("ERROR : KitechSurfObjectRecognitionComp::onStart() -> Can't find DataBase %s.\n", parameter.GetValue("DBName").c_str());
		return OPROS_FIND_PROPERTY_ERROR;
	}

	_objName = parameter.GetValue("DBName").c_str();
	IplImage *cvImage = cvLoadImage( _objName.c_str() );
	if( cvImage ) 	{
		IplImage *grayImage = cvCreateImage( cvGetSize(cvImage), 8, 1 );
		cvCvtColor( cvImage, grayImage, CV_BGR2GRAY );

		_objectStorage = cvCreateMemStorage(0);

		cvExtractSURF( grayImage, 0, &_objectKeypoints, &_objectDescriptors, _objectStorage, cvSURFParams(500,1) );

		PrintMessage("SUCCESS:SURF_ObjectRecognition::onStart() -> Successfully load %s\n", _objName.c_str());

		_orgWidth = grayImage->width;
		_orgHeight = grayImage->height;

		cvReleaseImage( &grayImage );
		cvReleaseImage( &cvImage );
	}
	else {
		PrintMessage("ERROR:SURF_ObjectRecognition::onStart() -> Failed to load %s\n", _objName.c_str());
		return OPROS_CALLER_ERROR;
	}

	return OPROS_SUCCESS;
}
Example #15
cv::Mat CBIR::getClustersIndices(QString path){
    QString imgName = path.section('/', -1);
    IplImage* img = Utils::loadImage(path.toAscii().data(), true);
    if(img == NULL){
        qDebug() << "The image could not be loaded.";
        exit(1);
    }
    //Compute the features.
    CvSeq *imgKeypoints, *imgDescriptors;
    CvSURFParams params = cvSURFParams(500, 1);
    imgKeypoints = 0;
    imgDescriptors = 0;
    CvMemStorage* storage = cvCreateMemStorage(0);
    cvExtractSURF(img, 0, &imgKeypoints, &imgDescriptors, storage, params);
    qDebug() << "Image" << path << "loaded successfully. Features:" << imgKeypoints->total;

    //Copy the descriptors into a Mat.
    cv::Mat queryDescriptorsMat(imgDescriptors->total, DESCRIPTOR_DIMS, CV_32F);
    float* img_ptr = queryDescriptorsMat.ptr<float>(0);
    CvSeqReader img_reader;
    cvStartReadSeq(imgDescriptors, &img_reader);
    for(int j=0; j<imgDescriptors->total; j++){
        const float* descriptor = (const float*)img_reader.ptr;
        CV_NEXT_SEQ_ELEM(img_reader.seq->elem_size, img_reader);
        memcpy(img_ptr, descriptor, DESCRIPTOR_DIMS*sizeof(float));
        img_ptr += DESCRIPTOR_DIMS;
    }
    cvReleaseMemStorage(&storage);
    cvReleaseImage(&img);

    if(clustersMat.data == NULL)
        exit(1);

    //Build the index for the cluster centers.
    cv::flann::KDTreeIndexParams kdtiParams = cv::flann::KDTreeIndexParams(8);
    cv::flann::Index clustersIndex(clustersMat, kdtiParams);

    //Assign each query feature to a cluster via kNN search.
    cv::Mat indices(queryDescriptorsMat.rows, 1, CV_32S);
    cv::Mat dists(queryDescriptorsMat.rows, 1, CV_32F);
    clustersIndex.knnSearch(queryDescriptorsMat, indices, dists, 1, cv::flann::SearchParams(1024));
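    // k=1 search: `indices` receives the nearest cluster id per descriptor
    // row, `dists` the corresponding (squared L2) distance.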

    /***************************************************************************************************************/
    //Save the file used to run the query against the index.
    QFile query("cbir/lemur/query/" + imgName + ".query");
    QTextStream stream(&query);
    if (!query.open(QIODevice::WriteOnly | QIODevice::Text))
        qDebug() << "An error occurred while trying to open the file " + imgName + ".query";
    stream << "<DOC 1>" << endl;

    // Iterate over all of the image's features.
    for(int i=0; i<queryDescriptorsMat.rows ;i++){
        stream << indices.at<int>(i, 0) << endl;
    }
    stream << "</DOC>";
    query.close();

    //Save the file with the query parameters.
    QFile qP("cbir/lemur/query/query_params");
    QTextStream qPStream(&qP);
    if (!qP.open(QIODevice::WriteOnly | QIODevice::Text))
        qDebug() << "An error occurred while trying to open the query_params file";
    qPStream << "<parameters>" << endl <<
                    "<index>e:\\Proyectos\\Git\\keepc\\release\\cbir\\lemur\\index\\index.key</index>" << endl <<
                    "<retModel>tfidf</retModel>" << endl <<
                    "<textQuery>e:\\Proyectos\\Git\\keepc\\release\\cbir\\lemur\\query\\" << imgName << ".query</textQuery>" << endl <<
                    "<resultFile>e:\\Proyectos\\Git\\keepc\\release\\cbir\\lemur\\query\\" << imgName << ".results</resultFile>" << endl <<
                    "<TRECResultFormat>1</TRECResultFormat>" << endl <<
                    "<resultCount>10</resultCount>" << endl <<
                "</parameters>";
    qP.close();

    return indices;
}
Example #16
int main()
{

	if(run_tests_only)
	{
		MyLine3D::runTest();
		return 0;
	}

	//CvMat *camera_inner_calibration_matrix; 
	bool show_surf_example=false;
	bool show_calibration_from_camera_and_undistortion=false;
	if(show_calibration_from_camera_and_undistortion)
	{
		CvMat *object_points_all=0;
		CvMat *image_points_all=0;
		CvMat *points_count_all=0;
		CvMat *camera_matr=0;
		CvMat *distor_coefs=0;
		CvMat *rotation_vecs=0;
		CvMat *transpose_vecs=0;
		vector<CvPoint2D32f> qu_calibr_points;
		IplImage* frameCam1;
		cvNamedWindow("WindowCam1",CV_WINDOW_KEEPRATIO);
		CvCapture *captureCam1=cvCreateCameraCapture(0);
		IplImage *quarterFrame;
		CvPoint2D32f *cornersFounded= new CvPoint2D32f[100];
		int cornersCount=0;
		int result_Found=0;
		// getting snapshots for inner camera calibration from video camera
		bool capture_flag=false;
		while(true)
		{
			frameCam1=cvQueryFrame(captureCam1);
			quarterFrame=cvCreateImage(cvSize((frameCam1->width),(frameCam1->height)),IPL_DEPTH_8U,3);
		
			cvCopy(frameCam1,quarterFrame);
			if(capture_flag)
			{
				result_Found=cvFindChessboardCorners(quarterFrame,cvSize(chess_b_szW,chess_b_szH),cornersFounded,&cornersCount);//,CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS |CV_CALIB_CB_FAST_CHECK);
				cvDrawChessboardCorners(quarterFrame,cvSize(chess_b_szW,chess_b_szH),cornersFounded,cornersCount,result_Found);
				if(result_Found>0)
					AddPointsToInnerCalibrate(qu_calibr_points,cornersFounded,cornersCount);
				capture_flag=false;
				cvShowImage("WindowCam1",quarterFrame);
				if(result_Found>0)
					cvWaitKey(0);
			}
			char c=cvWaitKey(33);
			if(c==27)
				break;
			if(c==32 || c=='y' || c=='Y')
				capture_flag=true;
			cvShowImage("WindowCam1",quarterFrame);
			cvReleaseImage(&quarterFrame);
		
		}
		cvReleaseImage(&quarterFrame);
	
		cvReleaseCapture(&captureCam1);
		cvDestroyWindow("WindowCam1");
	
		PrintAllPointsForInnerCalibrate(qu_calibr_points,chess_b_szW*chess_b_szH);
		InitCvMatPointsParametersForInnerCallibration_part1(qu_calibr_points,chess_b_szW*chess_b_szH,object_points_all,image_points_all,points_count_all,chess_b_szW,chess_b_szH);
		InitOtherCameraParametersForInnerCallibration_part2(qu_calibr_points.size()/(chess_b_szW*chess_b_szH),camera_matr,distor_coefs,rotation_vecs,transpose_vecs);
		double calibration_error_result=cvCalibrateCamera2(object_points_all,
													image_points_all,
													points_count_all,
													cvSize(imgW,imgH),
													camera_matr,
													distor_coefs,
													rotation_vecs,
													transpose_vecs,
													CV_CALIB_FIX_PRINCIPAL_POINT|CV_CALIB_FIX_ASPECT_RATIO|CV_CALIB_ZERO_TANGENT_DIST
													);
		WriteMatrixCoef(camera_matr);
		WriteMatrixCoef(distor_coefs);
		//camera_inner_calibration_matrix=cvCreateMat(3,3,CV_32FC1);
		//cvCopy(camera_matr,camera_inner_calibration_matrix);
		cvSave("camera_calibration_inner.txt",camera_matr,"camera_inner_calibration_matrix");
		cvSave("camera_calibration_dist.txt",distor_coefs,"distor_coefs","coeficients of distortions");
		cout<<"Total Error:"<<calibration_error_result<<endl;
		cout<<"Average Calibration Error :"<<(calibration_error_result)/qu_calibr_points.size()<<endl;
	//undistortion example
		IplImage *frame_cur;
		IplImage *undistor_image;
		cvNamedWindow("cameraUndistor",CV_WINDOW_KEEPRATIO);
		CvCapture *captureCam2=cvCreateCameraCapture(0);
		bool undist_flag=false;
		while(true)
		{
			frame_cur= cvQueryFrame(captureCam2);
			undistor_image=cvCreateImage(cvSize((frame_cur->width),(frame_cur->height)),IPL_DEPTH_8U,3);
			if(undist_flag)
			{
				cvUndistort2(frame_cur,undistor_image,camera_matr,distor_coefs);
			}
			else
			{
				cvCopy(frame_cur,undistor_image);
			}
			cvShowImage("cameraUndistor",undistor_image);
			char c=cvWaitKey(33);
			if(c==27)
				break;
			if(c=='u'||c=='U')
				undist_flag=!undist_flag;

			cvReleaseImage(&undistor_image);

		}
		cvReleaseImage(&undistor_image);
		cvReleaseCapture(&captureCam2);
		cvDestroyWindow("cameraUndistor");
	}//ending undistortion_example
	
	if(show_surf_example)
	{
		//using SURF
		
		initModule_nonfree();// added at 16.04.2013
		CvCapture* capture_cam_3=cvCreateCameraCapture(0);
		cvNamedWindow("SURF from Cam",CV_WINDOW_KEEPRATIO);
		cvCreateTrackbar("Hessian Level","SURF from Cam",0,1000,onTrackbarSlide1);
		IplImage* buf_frame_3=0;
		IplImage* gray_copy=0;
		IplImage* buf_frame_3_copy=0;
	
		CvSeq *kp1,*descr1;
		CvMemStorage *storage=cvCreateMemStorage(0);
	
		CvSURFPoint *surf_pt;
		bool surf_flag=false;
		while(true)
		{
			buf_frame_3=cvQueryFrame(capture_cam_3);
		
			if(surf_flag)
			{
				surf_flag=false;
				gray_copy=cvCreateImage(cvSize((buf_frame_3->width),(buf_frame_3->height)),IPL_DEPTH_8U,1);
				buf_frame_3_copy=cvCreateImage(cvSize((buf_frame_3->width),(buf_frame_3->height)),IPL_DEPTH_8U,3);
			
				cvCvtColor(buf_frame_3,gray_copy,CV_RGB2GRAY);
				//cvSetImageROI(gray_copy,cvRect(280,200,40,40));
				cvExtractSURF(gray_copy,NULL,&kp1,&descr1,storage,cvSURFParams(0.0,0));
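				// Hessian threshold 0.0 keeps every detected feature; the
				// min_hessian trackbar filters them at draw time instead.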
				cvReleaseImage(&gray_copy);
				re_draw=true;
			
				while(true)
				{
					if(re_draw)
					{
			
						cvCopy(buf_frame_3,buf_frame_3_copy);
						double pi=acos(-1.0);
						for(int i=0;i<kp1->total;i++)
						{
							surf_pt=(CvSURFPoint*)cvGetSeqElem(kp1,i);
							if(surf_pt->hessian<min_hessian)
								continue;
							int pt_x,pt_y;
							pt_x=(int)(surf_pt->pt.x);
							pt_y=(int)(surf_pt->pt.y);
							int sz=surf_pt->size;
							double rad_angle=(surf_pt->dir*pi)/180; // keep radians as floating point
				
							cvCircle(buf_frame_3_copy,cvPoint(pt_x,pt_y),1/*sz*/,CV_RGB(0,255,0));
							cvLine(buf_frame_3_copy,cvPoint(pt_x,pt_y),cvPoint(pt_x+sz*cosl(rad_angle),pt_y-sz*sinl(rad_angle)),CV_RGB(0,0,255));
						}
						cvShowImage("SURF from Cam",buf_frame_3_copy);
					
					}
					char c=cvWaitKey(33);
					if(c==27)
					{
					
					
						break;
					}
				}
				cvReleaseImage(&buf_frame_3_copy);
			}
			
			cvShowImage("SURF from Cam",buf_frame_3);
			char ch=cvWaitKey(33);
			if(ch==27)
				break;
			if(ch==32)
				surf_flag=true;
		
		}
		if(gray_copy!=0)
			cvReleaseImage(&gray_copy);
		cvReleaseCapture(&capture_cam_3);
		cvDestroyWindow("SURF from Cam");
	}//ending SURF_example

	CvFont my_font=cvFont(1,1);
	cvInitFont(&my_font,CV_FONT_HERSHEY_SIMPLEX,1.0,1.0);

	cvNamedWindow("twoSnapshots",CV_WINDOW_KEEPRATIO);
	cvCreateTrackbar("Select LLine","twoSnapshots",0,1000,onTrackbarSlideSelectLine);
	CvCapture *capture_4 = 0;
	
	IplImage* left_img=0;
	IplImage* right_img=0;
	IplImage* cur_frame_buf=0;
	IplImage* gray_img_left=0;
	IplImage* gray_img_right=0;
	IplImage* merged_images=0;
	IplImage* merged_images_copy=0;
	CvMat *fundamentalMatrix = 0;
	vector<KeyPoint> key_points_left;
	Mat descriptors_left; 
	vector<KeyPoint> key_points_right;
	Mat descriptors_right;
	//CvMemStorage *mem_stor=cvCreateMemStorage(0);
	float min_hessian_value=1001.0f;

	double startValueOfFocus = 350;

	char* left_image_file_path = "camera_picture_left.png";
	char* right_image_file_path = "camera_picture_right.png";

	Array left_points, right_points;
	left_points.init(1,1);
	right_points.init(1,1);
	Array forReconstructionLeftPoints, forReconstructionRightPoints;
	forReconstructionLeftPoints.init(1,1);
	forReconstructionRightPoints.init(1,1);

	

	while(true)
	{
		char ch=cvWaitKey(33);
		if(ch==27)
			break;
		// open left and right images
		if(ch == 'o' || ch == 'O')
		{
			openTwoImages(left_image_file_path, right_image_file_path, left_img, right_img );
			MergeTwoImages(left_img,right_img,merged_images);
		}
		// save both left and right images from camera
		if(ch == 's' || ch == 'S')
		{
			if( left_img != 0 )
				cvSaveImage(left_image_file_path, left_img);
			if( right_img != 0)
				cvSaveImage(right_image_file_path, right_img);
		}

		if(ch=='l'||ch=='L')
		{
			if(capture_4 == 0)
			{
				capture_4=cvCreateCameraCapture(0);	
			}
			
			cur_frame_buf=cvQueryFrame(capture_4);
			if(left_img==0)
				left_img=cvCreateImage(cvSize(cur_frame_buf->width,cur_frame_buf->height),IPL_DEPTH_8U,3);
			cvCopy(cur_frame_buf,left_img);

			if(right_img == 0)
			{
				right_img=cvCreateImage(cvSize(cur_frame_buf->width,cur_frame_buf->height),IPL_DEPTH_8U,3);
				cvCopy(cur_frame_buf,right_img);
			}

			MergeTwoImages(left_img,right_img,merged_images);
		}
		if(ch=='r'||ch=='R')
		{
			if(capture_4 == 0)
			{
				capture_4=cvCreateCameraCapture(0);	
			}
			cur_frame_buf=cvQueryFrame(capture_4);
			if(right_img==0)
				right_img=cvCreateImage(cvSize(cur_frame_buf->width,cur_frame_buf->height),IPL_DEPTH_8U,3);
			cvCopy(cur_frame_buf,right_img);

			if(left_img == 0)
			{
				left_img=cvCreateImage(cvSize(cur_frame_buf->width,cur_frame_buf->height),IPL_DEPTH_8U,3);
				cvCopy(cur_frame_buf,left_img);
			}
			MergeTwoImages(left_img,right_img,merged_images);
		}
		if(ch=='b'||ch=='B')
		{
			if(capture_4 == 0)
			{
				capture_4=cvCreateCameraCapture(0);	
			}
			cur_frame_buf=cvQueryFrame(capture_4);
			if(left_img==0)
				left_img=cvCreateImage(cvSize(cur_frame_buf->width,cur_frame_buf->height),IPL_DEPTH_8U,3);
			if(right_img==0)
				right_img=cvCreateImage(cvSize(cur_frame_buf->width,cur_frame_buf->height),IPL_DEPTH_8U,3);
			cvCopy(cur_frame_buf,left_img);
			cvCopy(cur_frame_buf,right_img);
		}
		if((ch=='q'||ch=='Q') && left_img!=0)
		{
			//proceed left
			extractFeaturesFromImage(left_img, min_hessian_value, gray_img_left, key_points_left, descriptors_left);

		}
		if((ch=='w'||ch=='W') && right_img!=0)
		{
			//proceed right
			extractFeaturesFromImage(right_img, min_hessian_value, gray_img_right, key_points_right, descriptors_right);			

		}
		if((ch=='m'||ch=='M') && left_img!=0 && right_img!=0)
		{
			//merge two images in to bigger one
			MergeTwoImages(left_img,right_img,merged_images);
		}
		if((ch=='c'||ch=='C') && merged_images!=0)
		{
			//comparison of two images
			if(fundamentalMatrix != 0)
			{
				cvReleaseMat(& fundamentalMatrix);
				fundamentalMatrix = 0;
			}
			left_to_right_corresponding_points.clear();
			right_to_left_corresponding_points.clear();
			
			GetCorrespondingPointsForSURF(key_points_left,descriptors_left,key_points_right,descriptors_right,left_to_right_corresponding_points,right_to_left_corresponding_points);
		}

		if(ch == 'E' || ch == 'e')
		{
			//drawing lines for corresponding points
			KeyPoint *leftPoint,*rightPoint,*leftPoint2,*rightPoint2;
			int width_part=merged_images->width>>1;
			/*for(int iL=0;iL<left_to_right_corresponding_points.size();iL++)
			{
				leftPoint=(CvSURFPoint*)cvGetSeqElem(key_points_left,left_to_right_corresponding_points[iL].first);
				rightPoint=(CvSURFPoint*)cvGetSeqElem(key_points_right,left_to_right_corresponding_points[iL].second);
				cvLine(merged_images,cvPoint(leftPoint->pt.x,leftPoint->pt.y),cvPoint(rightPoint->pt.x+width_part,rightPoint->pt.y),CV_RGB(255,0,0));
			}*/
			
			int sizeOfAccepptedLeftToRightCorrespondings = left_to_right_corresponding_points.size();
			bool* acceptedLeftToRightCorrespondings = 0;
			getAcceptedCorrespondingsForFindingModelParameters(left_to_right_corresponding_points,
				key_points_left,
				key_points_right,
				fundamentalMatrix,
				acceptedLeftToRightCorrespondings,
				sizeOfAccepptedLeftToRightCorrespondings);

			
			while(true)
			{
				merged_images_copy=cvCreateImage(cvSize(merged_images->width,merged_images->height),merged_images->depth,3);
				cvCopy(merged_images,merged_images_copy);
				int iL=selectedLeftLine;
				int iR=iL;
				if(iL>=left_to_right_corresponding_points.size())
					iL=left_to_right_corresponding_points.size()-1;
				if(iR>=right_to_left_corresponding_points.size())
					iR=right_to_left_corresponding_points.size()-1;
				char str[100]={0};
				if(iL >= 0 )
				{
					bool isLeftToRightLineIsAccepted = acceptedLeftToRightCorrespondings[iL];
				
					// difference value
					sprintf(str,"%f",left_to_right_corresponding_points[iL].comparer_value);
					cvPutText(merged_images_copy,str,cvPoint(0,merged_images_copy->height-40),&my_font,CV_RGB(0,255,0));
					// count of Matches
					sprintf(str,"%d",left_to_right_corresponding_points[iL].counterOfMatches);
					cvPutText(merged_images_copy,str,cvPoint(200,merged_images_copy->height-40),&my_font,CV_RGB(255,255,0));
					// median of compared values
					sprintf(str,"%lf",left_to_right_corresponding_points[iL].medianOfComparedMatches);
					cvPutText(merged_images_copy,str,cvPoint(250,merged_images_copy->height-40),&my_font,CV_RGB(255,0,0));

					// Variance of compared values
					sprintf(str,"V=%lf",left_to_right_corresponding_points[iL].Variance());
					cvPutText(merged_images_copy,str,cvPoint(0,merged_images_copy->height-80),&my_font,CV_RGB(0,255,0));

					// Standard deviation of compared values
					sprintf(str,"SD=%lf",sqrt( left_to_right_corresponding_points[iL].Variance() ));
					cvPutText(merged_images_copy,str,cvPoint(250,merged_images_copy->height-80),&my_font,CV_RGB(0,255,0));

					double SD = sqrt( left_to_right_corresponding_points[iL].Variance() ) ;
					double median = left_to_right_corresponding_points[iL].medianOfComparedMatches;
					double compValue = left_to_right_corresponding_points[iL].comparer_value;
					double mark_1_5 = median - 1.5 * SD - compValue;

					// Mark 1.5
					sprintf(str,"m1.5=%lf", mark_1_5);
					cvPutText(merged_images_copy,str,cvPoint(0,merged_images_copy->height-120),&my_font,CV_RGB(0,255,0));

					sprintf(str,"angle=%lf", left_to_right_corresponding_points[iL].degreesBetweenDeltaVector);
					cvPutText(merged_images_copy,str,cvPoint(0,merged_images_copy->height-150),&my_font,CV_RGB(0,255,0));

					

					leftPoint= &(key_points_left[ left_to_right_corresponding_points[iL].comp_pair.first ]);
					rightPoint=&(key_points_right[ left_to_right_corresponding_points[iL].comp_pair.second ]);
				
					cvLine(merged_images_copy,cvPoint(leftPoint->pt.x,leftPoint->pt.y),cvPoint(rightPoint->pt.x+width_part,rightPoint->pt.y),CV_RGB(0,255,0));

					drawEpipolarLinesOnLeftAndRightImages(merged_images_copy, cvPoint(leftPoint->pt.x,leftPoint->pt.y),
						cvPoint(rightPoint->pt.x,rightPoint->pt.y), fundamentalMatrix);

					CvScalar color = CV_RGB(255, 0, 0);
					if(isLeftToRightLineIsAccepted)
					{
						color = CV_RGB(0,255,0);
					}

					cvCircle(merged_images_copy, cvPoint(leftPoint->pt.x,leftPoint->pt.y), 5, color);
					cvCircle(merged_images_copy, cvPoint(rightPoint->pt.x+width_part,rightPoint->pt.y), 5, color);
				}
				//cvLine(merged_images_copy,cvPoint(leftPoint->pt.x,leftPoint->pt.y),cvPoint(rightPoint->pt.x,rightPoint->pt.y),CV_RGB(255,0,255));
				if(iR >= 0 )
				{
					sprintf(str,"%f",right_to_left_corresponding_points[iR].comparer_value);
					cvPutText(merged_images_copy,str,cvPoint(width_part,merged_images_copy->height-40),&my_font,CV_RGB(255,0,0));
					rightPoint2= &(key_points_right [right_to_left_corresponding_points[iR].comp_pair.first]);
					leftPoint2= &(key_points_left [right_to_left_corresponding_points[iR].comp_pair.second]);
					cvLine(merged_images_copy,cvPoint(leftPoint2->pt.x,leftPoint2->pt.y),cvPoint(rightPoint2->pt.x+width_part,rightPoint2->pt.y),CV_RGB(255,0,0));
				}
				//cvLine(merged_images_copy,cvPoint(leftPoint2->pt.x+width_part,leftPoint2->pt.y),cvPoint(rightPoint2->pt.x+width_part,rightPoint2->pt.y),CV_RGB(255,0,255));
				
				cvShowImage("twoSnapshots",merged_images_copy);
				cvReleaseImage(&merged_images_copy);
				char ch2=cvWaitKey(33);
				if(ch2==27)
					break;
				if(ch2=='z' && selectedLeftLine>0)
				{
					selectedLeftLine--;
				}
				if(ch2=='x' && selectedLeftLine<1000)
				{
					selectedLeftLine++;
				}
				if( (ch2 == 'a' || ch2 == 'A') && iL >= 0 )
				{
					acceptedLeftToRightCorrespondings[iL] = true;
				}
				if( (ch2 == 'd' || ch2 == 'D') && iL >= 0 )
				{
					acceptedLeftToRightCorrespondings[iL] = false;
				}
			}//end of while(true)

			SaveAcceptedCorresspondings(
					left_to_right_corresponding_points,
					right_to_left_corresponding_points,
					key_points_left,
					key_points_right,
					acceptedLeftToRightCorrespondings,
					sizeOfAccepptedLeftToRightCorrespondings
					);
			ConvertAcceptedCorresspondingsToMyArray(left_to_right_corresponding_points,
					right_to_left_corresponding_points,
					key_points_left,
					key_points_right,
					acceptedLeftToRightCorrespondings,
					sizeOfAccepptedLeftToRightCorrespondings,
					left_points,
					right_points
					);


			delete[] acceptedLeftToRightCorrespondings;
		}
		if( ch == 'T' || ch == 't')
		{
			clock_t startTime = clock();

			openTwoImages(left_image_file_path, right_image_file_path, left_img, right_img );
			// proceed left
			extractFeaturesFromImage(left_img, min_hessian_value, gray_img_left, key_points_left, descriptors_left);
			//proceed right
			extractFeaturesFromImage(right_img, min_hessian_value, gray_img_right, key_points_right, descriptors_right);	
			//comparison of two images
			if(fundamentalMatrix != 0)
			{
				cvReleaseMat(& fundamentalMatrix);
				fundamentalMatrix = 0;
			}
			left_to_right_corresponding_points.clear();
			right_to_left_corresponding_points.clear();
			
			GetCorrespondingPointsForSURF(key_points_left,descriptors_left,key_points_right,descriptors_right,left_to_right_corresponding_points,right_to_left_corresponding_points);

			// searching fundamental matrix and corresponding points
			findFundamentalMatrixAndCorrespondingPointsForReconstruction(
				left_to_right_corresponding_points,
				right_to_left_corresponding_points,
				fundamentalMatrix,
				key_points_left,
				key_points_right,
				descriptors_left,
				descriptors_right,
				left_img,
				right_img,
				gray_img_left,
				gray_img_right,
				forReconstructionLeftPoints,
				forReconstructionRightPoints,
				min_hessian_value, 450);
			// selecting points for finding model parameters

			int sizeOfAccepptedLeftToRightCorrespondings = left_to_right_corresponding_points.size();
			bool* acceptedLeftToRightCorrespondings = 0;
			getAcceptedCorrespondingsForFindingModelParameters(left_to_right_corresponding_points,
				key_points_left,
				key_points_right,
				fundamentalMatrix,
				acceptedLeftToRightCorrespondings,
				sizeOfAccepptedLeftToRightCorrespondings);

			ConvertAcceptedCorresspondingsToMyArray(left_to_right_corresponding_points,
					right_to_left_corresponding_points,
					key_points_left,
					key_points_right,
					acceptedLeftToRightCorrespondings,
					sizeOfAccepptedLeftToRightCorrespondings,
					left_points,
					right_points
					);

			delete[] acceptedLeftToRightCorrespondings;

			// start process of determination parameters of model and reconstruction of scene
			cv::Mat mat_left_img(left_img, true);
			cv::Mat mat_right_img(right_img, true);
			mainLevenbergMarkvardt_LMFIT(startValueOfFocus, "currentPLYExportFile", left_points, right_points, 
				mat_left_img, mat_right_img,
				forReconstructionLeftPoints, forReconstructionRightPoints);
			mat_left_img.release();
			mat_right_img.release();


			cout << "Code execution time: "<< double( clock() - startTime ) / (double)CLOCKS_PER_SEC<< " seconds." << endl;
		}
		if( ch == 'I' || ch == 'i')
		{	

			//-- Step 3: Matching descriptor vectors using FLANN matcher
			FlannBasedMatcher matcher;
			std::vector< DMatch > matches;
			matcher.match( descriptors_left, descriptors_right, matches );

			//double max_dist = 0; double min_dist = 100;

			////-- Quick calculation of max and min distances between keypoints
			//for( int i = 0; i < descriptors_left.rows; i++ )
			//{ double dist = matches[i].distance;
			//	if( dist < min_dist ) min_dist = dist;
			//	if( dist > max_dist ) max_dist = dist;
			//}

			//printf("-- Max dist : %f \n", max_dist );
			//printf("-- Min dist : %f \n", min_dist );

			//-- Draw only "good" matches (i.e. whose distance is less than 2*min_dist,
			//-- or a small arbitrary value ( 0.02 ) in the event that min_dist is very
			//-- small)
			//-- PS.- radiusMatch can also be used here.
			//std::vector< DMatch > good_matches;
			
			left_to_right_corresponding_points.clear();
			right_to_left_corresponding_points.clear();

			for( int i = 0; i < descriptors_left.rows; i++ )
			{ 
				//if( matches[i].distance <= max(2*min_dist, 0.02) )
				{
					//good_matches.push_back( matches[i]); 
					left_to_right_corresponding_points.push_back( ComparedIndexes(matches[i].distance, pair<int, int> (i, matches[i].trainIdx)) );
				}
			}
			
			cout<< "Count of good matches :" << left_to_right_corresponding_points.size() << endl;

			stable_sort(left_to_right_corresponding_points.begin(),left_to_right_corresponding_points.end(),my_comparator_for_stable_sort);
		}

		//if( ch == 'K' || ch == 'k')
		//{
		//	CvSURFPoint *leftPoint;
		//	//proceed left
		//	gray_img_left=cvCreateImage(cvSize((left_img->width),(left_img->height)),IPL_DEPTH_8U,1);
		//	cvCvtColor(left_img,gray_img_left,CV_RGB2GRAY);
		//	cvExtractSURF(gray_img_left,NULL,&key_points_left,&descriptors_left,mem_stor,cvSURFParams(min_hessian_value,0));

		//	cv::Mat mat_gray_leftImage(gray_img_left, true);
		//	cvReleaseImage(&gray_img_left);
		//	// proceed right
		//	gray_img_right=cvCreateImage(cvSize((right_img->width),(right_img->height)),IPL_DEPTH_8U,1);
		//	cvCvtColor(right_img,gray_img_right,CV_RGB2GRAY);
		//	cv::Mat mat_gray_rightImage(gray_img_right, true);
		//	cvReleaseImage(&gray_img_right);
		//	vector<Point2f> LK_left_points;
		//	vector<Point2f> LK_right_points;

		//	LK_right_points.resize(key_points_left->total);

		//	for( int i = 0; i < key_points_left->total; i++)
		//	{
		//		leftPoint=(CvSURFPoint*)cvGetSeqElem(key_points_left, i);
		//		LK_left_points.push_back(Point2f( leftPoint->pt.x, leftPoint->pt.y));
		//	}
		//	
		//	vector<uchar> status;
  //          vector<float> err;

		//	cv::calcOpticalFlowPyrLK(
		//		mat_gray_leftImage,
		//		mat_gray_rightImage, 
		//		LK_left_points,
		//		LK_right_points, 
		//		status,
		//		err);
		//	int width_part=merged_images->width>>1;
		//	
		//	float minErr = err[0];

		//	for(int k = 0; k < err.size(); k++)
		//	{
		//		if(status[k] && err[k] < minErr) 
		//		{
		//			minErr = err[k];
		//		}
		//	}

		//	cout<< "Lucass Kanade min error: " << minErr<< endl;

		//	int i = 0;
		//	merged_images_copy=cvCreateImage(cvSize(merged_images->width,merged_images->height),merged_images->depth,3);
		//	cvCopy(merged_images,merged_images_copy);
		//	for(; i < LK_left_points.size(); ++i)
		//	{
		//		if(err[i] < 5 * minErr && status[i])
		//		{
		//			cvLine(merged_images_copy,cvPoint(LK_left_points[i].x,LK_left_points[i].y),cvPoint(LK_right_points[i].x+width_part,LK_right_points[i].y),
		//					CV_RGB(100 + (( i *3) % 155), 100+ ((i*7)%155), 100+ ((i*13)%155)));
		//		}
		//	}

		//	cvShowImage("twoSnapshots",merged_images_copy);
		//		
		//	while(true)
		//	{

		//		char ch2=cvWaitKey(33);
		//		if(ch2==27)
		//			break;
		//		
		//	}
		//	
		//	cvReleaseImage(&merged_images_copy);

		//	status.clear();
		//	err.clear();
		//	LK_left_points.clear();
		//	LK_right_points.clear();
		//	mat_gray_leftImage.release();
		//	mat_gray_rightImage.release();
		//}

		if( ch == 'F' || ch == 'f')
		{
			findFundamentalMatrixAndCorrespondingPointsForReconstruction(
				left_to_right_corresponding_points,
				right_to_left_corresponding_points,
				fundamentalMatrix,
				key_points_left,
				key_points_right,
				descriptors_left,
				descriptors_right,
				left_img,
				right_img,
				gray_img_left,
				gray_img_right,
				forReconstructionLeftPoints,
				forReconstructionRightPoints,
				min_hessian_value);


		}
		if( ch == 'P' || ch == 'p')
		{
			cv::Mat mat_left_img(left_img, true);
			cv::Mat mat_right_img(right_img, true);
			mainLevenbergMarkvardt_LMFIT(startValueOfFocus, "currentPLYExportFile", left_points, right_points, 
				mat_left_img, mat_right_img,
				forReconstructionLeftPoints, forReconstructionRightPoints);
			mat_left_img.release();
			mat_right_img.release();
		}
		if(merged_images!=0)
		{
			cvShowImage("twoSnapshots",merged_images);
		}
		
	}

	return 0;
}
Example #17
void CBIR::buildDescriptorsMatrix(QString path){
    QString rootG = "../img/keepcon2-g/";
    //QString root = "../img/keepcon2/";
    QString root = path;
    QStringList images;
    QStringList fileDirs;

    //Load all the images.
    QDir dir(root);
    dir.setFilter(QDir::Files | QDir::NoDotAndDotDot);
    dir.setSorting(QDir::Name);
    images << dir.entryList();

    //Load the images from the specified groups.
    /*
    fileDirs << "auto" << "converse" << "doki";
    for(int i=0; i<fileDirs.count(); i++){
        QDir dir(rootG + fileDirs[i] + "/");
        dir.setFilter(QDir::Files | QDir::NoDotAndDotDot);
        dir.setSorting(QDir::Name | QDir::Reversed);
        images << dir.entryList();
    }
    */

    //Load specific images.
    //images << "auto.jpg" << "auto-f.jpg" << "converse.jpg";
    cout << "Number of images: " << images.count() << endl;

    //int length = (int)(imgDescriptors->elem_size/sizeof(float));
    //descriptorsMat = cv::Mat(1106, DESCRIPTOR_DIMS, CV_32F);
    //float* img_ptr = descriptorsMat.ptr<float>(0);
    //CvSeqReader img_reader;

    CvSeq* totalDescriptors = 0;
    CvSeq* nextSeq = 0;

    IplImage* img;
    CvSeq *imgKeypoints, *imgDescriptors;
    CvSURFParams params = cvSURFParams(500, 1);
    //Compute features for each image
    for(int i=0; i<images.count(); i++){
        img = NULL;
        imgKeypoints = 0;
        imgDescriptors = 0;
        qDebug() << "Attempting to load image" << images[i] << ".";
        img = Utils::loadImage((root + images[i]).toAscii().data(), true);
        if(img != NULL){
            CvMemStorage* storage = cvCreateMemStorage(0);
            cvExtractSURF(img, 0, &imgKeypoints, &imgDescriptors, storage, params);
            qDebug() << "Image" << images[i] << "loaded successfully. Features:" << imgKeypoints->total;
            featuresCount.append(QPair<QString, int>(images[i], imgDescriptors->total));

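            // Each image's CvMemStorage must stay alive here: imgDescriptors
            // lives inside it. The storages are released one by one after the
            // copy loop below, via deallocateSeq->storage.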
            //Link the new descriptor sequence into the list.
            if(totalDescriptors == 0){
                totalDescriptors = imgDescriptors;
                nextSeq = totalDescriptors;
            }else{
                nextSeq->h_next = imgDescriptors;
                nextSeq = nextSeq->h_next;                
            }
            //Copy the descriptors into the feature matrix
            /*
            cvStartReadSeq(imgDescriptors, &img_reader);
            for(int j=0; j<imgDescriptors->total; j++){ // j<1 to load a single descriptor per image (testing).
                const float* descriptor = (const float*)img_reader.ptr;
                CV_NEXT_SEQ_ELEM(img_reader.seq->elem_size, img_reader);
                memcpy(img_ptr, descriptor, DESCRIPTOR_DIMS*sizeof(float));
                img_ptr += DESCRIPTOR_DIMS;
            }
            */
            //cvReleaseMemStorage(&storage);
            cvReleaseImage(&img);
        }
    }    
    int descriptorsCount = 0;
    int sequencesCount = 0;
    CvSeq* iterateSeq = totalDescriptors;

    while(iterateSeq != nextSeq){
        descriptorsCount += iterateSeq->total;
        iterateSeq = iterateSeq->h_next;
        sequencesCount++;
    }
    if(iterateSeq != 0){
        descriptorsCount += iterateSeq->total;
        sequencesCount++;
    }
    qDebug() << "Total de secuencias:" << sequencesCount;
    qDebug() << "Total de descriptores:" << descriptorsCount;

    //Create the descriptor matrix now that the total count is known.
    descriptorsMat = cv::Mat(descriptorsCount, DESCRIPTOR_DIMS, CV_32F);
    float* img_ptr = descriptorsMat.ptr<float>(0);
    CvSeqReader img_reader;

    //Copy the descriptors from the sequence list into the matrix.
    iterateSeq = totalDescriptors;
    CvSeq* deallocateSeq = 0;
    for(int i=0; i<sequencesCount; i++){
        deallocateSeq = iterateSeq;
        cvStartReadSeq(iterateSeq, &img_reader);
        int j;
        for(j=0; j<iterateSeq->total; j++){
            const float* descriptor = (const float*)img_reader.ptr;
            CV_NEXT_SEQ_ELEM(img_reader.seq->elem_size, img_reader);
            memcpy(img_ptr, descriptor, DESCRIPTOR_DIMS*sizeof(float));
            img_ptr += DESCRIPTOR_DIMS;
        }
        iterateSeq = iterateSeq->h_next;
        //Verify that the memory is actually released here.
        cvReleaseMemStorage(&deallocateSeq->storage);
    }
}
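
When the per-image sequence chaining is not needed, a single image's descriptor sequence can be flattened straight into a cv::Mat with cvCvtSeqToArray (the same call used in the Classifier::extract example below). A minimal sketch; 'dims' must match the extended flag passed to cvSURFParams (128 when extended, 64 otherwise):

#include <opencv2/core/core.hpp>
#include <opencv2/core/core_c.h>

cv::Mat descriptorsToMat(CvSeq* descriptors, int dims)
{
    // One row per descriptor; cvCvtSeqToArray copies the whole sequence
    // into the matrix's contiguous float buffer.
    cv::Mat mat(descriptors->total, dims, CV_32F);
    cvCvtSeqToArray(descriptors, mat.ptr<float>(0));
    return mat;
}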
Ejemplo n.º 18
0
void Features::getFeatures(IplImage* img, CvSeq* &keypoints, CvSeq* &descriptors){
    keypoints=0; descriptors=0;
    // Note: the storage handle is not kept, so the caller can never release
    // the extracted sequences; the memory lives until the program exits.
    CvMemStorage* storage = cvCreateMemStorage(0);
    CvSURFParams params = cvSURFParams(500, 1);
    cvExtractSURF(img, 0, &keypoints, &descriptors, storage, params);    
}
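
Because the helper discards its CvMemStorage handle, nothing can ever free the sequences it creates. A leak-free variant is sketched below; the signature is hypothetical, not part of the project:

// Sketch only (hypothetical helper): hands storage ownership to the caller,
// who releases it once done with both sequences.
CvMemStorage* getFeaturesOwned(IplImage* img, CvSeq*& keypoints, CvSeq*& descriptors)
{
    keypoints = 0; descriptors = 0;
    CvMemStorage* storage = cvCreateMemStorage(0);
    CvSURFParams params = cvSURFParams(500, 1);
    cvExtractSURF(img, 0, &keypoints, &descriptors, storage, params);
    return storage;
}

// Usage:
//   CvSeq *kp, *des;
//   CvMemStorage* st = getFeaturesOwned(img, kp, des);
//   ... use kp/des ...
//   cvReleaseMemStorage(&st);   // frees both sequences at once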
Ejemplo n.º 19
0
bool Classifier::extract(TTrainingFileList& fileList)
{
    cout << "Classes:" << endl;
    for (int i = 0; i < (int)fileList.classes.size(); i++) {
        cout << fileList.classes[i] << " (";
        int count = 0;
        for (int j = 0; j < (int)fileList.files.size(); j++) {
            if (fileList.files[j].label == fileList.classes[i]) {
                count += 1;
            }
        }
        cout << count << " samples)" << endl;
    }
    cout << endl;

    IplImage *image;
    
    int maxImages = INT_MAX;

    cout << "Processing images..." << endl;

    //vector<int> numIpoints(kNumObjectTypes);
    int minfiles = min(maxImages, (int)fileList.files.size());
    for (int i = 0; i < minfiles; i++) 
    {
        // skip too many others...
        if (fileList.files[i].label == "other" && --max_others < 0) continue;

        // show progress
        if (i % 10 == 0) showProgress(i, minfiles);

        image = cvLoadImage(fileList.files[i].filename.c_str(), 0);
        
        if (image == NULL) {
            cerr << "ERROR: could not load image " << fileList.files[i].filename.c_str() << endl;
            continue;
        }
        
        string label = fileList.files[i].label;

        CvSeq *keypoints = 0, *descriptors = 0;
        CvSURFParams params = cvSURFParams(100, SURF_SIZE == 128);
        cvExtractSURF(image, 0, &keypoints, &descriptors, storage, params);
        
        if (descriptors->total == 0) { cvReleaseImage(&image); continue; }
        
        vector<float> desc;
        desc.resize(descriptors->total * descriptors->elem_size/sizeof(float));
        cvCvtSeqToArray(descriptors, &desc[0]);
        
        vector<float *> features;
        int where = 0;
        for (int pt = 0; pt < keypoints->total; ++pt) {
            float *f = new float[SURF_SIZE];
            for (int j = 0; j < SURF_SIZE; ++j) {
                f[j] = desc[where];
                ++where;
            }
            features.push_back(f);
        }
        
        if (i % test_to_train == 1 || !test_on) 
            set_train.push(stringToClassInt(label), features);
        else set_test.push(stringToClassInt(label), features);

        // the descriptors have been copied out, so the image can be freed
        cvReleaseImage(&image);
    }
    
    
    cout << endl << "Extracted surf features. " << endl;

    cout << "Train Set" << endl;
    set_train.recount();
    set_train.stats();
    
    cout << "Test Set" << endl;
    set_train.recount();
    set_test.stats();
    
    return true;
}
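
Note how cvSURFParams(100, SURF_SIZE == 128) ties the extended flag to the compiled descriptor length. A standalone sanity-check sketch (the header location for the C SURF API varies across 2.x releases; this assumes OpenCV 2.4 with the nonfree module available):

#include <cassert>
#include <opencv2/legacy/compat.hpp>   // declares cvExtractSURF in OpenCV 2.4

// The 'extended' flag (2nd argument of cvSURFParams) selects the descriptor
// length: 1 -> 128 floats per keypoint, 0 -> 64 floats.
static void checkDescriptorSize(IplImage* gray)
{
    CvSeq *kp = 0, *des = 0;
    CvMemStorage* st = cvCreateMemStorage(0);
    cvExtractSURF(gray, 0, &kp, &des, st, cvSURFParams(100, 1));
    if (des->total > 0)
        assert(des->elem_size == 128 * (int)sizeof(float));
    cvReleaseMemStorage(&st);
}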
Ejemplo n.º 20
0
static void computeVectors( IplImage* img, IplImage* dst, short wROI, short hROI){
	if(DEBUG){
		std::cout << "-- VECTOR COMPUTING" << std::endl;		
	}
	double timestamp = (double)clock()/CLOCKS_PER_SEC; // get current time in seconds
	CvSize size = cvSize(img->width,img->height); // get current frame size 640x480
	int i, idx1 = last, idx2;
	CvSeq* seq;
	CvRect comp_rect;
	CvRect roi;
	double count;
	double angle;
	CvPoint center;
	double magnitude;
	CvScalar color;

	//--SURF CORNERS--
	if(DEBUG){
		std::cout << "--- SURF CORNERS" << std::endl;
	}
	color = CV_RGB(0,255,0);
	CvMemStorage* storage2 = cvCreateMemStorage(0);
	CvSURFParams params = cvSURFParams(SURF_THRESHOLD, 1);
	CvSeq *imageKeypoints = 0, *imageDescriptors = 0;
	cvExtractSURF( dst, 0, &imageKeypoints, &imageDescriptors, storage2, params );
	if(DEBUG){
		printf("Image Descriptors: %d\n", imageDescriptors->total);
	}
	for( int j = 0; j < imageKeypoints->total; j++ ){
		CvSURFPoint* r = (CvSURFPoint*)cvGetSeqElem( imageKeypoints, j );
		center.x = cvRound(r->pt.x);
		center.y = cvRound(r->pt.y);
		if(DEBUG){
			printf("j: %d \t", j);
			printf("total: %d \t", imageKeypoints->total);
			printf("hessian value: %f \t", r->hessian);
			printf("x: %d \t", center.x);
			printf("y: %d \n", center.y);
		}
		// Mark the point inside the region of interest
		cvCircle( dst, center, cvRound(r->hessian*0.02), color, 3, CV_AA, 0 );
		// Fill the matrix with the vectors
		relevancePointToVector(center.x, center.y, wROI, hROI, 5);
	}
	cvReleaseMemStorage( &storage2 ); // keypoints/descriptors are no longer needed
	//--SURF CORNERS


	// calculate motion gradient orientation and valid orientation mask
	cvCalcMotionGradient( mhi, mask, orient, MAX_TIME_DELTA, MIN_TIME_DELTA, 3 );
	
	// Compute Motion on 4x4 Cuadrants
	if(DEBUG){
		std::cout << "--- MOTION CUADRANTS" << std::endl;
	}
	i = 25;
	color = CV_RGB(255,0,0);
	magnitude = 30;
	for (int r = 0; r < size.height; r += hROI){
		for (int c = 0; c < size.width; c += wROI){
			comp_rect.x = c;
			comp_rect.y = r;
			comp_rect.width = (c + wROI > size.width) ? (size.width - c) : wROI;
			comp_rect.height = (r + hROI > size.height) ? (size.height - r) : hROI;

			cvSetImageROI( mhi, comp_rect );
			cvSetImageROI( orient, comp_rect );
			cvSetImageROI( mask, comp_rect );
			cvSetImageROI( silh, comp_rect );
			cvSetImageROI( img, comp_rect );

			// Process Motion
			angle = cvCalcGlobalOrientation( orient, mask, mhi, timestamp, MHI_DURATION);
			angle = 360.0 - angle;  // adjust for images with top-left origin
			count = cvNorm( silh, 0, CV_L1, 0 ); // calculate number of points within silhouette ROI
			roi = cvGetImageROI(mhi);
			center = cvPoint( (comp_rect.x + comp_rect.width/2),
					  (comp_rect.y + comp_rect.height/2) );
			cvCircle( dst, center, cvRound(magnitude*1.2), color, 3, CV_AA, 0 );
			cvLine( dst, center, cvPoint( cvRound( center.x + magnitude*cos(angle*CV_PI/180)),
			cvRound( center.y - magnitude*sin(angle*CV_PI/180))), color, 3, CV_AA, 0 );	
			
			if(DEBUG){
				std::cout << "Motion " << i << " -> x: " << roi.x << " y: " << roi.y << " count: " << count << " angle: " << angle << std::endl; // print the roi
			}
			cvResetImageROI( mhi );
			cvResetImageROI( orient );
			cvResetImageROI( mask );
			cvResetImageROI( silh );
			cvResetImageROI(img);
			relevanceDirectionToVector(i, angle);
			++i;
		}
	}

	// Compute Global Motion
	if(DEBUG){
		std::cout << "--- MOTION GLOBAL" << std::endl;
	}
	comp_rect = cvRect( 0, 0, size.width, size.height );
	color = CV_RGB(255,255,255);
	magnitude = 100;
	angle = cvCalcGlobalOrientation( orient, mask, mhi, timestamp, MHI_DURATION);
	angle = 360.0 - angle;  // adjust for images with top-left origin
	count = cvNorm( silh, 0, CV_L1, 0 ); // calculate number of points within silhouette ROI
	roi = cvGetImageROI(mhi);
	center = cvPoint( (comp_rect.x + comp_rect.width/2),
			  (comp_rect.y + comp_rect.height/2) );
	cvCircle( dst, center, cvRound(magnitude*1.2), color, 3, CV_AA, 0 );
	cvLine( dst, center, cvPoint( cvRound( center.x + magnitude*cos(angle*CV_PI/180)),
	cvRound( center.y - magnitude*sin(angle*CV_PI/180))), color, 3, CV_AA, 0 );
	if(DEBUG){
		std::cout << "Motion Main-> x: " << roi.x << " y: " << roi.y << " count: " << count << std::endl; // print the roi
	}
	relevanceDirectionToVector(50, angle);
}
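
computeVectors relies on the global mhi and silh images already holding the current frame's motion data. A minimal sketch of the usual per-frame update that would precede it, assuming 8-bit grayscale frames and the same globals (the threshold value is illustrative):

// Sketch of the per-frame update step that must run before computeVectors:
// build a silhouette by frame differencing, then fold it into the MHI.
// cvUpdateMotionHistory lives in the video module in OpenCV 2.4.
void updateMotionTemplates(IplImage* curr, IplImage* prev, double timestamp)
{
	cvAbsDiff(curr, prev, silh);                          // frame difference -> silhouette
	cvThreshold(silh, silh, 30, 1, CV_THRESH_BINARY);     // binarize (30 is illustrative)
	cvUpdateMotionHistory(silh, mhi, timestamp, MHI_DURATION); // fold into the MHI
}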
Ejemplo n.º 21
0
bool Classifier::run(const IplImage *frame, CObjectList *objects, bool scored)
{
    double xDiff = 0, yDiff = 0;
    optical_flow(frame, &xDiff, &yDiff);
    
    totalXDiff += xDiff;
    totalYDiff += yDiff;
    if (!scored)
        return true;
    
    cout << "--------------------------------------" << endl;
    cout << "\t\tRun" << endl;
    
    assert((frame != NULL) && (objects != NULL));
    
    printf("Let's go!\n");
    
    for (int i = 0; i < (int)prevObjects.size(); ++i) {
        if (prevObjects[i].rect.x > -20 && prevObjects[i].rect.x < frame->width 
         && prevObjects[i].rect.y > -20 && prevObjects[i].rect.y < frame->height) {
            objects->push_back(prevObjects[i]);
            cout << prevObjects[i].label << " is now at (" << prevObjects[i].rect.x << ", " << prevObjects[i].rect.y << ")" << endl;
        }
    }
    
    //printf("HEY OPTICAL FLOW!!!! %f %f\n", totalXDiff, totalYDiff);
    
    // move old objects
    for (int i = 0; i < (int)objects->size(); ++i) {
        (*objects)[i].rect.x -= totalXDiff * 3;
        (*objects)[i].rect.y -= totalYDiff * 3;
    }
    
    cout << "Flow: " << totalXDiff << " " << totalYDiff << endl;
    totalYDiff = 0;
    totalXDiff = 0;
    
    
    // Convert to grayscale.
    IplImage *gray  = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 1);
    cvCvtColor(frame, gray, CV_BGR2GRAY);
    
    // Resize by half first, as per the handout.
    double scale = 2.0;
    IplImage *dst = cvCreateImage(cvSize(gray->width  / scale, gray->height  / scale), gray->depth,  gray->nChannels);
    cvResize(gray, dst);

    printf("About to do SURF\n");
    CvSeq *keypoints = 0, *descriptors = 0;
    CvSURFParams params = cvSURFParams(100, SURF_SIZE == 128);
    cvExtractSURF(dst, 0, &keypoints, &descriptors, storage, params);
    
    cout << "desc: " << descriptors->total << endl;
    if (descriptors->total == 0) {
        cvReleaseImage(&gray);
        cvReleaseImage(&dst);
        return false;
    }
    
    vector<float> desc;
    desc.resize(descriptors->total * descriptors->elem_size/sizeof(float));
    cvCvtSeqToArray(descriptors, &desc[0]);
    
    vector<CvSURFPoint> keypts;
    keypts.resize(keypoints->total);
    cvCvtSeqToArray(keypoints, &keypts[0]);
    
    vector<float *> features;
    int where = 0;
    for (int pt = 0; pt < keypoints->total; ++pt) {
        float *f = new float[SURF_SIZE];
        for (int j = 0; j < SURF_SIZE; ++j) {
            f[j] = desc[where];
            ++where;
        }
        features.push_back(f);
    }
    printf("Done SURF\n");

    printf("Clustering...\n");
    vector<int> cluster(features.size());
    for (int i = 0; i < (int)features.size(); ++i) {
        cluster[i] = best_cluster(centers, features[i]);
    }
    printf("Done clustering...\n");
    
    vector<FoundObject> newObjects;
    run_boxscan(dst, cluster, keypts, features, newObjects, objects);
    for (int i = 0; i < (int)newObjects.size(); ++i) {
        if (newObjects[i].object.rect.x > -20 && newObjects[i].object.rect.x < frame->width 
         && newObjects[i].object.rect.y > -20 && newObjects[i].object.rect.y < frame->height) {
            objects->push_back(newObjects[i].object);
            cout << "Found object: " << newObjects[i].object.label << " at (" ;
            cout << newObjects[i].object.rect.x << ", " << newObjects[i].object.rect.y << ")" << endl;
        }
    }
    
    prevObjects = *objects;
    
    cvReleaseImage(&gray);
    cvReleaseImage(&dst);
  
    return true;
}
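
best_cluster is not part of this listing; one plausible nearest-centroid implementation is sketched below, assuming centers holds one SURF_SIZE-dimensional float array per vocabulary word (FLT_MAX requires <cfloat>):

#include <cfloat>
#include <vector>
using std::vector;

// Hypothetical sketch of best_cluster: return the index of the centroid
// with the smallest squared Euclidean distance to the descriptor f.
int best_cluster(const vector<float*>& centers, const float* f)
{
    int best = 0;
    float bestDist = FLT_MAX;
    for (int c = 0; c < (int)centers.size(); ++c) {
        float d = 0;
        for (int j = 0; j < SURF_SIZE; ++j) {
            float diff = centers[c][j] - f[j];
            d += diff * diff;
        }
        if (d < bestDist) { bestDist = d; best = c; }
    }
    return best;
}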