Example No. 1
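The trackbar state and callbacks referenced by this listing are not shown; a minimal sketch consistent with the calls below (variable names taken from the code, bodies assumed):

// Assumed file-scope trackbar state (not part of the original listing):
// the callbacks copy the slider position into the values that the main
// loop switches on.
int angle_switch_value = 0, angleInt = 0;
int scale_switch_value = 0, scaleInt = 0;

void switch_callback_a( int position ) { angleInt = position; }
void switch_callback_s( int position ) { scaleInt = position; }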
int main(int argc, char* argv[])
{
	// Set up variables
	CvPoint2D32f srcTri[3], dstTri[3];
	CvMat* rot_mat = cvCreateMat(2,3,CV_32FC1);
	CvMat* warp_mat = cvCreateMat(2,3,CV_32FC1);
	IplImage *src, *dst;
	const char* name = "Affine_Transform";

	// Load image
	src = cvLoadImage("airplane.jpg");
	if( !src ) {
		fprintf(stderr, "Could not load airplane.jpg\n");
		return 1;
	}
	dst = cvCloneImage( src );
	dst->origin = src->origin;
	cvZero( dst );
	cvNamedWindow( name, 1 );

	// Create angle and scale
	double angle = 0.0;
	double scale = 1.0;

	// Create trackbars
	cvCreateTrackbar( "Angle", name, &angle_switch_value, 4, switch_callback_a );
	cvCreateTrackbar( "Scale", name, &scale_switch_value, 4, switch_callback_s );

	// Compute warp matrix
	srcTri[0].x = 0;
	srcTri[0].y = 0;
	srcTri[1].x = src->width - 1;
	srcTri[1].y = 0;
	srcTri[2].x = 0;
	srcTri[2].y = src->height - 1;

	dstTri[0].x = src->width*0.0;
	dstTri[0].y = src->height*0.25;
	dstTri[1].x = src->width*0.90;
	dstTri[1].y = src->height*0.15;
	dstTri[2].x = src->width*0.10;
	dstTri[2].y = src->height*0.75;

	cvGetAffineTransform( srcTri, dstTri, warp_mat );
	cvWarpAffine( src, dst, warp_mat );
	cvCopy ( dst, src );

	while( 1 ) {
		switch( angleInt ){
			case 0:
				angle = 0.0;
				break;
			case 1:
				angle = 20.0;
				break;
			case 2:
				angle = 40.0;
				break;
			case 3:
				angle = 60.0;
				break;
			case 4:
				angle = 90.0;
				break;
		}
		switch( scaleInt ){
			case 0:
				scale = 1.0;
				break;
			case 1:
				scale = 0.8;
				break;
			case 2:
				scale = 0.6;
				break;
			case 3:
				scale = 0.4;
				break;
			case 4:
				scale = 0.2;
				break;
		}

		// Compute rotation matrix
		CvPoint2D32f center = cvPoint2D32f( src->width/2, src->height/2 );
		cv2DRotationMatrix( center, angle, scale, rot_mat );

		// Do the transformation
		cvWarpAffine( src, dst, rot_mat );

		cvShowImage( name, dst );

		if( cvWaitKey( 15 ) == 27 )
			break;
	}

	cvReleaseImage( &dst );
	cvReleaseImage( &src );
	cvReleaseMat( &rot_mat );
	cvReleaseMat( &warp_mat );

	return 0;
}
Example No. 2
int main(int argc, char** argv)
{
    // use an ArgumentParser object to manage the program arguments.
    osg::ArgumentParser arguments(&argc,argv);

    // set up the usage document, in case we need to print out how to use this program.
    arguments.getApplicationUsage()->setApplicationName(arguments.getApplicationName());
    arguments.getApplicationUsage()->setDescription(arguments.getApplicationName()+" example demonstrates the use of ImageStream for rendering movies as textures.");
    arguments.getApplicationUsage()->setCommandLineUsage(arguments.getApplicationName()+" [options] filename ...");
    arguments.getApplicationUsage()->addCommandLineOption("-h or --help","Display this information");
    arguments.getApplicationUsage()->addCommandLineOption("--texture2D","Use Texture2D rather than TextureRectangle.");
    arguments.getApplicationUsage()->addCommandLineOption("--shader","Use shaders to post process the video.");
    arguments.getApplicationUsage()->addCommandLineOption("--interactive","Use camera manipulator to allow movement around movie.");
    arguments.getApplicationUsage()->addCommandLineOption("--flip","Flip the movie so top becomes bottom.");


    // construct the viewer.
    osgViewer::Viewer viewer(arguments);

    if (arguments.argc()<=1)
    {
        arguments.getApplicationUsage()->write(std::cout,osg::ApplicationUsage::COMMAND_LINE_OPTION);
        return 1;
    }

    osg::ref_ptr<osg::Group> root = new osg::Group;



    /*    osg::Light* light = new osg::Light();
    light->setPosition(osg::Vec4d(-500.0, 1000.0, 500.0, 1.0));
    light->setDirection(osg::Vec3d(5.0, -10.0, -5.0));
    light->setSpotCutoff(70);
    light->setAmbient(osg::Vec4d(0.05, 0.05, 0.05, 1.0));
    light->setDiffuse(osg::Vec4d(0.5, 0.5, 0.5, 1.0));
    //light->setQuadraticAttenuation(0.001);

    osg::LightSource* lightSource = new osg::LightSource();
    lightSource->setLight(light);

    //osg::Light * attachedlight = lightSource->getLight();

    //attache light to root group
    root->addChild(lightSource);

	//activate light
	osg::StateSet* stateSet = root->getOrCreateStateSet();
    lightSource->setStateSetModes(*stateSet, osg::StateAttribute::ON);

    osg::StateSet* stateset = root->getOrCreateStateSet();
    stateset->setMode(GL_LIGHTING,osg::StateAttribute::ON);
*/
    osg::ref_ptr<osg::Geode> geode = new osg::Geode;



    //OpenCV-AR
    CvCapture* cameraCapture;
    CvCapture* fileCapture;
    //cameraCapture = cvCreateCameraCapture(0);

    fileCapture = cvCreateFileCapture("video/whal.avi");
    cameraCapture = fileCapture;

    if(!cameraCapture) {
        fprintf(stderr,"OpenCV: Create camera capture failed\n");
        return 1;
    }

    //printf("%f\n", cvGetCaptureProperty(cameraCapture, CV_CAP_PROP_FPS));

    //cvSetCaptureProperty(cameraCapture, CV_CAP_PROP_FRAME_WIDTH, 1280);
    //cvSetCaptureProperty(cameraCapture, CV_CAP_PROP_FRAME_HEIGHT, 960);
    //cvSetCaptureProperty(cameraCapture, CV_CAP_PROP_FPS, 15);

	IplImage* frame = cvQueryFrame(cameraCapture);
	if (!frame) {
		fprintf(stderr, "OpenCV: Could not read the first frame\n");
		return 1;
	}
	IplImage* flipFrame = cvCreateImage(cvGetSize(frame), frame->depth, frame->nChannels);



    //osg::Image* image = osgDB::readImageFile("aclib-large.png");
    osg::Image* image = new osg::Image();
    //image->setPixelBufferObject( new osg::PixelBufferObject(image));

    image->setDataVariance( osg::Object::DYNAMIC );
    iplImageToOsgImage(flipFrame, image);

    //load model
    osg::ref_ptr<osg::PositionAttitudeTransform> modelPat = new osg::PositionAttitudeTransform();

    //osg::ref_ptr<osg::Node> loadedModel = osgDB::readNodeFile("models/Cars/AstonMartin-DB9.3ds");
    osg::ref_ptr<osg::Node> loadedModel = osgDB::readNodeFile("models/ferrari_car_2.osg");
    modelPat->addChild(loadedModel);
    modelPat->setScale(osg::Vec3(0.5, 0.5, 0.5));
    modelPat->setAttitude(osg::Quat(3.14 / 2, osg::Vec3d(-1.0, 0.0, 0.0)));

    if (!loadedModel) {
    	std::cout << "No model data loaded" << std::endl;
    	return 1;
    }



    //C_BODY

    std::vector<osg::MatrixTransform*> topMtList = getMatrixTransformListByName("C_TOP", loadedModel);
    std::vector<osg::MatrixTransform*> leftDoorMtList = getMatrixTransformListByName("C_LDOOR", loadedModel);
    std::vector<osg::MatrixTransform*> rightDoorMtList = getMatrixTransformListByName("C_RDOOR", loadedModel);
    std::vector<osg::MatrixTransform*> leftWheelsMtList = getMatrixTransformListByName("C_LWS", loadedModel);
    std::vector<osg::MatrixTransform*> rightWheelsMtList = getMatrixTransformListByName("C_RWS", loadedModel);
    std::vector<osg::MatrixTransform*> forwardBumperMtList = getMatrixTransformListByName("C_BUMP_F", loadedModel);
    std::vector<osg::MatrixTransform*> backBumperMtList = getMatrixTransformListByName("C_BUMP_B", loadedModel);
    std::vector<osg::MatrixTransform*> engineMtList = getMatrixTransformListByName("C_ENGINE", loadedModel);
    std::vector<osg::MatrixTransform*> bodyMtList = getMatrixTransformListByName("C_BODY", loadedModel);
    std::vector<osg::MatrixTransform*> salonMtList = getMatrixTransformListByName("C_SALON", loadedModel);

    /*    //findNodeVisitor findNode("C_BODY");
    FindNamedNode findNode("C_BODY");
    loadedModel->accept(findNode);

    std::vector<osg::Node*> foundNodeList = findNode.getNodeList();
    int listCount = foundNodeList.size();
    printf("%d\n", listCount);
    std::vector<osg::MatrixTransform*> bodyMtList;

    //vector<int>::const_iterator i;
    for(int i = 0; i < listCount; i++) {
        bodyMtList.push_back(new osg::MatrixTransform());
        //obj4Mt->setName("obj4Mt");

        osg::Group* foundNodeParent = foundNodeList[i]->getParent(0);

        bodyMtList[i]->addChild(foundNodeList[i]);

        foundNodeParent->addChild(bodyMtList[i]);

        foundNodeParent->removeChild(foundNodeList[i]);
    }
*/
    osg::Matrix translateMatrix;


    //osg::Node* foundNode = NULL;
    //foundNode = findNamedNode("obj5", loadedModel);

    //osg::ref_ptr<osg::MatrixTransform> obj5Mt = new osg::MatrixTransform();
    //obj4Mt->setName("obj5Mt");

    //osg::Group* foundNodeParent = foundNode->getParent(0);

    //obj5Mt->addChild(foundNode);

    //foundNodeParent->addChild(obj5Mt);

    //foundNodeParent->removeChild(foundNode);


    osg::Matrix rotateMatrix;
    float theta(M_PI * 0.1f);
    osg::Vec3f axis (1.0, 1.0, 0.1);
    osg::Quat wheelAxis( theta, axis);



    osg::BoundingSphere modelBoundingSphere = modelPat->getBound();
    printf("%f\n", modelBoundingSphere.radius());
    modelBoundingSphere.radius() *= 1.5f;

    osg::BoundingBox modelBoundingBox;
    modelBoundingBox.expandBy(modelBoundingSphere);



    //Light group

    //create light
    root->addChild(createLights(modelBoundingBox, root->getOrCreateStateSet()));


    //collect scene

    // only clear the depth buffer
    viewer.getCamera()->setClearMask(GL_DEPTH_BUFFER_BIT);

    // create a HUD as slave camera attached to the master view.

    viewer.setUpViewAcrossAllScreens();

    osgViewer::Viewer::Windows windows;
    viewer.getWindows(windows);

    if (windows.empty()) return 1;

    osg::Camera* screenCamera = createScreen(image);

    // set up cameras to rendering on the first window available.
    screenCamera->setGraphicsContext(windows[0]);
    screenCamera->setViewport(0,0,windows[0]->getTraits()->width, windows[0]->getTraits()->height);
    //screenCamera->setViewport(0, 0, 6.4, 4.8);


    viewer.addSlave(screenCamera, false);


    //root->addChild( geode.get());
    //root->addChild( createPyramid());
    //root->addChild( createScreen());//100.0, 100.0, image));
    root->addChild(modelPat);
    //root->addChild(objectPat);

    // set the scene to render
    viewer.setSceneData(root.get());



        viewer.realize();
        
        viewer.getCamera()->setClearColor(osg::Vec4(0.0f,0.0f,0.0f,1.0f));
/*
        //viewer.getCamera()->setProjectionMatrixAsOrtho(topleft.x(),bottomright.x(),topleft.y(),bottomright.y(), -10, 10);
        //viewer.getCamera()->setProjectionMatrixAsPerspective(60.0, screenAspectRatio, 100.0, -1.0);
*/
        viewer.getCamera()->setViewMatrixAsLookAt(osg::Vec3d(100.0, 100.0, 100.0), osg::Vec3d(0.0, 0.0, 0.0), osg::Vec3d(0.0, 1.0, 0.0));






        //Define vector of OpenCV-AR template
        vector<CvarTemplate> openCvArTemplateList;


        //load template
        CvarTemplate openCvArTemplate1;
        cvarLoadTemplateTag(&openCvArTemplate1,"aclib.png");
        //cvarLoadTemplateTag(&openCvArTemplate1,"markers/431.jpg");
        openCvArTemplateList.push_back(openCvArTemplate1);

        CvarTemplate openCvArTemplate2;
        cvarLoadTemplate(&openCvArTemplate2,"aclib.png",1);
        //cvarLoadTemplate(&openCvArTemplate2,"markers/431.jpg", 1);
        openCvArTemplateList.push_back(openCvArTemplate2);

        //Define OpenCV-AR marker;
        vector<CvarMarker> openCvArMarker;

        //Create OpenCV-AR camera
        CvarCamera openCvArCamera;
        //IplImage* frame = osgImage2IplImage(image);

        //cvarReadCamera("camera.yml", &openCvArCamera);
        cvarReadCamera(NULL, &openCvArCamera);
        cvarCameraScale(&openCvArCamera,frame->width,frame->height);
        viewer.getCamera()->setProjectionMatrix(osg::Matrixd(openCvArCamera.projection));


        //CvarOpticalFlow *flow;

       // srand(time(NULL));

        //int thresh = 60;
        double matchThresh = 0.7;
        //int state = 0;


        int counter = 0;
        while(!viewer.done())
        {
        	counter++;

        	char c = 0;//cvWaitKey(33);
        	//printf("%d\n", c);
        	if (c == 27) { // ESC pressed
        		printf("esc\n");
        		break;
        	}
        	if (c == 107) { // matchThresh up
        	  	matchThresh = matchThresh + 0.01;
        	}
        	if (c == 106) { // matchThresh down
        		matchThresh = matchThresh - 0.01;
        	}


        	if ((counter >= 300) and (counter < 310)) { // apply the exploded-view offsets during frames 300-309
        		//Top
        		translateMatrixTransformList(topMtList, 0.0, -1.2, 0.0);

        		//Engine
        		translateMatrixTransformList(engineMtList, 0.0, -1.0, 0.0);

        		//Body
        		translateMatrixTransformList(bodyMtList, 0.0, -0.8, 0.0);

        		//Salon
        		translateMatrixTransformList(salonMtList, 0.0, -0.4, 0.0);


        		//leftWeels
        		translateMatrixTransformList(leftWheelsMtList, -0.5, 0.0, 0.0);

        		//rightWeels
        		translateMatrixTransformList(rightWheelsMtList, 0.5, 0.0, 0.0);

        		//Left doors
        		translateMatrixTransformList(leftDoorMtList, -0.5, 0.0, 0.0);

        		//Right doors
           		translateMatrixTransformList(rightDoorMtList, 0.5, 0.0, 0.0);


           		//Forward bumper
        		translateMatrixTransformList(forwardBumperMtList, 0.0, 0.0, 0.5);

        		//back bumper
        		translateMatrixTransformList(backBumperMtList, 0.0, 0.0, -0.5);


        	}


        	//rotateMatrix.makeRotate(rotateMatrix.getRotate() * wheelAxis);
        	//obj5Mt->setMatrix(rotateMatrix);

        	//thresh = rand() % 256;
        	//printf("Match thresh value: %f\n", matchThresh);
        	frame = cvQueryFrame(cameraCapture);
        	if (!frame) // end of the video stream
        		break;

        	cvCopy(frame, flipFrame);

        	cvFlip(flipFrame, flipFrame);
        	//cvNamedWindow("Original", 1);
        	//cvShowImage("Original", frame);
        	iplImageToOsgImage(frame, image);

        	image->dirty();

        	//osg::Image* = osg::Image(*image);


        	//frame = osgImage2IplImage(image);
        	//AR detection
        	//GLdouble modelview[16] = {0};

        	//Detect marker
        	int arDetect = cvarArMultRegistration(flipFrame,&openCvArMarker,openCvArTemplateList,&openCvArCamera, 60, 0.91);
			//printf("Marker found: %d\n",  arDetect);

			viewer.getCamera()->setViewMatrixAsLookAt(osg::Vec3d(0.0, 0.0, 100.0), osg::Vec3d(0.0, 0.0, 1000.0), osg::Vec3d(0.0, 1.0, 0.0));

			for(int i=0;i<arDetect;i++) {
				//if(openCvArMarker[i].tpl == 0);
				osg::Matrixf osgModelViewMatrix;
				for (int column = 0; column < 4; column++)	{
					for (int row = 0; row < 4; row++)	{
						osgModelViewMatrix(column, row) = openCvArMarker[i].modelview[column * 4 + row];
					}
				}

				viewer.getCamera()->setViewMatrix(osgModelViewMatrix);
			}

			viewer.frame();
        }
        return 0;

}
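The iplImageToOsgImage helper called above is not shown; a minimal sketch, assuming an 8-bit, 3-channel BGR capture frame:

// Assumed helper: point the osg::Image at the IplImage pixel data without
// copying it; osg must not free the capture's buffer, hence NO_DELETE.
void iplImageToOsgImage(IplImage* frame, osg::Image* image)
{
    image->setImage(frame->width, frame->height, 1,
                    GL_RGB,                    // internal texture format
                    GL_BGR, GL_UNSIGNED_BYTE,  // layout of IplImage data
                    (unsigned char*)frame->imageData,
                    osg::Image::NO_DELETE);
}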
Example No. 3
/*
// Getting feature pyramid
//
// API
// int getFeaturePyramid(IplImage * image,
                      const int lambda, const int k,
                      const int startX, const int startY,
                      const int W, const int H, CvLSVMFeaturePyramid **maps);
// INPUT
// image             - image
// lambda            - resize scale
// k                 - size of cells
// startX            - X coordinate of the image rectangle to search
// startY            - Y coordinate of the image rectangle to search
// W                 - width of the image rectangle to search
// H                 - height of the image rectangle to search
// OUTPUT
// maps              - feature maps for all levels
// RESULT
// Error status
*/
int getFeaturePyramid(IplImage * image,
                      const int lambda, const int k, 
                      const int startX, const int startY, 
                      const int W, const int H, CvLSVMFeaturePyramid **maps)
{
    IplImage *img2, *imgTmp, *imgResize;
    float   step, tmp;
    int      cntStep;
    int      maxcall;
    int i;
    int err;
    CvLSVMFeatureMap *map;
    
    //getting sub-image
    cvSetImageROI(image, cvRect(startX, startY, W, H));
    img2 = cvCreateImage(cvGetSize(image), image->depth, image->nChannels);
    cvCopy(image, img2, NULL);
    cvResetImageROI(image);

    if(img2->depth != IPL_DEPTH_32F)
    {
        imgResize = cvCreateImage(cvSize(img2->width , img2->height) , IPL_DEPTH_32F , 3);
        cvConvert(img2, imgResize);
    }
    else
    {
        imgResize = img2;
    }
    
    step = powf(2.0f, 1.0f/ ((float)lambda));
    maxcall = W/k;
    if( maxcall > H/k )
    {
        maxcall = H/k;
    }
    cntStep = (int)(logf((float)maxcall/(5.0f))/logf(step)) + 1;
    //printf("Count step: %f %d\n", step, cntStep);

    allocFeaturePyramidObject(maps, lambda, cntStep + lambda);

    for(i = 0; i < lambda; i++)
    {
        tmp = 1.0f / powf(step, (float)i);
        imgTmp = resize_opencv (imgResize, tmp);
        //imgTmp = resize_article_dp(img2, tmp, 4);
        err = getFeatureMaps_dp(imgTmp, 4, &map);
        err = normalizationAndTruncationFeatureMaps(map, 0.2f);
        err = PCAFeatureMaps(map);
        (*maps)->pyramid[i] = map;
        //printf("%d, %d\n", map->sizeY, map->sizeX);
        cvReleaseImage(&imgTmp);
    }

    /* remaining pyramid levels (cell size 8) */
    for(i = 0; i < cntStep; i++)
    {
        tmp = 1.0f / powf(step, (float)i);
        imgTmp = resize_opencv (imgResize, tmp);
        //imgTmp = resize_article_dp(imgResize, tmp, 8);
        err = getFeatureMaps_dp(imgTmp, 8, &map);
        err = normalizationAndTruncationFeatureMaps(map, 0.2f);
        err = PCAFeatureMaps(map);
        (*maps)->pyramid[i + lambda] = map;
        //printf("%d, %d\n", map->sizeY, map->sizeX);
        cvReleaseImage(&imgTmp);
    }/*for(i = 0; i < cntStep; i++)*/

    if(img2->depth != IPL_DEPTH_32F)
    {
        cvReleaseImage(&imgResize);
    }

    cvReleaseImage(&img2);
    return LATENT_SVM_OK;
}
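A hypothetical call for this routine; the cleanup helper name is an assumption based on the matching alloc function used above:

CvLSVMFeaturePyramid *maps = NULL;
IplImage *img = cvLoadImage("scene.jpg");
// build a pyramid over the whole image, lambda = 10 levels per octave,
// 8-pixel cells
if (img && getFeaturePyramid(img, 10, 8, 0, 0,
                             img->width, img->height, &maps) == LATENT_SVM_OK)
{
    /* ... consume the (*maps).pyramid array here ... */
    freeFeaturePyramidObject(&maps);
}
cvReleaseImage(&img);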
Example No. 4
public: void principal() {
        CvCapture* capture = 0;
        IplImage *frame, *frame_copy = 0;
        IplImage *frameAnterior;
        cascade = (CvHaarClassifierCascade*)cvLoad(cascadeFile, 0, 0, 0 );

        if( !cascade )
        {
            fprintf( stderr,"ERROR: Could not load classifier cascade\n" );
            return; // cannot proceed without the cascade
        }
        storageHaart = cvCreateMemStorage(0);

        capture = cvCaptureFromAVI(aviFile);


        cvNamedWindow( "Detecta", 1 );
        cvNamedWindow( "bluedetect", 1 );
        cvNamedWindow( "onlyhaart", 1 );

        if (capture) {
            for(int c=0; c<1; c++) { // NOTE: as written, the loop runs once and processes a single frame
                if( !cvGrabFrame( capture ))
                    break;
                frame = cvRetrieveFrame( capture );
                if( !frame )
                    break;
                CvSize size = cvSize(frame->width,frame->height); // get current frame size
                if( !frame_copy ) {
                    frame_copy = cvCreateImage(size,
                                                IPL_DEPTH_8U, frame->nChannels );
                    frameAnterior = cvCreateImage(size,
                                            IPL_DEPTH_8U, frame->nChannels );
                }
                if( frame->origin == IPL_ORIGIN_TL ) {
                    cvCopy( frame, frame_copy, 0 );
                } else {
                    cvFlip( frame, frame_copy, 0 );
                }


                if( cvWaitKey( 10 ) >= 0 ){
                    break;
                }

                if( !mhi) {
                    if( buf == 0 ) {
                        buf = (IplImage**)malloc(N*sizeof(buf[0]));
                        memset( buf, 0, N*sizeof(buf[0]));
                    }

                    for(int i = 0; i < N; i++ ) {
                        cvReleaseImage( &buf[i] );
                        buf[i] = cvCreateImage( size, IPL_DEPTH_8U, 1 );
                        cvZero( buf[i] );
                    }
                    cvReleaseImage( &mhi );
                    cvReleaseImage( &orient );
                    cvReleaseImage( &segmask );
                    cvReleaseImage( &mask );

                    mhi = cvCreateImage( size, IPL_DEPTH_32F, 1 );
                    cvZero( mhi ); // clear MHI at the beginning
                    orient = cvCreateImage( size, IPL_DEPTH_32F, 1 );
                    segmask = cvCreateImage( size, IPL_DEPTH_32F, 1 );
                    mask = cvCreateImage( size, IPL_DEPTH_8U, 1 );
                }
                detect_and_draw(frame_copy, frameAnterior);
            }

            cvReleaseImage( &frame_copy );
            cvReleaseImage( &frameAnterior );
            cvReleaseCapture( &capture );
        }
        cvDestroyWindow("Detecta");
        cvDestroyWindow("bluedetect");
        cvDestroyWindow("onlyhaart");
    }
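The method relies on class state that the excerpt omits; a sketch of the assumed members, with names inferred from the calls above:

class MotionDetector {
    static const int N = 4;                  // MHI ring-buffer length (assumed)
    const char* cascadeFile;                 // path to the Haar cascade XML
    const char* aviFile;                     // path to the input video
    CvHaarClassifierCascade* cascade;
    CvMemStorage* storageHaart;
    IplImage** buf;                          // ring buffer of grayscale frames
    IplImage *mhi, *orient, *segmask, *mask; // motion-history images
    void detect_and_draw(IplImage* img, IplImage* prev); // not shown
public:
    void principal();
};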
Example No. 5
int main( int argc, char** argv )
{
	int frameNum = 0;
	TrackerInfo tracker;
	DescInfo hogInfo;
	DescInfo hofInfo;
	DescInfo mbhInfo;

	if( argc < 2 ) {
		fprintf(stderr, "usage: %s video [options]\n", argv[0]);
		return 1;
	}
	char* video = argv[1];
	arg_parse(argc, argv);
	Video capture(video);

//	std::cerr << "start_frame: " << start_frame << " end_frame: " << end_frame << " track_length: " << track_length << std::endl;
//	std::cerr << "min_distance: " << min_distance << " patch_size: " << patch_size << " nxy_cell: " << nxy_cell << " nt_cell: " << nt_cell << std::endl;

	InitTrackerInfo(&tracker, track_length, init_gap);
	InitDescInfo(&hogInfo, 8, 0, 1, patch_size, nxy_cell, nt_cell);
	InitDescInfo(&hofInfo, 9, 1, 1, patch_size, nxy_cell, nt_cell);
	InitDescInfo(&mbhInfo, 8, 0, 1, patch_size, nxy_cell, nt_cell);

	if( show_track == 1 ) {
		cvNamedWindow( "DenseTrack", 0 );
		cvNamedWindow( "Original", 0 );
	}


	std::vector<std::list<Track> > xyScaleTracks;
	int init_counter = 0; // indicate when to detect new feature points
	while( true ) {
		IplImageWrapper frame = 0;
		int i, j, c;

		// get a new frame
		frame = capture.getFrame();
		frameNum = capture.getFrameIndex();
		if( !frame ) {
			printf("break");
			break;
		}
		if( frameNum >= start_frame && frameNum <= end_frame ) {
		if( !image ) {
			// initialize all the buffers
			image = IplImageWrapper( cvGetSize(frame), 8, 3 );
			image->origin = frame->origin;
			prev_image= IplImageWrapper( cvGetSize(frame), 8, 3 );
			prev_image->origin = frame->origin;
			grey = IplImageWrapper( cvGetSize(frame), 8, 1 );
			grey_pyramid = IplImagePyramid( cvGetSize(frame), 8, 1, scale_stride );
			prev_grey = IplImageWrapper( cvGetSize(frame), 8, 1 );
			prev_grey_pyramid = IplImagePyramid( cvGetSize(frame), 8, 1, scale_stride );
			eig_pyramid = IplImagePyramid( cvGetSize(frame), 32, 1, scale_stride );

			cvCopy( frame, image, 0 );
			cvCvtColor( image, grey, CV_BGR2GRAY );
			grey_pyramid.rebuild( grey );

			// how many scale we can have
			scale_num = std::min<std::size_t>(scale_num, grey_pyramid.numOfLevels());
			fscales = (float*)cvAlloc(scale_num*sizeof(float));
			xyScaleTracks.resize(scale_num);

			for( int ixyScale = 0; ixyScale < scale_num; ++ixyScale ) {
				std::list<Track>& tracks = xyScaleTracks[ixyScale];
				fscales[ixyScale] = pow(scale_stride, ixyScale);

				// find good features at each scale separately
				IplImage *grey_temp = 0, *eig_temp = 0;
				std::size_t temp_level = (std::size_t)ixyScale;
				grey_temp = cvCloneImage(grey_pyramid.getImage(temp_level));
				eig_temp = cvCloneImage(eig_pyramid.getImage(temp_level));
				std::vector<CvPoint2D32f> points(0);
				cvDenseSample(grey_temp, eig_temp, points, quality, min_distance);

				// save the feature points
				for( i = 0; i < points.size(); i++ ) {
					Track track(tracker.trackLength);
					PointDesc point(hogInfo, hofInfo, mbhInfo, points[i]);
					track.addPointDesc(point);
					tracks.push_back(track);
				}

				cvReleaseImage( &grey_temp );
				cvReleaseImage( &eig_temp );
			}
		}

		// build the image pyramid for the current frame
		cvCopy( frame, image, 0 );
		cvCvtColor( image, grey, CV_BGR2GRAY );
		grey_pyramid.rebuild(grey);

		if( frameNum > 0 ) {
		init_counter++;
		for( int ixyScale = 0; ixyScale < scale_num; ++ixyScale ) {
			// track feature points in each scale separately
			std::vector<CvPoint2D32f> points_in(0);
			std::list<Track>& tracks = xyScaleTracks[ixyScale];
			for (std::list<Track>::iterator iTrack = tracks.begin(); iTrack != tracks.end(); ++iTrack) {
				CvPoint2D32f point = iTrack->pointDescs.back().point;
				points_in.push_back(point); // collect all the feature points
			}
			int count = points_in.size();
			IplImage *prev_grey_temp = 0, *grey_temp = 0;
			std::size_t temp_level = ixyScale;
			prev_grey_temp = cvCloneImage(prev_grey_pyramid.getImage(temp_level));
			grey_temp = cvCloneImage(grey_pyramid.getImage(temp_level));

			cv::Mat prev_grey_mat = cv::cvarrToMat(prev_grey_temp);
			cv::Mat grey_mat = cv::cvarrToMat(grey_temp);

			std::vector<int> status(count);
			std::vector<CvPoint2D32f> points_out(count);

			// compute the optical flow
			IplImage* flow = cvCreateImage(cvGetSize(grey_temp), IPL_DEPTH_32F, 2);
			cv::Mat flow_mat = cv::cvarrToMat(flow);
			cv::calcOpticalFlowFarneback( prev_grey_mat, grey_mat, flow_mat,
							sqrt(2)/2.0, 5, 10, 2, 7, 1.5, cv::OPTFLOW_FARNEBACK_GAUSSIAN );
			// track feature points by median filtering
			OpticalFlowTracker(flow, points_in, points_out, status);

			int width = grey_temp->width;
			int height = grey_temp->height;
			// compute the integral histograms
			DescMat* hogMat = InitDescMat(height, width, hogInfo.nBins);
			HogComp(prev_grey_temp, hogMat, hogInfo);

			DescMat* hofMat = InitDescMat(height, width, hofInfo.nBins);
			HofComp(flow, hofMat, hofInfo);

			DescMat* mbhMatX = InitDescMat(height, width, mbhInfo.nBins);
			DescMat* mbhMatY = InitDescMat(height, width, mbhInfo.nBins);
			MbhComp(flow, mbhMatX, mbhMatY, mbhInfo);

			i = 0;
			for (std::list<Track>::iterator iTrack = tracks.begin(); iTrack != tracks.end(); ++i) {
			if( status[i] == 1 ) { // if the feature point is successfully tracked
				PointDesc& pointDesc = iTrack->pointDescs.back();
				CvPoint2D32f prev_point = points_in[i];
				// get the descriptors for the feature point
				CvScalar rect = getRect(prev_point, cvSize(width, height), hogInfo);
				pointDesc.hog = getDesc(hogMat, rect, hogInfo);
				pointDesc.hof = getDesc(hofMat, rect, hofInfo);
				pointDesc.mbhX = getDesc(mbhMatX, rect, mbhInfo);
				pointDesc.mbhY = getDesc(mbhMatY, rect, mbhInfo);

				PointDesc point(hogInfo, hofInfo, mbhInfo, points_out[i]);
				iTrack->addPointDesc(point);

				// draw this track
				if( show_track == 1 ) {
					std::list<PointDesc>& descs = iTrack->pointDescs;
					std::list<PointDesc>::iterator iDesc = descs.begin();
					float length = descs.size();
					CvPoint2D32f point0 = iDesc->point;
					point0.x *= fscales[ixyScale]; // map the point to first scale
					point0.y *= fscales[ixyScale];

					float j = 0;
					for (iDesc++; iDesc != descs.end(); ++iDesc, ++j) {
						CvPoint2D32f point1 = iDesc->point;
						point1.x *= fscales[ixyScale];
						point1.y *= fscales[ixyScale];

						cvLine(image, cvPointFrom32f(point0), cvPointFrom32f(point1),
							   CV_RGB(0,cvFloor(255.0*(j+1.0)/length),0), 2, 8,0);
						point0 = point1;
					}
					cvCircle(image, cvPointFrom32f(point0), 2, CV_RGB(255,0,0), -1, 8,0);
				}
				++iTrack;
			}
			else // remove the track, if we lose feature point
				iTrack = tracks.erase(iTrack);
			}
			ReleDescMat(hogMat);
			ReleDescMat(hofMat);
			ReleDescMat(mbhMatX);
			ReleDescMat(mbhMatY);
			cvReleaseImage( &prev_grey_temp );
			cvReleaseImage( &grey_temp );
			cvReleaseImage( &flow );
		}

		for( int ixyScale = 0; ixyScale < scale_num; ++ixyScale ) {
		std::list<Track>& tracks = xyScaleTracks[ixyScale]; // output the features for each scale
		for( std::list<Track>::iterator iTrack = tracks.begin(); iTrack != tracks.end(); ) {
			if( iTrack->pointDescs.size() >= tracker.trackLength+1 ) { // if the trajectory achieves the length we want
				std::vector<CvPoint2D32f> trajectory(tracker.trackLength+1);
				std::list<PointDesc>& descs = iTrack->pointDescs;
				std::list<PointDesc>::iterator iDesc = descs.begin();

				for (int count = 0; count <= tracker.trackLength; ++iDesc, ++count) {
					trajectory[count].x = iDesc->point.x*fscales[ixyScale];
					trajectory[count].y = iDesc->point.y*fscales[ixyScale];
				}
				float mean_x(0), mean_y(0), var_x(0), var_y(0), length(0);
				if( isValid(trajectory, mean_x, mean_y, var_x, var_y, length) == 1 ) {
					printf("%d\t", frameNum);
					printf("%f\t%f\t", mean_x, mean_y);
					printf("%f\t%f\t", var_x, var_y);
					printf("%f\t", length);
					printf("%f\t", fscales[ixyScale]);

					for (int count = 0; count < tracker.trackLength; ++count)
						printf("%f\t%f\t", trajectory[count].x,trajectory[count].y );

					iDesc = descs.begin();
					int t_stride = cvFloor(tracker.trackLength/hogInfo.ntCells);
					for( int n = 0; n < hogInfo.ntCells; n++ ) {
						std::vector<float> vec(hogInfo.dim);
						for( int t = 0; t < t_stride; t++, iDesc++ )
							for( int m = 0; m < hogInfo.dim; m++ )
								vec[m] += iDesc->hog[m];
						for( int m = 0; m < hogInfo.dim; m++ )
							printf("%f\t", vec[m]/float(t_stride));
					}

					iDesc = descs.begin();
					t_stride = cvFloor(tracker.trackLength/hofInfo.ntCells);
					for( int n = 0; n < hofInfo.ntCells; n++ ) {
						std::vector<float> vec(hofInfo.dim);
						for( int t = 0; t < t_stride; t++, iDesc++ )
							for( int m = 0; m < hofInfo.dim; m++ )
								vec[m] += iDesc->hof[m];
						for( int m = 0; m < hofInfo.dim; m++ )
							printf("%f\t", vec[m]/float(t_stride));
					}

					iDesc = descs.begin();
					t_stride = cvFloor(tracker.trackLength/mbhInfo.ntCells);
					for( int n = 0; n < mbhInfo.ntCells; n++ ) {
						std::vector<float> vec(mbhInfo.dim);
						for( int t = 0; t < t_stride; t++, iDesc++ )
							for( int m = 0; m < mbhInfo.dim; m++ )
								vec[m] += iDesc->mbhX[m];
						for( int m = 0; m < mbhInfo.dim; m++ )
							printf("%f\t", vec[m]/float(t_stride));
					}

					iDesc = descs.begin();
					t_stride = cvFloor(tracker.trackLength/mbhInfo.ntCells);
					for( int n = 0; n < mbhInfo.ntCells; n++ ) {
						std::vector<float> vec(mbhInfo.dim);
						for( int t = 0; t < t_stride; t++, iDesc++ )
							for( int m = 0; m < mbhInfo.dim; m++ )
								vec[m] += iDesc->mbhY[m];
						for( int m = 0; m < mbhInfo.dim; m++ )
							printf("%f\t", vec[m]/float(t_stride));
					}

					printf("\n");
				}
				iTrack = tracks.erase(iTrack);
			}
			else
				iTrack++;
		}
		}

		if( init_counter == tracker.initGap ) { // detect new feature points every initGap frames
		init_counter = 0;
		for (int ixyScale = 0; ixyScale < scale_num; ++ixyScale) {
			std::list<Track>& tracks = xyScaleTracks[ixyScale];
			std::vector<CvPoint2D32f> points_in(0);
			std::vector<CvPoint2D32f> points_out(0);
			for(std::list<Track>::iterator iTrack = tracks.begin(); iTrack != tracks.end(); iTrack++, i++) {
				std::list<PointDesc>& descs = iTrack->pointDescs;
				CvPoint2D32f point = descs.back().point; // the last point in the track
				points_in.push_back(point);
			}

			IplImage *grey_temp = 0, *eig_temp = 0;
			std::size_t temp_level = (std::size_t)ixyScale;
			grey_temp = cvCloneImage(grey_pyramid.getImage(temp_level));
			eig_temp = cvCloneImage(eig_pyramid.getImage(temp_level));

			cvDenseSample(grey_temp, eig_temp, points_in, points_out, quality, min_distance);
			// save the new feature points
			for( i = 0; i < points_out.size(); i++) {
				Track track(tracker.trackLength);
				PointDesc point(hogInfo, hofInfo, mbhInfo, points_out[i]);
				track.addPointDesc(point);
				tracks.push_back(track);
			}
			cvReleaseImage( &grey_temp );
			cvReleaseImage( &eig_temp );
		}
		}
		}

		cvCopy( frame, prev_image, 0 );
		cvCvtColor( prev_image, prev_grey, CV_BGR2GRAY );
		prev_grey_pyramid.rebuild(prev_grey);
		}

		if( show_track == 1 ) {
			cvShowImage( "DenseTrack", image);
			cvShowImage( "Original", frame);
			c = cvWaitKey(3);
			if((char)c == 27) break;
		}
		// get the next frame
		if (!capture.nextFrame())
			break;
	}

	if( show_track == 1 )
		cvDestroyWindow("DenseTrack");

	return 0;
}
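For reference, the layout of each line printed for a completed trajectory, as emitted by the printf calls above:

// frameNum, mean_x, mean_y, var_x, var_y, length, scale,
// then trackLength (x, y) pairs of the trajectory itself,
// then the time-averaged HOG, HOF, MBHx and MBHy descriptors
// (ntCells blocks of dim values each), all tab-separated on one line.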
Example No. 6
#if defined(WINDOWS_SYS)
// using _beginthreadex
UINT WINAPI
//DWORD WINAPI
#elif defined(POSIX_SYS)
// using pthread
void *
#endif
	ChessRecognition::HoughLineThread(
#if defined(WINDOWS_SYS)
	LPVOID
#elif defined(POSIX_SYS)
	void *
#endif
	Param) {
	// The thread function that actually does the work in the background.
	// The owning class instance is passed in as the thread parameter.
	ChessRecognition *_TChessRecognition = (ChessRecognition *)Param;
	_TChessRecognition->_HoughLineBased = new HoughLineBased();

	CvSeq *_TLineX, *_TLineY;
	double _TH[] = { -1, -7, -15, 0, 15, 7, 1 };

	CvMat _TDoGX = cvMat(1, 7, CV_64FC1, _TH);
	CvMat* _TDoGY = cvCreateMat(7, 1, CV_64FC1);
	cvTranspose(&_TDoGX, _TDoGY); // transpose(&DoGx) -> DoGy

	double _TMinValX, _TMaxValX, _TMinValY, _TMaxValY, _TMinValT, _TMaxValT;
	int _TKernel = 1;

	// Initialize the images used by the Hough stage.
	IplImage *iplTemp = cvCreateImage(cvSize(_TChessRecognition->_Width, _TChessRecognition->_Height), IPL_DEPTH_32F, 1);                   
	IplImage *iplDoGx = cvCreateImage(cvGetSize(iplTemp), IPL_DEPTH_32F, 1);  
	IplImage *iplDoGy = cvCreateImage(cvGetSize(iplTemp), IPL_DEPTH_32F, 1);  
	IplImage *iplDoGyClone = cvCloneImage(iplDoGy);
	IplImage *iplDoGxClone = cvCloneImage(iplDoGx);
	IplImage *iplEdgeX = cvCreateImage(cvGetSize(iplTemp), 8, 1);
	IplImage *iplEdgeY = cvCreateImage(cvGetSize(iplTemp), 8, 1);

	CvMemStorage* _TStorageX = cvCreateMemStorage(0), *_TStorageY = cvCreateMemStorage(0);

	while (_TChessRecognition->_EnableThread != false) {
		// Grab the image; a critical section keeps this in sync with the main loop.
		_TChessRecognition->_ChessBoardDetectionInternalImageProtectMutex.lock();
		//EnterCriticalSection(&(_TChessRecognition->cs));
		cvConvert(_TChessRecognition->_ChessBoardDetectionInternalImage, iplTemp);
		//LeaveCriticalSection(&_TChessRecognition->cs);
		_TChessRecognition->_ChessBoardDetectionInternalImageProtectMutex.unlock();

		// Apply the filters to extract the X-axis and Y-axis lines.
		cvFilter2D(iplTemp, iplDoGx, &_TDoGX); // extract only the lines.
		cvFilter2D(iplTemp, iplDoGy, _TDoGY);
		cvAbs(iplDoGx, iplDoGx);
		cvAbs(iplDoGy, iplDoGy);

		// Find the min/max inside each image and normalize.
		cvMinMaxLoc(iplDoGx, &_TMinValX, &_TMaxValX);
		cvMinMaxLoc(iplDoGy, &_TMinValY, &_TMaxValY);
		cvMinMaxLoc(iplTemp, &_TMinValT, &_TMaxValT);
		cvScale(iplDoGx, iplDoGx, 2.0 / _TMaxValX); // normalize.
		cvScale(iplDoGy, iplDoGy, 2.0 / _TMaxValY);
		cvScale(iplTemp, iplTemp, 2.0 / _TMaxValT);

		cvCopy(iplDoGy, iplDoGyClone);
		cvCopy(iplDoGx, iplDoGxClone);

		// Extra processing after non-maximum suppression (NMS).
		_TChessRecognition->_HoughLineBased->NonMaximumSuppression(iplDoGx, iplDoGyClone, _TKernel);
		_TChessRecognition->_HoughLineBased->NonMaximumSuppression(iplDoGy, iplDoGxClone, _TKernel);

		cvConvert(iplDoGx, iplEdgeY); // convert back to IPL_DEPTH_8U.
		cvConvert(iplDoGy, iplEdgeX);

		double rho = 1.0; // distance resolution in pixel-related units.
		double theta = 1.0; // angle resolution in degrees (scaled by CV_PI/180 below).
		int threshold = 20;

		if (threshold == 0)
			threshold = 1;

		// Run Hough line detection on the extracted edges.
		_TLineX = cvHoughLines2(iplEdgeX, _TStorageX, CV_HOUGH_STANDARD, 1.0 * rho, CV_PI / 180 * theta, threshold, 0, 0);
		_TLineY = cvHoughLines2(iplEdgeY, _TStorageY, CV_HOUGH_STANDARD, 1.0 * rho, CV_PI / 180 * theta, threshold, 0, 0);

		// Convert the CvSeq results into vectors.
		_TChessRecognition->_Vec_ProtectionMutex.lock();
		_TChessRecognition->_HoughLineBased->CastSequence(_TLineX, _TLineY);
		_TChessRecognition->_Vec_ProtectionMutex.unlock();

		Sleep(2); // Windows API; a POSIX build would need usleep(2000) instead.
	}

	// Release the matrix.
	cvReleaseMat(&_TDoGY);

	// Release the images used by the internal computations.
	cvReleaseImage(&iplTemp);
	cvReleaseImage(&iplDoGx);
	cvReleaseImage(&iplDoGy);
	cvReleaseImage(&iplDoGyClone);
	cvReleaseImage(&iplDoGxClone);
	cvReleaseImage(&iplEdgeX);
	cvReleaseImage(&iplEdgeY);

	// Release the OpenCV memory storages used by cvHoughLines2.
	cvReleaseMemStorage(&_TStorageX);
	cvReleaseMemStorage(&_TStorageY);

	delete _TChessRecognition->_HoughLineBased;
#if defined(WINDOWS_SYS)
	_endthread();
#elif defined(POSIX_SYS)

#endif
	_TChessRecognition->_EndThread = true;
	return 0;
}
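For context, a hypothetical sketch of how this dual-platform entry point would be launched (handle names assumed; the function must be a static member for either call to compile):

#if defined(WINDOWS_SYS)
	// _beginthreadex expects unsigned (__stdcall *)(void *), which the
	// UINT WINAPI signature above satisfies.
	HANDLE _THandle = (HANDLE)_beginthreadex(NULL, 0,
		&ChessRecognition::HoughLineThread, this, 0, NULL);
#elif defined(POSIX_SYS)
	pthread_t _TThread;
	pthread_create(&_TThread, NULL,
		&ChessRecognition::HoughLineThread, (void *)this);
#endif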
Example No. 7
// returns sequence of squares detected on the image.
// the sequence is stored in the specified memory storage
CvSeq* findSquares4(IplImage* imgSrc, CvMemStorage* storage)
{
	CvSeq* contours;
	int i, c, l, N = 11;
	CvSize sz = cvSize(imgSrc->width & -2, imgSrc->height & -2);

	IplImage* imgTmp = cvCloneImage(imgSrc); // make a copy of input image
	IplImage* imgGray = cvCreateImage(sz, 8, 1);
	IplImage* imgPyr = cvCreateImage(cvSize(sz.width / 2, sz.height / 2), 8, 3);
	IplImage* imgGrayTmp;
	CvSeq* result;
	double s, t;

	// create empty sequence that will contain points -
	// 4 points per square (the square's vertices)
	CvSeq* squares = cvCreateSeq(0, sizeof(CvSeq), sizeof(CvPoint), storage);

	// select the maximum ROI in the image
	// with the width and height divisible by 2
	cvSetImageROI(imgTmp, cvRect(0, 0, sz.width, sz.height));

	// down-scale and up-scale the image to filter out the noise
	cvPyrDown(imgTmp, imgPyr, 7);
	cvPyrUp(imgPyr, imgTmp, 7);
	imgGrayTmp = cvCreateImage(sz, 8, 1);

	// find squares in every color plane of the image
	for (c = 0; c < 3; c++)
	{
		// extract the c-th color plane
		cvSetImageCOI(imgTmp, c + 1);
		cvCopy(imgTmp, imgGrayTmp, 0);

		// try several threshold levels
		for (l = 0; l < N; l++)
		{
			// hack: use Canny instead of zero threshold level.
			// Canny helps to catch squares with gradient shading
			if (l == 0)
			{
				// apply Canny. Take the upper threshold from the slider
				// (gThresh, an assumed global) and set the lower to 0,
				// which forces edge merging
				cvCanny(imgGrayTmp, imgGray, 0, gThresh, 5);

				// dilate canny output to remove potential
				// holes between edge segments
				cvDilate(imgGray, imgGray, 0, 1);
			}
			else
			{
				// apply threshold if l!=0:
				//     tgray(x,y) = gray(x,y) < (l+1)*255/N ? 255 : 0
				cvThreshold(imgGrayTmp, imgGray, (l + 1) * 255 / N, 255, CV_THRESH_BINARY);
			}

			// find contours and store them all as a list
			cvFindContours(imgGray, storage, &contours, sizeof(CvContour),
					CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(0, 0));

			// test each contour
			while (contours)
			{
				// approximate contour with accuracy proportional
				// to the contour perimeter
				result = cvApproxPoly(contours, sizeof(CvContour), storage,
						CV_POLY_APPROX_DP, cvContourPerimeter(contours) * 0.02, 0);

				// square contours should have 4 vertices after approximation
				// relatively large area (to filter out noisy contours)
				// and be convex.
				// Note: absolute value of an area is used because
				// area may be positive or negative - in accordance with the
				// contour orientation
				if (result->total == 4 &&
						fabs(cvContourArea(result, CV_WHOLE_SEQ)) > 1000 &&
						cvCheckContourConvexity(result))
				{
					s = 0;

					for (i = 0; i < 5; i++)
					{
						// find minimum angle between joint edges (maximum of cosine)
						if (i >= 2)
						{
							t = fabs(angle(
									(CvPoint*) cvGetSeqElem(result, i),
									(CvPoint*) cvGetSeqElem(result, i - 2),
									(CvPoint*) cvGetSeqElem(result, i - 1)));
							s = s > t ? s : t;
						}
					}

					// if cosines of all angles are small
					// (all angles are ~90 degrees) then write the quadrangle
					// vertices to the resultant sequence
					if (s < 0.3)
						for (i = 0; i < 4; i++)
							cvSeqPush(squares, (CvPoint*) cvGetSeqElem(result, i));
				}

				// take the next contour
				contours = contours->h_next;
			}
		}
	}

	// release all the temporary images
	cvReleaseImage(&imgGray);
	cvReleaseImage(&imgPyr);
	cvReleaseImage(&imgGrayTmp);
	cvReleaseImage(&imgTmp);

	return squares;
}
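A companion drawer, modeled on the classic OpenCV squares demo this function derives from:

// Read the stored vertices back four at a time and outline each square.
void drawSquares(IplImage* img, CvSeq* squares)
{
	CvSeqReader reader;
	cvStartReadSeq(squares, &reader, 0);

	for (int i = 0; i < squares->total; i += 4)
	{
		CvPoint pt[4], *rect = pt;
		int count = 4;

		// read the 4 vertices
		CV_READ_SEQ_ELEM(pt[0], reader);
		CV_READ_SEQ_ELEM(pt[1], reader);
		CV_READ_SEQ_ELEM(pt[2], reader);
		CV_READ_SEQ_ELEM(pt[3], reader);

		// draw the square as a closed polyline
		cvPolyLine(img, &rect, &count, 1, 1, CV_RGB(0, 255, 0), 3, CV_AA, 0);
	}
}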
Example No. 8
std::list<Garbage>
garbageList(IplImage * src, CvCapture * capture){

	std::list<Garbage> garbages;

	cvNamedWindow("output",CV_WINDOW_AUTOSIZE);
	//object model
	IplImage * model;

	if( capture == 0 ){
		perror("Invalid capture");
		return garbages;
	}
	
	
	//image for the histogram-based filter
	//could be a parameter
	model=cvLoadImage("../images/colilla-sinBlanco.png",1);
	CvHistogram * testImageHistogram=getHShistogramFromRGB(model,HIST_H_BINS,HIST_S_BINS);
	
	//~ int frameWidth=cvGetCaptureProperty(capture,CV_CAP_PROP_FRAME_WIDTH);
	//~ int frameHeight=cvGetCaptureProperty(capture,CV_CAP_PROP_FRAME_HEIGHT);
	
	
	
	//gets a frame for setting  image size
	//CvSize srcSize = cvSize(frameWidth,frameHeight);
	src=cvQueryFrame(capture);
	CvSize srcSize = cvGetSize(src);

	//images for HSV conversion
	IplImage* hsv = cvCreateImage( srcSize, 8, 3 );
	IplImage* h_plane = cvCreateImage( srcSize, 8, 1 );
	IplImage* s_plane = cvCreateImage( srcSize, 8, 1 );
	IplImage* v_plane = cvCreateImage( srcSize, 8, 1 );
	
	
	
	//Image for thresholding
	IplImage * threshImage=cvCreateImage(srcSize,8,1);
	
	//image for equalization
	IplImage * equalizedImage=cvCreateImage(srcSize,8,1);
	
	//image for Morphing operations(Dilate-erode)
	IplImage * morphImage=cvCreateImage(srcSize,8,1);
	
	//image for image smoothing
	IplImage * smoothImage=cvCreateImage(srcSize,8,1);
	
	//image for contour-finding operations
	IplImage * contourImage=cvCreateImage(srcSize,8,3);
	
	
	int frameCounter=1;
	int cont_index=0;
	
	//convolution kernel for morph operations
	IplConvKernel* element;
	
	CvRect boundingRect;
	
	//contours
	CvSeq ** contours;
	
	//Main loop body (as written, this processes a single frame)

	frameCounter++;
	printf("frame number:%d\n",frameCounter);
	
	//convert image to hsv
	cvCvtColor( src, hsv, CV_BGR2HSV );
	cvCvtPixToPlane( hsv, h_plane, s_plane, v_plane, 0 );
	
	//equalize Saturation Channel image
	cvEqualizeHist(s_plane,equalizedImage);
	
	//threshold the equalized Saturation channel image
	cvThreshold(equalizedImage,threshImage,THRESHOLD_VALUE,255,
	CV_THRESH_BINARY);
	
	//apply morphologic operations
	element = cvCreateStructuringElementEx( MORPH_KERNEL_SIZE*2+1, 
		MORPH_KERNEL_SIZE*2+1, MORPH_KERNEL_SIZE, MORPH_KERNEL_SIZE, 
		CV_SHAPE_RECT, NULL);
		
	cvDilate(threshImage,morphImage,element,MORPH_DILATE_ITER);
	cvErode(morphImage,morphImage,element,MORPH_ERODE_ITER);

	//apply smooth gaussian-filter
	cvSmooth(morphImage,smoothImage,CV_GAUSSIAN,3,0,0,0);
	
	//get all contours
	contours=findContours(smoothImage);
	
	cont_index=0;
	cvCopy(src,contourImage,0);
	
	while(contours[cont_index]!=NULL){
		CvSeq * aContour=contours[cont_index];
		//apply filters
		if(perimeterFilter(aContour,MINCONTOUR_PERIMETER,
			MAXCONTOUR_PERIMETER) &&
			//areaFilter(aContour,MINCONTOUR_AREA,MAXCONTOUR_AREA) &&
			rectangularAspectFilter(aContour,CONTOUR_RECTANGULAR_MIN_RATIO,
				CONTOUR_RECTANGULAR_MAX_RATIO) &&
			boxAreaFilter(aContour,BOXFILTER_TOLERANCE) &&
			histogramMatchingFilter(src,aContour,testImageHistogram,
				HIST_H_BINS,HIST_S_BINS,HIST_MIN)){

				//if passed filters
				printContour(aContour,3,cvScalar(127,127,0,0),
					contourImage);
			
				//get contour bounding box
				boundingRect=cvBoundingRect(aContour,0);
				cvRectangle(contourImage,cvPoint(boundingRect.x,boundingRect.y),
						cvPoint(boundingRect.x+boundingRect.width,
						boundingRect.y+boundingRect.height),
						_GREEN,1,8,0);
			}
			
		cont_index++;
	}
	
	cvShowImage("output",contourImage);
	cvWaitKey(0);


	
		
}
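The histogram helper used by the matching filter above is not shown; a minimal sketch, assuming a BGR input and the H-S bin counts passed in:

CvHistogram * getHShistogramFromRGB(IplImage * img, int hBins, int sBins){
	IplImage * hsv = cvCreateImage(cvGetSize(img), 8, 3);
	IplImage * h = cvCreateImage(cvGetSize(img), 8, 1);
	IplImage * s = cvCreateImage(cvGetSize(img), 8, 1);

	cvCvtColor(img, hsv, CV_BGR2HSV);
	cvCvtPixToPlane(hsv, h, s, NULL, NULL); // V channel not needed

	int sizes[] = { hBins, sBins };
	float hRange[] = { 0, 180 }, sRange[] = { 0, 255 };
	float * ranges[] = { hRange, sRange };
	CvHistogram * hist = cvCreateHist(2, sizes, CV_HIST_ARRAY, ranges, 1);

	IplImage * planes[] = { h, s };
	cvCalcHist(planes, hist, 0, NULL);
	cvNormalizeHist(hist, 1.0);

	cvReleaseImage(&hsv); cvReleaseImage(&h); cvReleaseImage(&s);
	return hist;
}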
Example No. 9
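/* Flow: track the existing 2D points from oldImage into newImage, estimate
   the new 3x4 projection matrix from the surviving correspondences (at
   least 6 are required), generate fresh feature points on the new image,
   match them back to the old one, and drop duplicates of already-tracked
   points before copying out the merged status mask. */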
void icvAddNewImageToPrevious____(
                                    IplImage *newImage,//Image to add
                                    IplImage *oldImage,//Previous image
                                    CvMat *oldPoints,// previous 2D points on prev image (some points may be not visible)
                                    CvMat *oldPntStatus,//Status for each point on prev image
                                    CvMat *objPoints4D,//prev 4D points
                                    CvMat *newPoints,  //Points on new image corr for prev
                                    CvMat *newPntStatus,// New point status for new image
                                    CvMat *newFPoints2D1,//new feature points on prev image
                                    CvMat *newFPoints2D2,//new feature points on new image
                                    CvMat *newFPointsStatus,
                                    CvMat *newProjMatr,//New projection matrix
                                    int useFilter,
                                    double threshold)
{
    CvMat *points2 = 0;
    CvMat *status = 0;
    CvMat *newFPointsStatusTmp = 0;

    //CV_FUNCNAME( "icvAddNewImageToPrevious____" );
    __BEGIN__;

    /* First found correspondence points for images */

    /* Test input params */

    int numPoints;
    numPoints = oldPoints->cols;

    /* Allocate memory */

    points2 = cvCreateMat(2,numPoints,CV_64F);
    status = cvCreateMat(1,numPoints,CV_8S);
    newFPointsStatusTmp = cvCreateMat(1, newFPoints2D1->cols,CV_8S);

    int corrNum;
    corrNum = icvFindCorrForGivenPoints(    oldImage,/* Image 1 */
                                            newImage,/* Image 2 */
                                            oldPoints, 
                                            oldPntStatus,
                                            points2,
                                            status,
                                            useFilter,/*Use fundamental matrix to filter points */
                                            threshold);/* Threshold for good points in filter */

    cvCopy(status,newPntStatus);
    cvCopy(points2,newPoints);

    CvMat projMatr;
    double projMatr_dat[12];
    projMatr = cvMat(3,4,CV_64F,projMatr_dat);

    if( corrNum >= 6 )
    {/* We can compute projection matrix */
//        icvComputeProjectMatrix(objPoints4D,points2,&projMatr);
        icvComputeProjectMatrixStatus(objPoints4D,points2,status,&projMatr);
        cvCopy(&projMatr,newProjMatr);
        
        /* Create new points and find correspondence */
        icvCreateFeaturePoints(newImage, newFPoints2D2,newFPointsStatus);
        
        /* Good if we test new points before find corr points */

        /* Find correspondence for new found points */
        icvFindCorrForGivenPoints( newImage,/* Image 1 */
                                   oldImage,/* Image 2 */
                                   newFPoints2D2,
                                   newFPointsStatus,//prev status
                                   newFPoints2D1,
                                   newFPointsStatusTmp,//new status
                                   useFilter,/*Use fundamental matrix to filter points */
                                   threshold);/* Threshold for good points in filter */

        /* We generated new points on image test for exist points */

        /* Remove all new double points */

        int origNum;
        /* Find point of old image */
        origNum = icvRemoveDoublePoins( oldPoints,/* Points on prev image */
                                        newFPoints2D1,/* New points */
                                        oldPntStatus,/* Status for old points */
                                        newFPointsStatusTmp,/* Status for new points */
                                        newFPointsStatusTmp,//orig status
                                        20);/* distance threshold */

        /* Find double points on new image */
        origNum = icvRemoveDoublePoins( newPoints,/* Points on new image */
                                        newFPoints2D2,/* New points */
                                        newPntStatus,/* Status for old points */
                                        newFPointsStatusTmp,/* Status for new points */
                                        newFPointsStatusTmp,//orig status
                                        20);/* distance threshold */



        /* Add all new good points to the result */

        /* Copy new status to old */
        cvCopy(newFPointsStatusTmp,newFPointsStatus);
    }

    __END__;

    /* Free allocated memory */
    cvReleaseMat( &points2 );
    cvReleaseMat( &status );
    cvReleaseMat( &newFPointsStatusTmp );

    return;
}
Example No. 10
void BoatDetector::houghline(IplImage* edgeImage, IplImage* image, IplImage* lineImage) {

	//validation
	int points = 50; // points per row
	int rows = 3; // number of rows
	int ver_dist = 10; // vertical distance between points
	int hor_dist = image->width / points; // horizontal distance between points

	cvCopy(edgeImage, lineImage);

	CvSeq* hough_lines = 0;

	CvScalar line_color = cvScalar(120);

	hough_lines = cvHoughLines2( edgeImage, hough_storage, CV_HOUGH_STANDARD, 1, CV_PI/180, 100, 0, 0 );


	if(hough_lines->total == 0) {
		return;
	}

	bool find = false;

	CvPoint pt1, pt2;
	float* line;
	float theta;
	float rho;
	double a, b, x0, y0;
	for( int i = 0; i < min(hough_lines->total, 100); i++ )
	{
		line = (float*)cvGetSeqElem(hough_lines, i);
		theta = line[1];
		if(theta < 1.50 || theta > 1.60) {
			continue;
		}
		rho = line[0];

		a = cos(theta);
		b = sin(theta);
		x0 = a*rho;
		y0 = b*rho;
		pt1.x = cvRound(x0 + 1000*(-b));
		pt1.y = cvRound(y0 + 1000*(a));
		pt2.x = cvRound(x0 - 1000*(-b));
		pt2.y = cvRound(y0 - 1000*(a));		

		cvLine( lineImage, pt1, pt2, line_color, 2, CV_AA, 0 );
		find = true;
	}
	if(!find) {
		return;
	}
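
	// Horizon search: scan a column a few pixels in from the left and right
	// borders of lineImage for the lowest row still painted with line_color;
	// the midpoint of the two rows becomes the candidate sea limit, accepted
	// only when enough sampled point pairs differ strongly above/below it.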

	bool run = true;
	int search_limit = lineImage->height - (ver_dist * rows);
	int line_step = lineImage->widthStep; // step in bytes (sizeof(char) == 1)
	int img_step = image->widthStep;
	int max_left, max_right;
	int tmp_limit;
	double count;
	while(run) {
		max_left = 0;
		max_right = 0;
		for(int i = ver_dist * rows; i < search_limit; i++) {
			if(((uchar)lineImage->imageData[i*line_step+3]) == line_color.val[0]) {
				if(i > max_left) {
					max_left = i;
				}
			}
			if(((uchar)lineImage->imageData[i*line_step + (lineImage->width-3)]) == line_color.val[0]) {
				if(i > max_right) {
					max_right = i;
				}
			}		
		}
		if(max_left == 0 || max_right == 0) {
			run = false;
			continue;
		}

		tmp_limit = (max_left + max_right) / 2;

		//limit validation
		count = 0;
		if(abs(max_left - max_right) < 10) {

			for(int i = tmp_limit - (ver_dist * rows), k = 0, t = rows*2; k < rows; i+=ver_dist, k++, t-=2) {
				for(int j = hor_dist; j < image->width; j+=hor_dist) {
					if(abs(image->imageData[i*img_step + j] -
						image->imageData[(i+t*ver_dist)*img_step + j] ) > 10 )
					{
						count++;
					}
				}
			}
		}
		if((count / (points * rows)) > 0.9 ) {

			sea_limit = tmp_limit;

			/*
			IplImage* ao = cvCloneImage(image);
			for(int i = tmp_limit - (ver_dist * rows), k = 0, t = rows*2; k < rows; i+=ver_dist, k++, t-=2) {
				for(int j = hor_dist; j < image->width; j+=hor_dist) {
					if(abs(image->imageData[i*img_step + j] -
							image->imageData[(i+t*ver_dist)*img_step + j] ) > 10 )
					{
						cvLine(ao, cvPoint(j,i), cvPoint(j,i), CV_RGB(0,0,0), 3);
						cvLine(ao, cvPoint(j,i+t*ver_dist), cvPoint(j,i+t*ver_dist), CV_RGB(255,255,255), 3);
					}
				}
			}

			cvShowImage("ao",ao);
			cvWaitKey(0);
			*/

			run = false;
		}
		else {
			search_limit = max(max_left, max_right);
		}
	}
}
Example No. 11
bool
CvGBTrees::train( const CvMat* _train_data, int _tflag,
              const CvMat* _responses, const CvMat* _var_idx,
              const CvMat* _sample_idx, const CvMat* _var_type,
              const CvMat* _missing_mask,
              CvGBTreesParams _params, bool /*_update*/ ) //update is not supported
{
    CvMemStorage* storage = 0;

    params = _params;
    bool is_regression = problem_type();

    clear();
    /*
      n - count of samples
      m - count of variables
    */
    int n = _train_data->rows;
    int m = _train_data->cols;
    if (_tflag != CV_ROW_SAMPLE)
    {
        int tmp;
        CV_SWAP(n,m,tmp);
    }

    CvMat* new_responses = cvCreateMat( n, 1, CV_32F);
    cvZero(new_responses);

    data = new CvDTreeTrainData( _train_data, _tflag, new_responses, _var_idx,
        _sample_idx, _var_type, _missing_mask, _params, true, true );
    if (_missing_mask)
    {
        missing = cvCreateMat(_missing_mask->rows, _missing_mask->cols,
                              _missing_mask->type);
        cvCopy( _missing_mask, missing);
    }

    orig_response = cvCreateMat( 1, n, CV_32F );
    int step = (_responses->cols > _responses->rows) ? 1 : _responses->step / CV_ELEM_SIZE(_responses->type);
    switch (CV_MAT_TYPE(_responses->type))
    {
        case CV_32FC1:
        {
            for (int i=0; i<n; ++i)
                orig_response->data.fl[i] = _responses->data.fl[i*step];
        }; break;
        case CV_32SC1:
        {
            for (int i=0; i<n; ++i)
                orig_response->data.fl[i] = (float) _responses->data.i[i*step];
        }; break;
        default:
            CV_Error(CV_StsUnmatchedFormats, "Response should be a 32fC1 or 32sC1 vector.");
    }

    if (!is_regression)
    {
        class_count = 0;
        unsigned char * mask = new unsigned char[n];
        memset(mask, 0, n);
        // compute the count of different output classes
        for (int i=0; i<n; ++i)
            if (!mask[i])
            {
                class_count++;
                for (int j=i; j<n; ++j)
                    if (int(orig_response->data.fl[j]) == int(orig_response->data.fl[i]))
                        mask[j] = 1;
            }
        delete[] mask;

        class_labels = cvCreateMat(1, class_count, CV_32S);
        class_labels->data.i[0] = int(orig_response->data.fl[0]);
        int j = 1;
        for (int i=1; i<n; ++i)
        {
            int k = 0;
            while ((int(orig_response->data.fl[i]) - class_labels->data.i[k]) && (k<j))
                k++;
            if (k == j)
            {
                class_labels->data.i[k] = int(orig_response->data.fl[i]);
                j++;
            }
        }
    }

    // inside the GBT learning process only regression decision trees are built
    data->is_classifier = false;

    // preprocessing sample indices
    if (_sample_idx)
    {
        int sample_idx_len = get_len(_sample_idx);

        switch (CV_MAT_TYPE(_sample_idx->type))
        {
            case CV_32SC1:
            {
                sample_idx = cvCreateMat( 1, sample_idx_len, CV_32S );
                for (int i=0; i<sample_idx_len; ++i)
                    sample_idx->data.i[i] = _sample_idx->data.i[i];
            } break;
            case CV_8S:
            case CV_8U:
            {
                int active_samples_count = 0;
                for (int i=0; i<sample_idx_len; ++i)
                    active_samples_count += int( _sample_idx->data.ptr[i] );
                sample_idx = cvCreateMat( 1, active_samples_count, CV_32S );
                active_samples_count = 0;
                for (int i=0; i<sample_idx_len; ++i)
                    if (int( _sample_idx->data.ptr[i] ))
                        sample_idx->data.i[active_samples_count++] = i;

            } break;
            default: CV_Error(CV_StsUnmatchedFormats, "_sample_idx should be a 32sC1, 8sC1 or 8uC1 vector.");
        }
        icvSortFloat(sample_idx->data.fl, sample_idx_len, 0);
    }
    else
    {
        sample_idx = cvCreateMat( 1, n, CV_32S );
        for (int i=0; i<n; ++i)
            sample_idx->data.i[i] = i;
    }

    sum_response = cvCreateMat(class_count, n, CV_32F);
    sum_response_tmp = cvCreateMat(class_count, n, CV_32F);
    cvZero(sum_response);

    delta = 0.0f;
    /*
      in the case of a regression problem the initial guess (the zero term
      in the sum) is set to the mean of all the training responses, that is
      the best constant model
    */
    if (is_regression) base_value = find_optimal_value(sample_idx);
    /*
      in the case of a classification problem the initial guess (the zero term
      in the sum) is set to zero for all the trees sequences
    */
    else base_value = 0.0f;
    /*
      current prediction on all training samples is set to be
      equal to the base_value
    */
    cvSet( sum_response, cvScalar(base_value) );

    weak = new pCvSeq[class_count];
    for (int i=0; i<class_count; ++i)
    {
        storage = cvCreateMemStorage();
        weak[i] = cvCreateSeq( 0, sizeof(CvSeq), sizeof(CvDTree*), storage );
        storage = 0;
    }

    // subsample params and data
    rng = &cv::theRNG();

    int samples_count = get_len(sample_idx);

    params.subsample_portion = params.subsample_portion <= FLT_EPSILON ||
        1 - params.subsample_portion <= FLT_EPSILON
        ? 1 : params.subsample_portion;
    int train_sample_count = cvFloor(params.subsample_portion * samples_count);
    if (train_sample_count == 0)
        train_sample_count = samples_count;
    int test_sample_count = samples_count - train_sample_count;
    int* idx_data = new int[samples_count];
    subsample_train = cvCreateMatHeader( 1, train_sample_count, CV_32SC1 );
    *subsample_train = cvMat( 1, train_sample_count, CV_32SC1, idx_data );
    if (test_sample_count)
    {
        subsample_test  = cvCreateMatHeader( 1, test_sample_count, CV_32SC1 );
        *subsample_test = cvMat( 1, test_sample_count, CV_32SC1,
                                 idx_data + train_sample_count );
    }

    // training procedure

    for ( int i=0; i < params.weak_count; ++i )
    {
        do_subsample();
        for ( int k=0; k < class_count; ++k )
        {
            find_gradient(k);
            CvDTree* tree = new CvDTree;
            tree->train( data, subsample_train );
            change_values(tree, k);

            if (subsample_test)
            {
                CvMat x;
                CvMat x_miss;
                int* sample_data = sample_idx->data.i;
                int* subsample_data = subsample_test->data.i;
                int s_step = (sample_idx->cols > sample_idx->rows) ? 1
                             : sample_idx->step/CV_ELEM_SIZE(sample_idx->type);
                for (int j=0; j<get_len(subsample_test); ++j)
                {
                    int idx = *(sample_data + subsample_data[j]*s_step);
                    float res = 0.0f;
                    if (_tflag == CV_ROW_SAMPLE)
                        cvGetRow( data->train_data, &x, idx);
                    else
                        cvGetCol( data->train_data, &x, idx);

                    if (missing)
                    {
                        if (_tflag == CV_ROW_SAMPLE)
                            cvGetRow( missing, &x_miss, idx);
                        else
                            cvGetCol( missing, &x_miss, idx);

                        res = (float)tree->predict(&x, &x_miss)->value;
                    }
                    else
                    {
                        res = (float)tree->predict(&x)->value;
                    }
                    sum_response_tmp->data.fl[idx + k*n] =
                                    sum_response->data.fl[idx + k*n] +
                                    params.shrinkage * res;
                }
            }

            cvSeqPush( weak[k], &tree );
            tree = 0;
        } // k=0..class_count
        // swap the current and the updated response sums
        CvMat* tmp = sum_response_tmp;
        sum_response_tmp = sum_response;
        sum_response = tmp;
    } // i=0..params.weak_count

    delete[] idx_data;
    cvReleaseMat(&new_responses);
    data->free_train_data();

    return true;

} // CvGBTrees::train(...)
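The train(...) method above is reached through the public CvGBTrees interface of the OpenCV 2.x ml module. A minimal usage sketch for a small regression problem follows; the matrix sizes, the synthetic data, and all parameter values are illustrative assumptions, not part of the original example.

#include <opencv2/core/core_c.h>
#include <opencv2/ml/ml.hpp>

int main()
{
    // 100 samples, 2 features each, one response per sample (regression)
    CvMat* train_data = cvCreateMat( 100, 2, CV_32FC1 );
    CvMat* responses  = cvCreateMat( 100, 1, CV_32FC1 );
    for( int i = 0; i < 100; i++ )
    {
        float x0 = i * 0.1f, x1 = (i % 10) * 0.5f;
        cvmSet( train_data, i, 0, x0 );
        cvmSet( train_data, i, 1, x1 );
        cvmSet( responses,  i, 0, 2.f*x0 + x1 );   // noiseless linear target
    }

    // squared loss, 200 weak trees, shrinkage 0.1, 80% subsampling,
    // depth-3 trees, no surrogate splits
    CvGBTreesParams params( CvGBTrees::SQUARED_LOSS, 200, 0.1f, 0.8f, 3, false );

    CvGBTrees gbt;
    gbt.train( train_data, CV_ROW_SAMPLE, responses, 0, 0, 0, 0, params );

    CvMat* sample = cvCreateMat( 1, 2, CV_32FC1 );
    cvmSet( sample, 0, 0, 1.0f );
    cvmSet( sample, 0, 1, 2.0f );
    float prediction = gbt.predict( sample );   // should be near 4.0
    (void)prediction;

    cvReleaseMat( &sample );
    cvReleaseMat( &responses );
    cvReleaseMat( &train_data );
    return 0;
}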
Exemplo n.º 12
0
bool CvCalibFilter::Undistort( CvMat** srcarr, CvMat** dstarr )
{
    int i;

    if( !srcarr || !dstarr )
    {
        assert(0);
        return false;
    }

    if( isCalibrated )
    {
        for( i = 0; i < cameraCount; i++ )
        {
            if( srcarr[i] && dstarr[i] )
            {
                CvMat src_stub, *src;
                CvMat dst_stub, *dst;

                src = cvGetMat( srcarr[i], &src_stub );
                dst = cvGetMat( dstarr[i], &dst_stub );

                if( src->data.ptr == dst->data.ptr )
                {
                    if( !undistImg || undistImg->width != src->width ||
                        undistImg->height != src->height ||
                        !CV_ARE_TYPES_EQ( undistImg, src ))
                    {
                        cvReleaseMat( &undistImg );
                        undistImg = cvCreateMat( src->height, src->width, src->type );
                    }

                    cvCopy( src, undistImg );
                    src = undistImg;
                }

            #if 1
                {
                CvMat A = cvMat( 3, 3, CV_32FC1, cameraParams[i].matrix );
                CvMat k = cvMat( 1, 4, CV_32FC1, cameraParams[i].distortion );

                if( !undistMap[i][0] || undistMap[i][0]->width != src->width ||
                     undistMap[i][0]->height != src->height )
                {
                    cvReleaseMat( &undistMap[i][0] );
                    cvReleaseMat( &undistMap[i][1] );
                    undistMap[i][0] = cvCreateMat( src->height, src->width, CV_32FC1 );
                    undistMap[i][1] = cvCreateMat( src->height, src->width, CV_32FC1 );
                    cvInitUndistortMap( &A, &k, undistMap[i][0], undistMap[i][1] );
                }

                cvRemap( src, dst, undistMap[i][0], undistMap[i][1] );
            #else
                cvUndistort2( src, dst, &A, &k );
            #endif
                }
            }
        }
    }
    else
    {
        for( i = 0; i < cameraCount; i++ )
        {
            if( srcarr[i] != dstarr[i] )
                cvCopy( srcarr[i], dstarr[i] );
        }
    }


    return true;
}
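The #if 1 branch above caches the undistortion maps and replays them with cvRemap, which is far cheaper than recomputing the distortion model per frame. A standalone sketch of the same caching pattern; the intrinsics (fx, fy, cx, cy) and distortion coefficients are placeholder values, not taken from the original filter.

#include <opencv2/imgproc/imgproc_c.h>

/* Build the maps once for a given image size and camera model,
   then remap every incoming frame. */
void undistort_stream_sketch( IplImage* frame, IplImage* out )
{
    static CvMat *mapx = 0, *mapy = 0;

    float A_data[9] = { 500.f,   0.f, 320.f,    /* fx,  0, cx (placeholders) */
                          0.f, 500.f, 240.f,    /*  0, fy, cy */
                          0.f,   0.f,   1.f };
    float k_data[4] = { -0.2f, 0.05f, 0.f, 0.f };  /* k1, k2, p1, p2 */
    CvMat A = cvMat( 3, 3, CV_32FC1, A_data );
    CvMat k = cvMat( 1, 4, CV_32FC1, k_data );

    if( !mapx )
    {
        mapx = cvCreateMat( frame->height, frame->width, CV_32FC1 );
        mapy = cvCreateMat( frame->height, frame->width, CV_32FC1 );
        cvInitUndistortMap( &A, &k, mapx, mapy );  /* expensive, done once */
    }
    cvRemap( frame, out, mapx, mapy,
             CV_INTER_LINEAR + CV_WARP_FILL_OUTLIERS, cvScalarAll(0) );
}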
Exemplo n.º 13
0
bool CvCalibFilter::Rectify( CvMat** srcarr, CvMat** dstarr )
{
    int i;

    if( !srcarr || !dstarr )
    {
        assert(0);
        return false;
    }

    if( isCalibrated && cameraCount == 2 )
    {
        for( i = 0; i < cameraCount; i++ )
        {
            if( srcarr[i] && dstarr[i] )
            {
                IplImage src_stub, *src;
                IplImage dst_stub, *dst;

                src = cvGetImage( srcarr[i], &src_stub );
                dst = cvGetImage( dstarr[i], &dst_stub );

                if( src->imageData == dst->imageData )
                {
                    if( !undistImg ||
                        undistImg->width != src->width ||
                        undistImg->height != src->height ||
                        CV_MAT_CN(undistImg->type) != src->nChannels )
                    {
                        cvReleaseMat( &undistImg );
                        undistImg = cvCreateMat( src->height, src->width,
                                                 CV_8U + (src->nChannels-1)*8 );
                    }
                    cvCopy( src, undistImg );
                    src = cvGetImage( undistImg, &src_stub );
                }

                cvZero( dst );

                if( !rectMap[i][0] || rectMap[i][0]->width != src->width ||
                    rectMap[i][0]->height != src->height )
                {
                    cvReleaseMat( &rectMap[i][0] );
                    cvReleaseMat( &rectMap[i][1] );
                    rectMap[i][0] = cvCreateMat(stereo.warpSize.height,stereo.warpSize.width,CV_32FC1);
                    rectMap[i][1] = cvCreateMat(stereo.warpSize.height,stereo.warpSize.width,CV_32FC1);
                    cvComputePerspectiveMap(stereo.coeffs[i], rectMap[i][0], rectMap[i][1]);
                }
                cvRemap( src, dst, rectMap[i][0], rectMap[i][1] );
            }
        }
    }
    else
    {
        for( i = 0; i < cameraCount; i++ )
        {
            if( srcarr[i] != dstarr[i] )
                cvCopy( srcarr[i], dstarr[i] );
        }
    }

    return true;
}
Exemplo n.º 14
0
bool CvCalibFilter::FindEtalon( CvMat** mats )
{
    bool result = true;

    if( !mats || etalonPointCount == 0 )
    {
        assert(0);
        result = false;
    }

    if( result )
    {
        int i, tempPointCount0 = etalonPointCount*2;

        for( i = 0; i < cameraCount; i++ )
        {
            if( !latestPoints[i] )
                latestPoints[i] = (CvPoint2D32f*)
                    cvAlloc( tempPointCount0*2*sizeof(latestPoints[0][0]));
        }

        for( i = 0; i < cameraCount; i++ )
        {
            CvSize size;
            int tempPointCount = tempPointCount0;
            bool found = false;

            if( !CV_IS_MAT(mats[i]) && !CV_IS_IMAGE(mats[i]))
            {
                assert(0);
                break;
            }

            size = cvGetSize(mats[i]);

            if( size.width != imgSize.width || size.height != imgSize.height )
            {
                imgSize = size;
            }

            if( !grayImg || grayImg->width != imgSize.width ||
                grayImg->height != imgSize.height )
            {
                cvReleaseMat( &grayImg );
                cvReleaseMat( &tempImg );
                grayImg = cvCreateMat( imgSize.height, imgSize.width, CV_8UC1 );
                tempImg = cvCreateMat( imgSize.height, imgSize.width, CV_8UC1 );
            }

            if( !storage )
                storage = cvCreateMemStorage();

            switch( etalonType )
            {
            case CV_CALIB_ETALON_CHESSBOARD:
                if( CV_MAT_CN(cvGetElemType(mats[i])) == 1 )
                    cvCopy( mats[i], grayImg );
                else
                    cvCvtColor( mats[i], grayImg, CV_BGR2GRAY );
                found = cvFindChessBoardCornerGuesses( grayImg, tempImg, storage,
                                                       cvSize( cvRound(etalonParams[0]),
                                                       cvRound(etalonParams[1])),
                                                       latestPoints[i], &tempPointCount ) != 0;
                if( found )
                    cvFindCornerSubPix( grayImg, latestPoints[i], tempPointCount,
                                        cvSize(5,5), cvSize(-1,-1),
                                        cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,10,0.1));
                break;
            default:
                assert(0);
                result = false;
                break;
            }

            latestCounts[i] = found ? tempPointCount : -tempPointCount;
            result = result && found;
        }
    }

    if( storage )
        cvClearMemStorage( storage );

    return result;
}
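cvFindChessBoardCornerGuesses used above is the old internal entry point; the same detection is exposed publicly as cvFindChessboardCorners. A minimal sketch under the assumption of an 8-bit grayscale input and a 9x6 inner-corner board (the pattern size is a placeholder):

#include <opencv/cv.h>   /* cvFindChessboardCorners, cvFindCornerSubPix */

int find_board_sketch( IplImage* gray )
{
    CvSize pattern = cvSize( 9, 6 );   /* inner corners per row / column */
    CvPoint2D32f corners[9*6];
    int count = 0;

    int found = cvFindChessboardCorners( gray, pattern, corners, &count,
                                         CV_CALIB_CB_ADAPTIVE_THRESH );
    if( found )   /* refine to sub-pixel accuracy, as FindEtalon does above */
        cvFindCornerSubPix( gray, corners, count, cvSize(5,5), cvSize(-1,-1),
                            cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 10, 0.1) );
    return found;
}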
Exemplo n.º 15
0
int main( int argc, char** argv )
{
    CvCapture* capture = 0;
    IplImage *frame, *frame_copy = 0;
    IplImage *image = 0;
    const char* scale_opt = "--scale=";
    int scale_opt_len = (int)strlen(scale_opt);
    const char* cascade_opt = "--cascade=";
    int cascade_opt_len = (int)strlen(cascade_opt);
    const char* nested_cascade_opt = "--nested-cascade";
    int nested_cascade_opt_len = (int)strlen(nested_cascade_opt);
    int i;
    const char* input_name = 0;

    for( i = 1; i < argc; i++ )
    {
        if( strncmp( argv[i], cascade_opt, cascade_opt_len) == 0 )
            cascade_name = argv[i] + cascade_opt_len;
        else if( strncmp( argv[i], nested_cascade_opt, nested_cascade_opt_len ) == 0 )
        {
            if( argv[i][nested_cascade_opt_len] == '=' )
                nested_cascade_name = argv[i] + nested_cascade_opt_len + 1;
            nested_cascade = (CvHaarClassifierCascade*)cvLoad( nested_cascade_name, 0, 0, 0 );
            if( !nested_cascade )
                fprintf( stderr, "WARNING: Could not load classifier cascade for nested objects\n" );
        }
        else if( strncmp( argv[i], scale_opt, scale_opt_len ) == 0 )
        {
            if( !sscanf( argv[i] + scale_opt_len, "%lf", &scale ) || scale < 1 )
                scale = 1;
        }
        else if( argv[i][0] == '-' )
        {
            fprintf( stderr, "WARNING: Unknown option %s\n", argv[i] );
        }
        else
            input_name = argv[i];
    }

    cascade = (CvHaarClassifierCascade*)cvLoad( cascade_name, 0, 0, 0 );

    if( !cascade )
    {
        fprintf( stderr, "ERROR: Could not load classifier cascade\n" );
        fprintf( stderr,
        "Usage: facedetect [--cascade=\"<cascade_path>\"]\n"
        "   [--nested-cascade[=\"nested_cascade_path\"]]\n"
        "   [--scale[=<image scale>\n"
        "   [filename|camera_index]\n" );
        return -1;
    }
    storage = cvCreateMemStorage(0);
    
    if( !input_name || (isdigit(input_name[0]) && input_name[1] == '\0') )
        capture = cvCaptureFromCAM( !input_name ? 0 : input_name[0] - '0' );
    else if( input_name )
    {
        image = cvLoadImage( input_name, 1 );
        if( !image )
            capture = cvCaptureFromAVI( input_name );
    }
    else
        image = cvLoadImage( "lena.jpg", 1 );

    cvNamedWindow( "result", 1 );

    if( capture )
    {
        for(;;)
        {
            if( !cvGrabFrame( capture ))
                break;
            frame = cvRetrieveFrame( capture );
            if( !frame )
                break;
            if( !frame_copy )
                frame_copy = cvCreateImage( cvSize(frame->width,frame->height),
                                            IPL_DEPTH_8U, frame->nChannels );
            if( frame->origin == IPL_ORIGIN_TL )
                cvCopy( frame, frame_copy, 0 );
            else
                cvFlip( frame, frame_copy, 0 );
            
            detect_and_draw( frame_copy );

            if( cvWaitKey( 10 ) >= 0 )
                goto _cleanup_;
        }

        cvWaitKey(0);
_cleanup_:
        cvReleaseImage( &frame_copy );
        cvReleaseCapture( &capture );
    }
    else
    {
        if( image )
        {
            detect_and_draw( image );
            cvWaitKey(0);
            cvReleaseImage( &image );
        }
        else if( input_name )
        {
            /* assume it is a text file containing the
               list of the image filenames to be processed - one per line */
            FILE* f = fopen( input_name, "rt" );
            if( f )
            {
                char buf[1000+1];
                while( fgets( buf, 1000, f ) )
                {
                    int len = (int)strlen(buf), c;
                    while( len > 0 && isspace(buf[len-1]) )
                        len--;
                    buf[len] = '\0';
                    printf( "file %s\n", buf ); 
                    image = cvLoadImage( buf, 1 );
                    if( image )
                    {
                        detect_and_draw( image );
                        c = cvWaitKey(0);
                        if( c == 27 || c == 'q' || c == 'Q' )
                            break;
                        cvReleaseImage( &image );
                    }
                }
                fclose(f);
            }
        }
    }
    
    cvDestroyWindow("result");

    return 0;
}
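detect_and_draw is called throughout this example but not listed with it. A plausible minimal sketch, assuming the cascade, storage and scale globals declared earlier; the nested-cascade pass of the full sample is omitted for brevity.

void detect_and_draw( IplImage* img )
{
    IplImage* gray = cvCreateImage( cvSize(img->width, img->height), 8, 1 );
    IplImage* small_img = cvCreateImage( cvSize( cvRound(img->width/scale),
                                                 cvRound(img->height/scale) ), 8, 1 );
    int i;

    cvCvtColor( img, gray, CV_BGR2GRAY );
    cvResize( gray, small_img, CV_INTER_LINEAR );
    cvEqualizeHist( small_img, small_img );
    cvClearMemStorage( storage );

    CvSeq* faces = cvHaarDetectObjects( small_img, cascade, storage, 1.1, 2,
                                        CV_HAAR_DO_CANNY_PRUNING, cvSize(30,30) );
    for( i = 0; i < (faces ? faces->total : 0); i++ )
    {
        CvRect* r = (CvRect*)cvGetSeqElem( faces, i );
        /* scale the detection back up to the original image coordinates */
        CvPoint center = cvPoint( cvRound((r->x + r->width*0.5)*scale),
                                  cvRound((r->y + r->height*0.5)*scale) );
        int radius = cvRound((r->width + r->height)*0.25*scale);
        cvCircle( img, center, radius, CV_RGB(255,0,0), 3, 8, 0 );
    }
    cvShowImage( "result", img );
    cvReleaseImage( &gray );
    cvReleaseImage( &small_img );
}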
Exemplo n.º 16
0
bool ImageViewer::openImage(const QString &fileName)
{
    if ( !fileName.isEmpty() )
    {
        deleteImage();

        std::string sstr = fileName.toLocal8Bit().data();
        IplImage* newImage = cvLoadImage( sstr.c_str(), 1 );

        if ( !newImage )
        {
            QMessageBox::warning( this,
                                  tr("提示"),
                                  tr("无法打开文件 %1.\n").arg(fileName),
                                  QMessageBox::Close);
            return false;
        }

        _orgWidth = newImage->width;
        _orgHeight = newImage->height;

        if (cv::max(newImage->width,newImage->height) > 800)
        {
            _scale = cv::max(newImage->width/600.0f, newImage->height/600.0f);
            _ocvImage = cvCreateImage(cvSize(newImage->width/_scale, newImage->height/_scale), 8, 3);
            cvResize(newImage,_ocvImage);
        }
        else if (newImage->width <= 250 || newImage->height <= 250)
        {
            _scale = cv::min(newImage->width/300.0f, newImage->height/300.0f);
            _ocvImage = cvCreateImage(cvSize(newImage->width/_scale,newImage->height/_scale),8,3);
            cvResize(newImage,_ocvImage);
        }
        else
        {
            _ocvImage = cvCreateImage(cvSize(newImage->width, newImage->height), 8, 3);
            cvCopy(newImage, _ocvImage);
        }


        if( !_srcOcvImage )
        {
            _srcOcvImage = cvCreateImage(cvSize(_ocvImage->width, _ocvImage->height), _ocvImage->depth, _ocvImage->nChannels);
            cvCopy(_ocvImage, _srcOcvImage);
        }

        _srcWidth = _ocvImage->width;
        _srcHeight = _ocvImage->height;

        _labelMapImage = new QImage(_ocvImage->width, _ocvImage->height, QImage::Format_ARGB32);
        QImage _alphaImage(_ocvImage->width, _ocvImage->height, QImage::Format_Indexed8);
        _alphaImage.fill(255);
        _labelMapImage->setAlphaChannel(_alphaImage);
        _labelMapImage->fill(qRgb(0, 0, 0));

        QImage *image = IplImageToQImage( _ocvImage );

        setImage( *image );
        DELETEPTR(image);

        if(CV_IS_IMAGE(newImage))
        {
            cvReleaseImage(&newImage);
            newImage = 0;
        }
    }
    return true;
}
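IplImageToQImage is used above but not defined in this example. One possible implementation for the 3-channel, 8-bit BGR images this viewer creates; other depths and channel counts are deliberately ignored in this sketch.

#include <QImage>
#include <opencv/cv.h>

// Convert a 3-channel 8-bit BGR IplImage into a heap-allocated QImage.
QImage* IplImageToQImage( const IplImage* img )
{
    QImage* out = new QImage( img->width, img->height, QImage::Format_RGB888 );
    for( int y = 0; y < img->height; ++y )
    {
        const uchar* src = (const uchar*)img->imageData + y * img->widthStep;
        uchar* dst = out->scanLine( y );
        for( int x = 0; x < img->width; ++x )
        {
            dst[3*x]     = src[3*x + 2];   // R (source is BGR)
            dst[3*x + 1] = src[3*x + 1];   // G
            dst[3*x + 2] = src[3*x];       // B
        }
    }
    return out;
}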
Exemplo n.º 17
0
CV_IMPL void
cvSegmentImage( const CvArr* srcarr, CvArr* dstarr,
                double canny_threshold, double ffill_threshold )
{
    CvMat* gray = 0;
    CvMat* canny = 0;
    void* stack = 0;

    CV_FUNCNAME( "cvSegmentImage" );

    __BEGIN__;

    CvMat srcstub, *src;
    CvMat dststub, *dst;
    CvMat* mask;
    CvSize size;
    CvPoint pt;
    int ffill_lw_up = cvRound( fabs(ffill_threshold) );

    CV_CALL( src = cvGetMat( srcarr, &srcstub ));
    CV_CALL( dst = cvGetMat( dstarr, &dststub ));

    if( src->data.ptr != dst->data.ptr )
    {
        CV_CALL( cvCopy( src, dst ));
        src = dst;
    }

    size = cvGetSize( src );

    CV_CALL( gray = cvCreateMat( size.height, size.width, CV_8UC1 ));
    CV_CALL( canny = cvCreateMat( size.height, size.width, CV_8UC1 ));

    CV_CALL( stack = cvAlloc( size.width * size.height * sizeof(Seg)));

    cvCvtColor( src, gray, CV_BGR2GRAY );
    cvCanny( gray, canny, 0, canny_threshold, 5 );

    mask = canny; // a new name for new role

    // make a non-zero border.
    cvRectangle( mask, cvPoint(0,0), cvPoint(size.width-1,size.height-1), 1, 1 );

    for( pt.y = 0; pt.y < size.height; pt.y++ )
    {
        for( pt.x = 0; pt.x < size.width; pt.x++ )
        {
            if( mask->data.ptr[mask->step*pt.y + pt.x] == 0 )
            {
                CvConnectedComp region;
                int avgVal[3] = { 0, 0, 0 };

                icvSegmFloodFill_Stage1( src->data.ptr, src->step,
                                         mask->data.ptr, mask->step,
                                         size, pt, avgVal,
                                         ffill_lw_up, ffill_lw_up,
                                         &region, stack );

                icvSegmFloodFill_Stage2( src->data.ptr, src->step,
                                         mask->data.ptr, mask->step,
                                         size, avgVal,
                                         region.rect );
            }
        }
    }

    __END__;

    cvReleaseMat( &gray );
    cvReleaseMat( &canny );
    cvFree( &stack );
}
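A minimal driver for the segmentation routine above, assuming a 3-channel BGR input image; the threshold values 80 and 20 and the default file name are arbitrary.

#include <opencv/cvaux.h>    /* cvSegmentImage lives in the legacy cvaux module */
#include <opencv/highgui.h>

int main( int argc, char** argv )
{
    IplImage* src = cvLoadImage( argc > 1 ? argv[1] : "input.jpg", 1 );
    if( !src ) return -1;

    IplImage* dst = cvCloneImage( src );
    cvSegmentImage( src, dst, 80, 20 );   /* canny_threshold, ffill_threshold */

    cvNamedWindow( "segmented", 1 );
    cvShowImage( "segmented", dst );
    cvWaitKey(0);

    cvReleaseImage( &dst );
    cvReleaseImage( &src );
    return 0;
}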
Exemplo n.º 18
0
/* Optimization using Levenberg-Marquardt */
void cvLevenbergMarquardtOptimization(pointer_LMJac JacobianFunction,
                                    pointer_LMFunc function,
                                    /*pointer_Err error_function,*/
                                    CvMat *X0,CvMat *observRes,CvMat *resultX,
                                    int maxIter,double epsilon)
{
    /* This is not a sparse method.                  */
    /* Performs the optimization using               */
    /* func - the function to evaluate, and          */
    /* a user-supplied function for the Jacobian     */

    /* Allocate memory */
    CvMat *vectX = 0;
    CvMat *vectNewX = 0;
    CvMat *resFunc = 0;
    CvMat *resNewFunc = 0;
    CvMat *error = 0;
    CvMat *errorNew = 0;
    CvMat *Jac = 0;
    CvMat *delta = 0;
    CvMat *matrJtJ = 0;
    CvMat *matrJtJN = 0;
    CvMat *matrJt = 0;
    CvMat *vectB = 0;
   
    CV_FUNCNAME( "cvLevenbergMarquardtOptimization" );
    __BEGIN__;


    if( JacobianFunction == 0 || function == 0 || X0 == 0 || observRes == 0 || resultX == 0 )
    {
        CV_ERROR( CV_StsNullPtr, "Some of the parameters are NULL pointers" );
    }

    if( !CV_IS_MAT(X0) || !CV_IS_MAT(observRes) || !CV_IS_MAT(resultX) )
    {
        CV_ERROR( CV_StsUnsupportedFormat, "All input parameters must be matrices" );
    }


    int numVal;
    int numFunc;
    double valError;
    double valNewError;

    numVal = X0->rows;
    numFunc = observRes->rows;

    /* test input data */
    if( X0->cols != 1 )
    {
        CV_ERROR( CV_StsUnmatchedSizes, "Number of columns of vector X0 must be 1" );
    }
    
    if( observRes->cols != 1 )
    {
        CV_ERROR( CV_StsUnmatchedSizes, "Number of columns of the observed result vector must be 1" );
    }

    if( resultX->cols != 1 || resultX->rows != numVal )
    {
        CV_ERROR( CV_StsUnmatchedSizes, "Size of result vector X must be equal to size of X0" );
    }

    if( maxIter <= 0  )
    {
        CV_ERROR( CV_StsUnmatchedSizes, "Maximum number of iterations must be > 0" );
    }

    if( epsilon < 0 )
    {
        CV_ERROR( CV_StsUnmatchedSizes, "Epsilon must be >= 0" );
    }

    /* copy x0 to current value of x */
    CV_CALL( vectX      = cvCreateMat(numVal, 1,      CV_64F) );
    CV_CALL( vectNewX   = cvCreateMat(numVal, 1,      CV_64F) );
    CV_CALL( resFunc    = cvCreateMat(numFunc,1,      CV_64F) );
    CV_CALL( resNewFunc = cvCreateMat(numFunc,1,      CV_64F) );
    CV_CALL( error      = cvCreateMat(numFunc,1,      CV_64F) );
    CV_CALL( errorNew   = cvCreateMat(numFunc,1,      CV_64F) );
    CV_CALL( Jac        = cvCreateMat(numFunc,numVal, CV_64F) );
    CV_CALL( delta      = cvCreateMat(numVal, 1,      CV_64F) );
    CV_CALL( matrJtJ    = cvCreateMat(numVal, numVal, CV_64F) );
    CV_CALL( matrJtJN   = cvCreateMat(numVal, numVal, CV_64F) );
    CV_CALL( matrJt     = cvCreateMat(numVal, numFunc,CV_64F) );
    CV_CALL( vectB      = cvCreateMat(numVal, 1,      CV_64F) );

    cvCopy(X0,vectX);

    /* ========== Main optimization loop ============ */
    double change;
    int currIter;
    double alpha;

    change = 1;
    currIter = 0;
    alpha = 0.001;

    do {

        /* Compute value of function */
        function(vectX,resFunc);
        /* Print result of function to file */

        /* Compute error */
        cvSub(observRes,resFunc,error);        
        
        //valError = error_function(observRes,resFunc);
        /* Need to use new version of computing error (norm) */
        valError = cvNorm(observRes,resFunc);

        /* Compute Jacobian for given point vectX */
        JacobianFunction(vectX,Jac);

        /* Define optimal delta for J'*J*delta=J'*error */
        /* compute J'J */
        cvMulTransposed(Jac,matrJtJ,1);
        
        cvCopy(matrJtJ,matrJtJN);

        /* compute J'*error */
        cvTranspose(Jac,matrJt);
        cvmMul(matrJt,error,vectB);


        /* Solve normal equation for given alpha and Jacobian */
        do
        {
            /* Increase diagonal elements by alpha */
            for( int i = 0; i < numVal; i++ )
            {
                double val;
                val = cvmGet(matrJtJ,i,i);
                cvmSet(matrJtJN,i,i,(1+alpha)*val);
            }

            /* Solve system to define delta */
            cvSolve(matrJtJN,vectB,delta,CV_SVD);

            /* We know delta and we can define new value of vector X */
            cvAdd(vectX,delta,vectNewX);

            /* Compute result of function for new vector X */
            function(vectNewX,resNewFunc);
            cvSub(observRes,resNewFunc,errorNew);

            valNewError = cvNorm(observRes,resNewFunc);

            currIter++;

            if( valNewError < valError )
            {/* accept new value */
                valError = valNewError;

                /* Compute relative change of required parameter vectorX. change = norm(curr-prev) / norm(curr) )  */
                change = cvNorm(vectX, vectNewX, CV_RELATIVE_L2);

                alpha /= 10;
                cvCopy(vectNewX,vectX);
                break;
            }
            else
            {
                alpha *= 10;
            }

        } while ( currIter < maxIter  );
        /* new value of X and alpha were accepted */

    } while ( change > epsilon && currIter < maxIter );


    /* result was computed */
    cvCopy(vectX,resultX);

    __END__;

    cvReleaseMat(&vectX);
    cvReleaseMat(&vectNewX);
    cvReleaseMat(&resFunc);
    cvReleaseMat(&resNewFunc);
    cvReleaseMat(&error);
    cvReleaseMat(&errorNew);
    cvReleaseMat(&Jac);
    cvReleaseMat(&delta);
    cvReleaseMat(&matrJtJ);
    cvReleaseMat(&matrJtJN);
    cvReleaseMat(&matrJt);
    cvReleaseMat(&vectB);

    return;
}
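The solver above takes two callbacks: function(X, out) evaluates the model at the current parameter vector, and JacobianFunction(X, J) fills the numFunc x numVal Jacobian. A toy sketch fitting the line y = p0 + p1*x at four fixed sample points; it assumes the callback typedefs are void(*)(CvMat*, CvMat*), consistent with the calls above.

#include <opencv/cv.h>

static const double xs[4] = { 0.0, 1.0, 2.0, 3.0 };

/* Model: res[i] = p0 + p1 * xs[i] */
static void line_func( CvMat* X, CvMat* res )
{
    int i;
    for( i = 0; i < 4; i++ )
        cvmSet( res, i, 0, cvmGet(X,0,0) + cvmGet(X,1,0) * xs[i] );
}

/* Jacobian: d res[i]/d p0 = 1,  d res[i]/d p1 = xs[i] */
static void line_jac( CvMat* X, CvMat* J )
{
    int i;
    (void)X;   /* the Jacobian of a linear model does not depend on X */
    for( i = 0; i < 4; i++ )
    {
        cvmSet( J, i, 0, 1.0 );
        cvmSet( J, i, 1, xs[i] );
    }
}

void fit_line_sketch( void )
{
    double x0d[2]  = { 0.0, 0.0 };            /* initial guess */
    double obsd[4] = { 1.0, 3.0, 5.0, 7.0 };  /* observations of y = 1 + 2x */
    double outd[2];
    CvMat X0  = cvMat( 2, 1, CV_64F, x0d );
    CvMat obs = cvMat( 4, 1, CV_64F, obsd );
    CvMat out = cvMat( 2, 1, CV_64F, outd );

    cvLevenbergMarquardtOptimization( line_jac, line_func,
                                      &X0, &obs, &out, 100, 1e-8 );
    /* outd should now be close to { 1.0, 2.0 } */
}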
Exemplo n.º 19
0
void CvAdaptiveSkinDetector::process(IplImage *inputBGRImage, IplImage *outputHueMask)
{
    IplImage *src = inputBGRImage;

    int h, v, i, l;
    bool isInit = false;

    nFrameCount++;

    if (imgHueFrame == NULL)
    {
        isInit = true;
        initData(src, nSamplingDivider, nSamplingDivider);
    }

    unsigned char *pShrinked, *pHueFrame, *pMotionFrame, *pLastGrayFrame, *pFilteredFrame, *pGrayFrame;
    pShrinked = (unsigned char *)imgShrinked->imageData;
    pHueFrame = (unsigned char *)imgHueFrame->imageData;
    pMotionFrame = (unsigned char *)imgMotionFrame->imageData;
    pLastGrayFrame = (unsigned char *)imgLastGrayFrame->imageData;
    pFilteredFrame = (unsigned char *)imgFilteredFrame->imageData;
    pGrayFrame = (unsigned char *)imgGrayFrame->imageData;

    if ((src->width != imgHueFrame->width) || (src->height != imgHueFrame->height))
    {
        cvResize(src, imgShrinked);
        cvCvtColor(imgShrinked, imgHSVFrame, CV_BGR2HSV);
    }
    else
    {
        cvCvtColor(src, imgHSVFrame, CV_BGR2HSV);
    }

    cvSplit(imgHSVFrame, imgHueFrame, imgSaturationFrame, imgGrayFrame, 0);

    cvSetZero(imgMotionFrame);
    cvSetZero(imgFilteredFrame);

    l = imgHueFrame->height * imgHueFrame->width;

    for (i = 0; i < l; i++)
    {
        v = (*pGrayFrame);
        if ((v >= GSD_INTENSITY_LT) && (v <= GSD_INTENSITY_UT))
        {
            h = (*pHueFrame);
            if ((h >= GSD_HUE_LT) && (h <= GSD_HUE_UT))
            {
                if ((h >= nSkinHueLowerBound) && (h <= nSkinHueUpperBound))
                    ASD_INTENSITY_SET_PIXEL(pFilteredFrame, h);

                if (ASD_IS_IN_MOTION(pLastGrayFrame, v, 7))
                    ASD_INTENSITY_SET_PIXEL(pMotionFrame, h);
            }
        }
        pShrinked += 3;
        pGrayFrame++;
        pLastGrayFrame++;
        pMotionFrame++;
        pHueFrame++;
        pFilteredFrame++;
    }

    if (isInit)
        cvCalcHist(&imgHueFrame, skinHueHistogram.fHistogram);

    cvCopy(imgGrayFrame, imgLastGrayFrame);

    cvErode(imgMotionFrame, imgTemp);  // eliminate disperse pixels, which occur because of the camera noise
    cvDilate(imgTemp, imgMotionFrame);

    cvCalcHist(&imgMotionFrame, histogramHueMotion.fHistogram);

    skinHueHistogram.mergeWith(&histogramHueMotion, fHistogramMergeFactor);

    skinHueHistogram.findCurveThresholds(nSkinHueLowerBound, nSkinHueUpperBound, 1 - fHuePercentCovered);

    switch (nMorphingMethod)
    {
        case MORPHING_METHOD_ERODE :
            cvErode(imgFilteredFrame, imgTemp);
            cvCopy(imgTemp, imgFilteredFrame);
            break;
        case MORPHING_METHOD_ERODE_ERODE :
            cvErode(imgFilteredFrame, imgTemp);
            cvErode(imgTemp, imgFilteredFrame);
            break;
        case MORPHING_METHOD_ERODE_DILATE :
            cvErode(imgFilteredFrame, imgTemp);
            cvDilate(imgTemp, imgFilteredFrame);
            break;
    }

    if (outputHueMask != NULL)
        cvCopy(imgFilteredFrame, outputHueMask);
}
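A minimal capture loop around the detector above; the constructor arguments (sampling divider and morphing mode) are assumed from the contrib-module header, and a divider of 1 keeps the output mask the same size as the input frame.

#include <opencv2/contrib/contrib.hpp>
#include <opencv2/highgui/highgui_c.h>

int main()
{
    CvCapture* cap = cvCaptureFromCAM(0);
    if( !cap ) return -1;

    CvAdaptiveSkinDetector detector( 1,
        CvAdaptiveSkinDetector::MORPHING_METHOD_ERODE_DILATE );

    IplImage* mask = 0;
    cvNamedWindow( "skin", 1 );
    for(;;)
    {
        IplImage* frame = cvQueryFrame( cap );
        if( !frame ) break;
        if( !mask )
            mask = cvCreateImage( cvGetSize(frame), IPL_DEPTH_8U, 1 );

        detector.process( frame, mask );   // fills the hue mask shown above
        cvShowImage( "skin", mask );
        if( cvWaitKey(10) >= 0 ) break;
    }
    cvReleaseImage( &mask );
    cvReleaseCapture( &cap );
    return 0;
}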
Exemplo n.º 20
0
int main (int argc, char * const argv[]) 
{	
    // create all necessary instances
    cvNamedWindow (WINDOW_NAME, CV_WINDOW_AUTOSIZE);
    CvMemStorage* storage = cvCreateMemStorage(0);
    assert (storage);
	
	IplImage * backgroundx=NULL;
	IplImage *  backgrounds=NULL;
	IplImage * current_frame=NULL;
	IplImage * backgroundgray=NULL;
	
	if (argc!=4) {
		abort ();
	}
	else {
		backgrounds=cvLoadImage(argv[2], 4);
		backgroundx=cvLoadImage(argv[3], 4);
		backgroundgray=cvLoadImage(argv[3], 0);
		current_frame=cvLoadImage(argv[1],4);
		if (backgroundx==NULL) {
			abort();
		}
	}
	
	
	//get background
	
	cvShowImage (WINDOW_NAME, backgrounds);
	
	//backgroundx=seam(backgroundx, backgrounds->width, backgrounds->height);
	
    // get an initial frame and duplicate it for later work
    IplImage *  draw_image    = cvCreateImage(cvSize (current_frame->width, current_frame->height), IPL_DEPTH_8U, 3);
    IplImage *  get_image    = cvCreateImage(cvSize (current_frame->width, current_frame->height), IPL_DEPTH_8U, 3);
    cvCopy(backgrounds, get_image, NULL);
	
	int h,w;
	
	
    // compare the two backgrounds pixel by pixel and compose the output
   		for (w=0; w<backgrounds->width; w++) {
			for (h=0; h<backgrounds->height; h++) {
				uchar* p=&CV_IMAGE_ELEM(backgrounds,uchar,h,w*3);
				uchar* x=&CV_IMAGE_ELEM(backgroundx,uchar,h,w*3);
				uchar* q=&CV_IMAGE_ELEM(current_frame,uchar,h,w*3);
				uchar* temp=&CV_IMAGE_ELEM(draw_image,uchar,h,w*3);
				if (comparex(p,q)==1) {
					copypoint(x,temp);
				}
				else {
					copypoint(q,temp);
				}
			}
		}
        
        // just show the image
        cvShowImage (WINDOW_NAME, draw_image);
        
        // wait for a keypress (cvWaitKey(0) blocks indefinitely)
        int key = cvWaitKey (0);
        if (key == 'q' || key == 'Q')
            abort();
    
    
    // be nice and return no error
    return 0;
}
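comparex and copypoint are called above but not shown. Plausible one-liners under the assumption that comparex reports whether two 3-byte BGR pixels match within a per-channel tolerance; the threshold of 30 is a guess.

#include <stdlib.h>   /* abs */

/* 1 when the two BGR pixels differ by less than tol in every channel,
   i.e. the current pixel still looks like the reference background */
int comparex( unsigned char* p, unsigned char* q )
{
    const int tol = 30;   /* placeholder tolerance */
    return abs(p[0]-q[0]) < tol && abs(p[1]-q[1]) < tol && abs(p[2]-q[2]) < tol;
}

/* copy one 3-byte pixel */
void copypoint( unsigned char* src, unsigned char* dst )
{
    dst[0] = src[0]; dst[1] = src[1]; dst[2] = src[2];
}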
Exemplo n.º 21
0
public:void detect_and_draw( IplImage* img, IplImage* imgAnterior ) {
        static CvScalar colors[] = {
            {{0,0,255}},
            {{0,128,255}},
            {{0,255,255}},
            {{0,255,0}},
            {{255,128,0}},
            {{255,255,0}},
            {{255,0,0}},
            {{255,0,255}}
        };

        double scale = 1.3;
        IplImage* gray = cvCreateImage( cvSize(img->width,img->height), 8, 1 );
        IplImage* small_img = cvCreateImage( cvSize( cvRound (img->width/scale),
                             cvRound (img->height/scale)),
                         8, 1 );
        int i;

        IplImage* onlyhaart = cvCreateImage(cvGetSize(img), img->depth, img->nChannels);
        cvCopy(img, onlyhaart);

        cvCvtColor( img, gray, CV_BGR2GRAY );
        cvResize( gray, small_img, CV_INTER_LINEAR );
        cvEqualizeHist( small_img, small_img );
        cvClearMemStorage( storageHaart );

        if(cascade) {
            double t = (double)cvGetTickCount();
            CvSeq* faces = cvHaarDetectObjects(small_img, cascade, storageHaart,
                                                1.1, 2, 0/*CV_HAAR_DO_CANNY_PRUNING*/,
                                                cvSize(30, 30) );
            t = (double)cvGetTickCount() - t;
            for(i = 0; i < (faces ? faces->total : 0); i++ ) {
                CvRect* r = (CvRect*)cvGetSeqElem( faces, i );

                CvRect rect = cvRect(r->x, r->y, r->width, r->height);

                if ((rect.height < (img->height + 1)) & (rect.width < (img->width + 1))
                        & analizarMhi(img, imgAnterior, 30, rect)) {
                    printf( "detection time = %gms\n", t/((double)cvGetTickFrequency()*1000.) );
                    CvPoint center;
                    int radius;
                    center.x = cvRound((rect.x + rect.width*0.5)*scale);
                    center.y = cvRound((rect.y + rect.height*0.5)*scale);
                    radius = cvRound((rect.width + rect.height)*0.25*scale);
                    cvCircle( img, center, radius, colors[i%8], 3, 8, 0 );
                }
                CvPoint center;
                int radius;
                center.x = cvRound((rect.x + rect.width*0.5)*scale);
                center.y = cvRound((rect.y + rect.height*0.5)*scale);
                radius = cvRound((rect.width + rect.height)*0.25*scale);
                cvCircle( onlyhaart, center, radius, colors[i%8], 3, 8, 0 );
            }
        }
        cvShowImage( "Detecta", img );
        cvShowImage( "onlyhaart", onlyhaart);
        cvShowImage("bluedetect", imgAnterior);
        cvReleaseImage( &gray );
        cvReleaseImage( &small_img );
    }
Exemplo n.º 22
0
// Function cvUpdateFGDStatModel updates statistical model and returns number of foreground regions
// parameters:
//      curr_frame  - current frame from video sequence
//      p_model     - pointer to CvFGDStatModel structure
static int CV_CDECL
icvUpdateFGDStatModel( IplImage* curr_frame, CvFGDStatModel*  model, double )
{
    int            mask_step = model->Ftd->widthStep;
    CvSeq         *first_seq = NULL, *prev_seq = NULL, *seq = NULL;
    IplImage*      prev_frame = model->prev_frame;
    int            region_count = 0;
    int            FG_pixels_count = 0;
    int            deltaC  = cvRound(model->params.delta * 256 / model->params.Lc);
    int            deltaCC = cvRound(model->params.delta * 256 / model->params.Lcc);
    int            i, j, k, l;

    //clear storages
    cvClearMemStorage(model->storage);
    cvZero(model->foreground);

    // Form foreground pixel candidates using image differencing
    // with adaptive thresholding.  The algorithm is from:
    //
    //    Thresholding for Change Detection
    //    Paul L. Rosin 1998 6p
    //    http://www.cis.temple.edu/~latecki/Courses/CIS750-03/Papers/thresh-iccv.pdf
    //
    cvChangeDetection( prev_frame, curr_frame, model->Ftd );
    cvChangeDetection( model->background, curr_frame, model->Fbd );

    for( i = 0; i < model->Ftd->height; i++ )
    {
        for( j = 0; j < model->Ftd->width; j++ )
        {
            if( ((uchar*)model->Fbd->imageData)[i*mask_step+j] || ((uchar*)model->Ftd->imageData)[i*mask_step+j] )
            {
                float Pb  = 0;
                float Pv  = 0;
                float Pvb = 0;

                CvBGPixelStat* stat = model->pixel_stat + i * model->Ftd->width + j;

                CvBGPixelCStatTable*   ctable = stat->ctable;
                CvBGPixelCCStatTable* cctable = stat->cctable;

                uchar* curr_data = (uchar*)(curr_frame->imageData) + i*curr_frame->widthStep + j*3;
                uchar* prev_data = (uchar*)(prev_frame->imageData) + i*prev_frame->widthStep + j*3;

                int val = 0;

                // Is it a motion pixel?
                if( ((uchar*)model->Ftd->imageData)[i*mask_step+j] )
                {
                    if( !stat->is_trained_dyn_model )
                    {
                        val = 1;
                    }
                    else
                    {

                        // Compare with stored CCt vectors:
                        for( k = 0;  PV_CC(k) > model->params.alpha2 && k < model->params.N1cc;  k++ )
                        {
                            if ( abs( V_CC(k,0) - prev_data[0]) <=  deltaCC &&
                                 abs( V_CC(k,1) - prev_data[1]) <=  deltaCC &&
                                 abs( V_CC(k,2) - prev_data[2]) <=  deltaCC &&
                                 abs( V_CC(k,3) - curr_data[0]) <=  deltaCC &&
                                 abs( V_CC(k,4) - curr_data[1]) <=  deltaCC &&
                                 abs( V_CC(k,5) - curr_data[2]) <=  deltaCC)
                            {
                                Pv += PV_CC(k);
                                Pvb += PVB_CC(k);
                            }
                        }
                        Pb = stat->Pbcc;
                        if( 2 * Pvb * Pb <= Pv ) val = 1;
                    }
                }
                else if( stat->is_trained_st_model )
                {
                    // Compare with stored Ct vectors:
                    for( k = 0;  PV_C(k) > model->params.alpha2 && k < model->params.N1c;  k++ )
                    {
                        if ( abs( V_C(k,0) - curr_data[0]) <=  deltaC &&
                             abs( V_C(k,1) - curr_data[1]) <=  deltaC &&
                             abs( V_C(k,2) - curr_data[2]) <=  deltaC )
                        {
                            Pv += PV_C(k);
                            Pvb += PVB_C(k);
                        }
                    }
                    Pb = stat->Pbc;
                    if( 2 * Pvb * Pb <= Pv ) val = 1;
                }

                // Update foreground:
                ((uchar*)model->foreground->imageData)[i*mask_step+j] = (uchar)(val*255);
                FG_pixels_count += val;

            }		// end if( change detection...
        }		// for j...
    }			// for i...
    //end BG/FG classification

    // Foreground segmentation.
    // Smooth foreground map:
    if( model->params.perform_morphing ){
        cvMorphologyEx( model->foreground, model->foreground, 0, 0, CV_MOP_OPEN,  model->params.perform_morphing );
        cvMorphologyEx( model->foreground, model->foreground, 0, 0, CV_MOP_CLOSE, model->params.perform_morphing );
    }


    if( model->params.minArea > 0 || model->params.is_obj_without_holes ){

        // Discard under-size foreground regions:
        cvFindContours( model->foreground, model->storage, &first_seq, sizeof(CvContour), CV_RETR_LIST );
        for( seq = first_seq; seq; seq = seq->h_next )
        {
            CvContour* cnt = (CvContour*)seq;
            if( cnt->rect.width * cnt->rect.height < model->params.minArea ||
                (model->params.is_obj_without_holes && CV_IS_SEQ_HOLE(seq)) )
            {
                // Delete under-size contour:
                prev_seq = seq->h_prev;
                if( prev_seq )
                {
                    prev_seq->h_next = seq->h_next;
                    if( seq->h_next ) seq->h_next->h_prev = prev_seq;
                }
                else
                {
                    first_seq = seq->h_next;
                    if( seq->h_next ) seq->h_next->h_prev = NULL;
                }
            }
            else
            {
                region_count++;
            }
        }
        model->foreground_regions = first_seq;
        cvZero(model->foreground);
        cvDrawContours(model->foreground, first_seq, CV_RGB(0, 0, 255), CV_RGB(0, 0, 255), 10, -1);

    } else {

        model->foreground_regions = NULL;
    }

    // Check ALL BG update condition:
    if( ((float)FG_pixels_count/(model->Ftd->width*model->Ftd->height)) > CV_BGFG_FGD_BG_UPDATE_TRESH )
    {
         for( i = 0; i < model->Ftd->height; i++ )
             for( j = 0; j < model->Ftd->width; j++ )
             {
                 CvBGPixelStat* stat = model->pixel_stat + i * model->Ftd->width + j;
                 stat->is_trained_st_model = stat->is_trained_dyn_model = 1;
             }
    }


    // Update background model:
    for( i = 0; i < model->Ftd->height; i++ )
    {
        for( j = 0; j < model->Ftd->width; j++ )
        {
            CvBGPixelStat* stat = model->pixel_stat + i * model->Ftd->width + j;
            CvBGPixelCStatTable* ctable = stat->ctable;
            CvBGPixelCCStatTable* cctable = stat->cctable;

            uchar *curr_data = (uchar*)(curr_frame->imageData)+i*curr_frame->widthStep+j*3;
            uchar *prev_data = (uchar*)(prev_frame->imageData)+i*prev_frame->widthStep+j*3;

            if( ((uchar*)model->Ftd->imageData)[i*mask_step+j] || !stat->is_trained_dyn_model )
            {
                float alpha = stat->is_trained_dyn_model ? model->params.alpha2 : model->params.alpha3;
                float diff = 0;
                int dist, min_dist = 2147483647, indx = -1;

                //update Pb
                stat->Pbcc *= (1.f-alpha);
                if( !((uchar*)model->foreground->imageData)[i*mask_step+j] )
                {
                    stat->Pbcc += alpha;
                }

                // Find best Vi match:
                for(k = 0; PV_CC(k) && k < model->params.N2cc; k++ )
                {
                    // Exponential decay of memory
                    PV_CC(k)  *= (1-alpha);
                    PVB_CC(k) *= (1-alpha);
                    if( PV_CC(k) < MIN_PV )
                    {
                        PV_CC(k) = 0;
                        PVB_CC(k) = 0;
                        continue;
                    }

                    dist = 0;
                    for( l = 0; l < 3; l++ )
                    {
                        int val = abs( V_CC(k,l) - prev_data[l] );
                        if( val > deltaCC ) break;
                        dist += val;
                        val = abs( V_CC(k,l+3) - curr_data[l] );
                        if( val > deltaCC) break;
                        dist += val;
                    }
                    if( l == 3 && dist < min_dist )
                    {
                        min_dist = dist;
                        indx = k;
                    }
                }


                if( indx < 0 )
                {   // Replace N2th elem in the table by new feature:
                    indx = model->params.N2cc - 1;
                    PV_CC(indx) = alpha;
                    PVB_CC(indx) = alpha;
                    // update Vt
                    for( l = 0; l < 3; l++ )
                    {
                        V_CC(indx,l) = prev_data[l];
                        V_CC(indx,l+3) = curr_data[l];
                    }
                }
                else
                {   // Update:
                    PV_CC(indx) += alpha;
                    if( !((uchar*)model->foreground->imageData)[i*mask_step+j] )
                    {
                        PVB_CC(indx) += alpha;
                    }
                }

                //re-sort CCt table by Pv
                for( k = 0; k < indx; k++ )
                {
                    if( PV_CC(k) <= PV_CC(indx) )
                    {
                        //shift elements
                        CvBGPixelCCStatTable tmp1, tmp2 = cctable[indx];
                        for( l = k; l <= indx; l++ )
                        {
                            tmp1 = cctable[l];
                            cctable[l] = tmp2;
                            tmp2 = tmp1;
                        }
                        break;
                    }
                }


                float sum1=0, sum2=0;
                //check "once-off" changes
                for(k = 0; PV_CC(k) && k < model->params.N1cc; k++ )
                {
                    sum1 += PV_CC(k);
                    sum2 += PVB_CC(k);
                }
                if( sum1 > model->params.T ) stat->is_trained_dyn_model = 1;

                diff = sum1 - stat->Pbcc * sum2;
                // Update stat table:
                if( diff >  model->params.T )
                {
                    //printf("once off change at motion mode\n");
                    //new BG features are discovered
                    for( k = 0; PV_CC(k) && k < model->params.N1cc; k++ )
                    {
                        PVB_CC(k) =
                            (PV_CC(k)-stat->Pbcc*PVB_CC(k))/(1-stat->Pbcc);
                    }
                    assert(stat->Pbcc<=1 && stat->Pbcc>=0);
                }
            }

            // Handle "stationary" pixel:
            if( !((uchar*)model->Ftd->imageData)[i*mask_step+j] )
            {
                float alpha = stat->is_trained_st_model ? model->params.alpha2 : model->params.alpha3;
                float diff = 0;
                int dist, min_dist = 2147483647, indx = -1;

                //update Pb
                stat->Pbc *= (1.f-alpha);
                if( !((uchar*)model->foreground->imageData)[i*mask_step+j] )
                {
                    stat->Pbc += alpha;
                }

                //find best Vi match
                for( k = 0; k < model->params.N2c; k++ )
                {
                    // Exponential decay of memory
                    PV_C(k) *= (1-alpha);
                    PVB_C(k) *= (1-alpha);
                    if( PV_C(k) < MIN_PV )
                    {
                        PV_C(k) = 0;
                        PVB_C(k) = 0;
                        continue;
                    }

                    dist = 0;
                    for( l = 0; l < 3; l++ )
                    {
                        int val = abs( V_C(k,l) - curr_data[l] );
                        if( val > deltaC ) break;
                        dist += val;
                    }
                    if( l == 3 && dist < min_dist )
                    {
                        min_dist = dist;
                        indx = k;
                    }
                }

                if( indx < 0 )
                {   // the N2-th element in the table is replaced by a new feature
                    indx = model->params.N2c - 1;
                    PV_C(indx) = alpha;
                    PVB_C(indx) = alpha;
                    // update Vt
                    for( l = 0; l < 3; l++ )
                    {
                        V_C(indx,l) = curr_data[l];
                    }
                } else
                {//update
                    PV_C(indx) += alpha;
                    if( !((uchar*)model->foreground->imageData)[i*mask_step+j] )
                    {
                        PVB_C(indx) += alpha;
                    }
                }

                //re-sort Ct table by Pv
                for( k = 0; k < indx; k++ )
                {
                    if( PV_C(k) <= PV_C(indx) )
                    {
                        //shift elements
                        CvBGPixelCStatTable tmp1, tmp2 = ctable[indx];
                        for( l = k; l <= indx; l++ )
                        {
                            tmp1 = ctable[l];
                            ctable[l] = tmp2;
                            tmp2 = tmp1;
                        }
                        break;
                    }
                }

                // Check "once-off" changes:
                float sum1=0, sum2=0;
                for( k = 0; PV_C(k) && k < model->params.N1c; k++ )
                {
                    sum1 += PV_C(k);
                    sum2 += PVB_C(k);
                }
                diff = sum1 - stat->Pbc * sum2;
                if( sum1 > model->params.T ) stat->is_trained_st_model = 1;

                // Update stat table:
                if( diff >  model->params.T )
                {
                    //printf("once off change at stat mode\n");
                    //new BG features are discovered
                    for( k = 0; PV_C(k) && k < model->params.N1c; k++ )
                    {
                        PVB_C(k) = (PV_C(k)-stat->Pbc*PVB_C(k))/(1-stat->Pbc);
                    }
                    stat->Pbc = 1 - stat->Pbc;
                }
            }		// if !(change detection) at pixel (i,j)

            // Update the reference BG image:
            if( !((uchar*)model->foreground->imageData)[i*mask_step+j])
            {
                uchar* ptr = ((uchar*)model->background->imageData) + i*model->background->widthStep+j*3;

                if( !((uchar*)model->Ftd->imageData)[i*mask_step+j] &&
                    !((uchar*)model->Fbd->imageData)[i*mask_step+j] )
                {
                    // Apply IIR filter:
                    for( l = 0; l < 3; l++ )
                    {
                        int a = cvRound(ptr[l]*(1 - model->params.alpha1) + model->params.alpha1*curr_data[l]);
                        ptr[l] = (uchar)a;
                        //((uchar*)model->background->imageData)[i*model->background->widthStep+j*3+l]*=(1 - model->params.alpha1);
                        //((uchar*)model->background->imageData)[i*model->background->widthStep+j*3+l] += model->params.alpha1*curr_data[l];
                    }
                }
                else
                {
                    // Background change detected:
                    for( l = 0; l < 3; l++ )
                    {
                        //((uchar*)model->background->imageData)[i*model->background->widthStep+j*3+l] = curr_data[l];
                        ptr[l] = curr_data[l];
                    }
                }
            }
        }		// j
    }			// i

    // Keep previous frame:
    cvCopy( curr_frame, model->prev_frame );

    return region_count;
}
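icvUpdateFGDStatModel is not called directly; cvCreateFGDStatModel installs it as the update callback of a CvBGStatModel. A minimal sketch of the public usage, assuming the legacy cvaux background/foreground API:

#include <opencv/cvaux.h>    /* cvCreateFGDStatModel, cvUpdateBGStatModel */
#include <opencv/highgui.h>

int main()
{
    CvCapture* cap = cvCaptureFromCAM(0);
    if( !cap ) return -1;

    IplImage* frame = cvQueryFrame( cap );
    if( !frame ) return -1;

    /* builds the per-pixel statistic tables and installs the update callback */
    CvBGStatModel* bg = cvCreateFGDStatModel( frame, NULL );

    cvNamedWindow( "foreground", 1 );
    while( (frame = cvQueryFrame(cap)) != NULL )
    {
        cvUpdateBGStatModel( frame, bg, -1 );   /* runs the update shown above */
        cvShowImage( "foreground", bg->foreground );
        if( cvWaitKey(10) >= 0 ) break;
    }
    cvReleaseBGStatModel( &bg );
    cvReleaseCapture( &cap );
    return 0;
}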
Exemplo n.º 23
0
int process_frame(IplImage* frame, IplImage* h_plane, IplImage* v_plane, CvHistogram* hist_h, CvHistogram* hist_v, int Hthresh, int Vthresh)
{
	IplImage *resultF;
	int c;

	if(!frame) return -1;


	resultF = cvCreateImage( cvGetSize(frame), frame->depth, frame->nChannels );
	cvCopy(frame,resultF,NULL);

	cvShowImage("Display1", frame );

	//cvShowImage("Display", frame );
	//cvShowImage("Display", frame );

	IplImage* result = cvCreateImage( cvGetSize(h_plane ), 8, 1 );
	int width = frame->width;
	int height = frame->height;

	int h_bins = H_BINS, v_bins = V_BINS;

	int *h_s;
	h_s = malloc(sizeof(int)*h_bins);
	int *v_s;
	v_s = malloc(sizeof(int)*v_bins);

	int j;
	float pr = (float)Hthresh/100000;
	pr = pr*result->width*result->height;
	
	for(j = 0 ; j < h_bins ; j++)
	{
	  if(cvQueryHistValue_1D(hist_h,j) < pr)
	    {
	      h_s[j] = 0; // is an obstacle
	    }
	  else
	    {
	      h_s[j] = H_MAX;
	
	    }
	}
	pr = (float)Vthresh/1000;
	pr = pr*result->width*result->height;
	

	for(j = 0 ; j < v_bins ; j++)
	  {
	    if(cvQueryHistValue_1D(hist_v,j) < pr)
	      {
		//	printf("%f %f\n",cvQueryHistValue_1D(hist_v,j),pr);
		v_s[j] = 0;
	      }
	    else
	      v_s[j] = V_MAX;
	  	    //	    printf("%f %d %d\n",cvQueryHistValue_1D(hist_v,j),Vthresh,v_s[j]);
	    
	  }
	//	getchar();
	int i;
	CvScalar Black, White;
	Black.val[0] = 0;
	White.val[0] = 255;
	int p,q;
	cvCopy(v_plane, result,NULL);
	int h_bsize = H_MAX/H_BINS;
	int v_bsize = V_MAX/V_BINS;
	//printf("%d %d\n",h_bsize,v_bsize);
	for(i = 0 ; i < result->height ; i++)
	{
		for(j = 0 ; j < result->width ; j++)
		{
			CvScalar s;
			s = cvGet2D(h_plane ,i ,j);
			if(h_s[(int)s.val[0]/(h_bsize)]  != 0 ) // obstacles are white
				cvSet2D(h_plane,i,j,Black);
			else
				cvSet2D(h_plane,i,j,White);


			s = cvGet2D(v_plane ,i ,j);
			if(s.val[0] == 255)
			  s.val[0] = 254;
			if(v_s[(int)s.val[0]/(v_bsize)]  != 0)
				cvSet2D(v_plane,i,j,Black);
			else
			  {
			    cvSet2D(v_plane,i,j,White);
			  }
		}
	}
	

	//for(i = 0; i < result->height; i++)
	//{
	//	for(j = 0; j < result->width; j++)
	//	{
	//		if(countArray[i/BLOCK_DIM][j/BLOCK_DIM] <= AMT_BLACK)
	//			cvSet2D(result, i, j, Black);			
	//		else
	//			cvSet2D(result, i, j, White);
	//	}
	//}

	cvOr(v_plane,h_plane,result,NULL);

	cvShowImage("Result", result );
	cvShowImage("HDisplay", h_plane );
	cvShowImage("VDisplay", v_plane );
	//UNCOMMENT FOR CONTOUR
	/*
	CvMemStorage* memStorage = cvCreateMemStorage(0);
	CvSeq* contours = 0;
	cvFindContours(result, memStorage, &contours,sizeof(CvContour),CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0));
	CvSeq* co;
	for( co = contours; co != NULL; co = co->h_next) 
	{
		cvDrawContours(resultF,co,cvScalarAll(0),cvScalarAll(0),-1,5,8,cvPoint(0,0));
	}
	cvShowImage("FinalDisplay", resultF );
	cvReleaseMemStorage(&memStorage);
	*/
	//comment stops here
	cvReleaseImage(&result );
	cvReleaseImage(&resultF );
	free(h_s);
	free(v_s);
	c = cvWaitKey(30);
	if( c == 'q' || c == 'Q' || (c & 0xff) == 27 )  /* 27 == ESC */
		return -1;
	return 0;
}
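For reference, a sketch of how the planes and histograms fed to process_frame above could be prepared for each frame. H_BINS, V_BINS, H_MAX and V_MAX are the macros this example already relies on; the hue/value ranges below are assumptions.

void prepare_and_process( IplImage* frame, int Hthresh, int Vthresh )
{
    IplImage* hsv     = cvCreateImage( cvGetSize(frame), 8, 3 );
    IplImage* h_plane = cvCreateImage( cvGetSize(frame), 8, 1 );
    IplImage* s_plane = cvCreateImage( cvGetSize(frame), 8, 1 );
    IplImage* v_plane = cvCreateImage( cvGetSize(frame), 8, 1 );

    cvCvtColor( frame, hsv, CV_BGR2HSV );
    cvSplit( hsv, h_plane, s_plane, v_plane, 0 );

    int   h_bins = H_BINS, v_bins = V_BINS;
    float h_range[]  = { 0, H_MAX };    /* assumed hue range */
    float v_range[]  = { 0, V_MAX };    /* assumed value range */
    float* h_ranges[] = { h_range };
    float* v_ranges[] = { v_range };
    CvHistogram* hist_h = cvCreateHist( 1, &h_bins, CV_HIST_ARRAY, h_ranges, 1 );
    CvHistogram* hist_v = cvCreateHist( 1, &v_bins, CV_HIST_ARRAY, v_ranges, 1 );
    cvCalcHist( &h_plane, hist_h, 0, 0 );
    cvCalcHist( &v_plane, hist_v, 0, 0 );

    process_frame( frame, h_plane, v_plane, hist_h, hist_v, Hthresh, Vthresh );

    cvReleaseHist( &hist_h );
    cvReleaseHist( &hist_v );
    cvReleaseImage( &hsv );
    cvReleaseImage( &h_plane );
    cvReleaseImage( &s_plane );
    cvReleaseImage( &v_plane );
}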
Exemplo n.º 24
0
void faceDbCreator(const char filePath[50],const char coordsFilename[100],
                                      const int startFile,const int endFile,
                                      const int noIterations,const int border){
  /**Number of Feature Points used in aligning images.**/
  const int noFeaturePoints             =   4;

  const int initialSize                 =   38;

  int i,j,k,iteration;

  /**No of files from DB added for alignment**/
  int noFiles                           =   0;
  double xr                             =   0;
  double yr                             =   0;
  int x,y;
  char filePathCopy[100];
  /**Coords of the standard face with respect to initialSize**/
  CvMat *stdCoords                      =   cvCreateMat(noFeaturePoints*2,1,
                                                                  CV_64FC1);
  double stdCoordsData[]                =   {5+border,6+border,32+border,
                                            6+border,18+border,15+border,
                                                    18+border,25+border};
  stdCoords->data.db                    =   stdCoordsData;

  /**Average Coords of the faces aligned so far**/
  double avgData[noFeaturePoints*2];
  CvMat *avgMat                         =   cvCreateMat(noFeaturePoints*2,1,
                                                                    CV_64FC1);
  avgMat->data.db                       =   avgData;
  /**Coords to which the other coordinates are aligned**/
  double testData[noFeaturePoints*2];
  CvMat *testMat                        =   cvCreateMat(noFeaturePoints*2,1,
                                                                    CV_64FC1);
  testMat->data.db                      =   testData;

  cvCopy(stdCoords,testMat);

  double tempCoords[noFeaturePoints*2];

  /**Coords of all the image in the database**/
  CvMat* coords[endFile-startFile+1];

  double coordsData[endFile-startFile+1][noFeaturePoints*8];

  /**Face DB image file names**/
  char fileNames[endFile-startFile+1][100];
  char tempFileName[100];
  char tempStr[50];

  IplImage *img                         =   NULL;
  IplImage *dst                         =   NULL;

  FILE* coordsFile                      =   fopen(coordsFilename,"r+");
  FILE* t                               =   NULL;

  if (coordsFile){
    for (i=-startFile+1;i<=endFile-startFile;++i){
      if(!feof(coordsFile)){
        fscanf(coordsFile,"%s %lf %lf %lf %lf %lf %lf %lf %lf",tempStr,
                                &tempCoords[0],&tempCoords[1],&tempCoords[2],
                                &tempCoords[3],&tempCoords[4],&tempCoords[5],
                                                &tempCoords[6],&tempCoords[7]);
        /**Skip the coords up to startImage**/
        if (i>=0){
          strcpy(tempFileName,filePath);
          strcat(tempFileName,tempStr);
          /**Check whether the file exists**/
          if (t=fopen(tempFileName,"r")){
            fclose(t);
            strcpy(fileNames[noFiles],tempFileName);

            coords[noFiles]             =  cvCreateMat(noFeaturePoints*2,4,
                                                                    CV_64FC1);
            faceDbCreatorFillData(coordsData[noFiles],tempCoords,noFeaturePoints);

            coords[noFiles]->data.db    =   coordsData[noFiles];

            ++noFiles;
          }
        }
      }
      else{
        noFiles                         =   i-1;
        break;
      }
    }
    fclose(coordsFile);

    if (!noFiles){
      printf("Face DB Creator Error: No File To Process\n");
      exit(EXIT_FAILURE);
    }
  }
  else {
    printf("Face DB Creator Error: Could Not Open Coords File\n");
    exit(EXIT_FAILURE);
  }

  /**Pseudoinverse**/
  CvMat *temp2                          =   cvCreateMat(4,1,CV_64FC1);
  double tempData2[4];
  temp2->data.db                        =   tempData2;

  for (iteration=0;iteration<noIterations;++iteration){
    cvSetZero(avgMat);
    for (i=0;i<noFiles;++i){
      pseudoInverse(coords[i],testMat,temp2);
      for (j=0;j<noFeaturePoints;++j){
        xr                              =   coordsData[i][j*8]*temp2->data.db[0]
                                            -coordsData[i][j*8+4]*
                                            temp2->data.db[1]+temp2->data.db[2];

        yr                              =   coordsData[i][j*8]*temp2->data.db[1]
                                            +coordsData[i][j*8+4]*
                                            temp2->data.db[0]+temp2->data.db[3];
        coordsData[i][j*8]              =   xr;
        coordsData[i][j*8+5]            =   xr;
        coordsData[i][j*8+1]            =   -yr;
        coordsData[i][j*8+4]            =   yr;
        avgData[j*2]                    +=  xr;
        avgData[j*2+1]                  +=  yr;
      }

      img                               =   cvLoadImage(fileNames[i],
                                                      CV_LOAD_IMAGE_GRAYSCALE);

      dst                               =   cvCreateImage(cvSize(initialSize+
                                                2*border,initialSize+2*border),
                                                    img->depth,img->nChannels);
      cvSetZero(dst);

      double a                          =   temp2->data.db[0];
      double b                          =   temp2->data.db[1];
      double det                        =   a*a+b*b;
      double tx                         =   temp2->data.db[2];
      double ty                         =   temp2->data.db[3];

      /**Transform the image**/
      for (j=0;j<dst->height;++j){
        for (k=0;k<dst->width;++k){
          xr                            =   ((k-tx)*a+(j-ty)*b)/det;
          yr                            =   ((k-tx)*-b+(j-ty)*a)/det;
          if ((int)xr>=0 && (int)xr <img->width && (int)yr>=0
                                                 && (int)yr<img->height){
            *((unsigned char*)(dst->imageData)+j*dst->widthStep+k)=
                                    *((unsigned char*)(img->imageData)+
                                           (int)yr*img->widthStep+(int)xr);
          }

        }
      }
      cvSaveImage(fileNames[i],dst);
      cvReleaseImage(&img);
      cvReleaseImage(&dst);

    }

    /**Average of the transformations performed so far**/
    for (j=0;j<noFeaturePoints*2;++j){
      avgData[j]                        /=  endFile-startFile+1;
    }
    /**Perform transformation on the average data**/
    CvMat* tempMat                      =   cvCreateMat(noFeaturePoints*2,4,
                                                                      CV_64FC1);
    double tempMatData[noFeaturePoints*8];
    tempMat->data.db                    =   tempMatData;
    faceDbCreatorFillData(tempMatData,avgData,noFeaturePoints);

    pseudoInverse(tempMat,stdCoords,temp2);

    for (j=0;j<noFeaturePoints;++j){
      testData[j*2]                     =   avgData[j*2]*temp2->data.db[0]-
                                            avgData[j*2+1]*temp2->data.db[1]+
                                                             temp2->data.db[2];
      testData[j*2+1]                   =   avgData[j*2]*temp2->data.db[1]+
                                            avgData[j*2+1]*temp2->data.db[0]+
                                                             temp2->data.db[3];
    }
    cvReleaseMat(&tempMat);
  }

  IplImage *img8U,*img64F;
  CvRect *cropArea;

  IplImage *finalImage32F               =   cvCreateImage(cvSize(CROPPED_WIDTH,
                                               CROPPED_HEIGHT),IPL_DEPTH_32F,1);
  IplImage *finalImage8U                =   cvCreateImage(cvSize(CROPPED_WIDTH,
                                                CROPPED_HEIGHT),IPL_DEPTH_8U,1);
  IplImage *transformImage64F;
  IplImage *transformImage32F;
  IplImage *croppedImage32F             =   cvCreateImage(cvSize(initialSize,
                                                  initialSize),IPL_DEPTH_32F,1);
  IplImage *croppedImage64F             =   cvCreateImage(cvSize(initialSize,
                                                  initialSize),IPL_DEPTH_64F,1);

  IplImage* mask                        =   cvCreateImage(cvGetSize
                                              (croppedImage64F),IPL_DEPTH_8U,1);
  maskGenerator(mask);

  /**Random transformations**/
  double scale                          =   0;
  double rotate                         =   0;
  double translateX                     =   0;
  double translateY                     =   0;

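  /**Build the per-variation filename suffix "_NNN.jpg"; the three digit
     positions are filled in for each variation index below**/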
  tempStr[0]                            =   '_';
  tempStr[4]                            =   '.';
  tempStr[5]                            =   'j';
  tempStr[6]                            =   'p';
  tempStr[7]                            =   'g';
  tempStr[8]                            =   '\0';

  /**Random Number Generator**/
  CvRNG rg;

  for (i=0;i<noFiles;++i){
    img8U                               =   cvLoadImage(fileNames[i],
                                                       CV_LOAD_IMAGE_GRAYSCALE);
    img64F                              =   cvCreateImage(cvGetSize(img8U),
                                                               IPL_DEPTH_64F,1);
    cvConvertScale(img8U,img64F);
    cvReleaseImage(&img8U);

    remove(fileNames[i]);

    xr                                  =   coordsData[i][0]-stdCoordsData[0]+
                                                                         border;
    yr                                  =   coordsData[i][4]-stdCoordsData[1]+
                                                                         border;
    cvSetImageROI(img64F,cvRect(cvRound(xr),cvRound(yr),initialSize,
                                                            initialSize));
    cvCopy(img64F,croppedImage64F);

    /**Creating variations for each image**/
    for (j=0;j<NO_VARIATIONS;++j){
      lightingCorrection(croppedImage64F,mask);
      rg                                =   cvRNG(time(0)*1000*(i+20)*(j+30));

      cvConvertScale(croppedImage64F,croppedImage32F);
      cvResize(croppedImage32F,finalImage32F);
      cvConvertScale(finalImage32F,finalImage8U);
      /**Encode the variation index j as three ASCII digits**/
      tempStr[1]                        =   (j/100)%10+48;
      tempStr[2]                        =   (j/10)%10+48;
      tempStr[3]                        =   j%10+48;

      strncpy(tempFileName,fileNames[i],strlen(fileNames[i])-4);
      tempFileName[strlen(fileNames[i])-4]  =   '\0';
      strcat(tempFileName,tempStr);

      cvSaveImage(tempFileName,finalImage8U);
      switch (cvRandInt(&rg)%3){
        /**Scaling**/
        case 0:
          if (cvRandInt(&rg)%2)
            scale                       =   cvRandReal(&rg)*MAX_SCALE*
                                            initialSize/CROPPED_WIDTH;
          else
            scale                       =   cvRandReal(&rg)*MIN_SCALE*
                                            initialSize/CROPPED_HEIGHT;

          transformImage64F             =   cvCreateImage(
                                            cvSize(cvRound(initialSize-2*scale),
                                            cvRound(initialSize-2*scale)),
                                            IPL_DEPTH_64F,1);

          transformImage32F             =   cvCreateImage(
                                            cvSize(cvRound(initialSize-2*scale),
                                            cvRound(initialSize-2*scale)),
                                            IPL_DEPTH_32F,1);

          cvSetImageROI(img64F,cvRect(cvRound(xr+scale),cvRound(yr+scale),
                    cvRound(initialSize-2*scale),cvRound(initialSize-2*scale)));

          cvCopy(img64F,transformImage64F);
          cvConvertScale(transformImage64F,transformImage32F);

          cvResize(transformImage32F,croppedImage32F);
          cvConvertScale(croppedImage32F,croppedImage64F);
          cvReleaseImage(&transformImage64F);
          cvReleaseImage(&transformImage32F);
          break;
        /**Rotation**/
        case 1:
          if (cvRandInt(&rg)%2)
            rotate                      =   cvRandReal(&rg)*MAX_ROTATE;
          else
            rotate                      =   cvRandReal(&rg)*MIN_ROTATE;

          cvResetImageROI(img64F);

          transformImage64F             =   cvCreateImage(cvGetSize(img64F),
                                                            IPL_DEPTH_64F,1);
          CvPoint2D64f rotCenter        =   cvPoint2D64f(xr+initialSize/2,
                                                         yr+initialSize/2);
          transformRotate(img64F,transformImage64F,&rotCenter,
                                                         rotate*M_PI/180);

          cvSetImageROI(transformImage64F,
                            cvRect(xr,yr,initialSize,initialSize));

          cvCopy(transformImage64F,croppedImage64F);
          cvReleaseImage(&transformImage64F);
          break;
        default:
          /**Translation**/
          if (cvRandInt(&rg)%2){
            if (cvRandInt(&rg)%2){
              translateX                =   cvRandReal(&rg)*MAX_TRANSLATE*
                                                    initialSize/CROPPED_WIDTH;
              translateY                =   cvRandReal(&rg)*MAX_TRANSLATE*
                                                    initialSize/CROPPED_HEIGHT;
            }
            else{
              translateX                =   cvRandReal(&rg)*MIN_TRANSLATE*
                                                    initialSize/CROPPED_WIDTH;
              translateY                =   cvRandReal(&rg)*MIN_TRANSLATE*
                                                    initialSize/CROPPED_HEIGHT;
            }
          }
          else{
            if (cvRandInt(&rg)%2){
              translateX                =   cvRandReal(&rg)*MAX_TRANSLATE*
                                                    initialSize/CROPPED_WIDTH;
              translateY                =   cvRandReal(&rg)*MIN_TRANSLATE*
                                                    initialSize/CROPPED_HEIGHT;
            }
            else{
              translateX                =   cvRandReal(&rg)*MIN_TRANSLATE*
                                                    initialSize/CROPPED_WIDTH;
              translateY                =   cvRandReal(&rg)*MAX_TRANSLATE*
                                                    initialSize/CROPPED_HEIGHT;
            }
          }
          cvSetImageROI(img64F,cvRect(cvRound(xr+translateX),
                              cvRound(yr+translateY),initialSize,initialSize));
          cvCopy(img64F,croppedImage64F);
      }
    }
    cvReleaseImage(&img64F);
    cvReleaseMat(&coords[i]);
  }
  cvReleaseImage(&finalImage8U);
  cvReleaseImage(&finalImage32F);
  cvReleaseImage(&croppedImage32F);
  cvReleaseImage(&croppedImage64F);
  cvReleaseMat(&stdCoords);
  cvReleaseMat(&testMat);
  cvReleaseMat(&avgMat);
  cvReleaseMat(&temp2);
}
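The pseudoInverse() helper used throughout this example is defined elsewhere in the project. A minimal sketch of what it plausibly does, assuming it returns the least-squares solution of A*x = b (which is what multiplying by the Moore-Penrose pseudoinverse amounts to), could be:

/* Sketch only: assumed signature and behavior of the pseudoInverse() helper.
 * cvSolve with CV_SVD computes the minimum-norm least-squares solution,
 * i.e. x = pinv(A) * b; here x holds the similarity parameters (a,b,tx,ty). */
static void pseudoInverse(const CvMat* A, const CvMat* b, CvMat* x)
{
    cvSolve(A, b, x, CV_SVD);
}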
Exemplo n.º 25
/* Wrapper function for distance transform group */
CV_IMPL void
cvDistTransform( const void* srcarr, void* dstarr,
                 int distType, int maskSize,
                 const float *mask,
                 void* labelsarr )
{
    CvMat* temp = 0;
    CvMat* src_copy = 0;
    CvMemStorage* st = 0;
    
    CV_FUNCNAME( "cvDistTransform" );

    __BEGIN__;

    float _mask[5] = {0};
    int _imask[3];
    CvMat srcstub, *src = (CvMat*)srcarr;
    CvMat dststub, *dst = (CvMat*)dstarr;
    CvMat lstub, *labels = (CvMat*)labelsarr;
    CvSize size;
    CvIPPDistTransFunc ipp_func = 0;
    CvIPPDistTransFunc2 ipp_inp_func = 0;

    CV_CALL( src = cvGetMat( src, &srcstub ));
    CV_CALL( dst = cvGetMat( dst, &dststub ));

    if( !CV_IS_MASK_ARR( src ) || (CV_MAT_TYPE( dst->type ) != CV_32FC1 &&
        (CV_MAT_TYPE(dst->type) != CV_8UC1 || distType != CV_DIST_L1 || labels)) )
        CV_ERROR( CV_StsUnsupportedFormat,
        "source image must be 8uC1 and the distance map must be 32fC1 "
        "(or 8uC1 in case of simple L1 distance transform)" );

    if( !CV_ARE_SIZES_EQ( src, dst ))
        CV_ERROR( CV_StsUnmatchedSizes, "the source and the destination images must be of the same size" );

    if( maskSize != CV_DIST_MASK_3 && maskSize != CV_DIST_MASK_5 && maskSize != CV_DIST_MASK_PRECISE )
        CV_ERROR( CV_StsBadSize, "Mask size should be 3 or 5 or 0 (precise)" );

    if( distType == CV_DIST_C || distType == CV_DIST_L1 )
        maskSize = !labels ? CV_DIST_MASK_3 : CV_DIST_MASK_5;
    else if( distType == CV_DIST_L2 && labels )
        maskSize = CV_DIST_MASK_5;

    if( maskSize == CV_DIST_MASK_PRECISE )
    {
        CV_CALL( icvTrueDistTrans( src, dst ));
        EXIT;
    }
    
    if( labels )
    {
        CV_CALL( labels = cvGetMat( labels, &lstub ));
        if( CV_MAT_TYPE( labels->type ) != CV_32SC1 )
            CV_ERROR( CV_StsUnsupportedFormat, "the output array of labels must be 32sC1" );

        if( !CV_ARE_SIZES_EQ( labels, dst ))
            CV_ERROR( CV_StsUnmatchedSizes, "the array of labels has a different size" );

        if( maskSize == CV_DIST_MASK_3 )
            CV_ERROR( CV_StsNotImplemented,
            "3x3 mask can not be used for \"labeled\" distance transform. Use 5x5 mask" );
    }

    if( distType == CV_DIST_C || distType == CV_DIST_L1 || distType == CV_DIST_L2 )
    {
        icvGetDistanceTransformMask( (distType == CV_DIST_C ? 0 :
            distType == CV_DIST_L1 ? 1 : 2) + maskSize*10, _mask );
    }
    else if( distType == CV_DIST_USER )
    {
        if( !mask )
            CV_ERROR( CV_StsNullPtr, "" );

        memcpy( _mask, mask, (maskSize/2 + 1)*sizeof(float));
    }

    if( !labels )
    {
        if( CV_MAT_TYPE(dst->type) == CV_32FC1 )
            ipp_func = (CvIPPDistTransFunc)(maskSize == CV_DIST_MASK_3 ?
                icvDistanceTransform_3x3_8u32f_C1R_p : icvDistanceTransform_5x5_8u32f_C1R_p);
        else if( src->data.ptr != dst->data.ptr )
            ipp_func = (CvIPPDistTransFunc)icvDistanceTransform_3x3_8u_C1R_p;
        else
            ipp_inp_func = icvDistanceTransform_3x3_8u_C1IR_p;
    }

    size = cvGetMatSize(src);

    if( (ipp_func || ipp_inp_func) && src->cols >= 4 && src->rows >= 2 )
    {
        _imask[0] = cvRound(_mask[0]);
        _imask[1] = cvRound(_mask[1]);
        _imask[2] = cvRound(_mask[2]);

        if( ipp_func )
        {
            IPPI_CALL( ipp_func( src->data.ptr, src->step,
                    dst->data.fl, dst->step, size,
                    CV_MAT_TYPE(dst->type) == CV_8UC1 ?
                    (void*)_imask : (void*)_mask ));
        }
        else
        {
            IPPI_CALL( ipp_inp_func( src->data.ptr, src->step, size, _imask ));
        }
    }
    else if( CV_MAT_TYPE(dst->type) == CV_8UC1 )
    {
        CV_CALL( icvDistanceATS_L1_8u( src, dst ));
    }
    else
    {
        int border = maskSize == CV_DIST_MASK_3 ? 1 : 2;
        CV_CALL( temp = cvCreateMat( size.height + border*2, size.width + border*2, CV_32SC1 ));

        if( !labels )
        {
            CvDistTransFunc func = maskSize == CV_DIST_MASK_3 ?
                icvDistanceTransform_3x3_C1R :
                icvDistanceTransform_5x5_C1R;

            func( src->data.ptr, src->step, temp->data.i, temp->step,
                  dst->data.fl, dst->step, size, _mask );
        }
        else
        {
            CvSeq *contours = 0;
            CvPoint top_left = {0,0}, bottom_right = {size.width-1,size.height-1};
            int label;

            CV_CALL( st = cvCreateMemStorage() );
            CV_CALL( src_copy = cvCreateMat( size.height, size.width, src->type ));
            cvCmpS( src, 0, src_copy, CV_CMP_EQ );
            cvFindContours( src_copy, st, &contours, sizeof(CvContour),
                            CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE );
            cvZero( labels );
            for( label = 1; contours != 0; contours = contours->h_next, label++ )
            {
                CvScalar area_color = cvScalarAll(label);
                cvDrawContours( labels, contours, area_color, area_color, -255, -1, 8 );
            }

            cvCopy( src, src_copy );
            cvRectangle( src_copy, top_left, bottom_right, cvScalarAll(255), 1, 8 );

            icvDistanceTransformEx_5x5_C1R( src_copy->data.ptr, src_copy->step, temp->data.i, temp->step,
                        dst->data.fl, dst->step, labels->data.i, labels->step, size, _mask );
        }
    }

    __END__;

    cvReleaseMat( &temp );
    cvReleaseMat( &src_copy );
    cvReleaseMemStorage( &st );
}
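A minimal, hypothetical usage of the wrapper above (the file name and threshold are illustrative assumptions): the source must be 8uC1 and is treated as a mask whose zero pixels are the features that distances are measured to.

#include <opencv/cv.h>
#include <opencv/highgui.h>

int main(void)
{
    IplImage* src  = cvLoadImage("shapes.png", CV_LOAD_IMAGE_GRAYSCALE);
    if (!src) return 1;

    IplImage* bin  = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
    IplImage* dist = cvCreateImage(cvGetSize(src), IPL_DEPTH_32F, 1);

    /* Distances are measured to the nearest zero pixel, so binarize first. */
    cvThreshold(src, bin, 128, 255, CV_THRESH_BINARY);
    cvDistTransform(bin, dist, CV_DIST_L2, CV_DIST_MASK_5, NULL, NULL);

    cvReleaseImage(&src);
    cvReleaseImage(&bin);
    cvReleaseImage(&dist);
    return 0;
}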
Exemplo n.º 26
void face_detect( void )
{
	CvCapture *capture = 0;
	IplImage *frame = 0;
	IplImage *frame_copy = 0;
	double height = 480;
	double width = 640;
	int c;
	CvRect last_rect = {0};

	CvHaarClassifierCascade* cvHCC = (CvHaarClassifierCascade*)cvLoad(filename);
	CvMemStorage* cvMStr = cvCreateMemStorage(0);
	CvSeq* face;
	capture = cvCreateCameraCapture (0);
	
	cvSetCaptureProperty (capture, CV_CAP_PROP_FRAME_WIDTH, width);
	cvSetCaptureProperty (capture, CV_CAP_PROP_FRAME_HEIGHT, height);
	cvNamedWindow ("capture_face_detect", CV_WINDOW_AUTOSIZE);
		
	open_tunnel();

	while (1) {
		frame = cvQueryFrame (capture);


		frame_copy = cvCreateImage(cvSize(frame->width, frame->height), IPL_DEPTH_8U, frame->nChannels);
		if(frame->origin == IPL_ORIGIN_TL) {
			cvCopy(frame, frame_copy);
		} else {
			cvFlip(frame, frame_copy);
		}

		IplImage* gray = cvCreateImage(cvSize(frame_copy->width, frame_copy->height), IPL_DEPTH_8U, 1);
		IplImage* detect_frame = cvCreateImage(cvSize((frame_copy->width / SCALE), (frame_copy->height / SCALE)), IPL_DEPTH_8U, 1);
		cvCvtColor(frame_copy, gray, CV_BGR2GRAY);
		cvResize(gray, detect_frame, CV_INTER_LINEAR);
		cvEqualizeHist(detect_frame, detect_frame);


		face = cvHaarDetectObjects(detect_frame, cvHCC, cvMStr, 1.1, 2, CV_HAAR_DO_CANNY_PRUNING, cvSize(30, 30) );

		CvScalar detect_color = CV_RGB(255, 0, 0);

		double d = 1e15;	// large sentinel: distance to the closest detection so far

		for (int i = 0; i < face->total; i++) {
			CvRect* faceRect = (CvRect*)cvGetSeqElem(face, i);

			if(last_rect.x != 0 || last_rect.y != 0) {
				double x = abs(last_rect.x - faceRect->x);
				double y = abs(last_rect.y - faceRect->y);
				double e = sqrt( x*x+y*y );
				// keep the detection closest to the last tracked position
				if( d > e) {
					d = e;
					last_rect.x = faceRect->x;
					last_rect.y = faceRect->y;
					last_rect.width = faceRect->width;
					last_rect.height = faceRect->height;
				}
			}

			// rect
			cvRectangle(frame_copy,
				cvPoint(faceRect->x * SCALE, faceRect->y * SCALE),
				cvPoint((faceRect->x + faceRect->width) * SCALE, (faceRect->y + faceRect->height) * SCALE),
				detect_color,
				3, CV_AA);
			detect_color = CV_RGB(0, 0, 255);


		}

		// send to server
		{
			char str[1024];
			sprintf_s(str, "[{ \"x\" : %d, \"y\" : %d}]", last_rect.x * SCALE, last_rect.y * SCALE);
			printf("%s", str);
			send_tunnel(str);
		}
		
		cvShowImage ("capture_face_detect", frame_copy);
		cvReleaseImage(&gray);
		cvReleaseImage(&detect_frame);
		cvReleaseImage(&frame_copy);	// recreated every iteration, so release it to avoid a leak

		// key event
		c = cvWaitKey (16);
		if (c == 'e') {
			break;
		}
		if( c == 's') {
				CvRect* faceRect = (CvRect*)cvGetSeqElem(face, 0);
				if(faceRect != NULL) {
					last_rect.x = faceRect->x;
					last_rect.y = faceRect->y;
					last_rect.width = faceRect->width;
					last_rect.height = faceRect->height;
				}
		}
	}

	close_tunnel();

	/* free */
	cvReleaseMemStorage(&cvMStr);
	cvReleaseCapture (&capture);	
	cvDestroyWindow("capture_face_detect");
	cvReleaseHaarClassifierCascade(&cvHCC);
	


}
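face_detect() relies on several names defined elsewhere in the project (SCALE, filename, and the tunnel I/O helpers). The declarations below are assumptions, sketched only so the listing reads as self-contained; the real definitions are not part of this excerpt.

#define SCALE 2                              /* assumed detection downscale factor */
static const char* filename = "haarcascade_frontalface_default.xml"; /* assumed cascade path */

/* Assumed tunnel I/O helpers: open a connection to the receiving server,
 * push one JSON message, and close the connection. */
void open_tunnel(void);
void send_tunnel(const char* json);
void close_tunnel(void);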
Exemplo n.º 27
ossimRefPtr<ossimImageData> ossimTileToIplFilter::getTile(const ossimIrect& tileRect,
        ossim_uint32 resLevel)
{


    cout << "Getting Tile !" << endl;

    // Check input data sources for valid and null tiles
    ossimImageSource *imageSource = PTR_CAST(ossimImageSource, getInput(0));
    ossimRefPtr<ossimImageData> imageSourceData;


    if (imageSource)
        imageSourceData = imageSource->getTile(tileRect, resLevel);

    if (!isSourceEnabled())
        return imageSourceData;


    if (!theTile.valid())
    {
        if(getInput(0))
        {
            theTile = ossimImageDataFactory::instance()->create(this, this);
            theTile->initialize();
        }
    }

    if (!imageSourceData.valid() || !theTile.valid())
        return ossimRefPtr<ossimImageData>();

    theTile->setOrigin(tileRect.ul());
    if (theTile->getImageRectangle() != tileRect)
    {
        theTile->setImageRectangle(tileRect);
        theTile->initialize();
    }


    IplImage *input = cvCreateImage(cvSize(tileRect.width(), tileRect.height()),IPL_DEPTH_8U,3);
    IplImage *output = cvCreateImage(cvSize(tileRect.width(),tileRect.height()),IPL_DEPTH_8U,3);

    cvZero(input);
    cvZero(output);

    // If 16 or 32 bits, downsample to 8 bits
    ossimScalarType inputType = imageSourceData->getScalarType();
    if(inputType == OSSIM_UINT16 || inputType == OSSIM_USHORT11)
        CopyTileToIplImage(static_cast<ossim_uint16>(0), imageSourceData, input, tileRect);
    else
        CopyTileToIplImage(static_cast<ossim_uint8>(0), imageSourceData, input, tileRect);


    cvCopy(input, output);



    int bins = 256;
    int hsize[] = {bins};

    float binVal;
    float sum=0;
    int firstIndexFlag = 1;

    /*// Create histogram of image
    CvHistogram *hist;
    hist = cvCreateHist(1, hsize, CV_HIST_ARRAY, 0, 1);
    cvCalcHist(&input, hist, 0, 0);
    cvNormalizeHist(hist, 100);

    binVal = cvQueryHistValue_1D(hist,1);
    */

    // Determine the actual height and width of each tile
    ossimIrect fullImageRect;
    fullImageRect = imageSource->getBoundingRect(0);

    ossim_int32 tileHeight, tileWidth, imageWidth, imageHeight;
    tileHeight = tileRect.height();
    tileWidth = tileRect.width();

    imageWidth = fullImageRect.width();
    imageHeight = fullImageRect.height();

    ossim_int32 totRows, totCols;
    totRows = (ossim_uint32)round((double)imageHeight / tileHeight);
    totCols = (ossim_uint32)round((double)imageWidth / tileWidth);

    ossimIpt upperLeftTile = tileRect.ul();

    if ((upperLeftTile.x + 1) > fullImageRect.ul().x + totCols * tileWidth)
        tileWidth = imageWidth - totCols * tileWidth;

    if ((upperLeftTile.y + 1) > fullImageRect.ul().y + totRows * tileHeight)
        tileHeight = imageHeight - totRows * tileHeight;

    // Begin ship detection algorithm





    // Create a sub-image to ignore the zero padding added by OSSIM
    // (e.g., a 512x512 tile at the image edge may hold data in only 512x10)
    CvRect subRect = cvRect(0, 0, tileWidth, tileHeight);
    IplImage *subImg = cvCreateImage(cvSize(tileWidth, tileHeight),IPL_DEPTH_8U,3);
    cvSetImageROI(input, subRect);
    cvCopy(input, subImg);

    cvResetImageROI(input);


    showImage(subImg,input);

    cvReleaseImage(&input);
    cvReleaseImage(&output);


    return theTile;
}
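CopyTileToIplImage() and showImage() are project helpers not shown in this listing. A minimal sketch of the per-band copy CopyTileToIplImage() presumably performs, under the assumption that the tile stores each band contiguously, might look like:

// Sketch under assumptions, not the project's implementation: pack one
// band of a tile buffer into one channel of an interleaved 8-bit IplImage.
template <class T>
static void copyBandToIplChannel(const T* band, IplImage* img, int channel,
                                 int width, int height)
{
    for (int y = 0; y < height; ++y)
    {
        uchar* row = (uchar*)(img->imageData + y * img->widthStep);
        for (int x = 0; x < width; ++x)
            row[x * img->nChannels + channel] = (uchar)band[y * width + x];
    }
}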
Exemplo n.º 28
int main(int argc, char **argv) {
	double parameter=0.005;		// Percentage of the whole frame
	int i=0,index=0;
	int frame=0;				// Frame number (index)
	int msec;
	int total_frames=0;			// Total frames
	int width_img=0;			// Frame width
	int height_img=0;			// Frame height
	int *check_frames;			// Contains indices of marked frames
	int *list_of_frames;		// List of frames
	int marked_frames=0;			// Marked frames
	double fps=0.0;				// FPS (frames per second)
	FILE *fp;				// TXT file pointer
	CvCapture *capture=0;			// Capture struct
	IplImage *decoded_previous;		// Previous frame (decoded)
	IplImage *decoded_current;		// Current frame (decoded)
	IplImage *bgr_frame;			// Frame
	IplImage *new_frame;			// Frame
	CvSize size;				// Frame size (width x height)
	clock_t start, stop, diff; // Timer
	
	// Text variables
	CvScalar red = CV_RGB(255,0,0);
	CvFont font1;
	int thickness = 2;
	char text1[20] = "0"; // frame number
	char text2[20] = "0"; // frame position in seconds
	double hscale = 1.0;
	double vscale = 1.0;
	double shear = 0.0;

	// Check if the user gave arguments
	if(argc != 4) {
		fprintf(stderr, "\nUSAGE: %s <input_video_file> <output_TXT_file> <parameter>\n", argv[0]);
		return EXIT_FAILURE;
	}

	
	
	/**** STAGE 1: PROCESS FRAMES ****/
	
	capture = cvCreateFileCapture(argv[1]);	// Open video file to start capture
	if(!capture) {
		printf("Error opening video file! (cvCreateFileCapture)\n");
		return EXIT_FAILURE;
	}

	fp=fopen(argv[2],"w");		// Open file to write stats
	if(fp == NULL) {
		printf("Error opening file! (fopen)\n");
		return EXIT_FAILURE;
	}
	
	parameter = atof(argv[3]);	// Percentage of the whole frame

	fps = cvGetCaptureProperty(capture,CV_CAP_PROP_FPS);				// Get FPS
	width_img = cvGetCaptureProperty(capture,CV_CAP_PROP_FRAME_WIDTH);		// Get frame width
	height_img = cvGetCaptureProperty(capture,CV_CAP_PROP_FRAME_HEIGHT);		// Get frame height
	total_frames = cvGetCaptureProperty(capture,CV_CAP_PROP_FRAME_COUNT);		// Get total frames
	size = cvSize(width_img,height_img);						// Get size of frames

	check_frames = (int *)malloc(sizeof(*check_frames) * total_frames);
	list_of_frames = (int *)malloc(sizeof(*list_of_frames) * total_frames);
	if (check_frames == NULL || list_of_frames == NULL) {
		printf("Error allocating memory!\n");
		return EXIT_FAILURE;
	}

	// Initialize arrays
	for(i=0;i<total_frames;i++) {
		check_frames[i]=0;
		list_of_frames[i]=0;
	}
	
	cvInitFont(&font1,CV_FONT_HERSHEY_SIMPLEX,hscale,vscale,shear,thickness,CV_AA);
	
	CvPoint pt1 = cvPoint(5,30);
	CvPoint pt2 = cvPoint(5,70);
	
	fprintf(fp,"Filename\t:\t%s\n\nFrame width\t:\t%d\nFrame height\t:\t%d\nFPS\t\t:\t%f\nTotal frames\t:\t%d\n\n\n\n",argv[1],width_img,height_img,fps,total_frames);
	fprintf(fp,"Start processing frames...\n\n");
	
	start = clock(); // Start timer

	bgr_frame=cvQueryFrame(capture);												// Grab first frame
	decoded_current = cvCreateImage(size, bgr_frame->depth, bgr_frame->nChannels);	// Create the current frame
	decoded_previous = cvCreateImage(size, bgr_frame->depth, bgr_frame->nChannels);	// Create the previous frame
	cvCopy(bgr_frame,decoded_previous,NULL);											// Save the copy
	
	// Grab frames from the video until NULL
	while((bgr_frame=cvQueryFrame(capture)) != NULL) {
		/* When entering this loop, we have already grabbed a frame
		 * so the frame counter starts from 2
		 */
		frame = cvGetCaptureProperty(capture,CV_CAP_PROP_POS_FRAMES);					// Get the current frame number
		
		cvCopy(bgr_frame,decoded_current,NULL);											// Save the copy
		
		/**** START PROCESSING ****/
		comprdiff(decoded_previous, decoded_current, frame, fp, width_img, height_img, &index, parameter);
		/**** END PROCESSING ****/

		cvCopy(bgr_frame,decoded_previous,NULL);	// Save the copy

		if(index==1) {
			check_frames[frame]=1;	// It means that the specific frame is marked
		}
	}
	
	// bgr_frame is owned by the capture and must not be released here
	cvReleaseImage(&decoded_previous);	// Release decoded_previous
	cvReleaseImage(&decoded_current);	// Release decoded_current
	cvReleaseCapture(&capture);			// Release capture
	
	stop = clock();			// Stop timer
	diff = stop - start;	// Get difference between start time and current time;
	fprintf(fp,"\n\nTotal time processing frames : %f minutes\t%f seconds\n", (((float)diff)/CLOCKS_PER_SEC)/60, ((float)diff)/CLOCKS_PER_SEC);
	fprintf(fp,"\n\n\n\nMarked frames\n\n");
	
	for(i=0;i<total_frames;i++) {
		// If we have a marked frame
		if(check_frames[i]==1) {
			list_of_frames[i]=i;
			fprintf(fp,"frame %d\n",i);	// Write to file only marked frames
			marked_frames++;			// Counter to keep track of the total marked frames
		}
	}

	fprintf(fp,"\n\nTotal marked frames\t:\t%d\n",marked_frames);

	// If there are no marked frames, clean up and exit
	if(marked_frames == 0) {
		fclose(fp);
		free(list_of_frames);
		free(check_frames);
		return EXIT_SUCCESS;
	}
	
	
	
	/**** STAGE 2: PIPE PIXELS ****/
	
	capture = cvCreateFileCapture(argv[1]);	// Re-Open video file to start capture
	if(!capture) {
		printf("Error opening video file! (cvCreateFileCapture)\n");
		return EXIT_FAILURE;
	}
	
	fprintf(fp,"Start writing frames...\n\n");
	
	start = clock(); // Start timer
	
	bgr_frame = cvQueryFrame(capture);	// Retrieve frame
	new_frame = cvCreateImage(size, bgr_frame->depth, bgr_frame->nChannels);	// Create the new frame
	
	do {
		frame = cvGetCaptureProperty(capture,CV_CAP_PROP_POS_FRAMES);	// Get the current frame number
		msec = cvGetCaptureProperty(capture,CV_CAP_PROP_POS_MSEC);
		msec=msec/1000;	// Convert milliseconds to seconds
		
		// If the current frame is marked, copy and annotate it; otherwise the previously buffered frame is written again.
		if(frame == list_of_frames[frame]) {
			cvCopy(bgr_frame,new_frame,NULL);	// Save the copy

			sprintf(text1,"%d frame",frame); // int to char via sprintf()
			cvPutText(new_frame,text1,pt1,&font1,red); // frame number

			sprintf(text2,"%d sec",msec); // int to char via sprintf()
			cvPutText(new_frame,text2,pt2,&font1,red); // frame position in seconds

			fwrite(new_frame->imageData, sizeof(unsigned char), new_frame->imageSize, stdout); // Pipe image data to stdout
		} else {
			fwrite(new_frame->imageData, sizeof(unsigned char), new_frame->imageSize, stdout); // Pipe image data to stdout
		}
	} while((bgr_frame=cvQueryFrame(capture)) != NULL);
	
	stop = clock(); 		// Stop timer
	diff = stop - start; 	// Get difference between start time and current time;
	fprintf(fp,"\n\nTotal time writing frames : %f minutes\t%f seconds\n", (((float)diff)/CLOCKS_PER_SEC)/60, ((float)diff)/CLOCKS_PER_SEC);
	fprintf(fp,"Writing completed!\n\n");

	fclose(fp);					// Close file pointer
	free(list_of_frames);		// Free list_of_frames
	free(check_frames);			// Free check_frames
	// bgr_frame is owned by the capture and must not be released here
	cvReleaseImage(&new_frame);	// Release new_frame
	cvReleaseCapture(&capture);	// Release capture

	return EXIT_SUCCESS;
}
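comprdiff() is the project's frame-comparison routine and is not shown here. A plausible minimal sketch, assuming it marks a frame when the changed fraction of pixels between consecutive frames exceeds the given parameter (the per-pixel threshold of 30 is an illustrative assumption):

void comprdiff(IplImage *previous, IplImage *current, int frame, FILE *fp,
               int width, int height, int *index, double parameter)
{
	IplImage *diff = cvCreateImage(cvGetSize(current), IPL_DEPTH_8U,
	                               current->nChannels);
	cvAbsDiff(previous, current, diff);	// per-pixel absolute difference

	long changed = 0;
	int i;
	for (i = 0; i < diff->imageSize; i++)
		if ((unsigned char)diff->imageData[i] > 30)	// assumed threshold
			changed++;

	// Mark the frame when the changed fraction exceeds the parameter
	*index = changed > parameter * width * height * current->nChannels;
	if (*index)
		fprintf(fp, "frame %d marked (%ld pixels changed)\n", frame, changed);

	cvReleaseImage(&diff);
}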
Exemplo n.º 29
int main(int argc, char *argv[])
{
	IplImage* img = 0;
	int height,width,step,channels;
	unsigned char *data;

	// load an image
	img= cvLoadImage("kantai.png");
	if(!img){
		printf("Could not load image file: %s\n",argv[1]);
		exit(0);
	}

	// get the image data
	height    = img->height;
	width     = img->width;
	step      = img->widthStep;
	channels  = img->nChannels;
	data      = (uchar *)img->imageData;

	printf("Processing a %dx%d image with %d channels\n",height,width,channels);
	printf("step = %d\n", step);

	IplImage* imgGrayscale = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1); // 8-bit grayscale is enough.
	// convert to grayscale.
	cvCvtColor(img, imgGrayscale, CV_BGR2GRAY);

	// Create an image for the outputs
	IplImage* imgSobelX = cvCreateImage( cvGetSize(img), IPL_DEPTH_32F, 1 ); // to prevent overflow.
	IplImage* imgSobelY = cvCreateImage( cvGetSize(img), IPL_DEPTH_32F, 1 );
	IplImage* imgSobelAdded = cvCreateImage( cvGetSize(img), IPL_DEPTH_32F, 1 );
	IplImage* imgSobel = cvCreateImage( cvGetSize(img), IPL_DEPTH_8U, 1 ); // final image is enough to be an 8-bit plane.


	// Sobel
	cvSobel(imgGrayscale, imgSobelX, 1, 0, 3);
	cvSobel(imgGrayscale, imgSobelY, 0, 1, 3);
	cvAdd(imgSobelX, imgSobelY, imgSobelAdded);
	cvConvertScaleAbs(imgSobelAdded, imgSobel); //scaled to 8-bit level; important for visibility.


	//----------------------- OUTLINE EXTRACTION ------------------------------
	// Normal diff
	IplImage* imgNormDiff = cvCreateImage(cvGetSize(img), 8, 1);
	cvCopy(imgGrayscale,imgNormDiff);
	norm_diff(imgNormDiff);

	// Roberts
	IplImage* imgRoberts = cvCreateImage(cvGetSize(img), 8, 1);
	cvCopy(imgGrayscale,imgRoberts);
	roberts(imgRoberts);

	// Sobel
	IplImage* imgSobel2 = cvCreateImage(cvGetSize(img), 8, 1);
	cvCopy(imgGrayscale,imgSobel2);
	sobel(imgSobel2);

	// Laplacian
	IplImage* imgLap = cvCreateImage(cvGetSize(img), 8, 1);
	cvCopy(imgGrayscale,imgLap);
	laplacian(imgLap);

	//--------------------------- ENHANCEMENT --------------------------------
	// Laplacian
	IplImage* imgLap2 = cvCreateImage(cvGetSize(img), 8, 3);
	IplImage* imgRed = cvCreateImage(cvGetSize(img), 8, 1);
	IplImage* imgGreen = cvCreateImage(cvGetSize(img), 8, 1);
	IplImage* imgBlue = cvCreateImage(cvGetSize(img), 8, 1);

	cvSplit(img, imgRed, imgGreen, imgBlue, NULL); // note: OpenCV stores BGR, so channel 0 is blue; split/merge use the same order, so the result is unchanged

	laplacian2(imgBlue);
	laplacian2(imgGreen);
	laplacian2(imgRed);
	cvMerge(imgRed,imgGreen,imgBlue, NULL, imgLap2);

	// Variant
	IplImage* imgVariant = cvCreateImage(cvGetSize(img), 8, 3);
	IplImage* imgRed2 = cvCreateImage(cvGetSize(img), 8, 1);
	IplImage* imgGreen2 = cvCreateImage(cvGetSize(img), 8, 1);
	IplImage* imgBlue2 = cvCreateImage(cvGetSize(img), 8, 1);

	cvSplit(img, imgRed2, imgGreen2, imgBlue2, NULL);

	variant(imgBlue2);
	variant(imgGreen2);
	variant(imgRed2);
	cvMerge(imgRed2,imgGreen2,imgBlue2, NULL, imgVariant);

	// Sobel
	IplImage* imgSobel3 = cvCreateImage(cvGetSize(img), 8, 3);
	IplImage* imgRed3 = cvCreateImage(cvGetSize(img), 8, 1);
	IplImage* imgGreen3 = cvCreateImage(cvGetSize(img), 8, 1);
	IplImage* imgBlue3 = cvCreateImage(cvGetSize(img), 8, 1);

	cvSplit(img, imgRed3, imgGreen3, imgBlue3, NULL);

	sobel2(imgBlue3);
	sobel2(imgGreen3);
	sobel2(imgRed3);
	cvMerge(imgRed3,imgGreen3,imgBlue3, NULL, imgSobel3);




	// create a window
	cvNamedWindow("Original", CV_WINDOW_KEEPRATIO);

	cvNamedWindow("Normal different line", CV_WINDOW_KEEPRATIO);
	cvNamedWindow("Roberts line", CV_WINDOW_FREERATIO);
	cvNamedWindow("Sobel line", CV_WINDOW_FREERATIO);
	cvNamedWindow("Laplacian line", CV_WINDOW_KEEPRATIO);

	cvNamedWindow("Laplacian Color", CV_WINDOW_KEEPRATIO);
	cvNamedWindow("Variant", CV_WINDOW_KEEPRATIO);
	cvNamedWindow("Sobel", CV_WINDOW_KEEPRATIO);
	/*cvNamedWindow( "Sobel-x" );
  cvNamedWindow( "Sobel-y" );
  cvNamedWindow( "Sobel-Added" );
  cvNamedWindow( "Sobel-Added (scaled)" );*/

	// show the image
	cvShowImage("Original", img);
	cvShowImage("Normal different line", imgNormDiff);
	cvShowImage("Roberts line",imgRoberts);
	cvShowImage("Sobel line", imgSobel2);
	cvShowImage("Laplacian line", imgLap);

	cvShowImage("Laplacian Color", imgLap2);
	cvShowImage("Variant", imgVariant);
	cvShowImage("Sobel", imgSobel3);

	/*cvShowImage("Sobel-x", imgSobelX);
  cvShowImage("Sobel-y", imgSobelY);
  cvShowImage("Sobel-Added", imgSobelAdded);
  cvShowImage("Sobel-Added (scaled)", imgSobel);*/

	// wait for a key
	cvWaitKey(0);

	// release the image
	cvReleaseImage(&img);
	cvReleaseImage(&imgGrayscale);
	cvReleaseImage(&imgNormDiff);
	cvReleaseImage(&imgRoberts);
	cvReleaseImage(&imgSobel2);
	cvReleaseImage(&imgLap);

	cvReleaseImage(&imgLap2);
	cvReleaseImage(&imgVariant);
	cvReleaseImage(&imgSobel3);
	cvReleaseImage(&imgRed);   cvReleaseImage(&imgGreen);  cvReleaseImage(&imgBlue);
	cvReleaseImage(&imgRed2);  cvReleaseImage(&imgGreen2); cvReleaseImage(&imgBlue2);
	cvReleaseImage(&imgRed3);  cvReleaseImage(&imgGreen3); cvReleaseImage(&imgBlue3);

	cvReleaseImage(&imgSobelX);
	cvReleaseImage(&imgSobelY);
	cvReleaseImage(&imgSobelAdded);
	cvReleaseImage(&imgSobel);


	return 0;
}
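The helpers norm_diff(), roberts(), sobel(), laplacian() and their color variants are defined elsewhere. As one illustrative sketch (not the original code), an in-place Roberts cross operator consistent with how roberts() is called above could be:

#include <stdlib.h>  /* abs */

void roberts(IplImage* img)  /* assumed in-place on 8-bit single-channel data */
{
	IplImage* src = cvCloneImage(img);
	for (int y = 0; y < img->height - 1; y++) {
		uchar* r0  = (uchar*)(src->imageData + y * src->widthStep);
		uchar* r1  = (uchar*)(src->imageData + (y + 1) * src->widthStep);
		uchar* out = (uchar*)(img->imageData + y * img->widthStep);
		for (int x = 0; x < img->width - 1; x++) {
			int gx  = r0[x] - r1[x + 1];   /* diagonal difference */
			int gy  = r0[x + 1] - r1[x];   /* anti-diagonal difference */
			int mag = abs(gx) + abs(gy);
			out[x]  = (uchar)(mag > 255 ? 255 : mag);
		}
	}
	cvReleaseImage(&src);
}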
Exemplo n.º 30
void initialize(int argc, char** argv ){
	IplImage* image;
    // Structure for getting video from camera or avi
    CvCapture* capture = 0;

    // Images to capture the frame from video or camera or from file
    IplImage *frame, *frame_copy = 0;

    // Used for calculations
    int optlen = strlen("--cascade=");

    // Input file name for avi or image file.
    const char* input_name;

    // Check for the correct usage of the command line
    if( argc > 1 && strncmp( argv[1], "--cascade=", optlen ) == 0 ){
        cascade_name = argv[1] + optlen;
        input_name = argc > 2 ? argv[2] : 0;
    }else{
    	cerr<< cascade_name << " Usage: facedetect --cascade=\"<cascade_path>\" [filename|camera_index]\n"<<endl;
    	throw BAD_ARGUMENT();
    }

    // Load the HaarClassifierCascade
    cascade = (CvHaarClassifierCascade*)cvLoad( cascade_name, 0, 0, 0 );
    // Check whether the cascade has loaded successfully. Else report and error and quit
    if( !cascade ){
        cerr<<"ERROR: Could not load classifier cascade\n"<<endl;
        throw BAD_ARGUMENT();
    }

    // Allocate the memory storage
   // storage = cvCreateMemStorage(0);

    // Find whether to detect the object from file or from camera.
    if( !input_name || (isdigit(input_name[0]) && input_name[1] == '\0') )
        capture = cvCaptureFromCAM( !input_name ? 0 : input_name[0] - '0' );
    else
        capture = cvCaptureFromAVI( input_name );
    cvWaitKey(2000);
    //cvSetCaptureProperty(capture, CV_CAP_PROP_POS_AVI_RATIO, (double)0.9);
    //cvSetCaptureProperty(capture,CV_CAP_PROP_FORMAT,1);
    //cvSetCaptureProperty( capture, CV_CAP_PROP_FRAME_WIDTH, 256);
    //cvSetCaptureProperty( capture, CV_CAP_PROP_FRAME_HEIGHT, 256);
    // Create a new named window with title: result
    //cvNamedWindow( "result", 1 );

    // Find if the capture is loaded successfully or not.
    // If loaded succesfully, then:
    if( capture ){
        // Capture from the camera.
        for(;;){
            // Capture the frame and load it in IplImage
            if( !cvGrabFrame( capture )){
                break;
            }

            frame = cvRetrieveFrame( capture );

            // If the frame does not exist, quit the loop
            if( !frame ){
                break;
            }

            // Allocate framecopy as the same size of the frame
            if( !frame_copy )
                frame_copy = cvCreateImage( cvSize(frame->width,frame->height),
                                            IPL_DEPTH_8U, frame->nChannels );

            // Check the origin of image. If top left, copy the image frame to frame_copy.
            if( frame->origin == IPL_ORIGIN_TL )
                cvCopy( frame, frame_copy, 0 );
            // Else flip and copy the image
            else
            	cvFlip( frame, frame_copy, 0 );

            extractFace(frame_copy,cascade);
            //cvShowImage("Result",frame_copy);
            // Wait for a while before proceeding to the next frame
//            cvWaitKey(20);
            if( cvWaitKey( 20 ) == 27 ) //exit if Esc is pressed(repeatedly)
                break;
        }

        // Release the images, and capture memory
        cvReleaseImage( &frame_copy );
        cvReleaseCapture( &capture );
    }

    // If the capture is not loaded successfully, then:
    else{
        // Assume the image to be lena.jpg, or the input_name specified
        const char* filename = input_name ? input_name : (char*)"lena.jpg";

        // Load the image from that filename
        image = cvLoadImage( filename, 1 );

        // If Image is loaded successfully, then:
        if( image ){
        	extractFace(image,cascade);
        	//cvShowImage("result",image);
            // Wait for user input
            cvWaitKey(0);

            // Release the image memory
           // cvReleaseImage( &image );
        }
        else{
            /* assume it is a text file containing the
               list of the image filenames to be processed - one per line */
            FILE* f = fopen( filename, "rt" );
            if( f ){
                char buf[1000+1];

                // Get the line from the file
                while(fgets( buf, 1000, f )){

                    // Remove the spaces if any, and clean up the name
                    int len = (int)strlen(buf);
                    while( len > 0 && isspace(buf[len-1]) )
                        len--;
                    buf[len] = '\0';

                    // Load the image from the filename present in the buffer
                    image = cvLoadImage( buf, 1 );

                    // If the image was loaded successfully, then:
                    if( image ){
						//return image;
                    	//cvShowImage("result",image);
                    	extractFace(image,cascade);
                        // Wait for the user input, and release the memory
                        //cvWaitKey(0);
                        //cvReleaseImage( &image );
                    }
                }
                // Close the file
                fclose(f);
            }
        }
    }
    // Destroy the window previously created with filename: "result"
    //cvDestroyWindow("result");
}
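extractFace() is declared elsewhere in this project. A minimal sketch consistent with how it is called above (detect faces with the loaded cascade and hand each region off for further processing) might be:

// Sketch under assumptions, not the project's implementation.
void extractFace(IplImage* img, CvHaarClassifierCascade* cascade)
{
    CvMemStorage* storage = cvCreateMemStorage(0);
    CvSeq* faces = cvHaarDetectObjects(img, cascade, storage, 1.1, 3,
                                       CV_HAAR_DO_CANNY_PRUNING,
                                       cvSize(30, 30));
    for (int i = 0; i < (faces ? faces->total : 0); i++) {
        CvRect* r = (CvRect*)cvGetSeqElem(faces, i);
        cvSetImageROI(img, *r);
        // ... crop, normalize, or store the face region here ...
        cvResetImageROI(img);
    }
    cvReleaseMemStorage(&storage);
}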