Example #1
cv::Mat readFrameIndex(cv::VideoCapture &cap, int frameNum){
	// frameNum is 1-based; CAP_PROP_POS_FRAMES is 0-based
	int idx = frameNum - 1;
	cv::Mat frame;
	cap.set(cv::CAP_PROP_POS_FRAMES, idx);
	// retrieve() alone does not advance the stream; grab() first,
	// then decode the frame that was just grabbed
	if (cap.grab())
		cap.retrieve(frame);
	return frame;
}
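Note that set(cv::CAP_PROP_POS_FRAMES) only repositions the stream; a grab()/retrieve() pair (or read()) is still needed to decode the frame at the new position. A minimal usage sketch of the helper above, assuming a hypothetical input file video.mp4:

#include <opencv2/opencv.hpp>

int main() {
	cv::VideoCapture cap("video.mp4");        // hypothetical input file
	if (!cap.isOpened()) return -1;
	cv::Mat tenth = readFrameIndex(cap, 10);  // 1-based frame index, as above
	if (!tenth.empty()) cv::imwrite("frame10.png", tenth);
	return 0;
}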
Example #2
void WriteCameraImage (const char sCameraImg[])
{
/*// Call capGrabFrame twice to flush images stored in the camera queue.  This
// makes the screen image more responsive to the current position of the
// subject, especially when using slow stacked ASM models.
// TODO does this slow down the frame rate and is there a better way of doing this?

capGrabFrame(hgCapWnd);
capGrabFrame(hgCapWnd);

// now get the actual frame we want, and save it to disk

capGrabFrame(hgCapWnd);
capFileSaveDIB(hgCapWnd, sCameraImg);*/

	cv::Mat frame;
	cam.retrieve(frame);
	img = Image(frame.cols, frame.rows, false);
	// copy the whole pixel buffer; sizeof(img) is only the size of the
	// Image struct itself, not the size of its data
	memcpy(img.buf, frame.ptr(), frame.total() * frame.elemSize());
	cv::imwrite(sCameraImg, frame);
}
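The commented-out VFW code flushed stale frames from the camera queue by grabbing twice before saving, and the TODO asks whether there is a better way. A rough cv::VideoCapture equivalent of that flush, as a sketch (the right number of discarded grabs depends on the driver's buffer depth, so treat flushCount as a tuning knob):

#include <opencv2/opencv.hpp>

// Discard buffered frames so the next frame reflects the current scene.
cv::Mat grabFreshFrame(cv::VideoCapture &cam, int flushCount = 2) {
	for (int i = 0; i < flushCount; ++i)
		cam.grab();        // grab() skips decoding, so draining is cheap
	cv::Mat frame;
	cam.read(frame);       // grab + retrieve the frame we actually want
	return frame;
}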
Example #3
/**
 * @function main
 */
int main( int argc, char* argv[] ) {
  
  // Filter
  gFilterLimits.resize(6);
  //gFilterLimits << -0.35, 0.35, -0.70, 0.70, 1.5, 2.4; // Kinect
  gFilterLimits << -1.0, 1.0, -1.5, 1.5, 0.35, 2.0; // Asus on top of Crichton


  ObjectsDatabase mOd;
  mOd.init_classifier();
  mOd.load_dataset();

  gCapture.open( cv::CAP_OPENNI2 );
  
  if( !gCapture.isOpened() ) {
    printf("\t [ERROR] Could not open the capture object \n");
    return -1;
  }

  gCapture.set( cv::CAP_PROP_OPENNI2_MIRROR, 0.0 );
  gCapture.set( cv::CAP_PROP_OPENNI_REGISTRATION, -1.0 );
  gF = (float)gCapture.get( cv::CAP_OPENNI_DEPTH_GENERATOR_FOCAL_LENGTH );

  cv::namedWindow( gWindowName, cv::WINDOW_AUTOSIZE );
 
  ::google::InitGoogleLogging( argv[0] );
  
  for(;;) {

    if( !gCapture.grab() ) {
      printf("\t * ERROR Could not grab a frame \n");
      return -1;
    }

    gCapture.retrieve( gRgbImg, cv::CAP_OPENNI_BGR_IMAGE );
    gCapture.retrieve( gPclMap, cv::CAP_OPENNI_POINT_CLOUD_MAP );

    if( gIsSegmentedFlag ) { drawSegmented(); }
    cv::imshow( gWindowName, gRgbImg );

    char k = cv::waitKey(30);
    if( k == 'q' ) {
      printf("\t Finishing program \n");
      break;
    } 

    /** Recognize */
    else if( k == 'i' ) {

      // Process image
      process();
      gLabels.resize(gClusters.size() );
      gIndex.resize(gClusters.size() );      
      // Store images
      for( size_t i = 0; i < gClusters.size(); ++i ) {

        int xl = gBoundingBoxes[i](0);
        int yl = gBoundingBoxes[i](1);
        int xw = gBoundingBoxes[i](2) - gBoundingBoxes[i](0);
        int yw = gBoundingBoxes[i](3) - gBoundingBoxes[i](1);

        cv::Mat img( gRgbImg, cv::Rect( xl, yl, xw, yw ) );

        // Predict
        mOd.classify( img, gIndex[i], gLabels[i] );

        cv::putText( gRgbImg, gLabels[i],
                     cv::Point( gBoundingBoxes[i](0), gBoundingBoxes[i](1) ),
                     cv::FONT_HERSHEY_SIMPLEX, 1,
                     gColors[i], 2 );

        mOd.sayIt( gIndex[i] );
      }

    } // else if 'i'
    
    
  } // for
  
  return 0;
} // main
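Stripped of the recognition and drawing code, the capture pattern above is one grab() per sensor snapshot followed by one retrieve() per OpenNI output channel. A minimal sketch of just that loop:

#include <opencv2/opencv.hpp>

int main() {
  cv::VideoCapture cap( cv::CAP_OPENNI2 );
  if( !cap.isOpened() ) return -1;
  cv::Mat bgr, depth, cloud;
  while( cap.grab() ) {                                      // one snapshot...
    cap.retrieve( bgr,   cv::CAP_OPENNI_BGR_IMAGE );         // ...decoded into
    cap.retrieve( depth, cv::CAP_OPENNI_DEPTH_MAP );         // several aligned
    cap.retrieve( cloud, cv::CAP_OPENNI_POINT_CLOUD_MAP );   // output channels
    cv::imshow( "bgr", bgr );
    if( cv::waitKey(30) == 'q' ) break;
  }
  return 0;
}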
Example #4
void OpenCVTemplateApp::makeGUI() {
    interface->clear();
    interface->addButton("load image", [this] {
        auto path = ci::app::getOpenFilePath();
        image = cv::imread(path.string());
        std::cout <<"cols "<<image.cols << std::endl;
        std::cout <<"rows "<<image.rows << std::endl;
        std::cout <<"channels "<<image.channels() << std::endl;
        imageTexture = gl::Texture::create(fromOcv(image));
    });
    interface->addButton("load video", [this] {
        auto path = ci::app::getOpenFilePath();
        video.open(path.string());
        frameWidth = video.get(cv::CAP_PROP_FRAME_WIDTH);
        frameHeight = video.get(cv::CAP_PROP_FRAME_HEIGHT);
        totalFrames = video.get(cv::CAP_PROP_FRAME_COUNT);
        video.read(frame);
        if(isGrayScale) {
            cv::cvtColor(frame, frame, cv::COLOR_BGR2GRAY);
        }
        frameTexture = gl::Texture::create(fromOcv(frame));
        makeGUI();
    });
    interface->addSeparator();
    if(frameTexture) {
        interface->addParam("gray scale", &isGrayScale).updateFn([this] {
            video.retrieve(frame);
            if(isGrayScale) {
                cv::cvtColor(frame, frame, cv::COLOR_BGR2GRAY);
            }
            frameTexture = gl::Texture::create(fromOcv(frame));
            makeGUI();
        });
        interface->addParam("nb of feature",&nbOfFeaturePoints).min(1).max(1000);
        if(isGrayScale) {
            interface->addButton("get feature points", [this] {
                cv::goodFeaturesToTrack(frame, featurePoints, nbOfFeaturePoints, 0.01, 10, cv::Mat(), 3, 0, 0.04);
            });
        }
        interface->addSeparator();
        interface->addParam("frame",&frameIndex).min(0).max(totalFrames-1).step(1).updateFn([this] {
            video.set(cv::CAP_PROP_POS_FRAMES,frameIndex);
            video.read(frame);
            if(isGrayScale) {
                cv::cvtColor(frame, frame, cv::COLOR_BGR2GRAY);
            }
            frameTexture = gl::Texture::create(fromOcv(frame));
        });
        interface->addSeparator();
        interface->addParam("speed", &frameSpeed).min(1).max(1000).step(1);
        interface->addButton("play",[this] {
            currentState = PLAY;
            makeGUI();
        });
        if(currentState == PLAY) {
            interface->addButton("pause",[this] {
                currentState = PAUSE;
                makeGUI();
            });
        }
    }
}
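The "get feature points" button only appears in grayscale mode because cv::goodFeaturesToTrack expects a single-channel image. A self-contained sketch of the same call with the parameters used above, assuming a hypothetical input file frame.png:

#include <opencv2/opencv.hpp>
#include <vector>

int main() {
    cv::Mat gray = cv::imread("frame.png", cv::IMREAD_GRAYSCALE); // hypothetical file
    if(gray.empty()) return -1;
    std::vector<cv::Point2f> corners;
    // quality level 0.01, min distance 10 px, block size 3, no Harris detector
    cv::goodFeaturesToTrack(gray, corners, 100, 0.01, 10, cv::Mat(), 3, false, 0.04);
    for(const auto &p : corners)
        cv::circle(gray, p, 3, cv::Scalar(255), -1);              // mark each corner
    cv::imwrite("corners.png", gray);
    return 0;
}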
Example #5
int main()
{
    double timeUse;
    struct timeval startTime, stopTime;

    cv::Mat rawImage, grayImage;
    std::vector<cv::Rect> faces;

    init();
    spider_head(45);

    std::stringstream logStream, outStream;
    float faceX, faceY;
    int8_t rotateDegree = 0;
    uint8_t rotatePwm = 0;
    uint8_t lostCounter = 0;
    while(running)
    {
        logStream.str("");
        logStream << std::fixed << std::setprecision(3);
        outStream.str("");
        outStream << std::fixed << std::setprecision(3);

        gettimeofday(&startTime, NULL);

        camera.retrieve(rawImage);

        cv::cvtColor(rawImage, grayImage, cv::COLOR_BGR2GRAY);
        cv::equalizeHist(grayImage, grayImage);

        faces.clear();
        face_cascade.detectMultiScale(grayImage, faces, 1.1,
            2, 0|cv::CASCADE_SCALE_IMAGE, cv::Size(30, 30));
        if(faces.empty())
        {
            if(lostCounter != 0)
            {
                lostCounter --;
                spider_rotate_degree(rotateDegree, rotatePwm, NULL, NULL);

                logStream << "Face lost, lost counter: " << static_cast<int>(lostCounter) << ", ";
            }
            else
            {
                spider_move_stop();
                
                logStream << "No face!";
            }
        }
        else
        {
            lostCounter = 5;
            faceX = faces[0].x+faces[0].width*0.5;
            faceY = faces[0].y+faces[0].height*0.5;

            logStream << "Get face, size: " << faces.size() << ", ";
            logStream << "coordinate: x " << faceX << " y " << faceY;

            if(faceX < 80)
            {
                rotateDegree = -5;
                rotatePwm = 80;
            }
            else if(faceX > 80)
            {
                rotateDegree = 5;
                rotatePwm = 80;
            }

            if(faceX < 70 || faceX > 90)
            {
                spider_rotate_degree(rotateDegree, rotatePwm, NULL, NULL);
            }

            //spider_move(1, 55);
        }

        gettimeofday(&stopTime, NULL);
        timeUse = stopTime.tv_sec - startTime.tv_sec + (stopTime.tv_usec - startTime.tv_usec)/1000000.0;
        if(timeUse < 0.25)
            usleep((0.25 - timeUse) * 1000000);

        outStream << "Time use: " << timeUse << "s, " << logStream.str();
        std::cout << outStream.str() << std::endl;
    }

    void* result;
    pthread_join(grabThread, &result);

    spider_head(35);
    spider_move_stop();
    spider_rotate_stop();
    spider_close();

    camera.release();

    std::cout << "Program exit!" << std::endl;

    return 0;
}
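The loop above paces itself to roughly four iterations per second by measuring elapsed time with gettimeofday and sleeping out the remainder with usleep. A portable sketch of the same throttling using std::chrono instead:

#include <chrono>
#include <iostream>
#include <thread>

int main() {
    using clock = std::chrono::steady_clock;
    const auto period = std::chrono::milliseconds(250);    // ~4 Hz, as above
    for(int i = 0; i < 10; ++i) {
        auto start = clock::now();
        // ... grab frame, detect faces, steer the robot ...
        auto elapsed = clock::now() - start;
        if(elapsed < period)
            std::this_thread::sleep_for(period - elapsed); // sleep out the budget
        std::cout << "iteration " << i << " done" << std::endl;
    }
    return 0;
}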
Example #6
int main() {

	// open the camera
	video_capture.open(0);
	if (!video_capture.isOpened()){
		std::cerr << "Could not open video" << std::endl;
		return -1;
	}

	// grab the first image, used to set up the parameters
	video_capture >> input_image;

	// read the camera parameters
	camera_params.readFromXMLFile("camera.yml");
	camera_params.resize(input_image.size());

	// create the display windows
	cv::namedWindow("thres",1);
	cv::namedWindow("in",1);

	marker_dector.getThresholdParams(threshold_param1, threshold_param2);
	i_threshold_param1 = threshold_param1;
	i_threshold_param2 = threshold_param2;

	cv::createTrackbar("ThresParam1", "in",&i_threshold_param1, 13, cvTackBarEvents);
	cv::createTrackbar("ThresParam2", "in",&i_threshold_param2, 13, cvTackBarEvents);

	char key=0;
	int index=0;

	// capture until ESC is pressed or the video ends
	while( key!=27 && video_capture.grab())
	{
		//copy image
		video_capture.retrieve(input_image);
		
		//number of images captured
		index++; 

		//for checking the speed
		double tick = (double)cv::getTickCount();
		//Detection of markers in the image passed
		marker_dector.detect(input_image, markers, camera_params, marker_size);
		// check the speed by calculating the mean time over all iterations
		average_time.first += ((double)cv::getTickCount() - tick) / cv::getTickFrequency();
		average_time.second++;
		std::cout << "Time detection=" << 1000 * average_time.first / average_time.second << " milliseconds" << std::endl;

		//print marker info and draw the markers in image
		input_image.copyTo(input_image_copy);
		for(unsigned int i = 0; i < markers.size(); i++){
			std::cout << markers[i] << std::endl;
			markers[i].draw(input_image, cv::Scalar(0, 0, 255), 2);
		}

		//draw a 3d cube in each marker if there is 3d info
		if ( camera_params.isValid())
			for(unsigned int i = 0; i < markers.size(); i++){
				aruco::CvDrawingUtils::draw3dCube(input_image, markers[i], camera_params);
				aruco::CvDrawingUtils::draw3dAxis(input_image, markers[i], camera_params);
			}

		// DONE! Easy, right?
		std::cout << std::endl << std::endl << std::endl;
		// show the input with augmented information and the thresholded image
		cv::imshow("in", input_image);
		cv::imshow("thres", marker_dector.getThresholdedImage());

		key = cv::waitKey(0); // wait for a key to be pressed
	}
	return 0;
}
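The running-average timing idiom above (bracketing the detection with cv::getTickCount() and accumulating seconds and sample count into a pair) works for any workload; a minimal sketch:

#include <opencv2/opencv.hpp>
#include <iostream>
#include <utility>

int main() {
	std::pair<double, int> average_time(0.0, 0);  // total seconds, sample count
	for(int i = 0; i < 100; i++) {
		double tick = (double)cv::getTickCount();
		cv::Mat work;
		cv::GaussianBlur(cv::Mat::ones(512, 512, CV_32F), work,
		                 cv::Size(9, 9), 2.0);    // stand-in for the timed work
		average_time.first += ((double)cv::getTickCount() - tick) / cv::getTickFrequency();
		average_time.second++;
	}
	std::cout << "mean: " << 1000 * average_time.first / average_time.second
	          << " milliseconds" << std::endl;
	return 0;
}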
Example #7
int main(int argc, char** argv)
{
    
    /// READ PARAMETERS
    if(!readParameters(argc, argv))
        return -1;
    
    /// CREATE UNDISTORTED CAMERA PARAMS
    CameraParamsUnd=CameraParams;
    CameraParamsUnd.Distorsion=cv::Mat::zeros(4,1,CV_32F);
    cout<<" REATE UNDISTORTED CAMERA PARAMS "<<endl;
    
    /// SET BOARD DETECTOR PARAMETERS
    TheBoardDetector.setParams(TheBoardConfig,CameraParamsUnd,TheMarkerSize);
    
    /// CAPTURE FIRST FRAME
    TheVideoCapturer.grab();
    TheVideoCapturer.retrieve ( TheInputImage );
    cv::undistort(TheInputImage,TheInputImageUnd,CameraParams.CameraMatrix,CameraParams.Distorsion);
    cout<<" CAPTURE FIRST FRAME "<<endl;
    /// INIT OGRE
    initOgreAR(CameraParamsUnd, TheInputImageUnd.ptr<uchar>(0));
    cout<<" OGRE initiated "<<endl;
    while (TheVideoCapturer.grab())
    {
        
        /// READ AND UNDISTORT IMAGE
        TheVideoCapturer.retrieve ( TheInputImage );
        cv::undistort(TheInputImage,TheInputImageUnd,CameraParams.CameraMatrix,CameraParams.Distorsion);
        
        /// DETECT BOARD
        float probDetect = TheBoardDetector.detect(TheInputImageUnd);
        cout<<" detection: "<<probDetect<<endl;
        /// UPDATE BACKGROUND IMAGE
        mTexture->getBuffer()->blitFromMemory(mPixelBox);
        
        /// UPDATE SCENE
        if ( probDetect>0.2) ogreNode->setVisible(true);
        else ogreNode->setVisible(false);
        
        // set node pose
        double position[3], orientation[4];
        TheBoardDetector.getDetectedBoard().OgreGetPoseParameters(position, orientation);
        ogreNode->setPosition( position[0], position[1], position[2]  );
        ogreNode->setOrientation( orientation[0], orientation[1], orientation[2], orientation[3]  );
        
        // Update animation
        double deltaTime = 1.2*root->getTimer()->getMilliseconds()/1000.;
        baseAnim->addTime(deltaTime);
        topAnim->addTime(deltaTime);
        root->getTimer()->reset();
        
        /// RENDER FRAME
        if(root->renderOneFrame() == false) break;
        Ogre::WindowEventUtilities::messagePump();
        
        /// KEYBOARD INPUT
        keyboard->capture();
        if (keyboard->isKeyDown(OIS::KC_ESCAPE)) break;
    }
    
    im->destroyInputObject(keyboard);
    im->destroyInputSystem(im);
    im = 0;
    
    delete root;
    return 0;
}
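Each frame above is undistorted once up front so the board detector and the Ogre background texture can both assume zero distortion afterwards. The cv::undistort step in isolation, as a sketch with hypothetical intrinsics (in the example they come from readParameters()):

#include <opencv2/opencv.hpp>

int main() {
    // Hypothetical pinhole intrinsics and distortion coefficients
    cv::Mat K = (cv::Mat_<double>(3, 3) << 500, 0, 320, 0, 500, 240, 0, 0, 1);
    cv::Mat dist = (cv::Mat_<double>(4, 1) << -0.2, 0.05, 0, 0);
    cv::Mat src = cv::imread("input.png");   // hypothetical input frame
    if(src.empty()) return -1;
    cv::Mat und;
    cv::undistort(src, und, K, dist);        // remap to a zero-distortion image
    cv::imwrite("undistorted.png", und);
    return 0;
}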