Example #1
	static int openVideo(cv::VideoCapture &capture, std::string filename){
		std::cout << "opening video " << filename << std::endl;
		capture.open(filename);
		if (!capture.isOpened()) {
			// fall back to a webcam index if the filename is numeric
			std::cout << "opening camera " << filename << std::endl;
			int id_webcam = std::atoi(filename.c_str());
			capture.open(id_webcam);
		}
		if (!capture.isOpened()) {
			// std::cout << "cannot open " << filename << std::endl;
			return 0;
		}
		return 1;
	}
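A minimal caller for this helper might look like the sketch below; the main wrapper is an assumption for illustration and presumes it lives in the same file as the static openVideo.

int main(int argc, char** argv) {
    cv::VideoCapture capture;
    // a numeric argument such as "0" falls through to the webcam branch
    std::string source = (argc > 1) ? argv[1] : "0";
    if (!openVideo(capture, source)) {
        std::cerr << "could not open " << source << std::endl;
        return 1;
    }
    return 0;
}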
Example #2
VCrop::VCrop(cv::VideoCapture &capture, const float &x, const float &y, const float &size) : capture(&capture) {
    if (!capture.isOpened()) {
        std::string error_message = "Error when reading input stream";
        LOG(ERROR) << error_message;
        throw std::runtime_error(error_message); // throw an exception type, not a bare std::string
    }

    int frame_width = capture.get(CV_CAP_PROP_FRAME_WIDTH);
    int frame_height = capture.get(CV_CAP_PROP_FRAME_HEIGHT);
    VLOG(2) << "Frame Width: " << frame_width;
    VLOG(2) << "Frame Height: " << frame_height;
    LOG_IF(FATAL, frame_width <= 0) << "Frame width is not positive.";
    LOG_IF(FATAL, frame_height <= 0) << "Frame height is not positive.";
    float diameter = sqrt(frame_width * frame_width + frame_height * frame_height);
    cv::Point2i top_left(frame_width * x, frame_height * y);
    cv::Size2i rect_size(diameter * size, diameter * size);
    if (top_left.x + rect_size.width > frame_width || top_left.y + rect_size.height > frame_height) {
        LOG(ERROR) << "Size(" << rect_size << ") to too large for given x(" << top_left.x << ") and y(" << top_left.y << ") coordinate.";
    }
    roi = new cv::Rect(top_left, rect_size);
    VLOG(1) << "RoI: \t" << *roi;

    frame_rate = capture.get(CV_CAP_PROP_FPS);
    if (std::isnan(frame_rate) || frame_rate <= 0) {
        LOG(WARNING) << "Failed to get frame rate, setting rate to 10fps.";
        frame_rate = 10;
    }
    VLOG(1) << "Frame Rate: \t" << frame_rate;
}
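When the requested region overflows the frame, the constructor above only logs an error. A hypothetical fix could clamp the rectangle with OpenCV's rectangle-intersection operator instead; clampToFrame is an illustrative name, not part of the original class.

// Sketch: intersect the requested RoI with the frame bounds so the
// resulting rectangle is always valid.
cv::Rect clampToFrame(const cv::Rect &requested, int frame_width, int frame_height) {
    return requested & cv::Rect(0, 0, frame_width, frame_height);
}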
Example #3
 VideoCaptureManager()
 : cap_(0) // constructing with index 0 opens the default camera
 {
     if(!cap_.isOpened()){
         std::cerr << "failed to open" << std::endl;
     }
 }
Example #4
///////////////////////////////////////////////////////////////////////////////
// initialize the video for processing
///////////////////////////////////////////////////////////////////////////////
void initVideo( string videoFile ){

	vidCap = cv::VideoCapture( videoFile );

	if( !vidCap.isOpened() ){
		cout << "Video did not open" << endl;
		shutItDown(-1);
	}
	width = vidCap.get(CV_CAP_PROP_FRAME_WIDTH);
	height = vidCap.get(CV_CAP_PROP_FRAME_HEIGHT);
	cout << "width: " << width << endl;
	cout << "height: " << height << endl;

	currentImage = cv::Mat(height, width, CV_8UC3); // cv::Mat takes (rows, cols)
	frameCount = 0;
	vidCap.read(currentImage);
	cv::cvtColor(currentImage, currGray, CV_BGR2GRAY);
	swap(prevGray, currGray); swap(prevImage, currentImage);
	flow = cv::Mat(currentImage.size(), CV_32FC2);

	termcrit = cv::TermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20, 0.03);
	needToInit = true;

	rel_vec_x = 1.0f;
	rel_vec_y = 0.0f;
}
Example #5
void initVideoStream(cv::VideoCapture &cap)
{
	if (cap.isOpened())
		cap.release();

	cap.open(0); // open the default camera
}
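Since initVideoStream does not report failure itself, a caller has to re-check the capture afterwards, as in this assumed usage sketch:

cv::VideoCapture cap;
initVideoStream(cap);
if (!cap.isOpened()) {
    std::cerr << "default camera could not be opened" << std::endl;
}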
Example #6
bool Director::initCamera()
{
#if SIMULATOR == 1
	if (!capture.isOpened()) {
		cerr << "Error: Can not open camera!" << endl;
		return false;
	}
#else
	fc2Error error = fc2CreateContext(&context);
	if (error != FC2_ERROR_OK) {
		cerr << "Error in fc2CreateContext: " << error << endl;
		return false;
	}

	unsigned int numCameras = 0;
    error = fc2GetNumOfCameras(context, &numCameras);
    if (error != FC2_ERROR_OK) {
		cerr << "Error in fc2GetNumOfCameras: " << error << endl;
        return false;
    }
    if (numCameras == 0) {
        // No cameras detected
		cerr << "No cameras detected." << endl;
        return false;
    }

    // Get the 0th camera
	fc2PGRGuid guid;
    error = fc2GetCameraFromIndex(context, 0, &guid);
    if (error != FC2_ERROR_OK) {
		cerr << "Error in fc2GetCameraFromIndex: " << error << endl;
        return false;
    }    

    error = fc2Connect(context, &guid);
    if (error != FC2_ERROR_OK) {
		cerr << "Error in fc2Connect: " << error << endl;
        return false;
    }

    error = fc2StartCapture(context);
    if (error != FC2_ERROR_OK) {
		cerr << "Error in fc2StartCapture: " << error << endl;
        return false;
    }

	error = fc2CreateImage(&rawImage);
	if (error != FC2_ERROR_OK) {
		cerr << "Error in fc2CreateImage: " << error << endl;
		return false;
	}
	error = fc2CreateImage(&image);
	if (error != FC2_ERROR_OK) {
		cerr << "Error in fc2CreateImage: " << error << endl;
		return false;
	}
#endif

	return true;
}
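A matching teardown is not shown in the example; the sketch below is an assumption that mirrors initCamera() in reverse order, using the usual FlyCapture2 C-API counterparts of the calls above.

// Hypothetical cleanup sketch for the resources created in initCamera().
void destroyCamera()
{
	fc2StopCapture(context);
	fc2DestroyImage(&rawImage);
	fc2DestroyImage(&image);
	fc2Disconnect(context);
	fc2DestroyContext(context);
}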
Example #7
void InitOpenCVModules() // initialize the OpenCV data
{

	if (Webcam_mode)
	{
		camera.open(Webcam_number);

		if (!camera.isOpened())  // error-check the video source
		{
			//error in opening the video input
			cerr << "Unable to open Camera Stream" << endl;
			exit(EXIT_FAILURE);
		}


		camera >> Current_Frame; // read a frame from the camera source into Current_Frame
	}

	else
	{
Example #8
 bool isOpened(){
     if(capture_type==0){
         return videoCapture.isOpened();
     }else{
         return imageCapture.isOpened();
     }
 };
Example #9
    static cv::Mat getImageFromCamera() {
        myLog << "get image from camera";

        if(cap.isOpened() == false) {
            myLog << "ERROR: can`t initialise the cam";
        }

        cv::Mat currentImage;
        // read several frames so stale, buffered frames are discarded
        for(int i = 0; i < 6; i++) {
            cap >> currentImage;
        }
        return currentImage;
    }
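The six reads above flush stale frames from the driver buffer. A slightly cheaper variant (a sketch, not from the original) grabs without decoding and only decodes the frame that is kept:

for (int i = 0; i < 5; i++) {
    cap.grab();          // drop buffered frames without decoding them
}
cap >> currentImage;     // decode only the frame we keep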
Example #10
void* grabFunc(void* in_data)
{
    while(running)
    {
        pthread_mutex_lock(&mutexLock);
        if(camera.isOpened())
        {
            camera.grab();
        }
        pthread_mutex_unlock(&mutexLock);
    }

    std::cout << "Grab thread exit." << std::endl;
}
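grab() only acquires a frame; a consumer thread still has to decode it. A hypothetical consumer matching the lock discipline above:

// Sketch: decode the most recently grabbed frame under the same mutex.
cv::Mat frame;
pthread_mutex_lock(&mutexLock);
bool ok = camera.retrieve(frame);
pthread_mutex_unlock(&mutexLock);
if (!ok) {
    // nothing grabbed yet
}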
Example #11
bool readParameters(int argc, char** argv)
{
    
    if (argc<4) {
        usage();
        return false;
    }
    
    TheInputVideo=argv[1];
    
    
    // open the camera ("live") or the video file given on the command line
    if (TheInputVideo=="live") TheVideoCapturer.open(0);
    else TheVideoCapturer.open(TheInputVideo);
    if (!TheVideoCapturer.isOpened())
    {
        cerr<<"Could not open video"<<endl;
        return false;
    }
    
    // read intrinsic file
    try {
        CameraParams.readFromXMLFile(argv[2]);
    } catch (std::exception &ex) {
        cout<<ex.what()<<endl;
        return false;
    }
    
    // read board file
    try {
        TheBoardConfig.readFromFile(argv[3]);
    } catch (std::exception &ex) {
        cout<<ex.what()<<endl;
        return false;
    }
    
    if(argc>4) TheMarkerSize=atof(argv[4]);
    else TheMarkerSize=1.;
    return true;
}
Example #12
void openCamera(cv::VideoCapture& cap, int argc, const char** argv) {
	cv::CommandLineParser parser(argc, argv, keys);
	int camNum = parser.get<int>("1");

	cap.open(camNum);

	if (!cap.isOpened()) {
		//help();
		std::cout << "***Could not initialize capturing...***\n";
		std::cout << "Current parameter's value: " << camNum << "\n";
		exit(-1);
	}

	cap.set(CV_CAP_PROP_FRAME_WIDTH, 160);
	cap.set(CV_CAP_PROP_FRAME_HEIGHT, 120);
}
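cap.set() on a capture property is only a request, so a caller may want to verify what the driver actually delivered; a sketch:

double w = cap.get(CV_CAP_PROP_FRAME_WIDTH);
double h = cap.get(CV_CAP_PROP_FRAME_HEIGHT);
if (w != 160 || h != 120) {
    std::cout << "driver delivered " << w << "x" << h << " instead\n";
}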
Example #13
void Init(cv::VideoCapture& capture)
{
  if(!capture.isOpened()){
    std::cout << "error starting video capture" << std::endl;
    exit(EXIT_FAILURE); // exit with a nonzero status on error
  }
  //propose a resolution
  capture.set(CV_CAP_PROP_FRAME_WIDTH, RESOLUTION_X);
  capture.set(CV_CAP_PROP_FRAME_HEIGHT, RESOLUTION_Y);
  //get the actual (supported) resolution
  ivWidth = capture.get(CV_CAP_PROP_FRAME_WIDTH);
  ivHeight = capture.get(CV_CAP_PROP_FRAME_HEIGHT);
  std::cout << "camera/video resolution: " << ivWidth << "x" << ivHeight << std::endl;

  cv::namedWindow("MOCTLD", 0); //CV_WINDOW_AUTOSIZE );
  // cv::resizeWindow("MOCTLD", ivWidth, ivHeight);
  cv::setMouseCallback("MOCTLD", MouseHandler);
}
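MouseHandler is not shown in the example; whatever it does, it must match the cv::MouseCallback signature expected by cv::setMouseCallback. The body below is purely hypothetical.

void MouseHandler(int event, int x, int y, int flags, void* param)
{
  if (event == cv::EVENT_LBUTTONDOWN) {
    std::cout << "clicked at " << x << "," << y << std::endl;
  }
}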
Example #14
bool fInitCamera (HWND hMainWnd, int iDeviceIndex)
{
// Create a capture window to capture the output from the camera
// The capture window is not actually displayed (in this application)

/*hgCapWnd = capCreateCaptureWindow(NULL,
                                  WS_CHILD,         // window style
                                  0, 0, 0, 0,       // position and size
                                  hMainWnd, ID_CAP);*/

	if (!cam.isOpened()){
		Err("Can't connect to the camera.");
		return false;
	}

	/*if (!hgCapWnd)
    {
    Err("Can't open the camera display window");
    return 0;
    }
if (!capDriverConnect(hgCapWnd, iDeviceIndex))
    {
    Err("Can't connect to the camera");
    return false;
    }
if (!capDriverGetCaps(hgCapWnd, &gDriverCaps, sizeof(CAPDRIVERCAPS)))
    {
    Err("Can't get capabilities of the camera");
    return false;
    }
if (!capPreview(hgCapWnd, FALSE))   // turn off preview
    {
    Err("capPreview FALSE failed");
    return false;
    }
if (!capGetStatus(hgCapWnd, &gCapStatus, sizeof(CAPSTATUS)))
    {
    Err("Can't get status of the camera");
    return false;
    }
PrintCapStatus();*/
	return true;    // success
}
Example #15
int main() {
    if (!camera.isOpened()) return 1;
	int keyCheck = 0;
	resetTimer();
	MouseGlove.setCenterHSV(140,161,145);
	MouseGlove.setLeftHSV(96,68,118);
	MouseGlove.setRightHSV(38,205,246);
	MouseGlove.setScrollHSV(63,144,204);
	MouseGlove.setCenterColorThreshold(45);
	MouseGlove.setLeftColorThreshold(0);
	MouseGlove.setRightColorThreshold(25);
	MouseGlove.setScrollColorThreshold(0);
	MouseGlove.setScrollAnchorYCoordinate(240);
	MouseGlove.setMinArea(100);
	leftClickStatus = false;
	rightClickStatus = false;
    while(cv::waitKey(10) != 13) {
		if (!camera.read(image)) return 1;
		cv::flip(image,image,1);
		MouseGlove.processCenterMarker(image);
		MouseGlove.processLeftMarker(image);
		MouseGlove.processRightMarker(image);
		MouseGlove.processScrollMarker(image);
		if (getTime() > 0.3) {
			coordenadasMouse = MouseGlove.getCenterMarkerCoordinates();
			MouseGlove.calibrateCoordinates(coordenadasMouse);
		}
		if (MouseGlove.mouseDetected()) {
			MouseGlove.moveMouse(coordenadasMouse);
		}
		//if (MouseGlove.getLeftClickStatus() != leftClickStatus)
		//	toggleLeftClick();
		//if (MouseGlove.getRightClickStatus() != rightClickStatus)
		//	toggleRightClick();
		//if (MouseGlove.scrollDetected()) {
		//	MouseGlove.scroll(coordenadasMouse,MouseGlove.getScrollSpeed());
		//}
		MouseGlove.displayMouseStatus(image);
		cv::imshow("Mouse",image);
    }
}
Example #16
int main() {
    if (!camera.isOpened()) return 1;
	int keyCheck = 0;
	resetTimer();
	MouseGlove.setCenterHSV(104,238,203);
	MouseGlove.setLeftHSV(150,150,232);
	MouseGlove.setRightHSV(15,205,246);
	MouseGlove.setScrollHSV(63,144,204);
	MouseGlove.setCenterColorThreshold(50);
	MouseGlove.setLeftColorThreshold(35);
	MouseGlove.setRightColorThreshold(25);
	MouseGlove.setScrollColorThreshold(30);
	MouseGlove.setScrollAnchorYCoordinate(240);
	MouseGlove.setMinArea(100);
	leftClickStatus = false;
	rightClickStatus = false;
    while(cv::waitKey(10) != 13) {
		if (!camera.read(image)) return 1;
		cv::flip(image,image,1);
		MouseGlove.processCenterMarker(image);
		MouseGlove.processLeftMarker(image);
		MouseGlove.processRightMarker(image);
		MouseGlove.processScrollMarker(image);
		if (getTime() > 0.3) {
			mouseCoordinates = MouseGlove.getCenterMarkerCoordinates();
			MouseGlove.calibrateCoordinates(mouseCoordinates);
		}
		if (MouseGlove.mouseDetected()) {
			MouseGlove.moveMouse(mouseCoordinates);
		}
		if (MouseGlove.getLeftClickStatus() != leftClickStatus)
			toggleLeftClick();
		if (MouseGlove.getRightClickStatus() != rightClickStatus)
			toggleRightClick();
		if (MouseGlove.scrollDetected()) {
			MouseGlove.scroll(mouseCoordinates,MouseGlove.getScrollSpeed());
		}
		MouseGlove.displayMouseStatus(image);
		cv::imshow("Mouse Glove",image);
    }
}
Example #17
void initParams::readAndSet(cv::VideoCapture& cap, char* input){
	
	cv::FileStorage fs(input, cv::FileStorage::READ);
	// the FileStorage constructor above already opened the file
	if(fs.isOpened()){
		fs["frameWidth"] 	>> frameWidth;
		fs["frameHeight"] 	>> frameHeight;
		fs["fps"]		>> fps;
		fs["camId"]		>> camId;
		fs["generateData"]	>> genData;
		fs["visualizeData"]	>> visualize;
		fs["writeVideo"]	>> writeVideo;
		fs["generatePerfData"]	>> genPerfData;
		fs["videoFile"]		>> videoFile;
		fs.release();
		cap.open(videoFile);
		if(!cap.isOpened()){
			std::cerr << "Could not access video file: " << videoFile << std::endl;
			exit(EXIT_FAILURE);
		}
	}
}
Example #18
int main() {
	cout << "Click the colored object you would like to track." << endl;
	if (!camera.isOpened()) return 1;
	ColorTracker.createColorSquare();
	TargetTracker.setThresh(20, 50, 70);
	TargetTracker.setMinContourArea(500);
	while (cv::waitKey(20) != 13) {
		if (!camera.read(image)) return 1;
		cv::flip(image, image, 1);
		processMouseActions();
		if (mouseButtonDown) {
			cv::rectangle(image, boundingRect, tealColor, 1);
			targetSelected = false;
			HSVDefined = false;
		}
		if (targetSelected && !HSVDefined) { 
			targetImage = image(calibratedRect(boundingRect));
			cv::imshow(targetName, targetImage);
			int hue = Hist.getMostAbundantHue(Hist.getHueHistogram(targetImage));
			int sat = Hist.getMostAbundantSat(Hist.getSatHistogram(targetImage));
			int val = Hist.getMostAbundantVal(Hist.getValHistogram(targetImage));
			ColorTracker.setColorSquareHSV(hue, sat, val);
			TargetTracker.setHSVToTrack(hue, sat, val);
			TargetTracker.findTargetAndUpdateRectangle(image);
			HSVDefined = true;
		}
		if (targetSelected && HSVDefined) {
			TargetTracker.findTargetAndUpdateRectangle(image);
			boundingRect = TargetTracker.getBoundingRect();
			targetImage = image(calibratedRect(boundingRect));
			cv::destroyWindow(targetName);
		}
		drawQuadrants(TargetTracker.getTargetCoordinates());
		displayTargetStatus();
		writeTargetStatusToFile();
		cv::imshow(imageName,image);
	}
}
Example #19
void init()
{
    // open the camera
    camera.open(0);

    if(!camera.isOpened())
    {
        std::cout << "Can not find camera!" << std::endl;
        exit(-1);
    }

    camera.set(cv::CAP_PROP_FRAME_WIDTH,160);
    camera.set(cv::CAP_PROP_FRAME_HEIGHT,120);

/*    double width = camera.get(cv::CAP_PROP_FRAME_WIDTH);
    double height = camera.get(cv::CAP_PROP_FRAME_HEIGHT);

    std::cout << "width:" << width << "\t";
    std::cout << "height:" << height << "\t" << std::endl;*/

    face_cascade_name = "/usr/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml";

    if(!face_cascade.load(face_cascade_name))
    {
        std::cout << "can not find face_cascade_file!" << std::endl;
        exit(-1);
    }

    running = true;
    pthread_mutex_init(&mutexLock, NULL);
    pthread_create(&grabThread, NULL, grabFunc, NULL);

    //Setup edi robot mraa control lib
    spider_init();

    signal(SIGINT, clear);
    signal(SIGTERM, clear);
}
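As a hypothetical follow-up to the cascade loaded above (the frame handling here is an assumption, not part of init()):

// Sketch: detect faces on one frame with the cascade loaded in init().
cv::Mat frame, gray;
camera >> frame;
cv::cvtColor(frame, gray, cv::COLOR_BGR2GRAY);
std::vector<cv::Rect> faces;
face_cascade.detectMultiScale(gray, faces, 1.1, 3);
std::cout << "faces found: " << faces.size() << std::endl;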
Example #20
void init()
{
    prog1 = makeProgramObjectFromFiles(vshadername0, fshadername0);
   
    if (!stream1.isOpened()) {
        cout << "Cannot open camera." << endl;
    }

//To load arrows as texture to put on object (keyboard arrows)
    
    //Image from: http://packdog.com/static/images/blog/keys-for-happiness/arrows.gif
    GLuint tex_2d = SOIL_load_OGL_texture
    (		"plainArrows.png",
     SOIL_LOAD_AUTO,
     SOIL_CREATE_NEW_ID,
     SOIL_FLAG_MIPMAPS | SOIL_FLAG_INVERT_Y | SOIL_FLAG_NTSC_SAFE_RGB | SOIL_FLAG_COMPRESS_TO_DXT
     );  //get image
    if(0==tex_2d)
    {
        cout << "soil error" << endl;
    }

}
Example #21
void cam_Server::handle_connection(int client_handle)
{
	// No Camera connected
	if( state == 0 )
	{
		static int last_cam_opened = 0;

		webcam = cv::VideoCapture(  last_cam_opened );

		if( !webcam.isOpened() )
		{
		 	std::cout << "No camera detected under nmb: " << last_cam_opened << std::endl;

			last_cam_opened = ((last_cam_opened+1)%10);
			if( last_cam_opened == 0 ) // If we're trying the first video again, sleep a little bit
			{
				sleep(1);
			}

		}	
		else
		{
			state = 1;
		}

		return;
	}

	else if( state == 1) 
	{


		cv::Mat image;

		webcam >> image;

		if( image.data == NULL )
		{
			std::cout << "No image available" << std::endl;
			return;
		}

		cvtColor( image, image, CV_BGR2GRAY); // OpenCV captures deliver BGR frames

		// keep the original aspect ratio
		cv::resize( image, image, cv::Size( 250,250) );
	
		int buffer[3];
		buffer[0] = image.type();
		buffer[1] = image.cols;
		buffer[2] = image.rows;
	
		int ret = write( client_handle, buffer, sizeof(int)*3 );
	
		if( ret <= 0 )
		{
			std::cout << "Konnte nicht schreiben" << std::endl;
			return;
		}

		int bytes_to_write = buffer[1]*buffer[2];
		int bytes_written = 0;

		char* image_buffer = new char[bytes_to_write]; 	
	
		for( int i = 0; i < bytes_to_write; i++ )
		{
			int x = i % buffer[1]; // column index
			int y = i / buffer[1]; // row index (divide by cols, not rows)

			image_buffer[ i ] = image.at<uchar>(y, x); // Mat::at takes (row, col)
		}


		do{
			//std::cout 	<< "bytes to write: " << bytes_to_write
			//				<< "bytes written: " <<  bytes_written << std::endl;

				ret =  write( client_handle, 
						&image_buffer[bytes_written] , 
						bytes_to_write - bytes_written );

				if( ret < 0 )
				{
					// write failed; keep bytes_written unchanged and retry
				}
				else 
				{
					 bytes_written += ret;
				}

			} while( bytes_to_write != bytes_written );

		delete[] image_buffer; // allocated with new[]

		cv::waitKey( 100 );
	}
}
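The element-by-element copy above can be avoided when the matrix is stored contiguously; a sketch, assuming the single-channel 8-bit image produced earlier:

if (image.isContinuous()) {
    // one row after another in a single buffer: send it directly
    write(client_handle, image.data, image.total());
}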
Example #22
/**
 * @function main
 */
int main( int argc, char* argv[] ) {
  
  // Filter
  gFilterLimits.resize(6);
  //gFilterLimits << -0.35, 0.35, -0.70, 0.70, 1.5, 2.4; // Kinect
  gFilterLimits << -1.0, 1.0, -1.5, 1.5, 0.35, 2.0; // Asus on top of Crichton


  ObjectsDatabase mOd;
  mOd.init_classifier();
  mOd.load_dataset();

  gCapture.open( cv::CAP_OPENNI2 );
  
  if( !gCapture.isOpened() ) {
    printf("\t [ERROR] Could not open the capture object \n");
    return -1;
  }

  gCapture.set( cv::CAP_PROP_OPENNI2_MIRROR, 0.0 );
  gCapture.set( cv::CAP_PROP_OPENNI_REGISTRATION, -1.0 );
  gF = (float)gCapture.get( cv::CAP_OPENNI_DEPTH_GENERATOR_FOCAL_LENGTH );

  cv::namedWindow( gWindowName, cv::WINDOW_AUTOSIZE );
 
  ::google::InitGoogleLogging( argv[0] );
  
  for(;;) {

    if( !gCapture.grab() ) {
      printf("\t * ERROR Could not grab a frame \n");
      return -1;
    }

    gCapture.retrieve( gRgbImg, cv::CAP_OPENNI_BGR_IMAGE );
    if( gIsSegmentedFlag ) { drawSegmented(); }
    cv::imshow( gWindowName, gRgbImg );
    
    gCapture.retrieve( gPclMap, cv::CAP_OPENNI_POINT_CLOUD_MAP );



    char k = cv::waitKey(30);
    if( k == 'q' ) {
      printf("\t Finishing program \n");
      break;
    } 

    /** Recognize */
    else if( k == 'i' ) {

      // Process image
      process();
      gLabels.resize(gClusters.size() );
      gIndex.resize(gClusters.size() );      
      // Store images
      for( size_t i = 0; i < gClusters.size(); ++i ) {

        int xl = gBoundingBoxes[i](0);
        int yl = gBoundingBoxes[i](1);
        int xw = gBoundingBoxes[i](2) - gBoundingBoxes[i](0);
        int yw = gBoundingBoxes[i](3) - gBoundingBoxes[i](1);

        cv::Mat img( gRgbImg, cv::Rect( xl, yl, xw, yw ) );

        // Predict
        mOd.classify( img, gIndex[i], gLabels[i] );

        cv::putText( gRgbImg,
                     gLabels[i], cv::Point( gBoundingBoxes[i](0), gBoundingBoxes[i](1) ),
                     cv::FONT_HERSHEY_SIMPLEX, 1,
                     gColors[i],
                     2 );

        mOd.sayIt(gIndex[i]);
      }
      
      
      
    } // else
    
    
  } // for
  
} // main
Example #23
int main(int argc, char** argv)
{
	//ROS
    ROS_INFO("sks_vision without ui");
	ros::init(argc, argv, "sks_vision_nui");
	ros::NodeHandle nh;
    ros::Rate loop_rate(1000);
//	image_transport::ImageTransport it(nh);

    ros::Publisher pub_m = nh.advertise<geometry_msgs::Vector3>("/sks_vision",1);
    ros::Subscriber position_sub = nh.subscribe("/slam_out_pose",10,current_pos);

    nh.getParam("/SKS/vision/auto_gray",para_auto_gray);
    nh.getParam("/SKS/vision/gray_sum",para_threshold_num);

    while(1){
        cap.open(200); // 200 = CV_CAP_V4L, i.e. device 0 on the V4L backend
        if(cap.isOpened()) break;
        else std::cout<<"camera isn't opened"<<std::endl;
    }
    while (nh.ok()){
        cap >> cvframe;

        if(!cvframe.empty()) {
            frame = MatToQImage(cvframe);
        //    frame = func_Gray(frame);
            frame = func_DeepGray(frame);
            //==============================================================
                int blacknum=0;
            //    for(int h=0;h<frame.height();h+=2){
            //        for(int w=0;w<frame.width();w+=2){
            //            if(qRed(frame.pixel(w,h))<=64&&qRed(frame.pixel(w,h))>=27) blacknum++;//60,20
            //        }
            //    }
            //   std::cout<<blacknum<<std::endl;
            //===============================================================
            //Threshold_bar --> Threshold
            int gray;
            if(para_auto_gray){
                gray = para_threshold_num;
            }else{
//                if(blacknum>=BLACK_BLOCK){
//                    gray=70;
//                }else{
                gray = Average_Threshold(frame);
    //            gray = Qtsu(frame);
                if(cu_pos.pose.position.x<-1.07+0.2&&cu_pos.pose.position.x>-3.14-0.2&&
                           cu_pos.pose.position.y<3.87+0.2&&cu_pos.pose.position.y>1.13-0.2/*&& !goal_one_send*/){
                    gray=24;
                }else{
                    gray=58;
                }
//                    if(gray > THRESHOLD_MAX) gray = THRESHOLD_MAX;
//                    else if(gray < THRESHOLD_MIN) gray = THRESHOLD_MIN;

            }
            frame = func_Threshold(frame,gray);

            org_frame = frame;
            for(int dot=2;dot < frame.height()-2;dot+=(frame.height()/16)){
                for(int i=0;i<frame.width();i++){
                    frame.setPixel(i,dot,QColor(0,255,0).rgb());
                }
            }
            //reset
            memset(Position,0,sizeof(Position)); // memset takes a byte count, not an element count
            //Segmentation
            for(int h=2;h < frame.height()-2;h+=(frame.height()/16)){
                bw_sw = compareBlackWhite(org_frame,h);
                if(bw_sw==0){
                    Segmentation1(org_frame,&frame,h,255);
                }else if(bw_sw==1){
                    Segmentation1(org_frame,&frame,h,0);
                }
            }
            if(push_p>2){
                for(int i=0;i<push_p;i++){
                    total_x += Position[0][i];
                    total_y += Position[1][i];
                }
                double vector_x=total_x/push_p;
                double vector_y=total_y/push_p;
                double vector_h=frame.height()-1-vector_y;
                double vector_w=(ROBOT_CENTER-1-vector_x)/vector_h;

                for(int i=0;i<vector_h;i++){
                    frame.setPixel(ROBOT_CENTER-1-vector_w*i,frame.height()-1-i,QColor(0,0,255).rgb());
                }
                final_w = vector_x - (ROBOT_CENTER);
                final_h = frame.height() - 1 - vector_y;


                int sensor_x,sensor_y=100;
                sensor_x = sensorPoint(org_frame,&frame,sensor_y);
                final_w += sensor_x;
                sensor_y=379;
                final_h += sensor_y;
//                if(sensor_x!=0) final_h += sensor_y;
                sensor_y=380;
                sensor_x = sensorPoint(org_frame,&frame,sensor_y);
                final_w += sensor_x;
                sensor_y=99;
                final_h += sensor_y;
//                if(sensor_x!=0) final_h += sensor_y;

                ang=atan(final_w/final_h);

                frame.setPixel(vector_x-1,vector_y-1,QColor(0,0,255).rgb());
                frame.setPixel(vector_x-1,vector_y,QColor(0,0,255).rgb());
                frame.setPixel(vector_x-1,vector_y+1,QColor(0,0,255).rgb());
                frame.setPixel(vector_x,vector_y-1,QColor(0,0,255).rgb());
                frame.setPixel(vector_x,vector_y,QColor(0,0,255).rgb());
                frame.setPixel(vector_x,vector_y+1,QColor(0,0,255).rgb());
                frame.setPixel(vector_x+1,vector_y-1,QColor(0,0,255).rgb());
                frame.setPixel(vector_x+1,vector_y,QColor(0,0,255).rgb());
                frame.setPixel(vector_x+1,vector_y+1,QColor(0,0,255).rgb());
                vec3.z = ang;
            }else{
                vec3.z = 999;
            }
            //reset
            total_x =0,total_y =0,push_p =0;//,h_max=0;
            final_w =0,final_h =0;
            total_black=0;total_white=0;

            cvGray = QImageToCvMat(frame);
//            cv::imshow(WINDOW,cvGray);
//            cv::waitKey(1);

            //ROS
            pub_m.publish(vec3);
        }
        loop_rate.sleep();
        ros::spinOnce();
    }
}
Example #24
int main() {
	
	

	// open the camera
	video_capture.open(0);
	if (!video_capture.isOpened()){
		std::cerr << "Could not open video" << endl;
		return -1;
	}

	// grab the first frame; it is used to size the parameters below
	video_capture >> input_image;

	// read the camera parameters
	camera_params.readFromXMLFile("camera.yml");
	camera_params.resize(input_image.size());

	// create the display windows
	cv::namedWindow("thres",1);
	cv::namedWindow("in",1);

	marker_dector.getThresholdParams(threshold_param1, threshold_param2);
	i_threshold_param1 = threshold_param1;
	i_threshold_param2 = threshold_param2;

	cv::createTrackbar("ThresParam1", "in",&i_threshold_param1, 13, cvTackBarEvents);
	cv::createTrackbar("ThresParam2", "in",&i_threshold_param2, 13, cvTackBarEvents);

	char key=0;
	int index=0;

	//capture until press ESC or until the end of the video
	while( key!=27 && video_capture.grab())
	{
		//copy image
		video_capture.retrieve(input_image);
		
		//number of images captured
		index++; 

		//for checking the speed
		double tick = (double)cv::getTickCount();
		//Detection of markers in the image passed
		marker_dector.detect(input_image, markers, camera_params, marker_size);
		//check the speed by calculating the mean speed of all iterations
		average_time.first += ((double)cv::getTickCount() - tick) / cv::getTickFrequency();
		average_time.second++;
		std::cout << "Time detection=" << 1000 * average_time.first / average_time.second << " milliseconds" << endl;

		//print marker info and draw the markers in image
		input_image.copyTo(input_image_copy);
		for(unsigned int i = 0; i < markers.size(); i++){
			std::cout << markers[i] << std::endl;
			markers[i].draw(input_image, Scalar(0, 0, 255), 2);
		}

		//draw a 3d cube in each marker if there is 3d info
		if ( camera_params.isValid())
			for(unsigned int i = 0; i < markers.size(); i++){
				aruco::CvDrawingUtils::draw3dCube(input_image, markers[i], camera_params);
				aruco::CvDrawingUtils::draw3dAxis(input_image, markers[i], camera_params);
			}

		//DONE! Easy, right?
		cout << endl << endl << endl;
		//show input with augmented information and the thresholded image
		cv::imshow("in", input_image);
		cv::imshow("thres", marker_dector.getThresholdedImage());

		key = cv::waitKey(0); //wait for a key to be pressed
	}
	return 0;
}
Example #25
int main(int argc, char* argv[])
{
    signal(SIGINT, quitFunction);

    // Simple parsing of the parameters related to the image acquisition
    int xRes = 640;
    int yRes = 480;
    int cameraIndex = 0;
    if (argc > 2) {
        xRes = std::atoi(argv[1]);
        yRes = std::atoi(argv[2]);
    }
    if (argc > 3) {
        cameraIndex = std::atoi(argv[3]);
    }

    // The source of input images
    capture.open(cameraIndex);
    if (!capture.isOpened())
    {
        std::cerr << "Unable to initialise video capture." << std::endl;
        return 1;
    }
#ifdef OPENCV3
    capture.set(cv::CAP_PROP_FRAME_WIDTH, xRes);
    capture.set(cv::CAP_PROP_FRAME_HEIGHT, yRes);
#else
    capture.set(CV_CAP_PROP_FRAME_WIDTH, xRes);
    capture.set(CV_CAP_PROP_FRAME_HEIGHT, yRes);
#endif
    cv::Mat inputImage;

    // The tag detection happens in the Chilitags class.
    chilitags::Chilitags chilitags;

    // The detection is not perfect, so if a tag is not detected during one frame,
    // the tag will briefly disappear, which results in flickering.
    // To address this, Chilitags "cheats" by keeping tags for n frames
    // at the same position. When tags disappear for more than 5 frames,
    // Chilitags actually removes them.
    // Here, we cancel this to show the raw detection results.
    chilitags.setFilter(0, 0.0f);

    cv::namedWindow("DisplayChilitags");
    // Main loop, exiting when 'q' is pressed
    for (; 'q' != (char) cv::waitKey(1) && sRunning; ) {

        // Capture a new image.
        capture.read(inputImage);

        // Start measuring the time needed for the detection
        int64 startTime = cv::getTickCount();

        // Detect tags on the current image (and time the detection);
        // The resulting map associates tag ids (between 0 and 1023)
        // to four 2D points corresponding to the corners positions
        // in the picture.
        chilitags::TagCornerMap tags = chilitags.find(inputImage);

        // Measure the processing time needed for the detection
        int64 endTime = cv::getTickCount();
        float processingTime = 1000.0f*((float) endTime - startTime)/cv::getTickFrequency();


        // Now we start using the result of the detection.

        // First, we set up some constants related to the information overlaid
        // on the captured image
        const static cv::Scalar COLOR(255, 0, 255);
        // OpenCv can draw with sub-pixel precision with fixed point coordinates
        static const int SHIFT = 16;
        static const float PRECISION = 1<<SHIFT;

        // We don't want to draw directly on the input image, so we clone it
        cv::Mat outputImage = inputImage.clone();

        for (const std::pair<int, chilitags::Quad> & tag : tags) {

            int id = tag.first;
            // We wrap the corner matrix into a datastructure that allows an
            // easy access to the coordinates
            const cv::Mat_<cv::Point2f> corners(tag.second);

            // We start by drawing the borders of the tag
            for (size_t i = 0; i < 4; ++i) {
                cv::line(
                    outputImage,
                    PRECISION*corners(i),
                    PRECISION*corners((i+1)%4),
#ifdef OPENCV3
                    COLOR, 1, cv::LINE_AA, SHIFT);
#else
                    COLOR, 1, CV_AA, SHIFT);
#endif
            }

            // Other points can be computed from the four corners of the Quad.
            // Chilitags are oriented. It means that the points 0,1,2,3 of
            // the Quad coordinates are consistently the top-left, top-right,
            // bottom-right and bottom-left corners.
            // (i.e. clockwise, starting from top-left)
            // Using this, we can compute (an approximation of) the center of
            // the tag.
            cv::Point2f center = 0.5f*(corners(0) + corners(2));
            cv::putText(outputImage, cv::format("%d", id), center,
                        cv::FONT_HERSHEY_SIMPLEX, 0.5f, COLOR);
        }

        // Some stats on the current frame (resolution and processing time)
        cv::putText(outputImage,
                    cv::format("%dx%d %4.0f ms (press q to quit)",
                               outputImage.cols, outputImage.rows,
                               processingTime),
                    cv::Point(32,32),
                    cv::FONT_HERSHEY_SIMPLEX, 0.5f, COLOR);

        // Finally...
        cv::imshow("DisplayChilitags", outputImage);
    }

    cv::destroyWindow("DisplayChilitags");
    capture.release();

    return 0;
}
Example #26
int main(int argc, char**argv)
{
	capture.open(0);
	if (capture.isOpened() == false)
	{
		std::cerr << "no capture device found" << std::endl;
		return 1;
	}
	capture.set(cv::CAP_PROP_FRAME_WIDTH,  vgaSize.width);
	capture.set(cv::CAP_PROP_FRAME_HEIGHT, vgaSize.height);
	if (capture.get(cv::CAP_PROP_FRAME_WIDTH) != (double)vgaSize.width || capture.get(cv::CAP_PROP_FRAME_HEIGHT) != (double)vgaSize.height)
	{
		std::cerr << "current device doesn't support " << vgaSize.width << "x" << vgaSize.height << " size" << std::endl;
		return 2;
	}
	cv::Mat image;
	capture >> image;

	cv::namedWindow(windowName);
	cv::imshow(windowName, image);

	initCuda();
	initArray(image);

	char key = -1;
	enum device statusDevice = useCpuSimd;
	enum precision statusPrecision = precisionFloat;
	int index = 1;
	cv::Mat stub = cv::imread(imagePath[index][0], cv::IMREAD_UNCHANGED);
	cv::Mat gain = cv::Mat(stub.rows, stub.cols/2, CV_16SC1, stub.data);
	double elapsedTime = 0.0; // initialized in case no compute path runs
	while (isFinish(key) == false)
	{
		capture >> image;

		switch (key)
		{
		case 'h':
		case 'H':
			// switch to half precision
			statusPrecision = precisionHalf;
			std::cout << std::endl << header << "half  " << std::endl;
			stub = cv::imread(imagePath[index][0], cv::IMREAD_UNCHANGED);
			gain = cv::Mat(stub.rows, stub.cols/2, CV_16SC1, stub.data);
			break;
		case 'f':
		case 'F':
			// switch to single precision
			statusPrecision = precisionFloat;
			std::cout << std::endl << header << "single" << std::endl;
			stub = cv::imread(imagePath[index][1], cv::IMREAD_UNCHANGED);
			gain = cv::Mat(stub.rows, stub.cols, CV_32FC1, stub.data);
			break;
		case 'b':
		case 'B':
			// switch to gray gain
			statusPrecision = precisionByte;
			std::cout << std::endl << header << "char" << std::endl;
			gain = cv::imread(imagePath[index][2], cv::IMREAD_GRAYSCALE);
			break;
		case '0':
		case '1':
			index = key - '0';
			switch (statusPrecision)
			{
			case precisionHalf:
				// precision half
				stub = cv::imread(imagePath[index][0], cv::IMREAD_UNCHANGED);
				gain = cv::Mat(stub.rows, stub.cols/2, CV_16SC1, stub.data);
				break;
			case precisionFloat:
				// precision single
				stub = cv::imread(imagePath[index][1], cv::IMREAD_UNCHANGED);
				gain = cv::Mat(stub.rows, stub.cols, CV_32FC1, stub.data);
				break;
			case precisionByte:
				// precision byte
				gain = cv::imread(imagePath[index][2], cv::IMREAD_GRAYSCALE);
				break;
			default:
				break;
			}
			break;
		case 'c':
		case 'C':
			std::cout << std::endl << "Using CPU SIMD           " << std::endl;
			statusDevice = useCpuSimd;
			break;
		case 'g':
		case 'G':
			std::cout << std::endl << "Using GPU                " << std::endl;
			statusDevice = useGpu;
			break;
		default:
			break;
		}

		if (statusDevice == useCpuSimd)
		{
			elapsedTime = multiplyImage(image, gain);
		}
		else
		{
#ifdef HAVE_CUDA
			// CUDA
			elapsedTime = multiplyImageCuda(image, gain);
#endif // HAVE_CUDA
		}
		computeStatistics(elapsedTime, key);

		if (key == 's' || key == 'S')
		{
			cv::imwrite(dumpFilename, image);
		}

		cv::imshow(windowName, image);
		key = cv::waitKey(1);
	}
	std::cout << std::endl;
	cv::destroyAllWindows();
	releaseArray();

	return 0;
}
Example #27
// --------------------------------------------------------------------------
// display (no arguments)
// Called whenever the screen is refreshed
// Returns: nothing
// --------------------------------------------------------------------------
void display(void)
{
    // clear the drawing buffers
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

    if (cap.isOpened()) {
        // grab a camera frame
        cv::Mat image_raw;
        cap >> image_raw;

        // undistort the image
        cv::Mat image;
        cv::remap(image_raw, image, mapx, mapy, cv::INTER_LINEAR);

        // display the camera image (RGB)
        cv::Mat rgb;
        cv::cvtColor(image, rgb, cv::COLOR_BGR2RGB);
        cv::flip(rgb, rgb, 0);
        glDepthMask(GL_FALSE);
        glDrawPixels(rgb.cols, rgb.rows, GL_RGB, GL_UNSIGNED_BYTE, rgb.data);

        // BGRA image
        cv::Mat bgra;
        cv::cvtColor(image, bgra, cv::COLOR_BGR2BGRA);

        // hand the frame data over
        BGRAVideoFrame frame;
        frame.width = bgra.cols;
        frame.height = bgra.rows;
        frame.data = bgra.data;
        frame.stride = bgra.step;

        // detect markers
        MarkerDetector detector(calibration);
        detector.processFrame(frame);
        std::vector<Transformation> transformations = detector.getTransformations();

        // compute the projection matrix
        Matrix44 projectionMatrix = buildProjectionMatrix(calibration.getIntrinsic(), frame.width, frame.height);

        // apply the projection matrix
        glMatrixMode(GL_PROJECTION);
        glLoadMatrixf(projectionMatrix.data);

        // set up the modelview matrix
        glMatrixMode(GL_MODELVIEW);
        glLoadIdentity();

        // enable depth writes
        glDepthMask(GL_TRUE);

        // enable vertex arrays
        glEnableClientState(GL_VERTEX_ARRAY);
        glEnableClientState(GL_COLOR_ARRAY);

        // save the modelview matrix
        glPushMatrix();

        // set the line width
        glLineWidth(3.0f);

        // line vertex arrays
        float lineX[] = { 0, 0, 0, 1, 0, 0 };
        float lineY[] = { 0, 0, 0, 0, 1, 0 };
        float lineZ[] = { 0, 0, 0, 0, 0, 1 };

        // 2D quad
        const GLfloat squareVertices[] = { -0.5f, -0.5f,
                                            0.5f, -0.5f,
                                           -0.5f,  0.5f,
                                            0.5f,  0.5f };

        // 2D quad colors (RGBA)
        const GLubyte squareColors[] = { 255, 255,   0, 255,
                                           0, 255, 255, 255,
                                           0,   0,   0,   0,
                                         255,   0, 255, 255 };

        // AR rendering
        for (size_t i = 0; i < transformations.size(); i++) {
            // transformation matrix
            Matrix44 glMatrix = transformations[i].getMat44();

            // load it into the modelview matrix
            glLoadMatrixf(reinterpret_cast<const GLfloat*>(&glMatrix.data[0]));

            // draw the 2D quad
            glEnableClientState(GL_COLOR_ARRAY);
            glVertexPointer(2, GL_FLOAT, 0, squareVertices);
            glColorPointer(4, GL_UNSIGNED_BYTE, 0, squareColors);
            glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
            glDisableClientState(GL_COLOR_ARRAY);

            // scale for the coordinate axes
            float scale = 0.5;
            glScalef(scale, scale, scale);

            // shift slightly so the axes are visible from the camera
            glTranslatef(0, 0, 0.1f);

            // X axis
            glColor4f(1.0f, 0.0f, 0.0f, 1.0f);
            glVertexPointer(3, GL_FLOAT, 0, lineX);
            glDrawArrays(GL_LINES, 0, 2);

            // Y axis
            glColor4f(0.0f, 1.0f, 0.0f, 1.0f);
            glVertexPointer(3, GL_FLOAT, 0, lineY);
            glDrawArrays(GL_LINES, 0, 2);

            // Z axis
            glColor4f(0.0f, 0.0f, 1.0f, 1.0f);
            glVertexPointer(3, GL_FLOAT, 0, lineZ);
            glDrawArrays(GL_LINES, 0, 2);
        }

        // disable vertex arrays
        glDisableClientState(GL_VERTEX_ARRAY);

        // restore the modelview matrix
        glPopMatrix();
    }