void XtionDepthDriverImpl::setDepthCameraResolution(const DepthResolution resolution)
{
  _stream.stop();

  VideoMode mode = _stream.getVideoMode();

  if(resolution == DEPTH_RESOLUTION_320_240) {
    mode.setResolution(320, 240);
  } else if(resolution == DEPTH_RESOLUTION_640_480) {
    mode.setResolution(640, 480);
  } else {
    _stream.start();
    throw Exception("Invalid resolution");
  }

  Status rc = _stream.setVideoMode(mode);
  if (rc != STATUS_OK) {
    _stream.start();
    throw Exception(std::string("Set the resolution failed with\n")
      + OpenNI::getExtendedError());
  }
  
  rc = _stream.start();
  if(rc != STATUS_OK) {
    // The restart failed and the stream is unusable: close the device and report the error.
    close();
    
    throw Exception("Unable to start the depth stream");
  }
}
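
If the restart in that last error path fails too, there is little to do but close the device, as the code does. A hedged alternative sketch (illustrative only, not part of the original driver; it assumes the same openni::VideoStream member) would centralize the restart with a scope guard:

// Hypothetical RAII guard: restarts the stream on scope exit unless dismissed.
class StreamRestartGuard {
public:
  explicit StreamRestartGuard(openni::VideoStream& s) : _s(s), _armed(true) {}
  ~StreamRestartGuard() { if (_armed) _s.start(); }
  void dismiss() { _armed = false; }  // call once the new mode is running
private:
  openni::VideoStream& _s;
  bool _armed;
};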
Example #2
void TouchTracking::initializeVideo()
{
    OpenNI::initialize();

    if (device.open(ANY_DEVICE) != STATUS_OK)
    {
        throw std::runtime_error("could not open any device!");
    }

    if (!device.hasSensor(SENSOR_DEPTH))
    {
        throw std::runtime_error("sensor cannot receive depth!");
    }

    auto info = device.getSensorInfo(SENSOR_DEPTH);
    auto& modes = info->getSupportedVideoModes();
    //std::cout << "depth sensor supported modes:\r\n";
    for (int i = 0; i < modes.getSize(); ++i)
    {
        m_videoModes.push_back(modes[i]);
        //std::cout << "pixel format: " << mode.getPixelFormat() << "\t with: " << mode.getResolutionX() << "x" << mode.getResolutionY() << "@" << mode.getFps() << " fps\r\n";
    }

    VideoMode mode;
    mode.setFps(60);
    mode.setPixelFormat(PIXEL_FORMAT_DEPTH_1_MM);
    mode.setResolution(320, 240);
    videoMode(mode);

    stream.setMirroringEnabled(false);
}
Example #3
void DeviceController::printVideoMode(VideoMode mode)
{
	PixelFormat pixelFormat = mode.getPixelFormat();
	string pixelFormatName;
	switch (pixelFormat) 
	{
		case PIXEL_FORMAT_DEPTH_1_MM:
		{
			pixelFormatName = "PIXEL_FORMAT_DEPTH_1_MM";
			break;
		}
		case PIXEL_FORMAT_DEPTH_100_UM:
		{
			pixelFormatName = "PIXEL_FORMAT_DEPTH_100_UM";
			break;
		}
		case PIXEL_FORMAT_SHIFT_9_2:
		{
			pixelFormatName = "PIXEL_FORMAT_SHIFT_9_2";
			break;
		}
		case PIXEL_FORMAT_SHIFT_9_3:
		{
			pixelFormatName = "PIXEL_FORMAT_SHIFT_9_3";
			break;
		}
		case PIXEL_FORMAT_RGB888:
		{
			pixelFormatName = "PIXEL_FORMAT_RGB888";
			break;
		}
		case PIXEL_FORMAT_YUV422:
		{
			pixelFormatName = "PIXEL_FORMAT_YUV422";
			break;
		}
		case PIXEL_FORMAT_GRAY8:
		{
			pixelFormatName = "PIXEL_FORMAT_GRAY8";
			break;
		}
		case PIXEL_FORMAT_GRAY16:
		{
			pixelFormatName = "PIXEL_FORMAT_GRAY16";
			break;
		}
		case PIXEL_FORMAT_JPEG:
		{
			pixelFormatName = "PIXEL_FORMAT_JPEG";
			break;
		}
			
		default:
		{
			pixelFormatName = "UNKNOWN";
			break;
		}
	}
	ofLogVerbose() << "PixelFormat: "	<< pixelFormatName;
	ofLogVerbose() << "ResolutionX: "	<< mode.getResolutionX();
	ofLogVerbose() << "ResolutionY: "	<< mode.getResolutionY();
	ofLogVerbose() << "FPS: "			<< mode.getFps();
}
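
The switch in Example #3 spells out each enum-to-name mapping case by case. A table-driven sketch of the same mapping (an illustrative alternative, not the project's code; it assumes <map> and <string> are included and the same OpenNI enumerators):

// Illustrative lookup table replacing the switch; unknown formats map to "UNKNOWN".
static std::string pixelFormatName(PixelFormat f)
{
	static const std::map<PixelFormat, std::string> names = {
		{ PIXEL_FORMAT_DEPTH_1_MM,   "PIXEL_FORMAT_DEPTH_1_MM" },
		{ PIXEL_FORMAT_DEPTH_100_UM, "PIXEL_FORMAT_DEPTH_100_UM" },
		{ PIXEL_FORMAT_SHIFT_9_2,    "PIXEL_FORMAT_SHIFT_9_2" },
		{ PIXEL_FORMAT_SHIFT_9_3,    "PIXEL_FORMAT_SHIFT_9_3" },
		{ PIXEL_FORMAT_RGB888,       "PIXEL_FORMAT_RGB888" },
		{ PIXEL_FORMAT_YUV422,       "PIXEL_FORMAT_YUV422" },
		{ PIXEL_FORMAT_GRAY8,        "PIXEL_FORMAT_GRAY8" },
		{ PIXEL_FORMAT_GRAY16,       "PIXEL_FORMAT_GRAY16" },
		{ PIXEL_FORMAT_JPEG,         "PIXEL_FORMAT_JPEG" },
	};
	auto it = names.find(f);
	return it != names.end() ? it->second : "UNKNOWN";
}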
Example #4
void XtionDepthDriverImpl::open()
{
  if(_device.isValid()) return;
  
  Status rc = _device.open(ANY_DEVICE);
  if(rc != STATUS_OK) {
    throw Exception(std::string("Open the device failed with\n")
      + OpenNI::getExtendedError());
  }

  if(!_device.getSensorInfo(SENSOR_DEPTH)) {
    _device.close();
    throw Exception("Device has no depth sensor!");
  }

  rc = _stream.create(_device, SENSOR_DEPTH);
  if(rc != STATUS_OK) {
    _device.close();
    throw Exception(std::string("Create the depth stream failed with\n")
      + OpenNI::getExtendedError());
  }

  VideoMode mode = _stream.getVideoMode();
  mode.setPixelFormat(PIXEL_FORMAT_DEPTH_1_MM);

  rc = _stream.setVideoMode(mode);
  if(rc != STATUS_OK) {
    _stream.destroy();
    _device.close();

    throw Exception(std::string("Set the pixel format to "
      "PIXEL_FORMAT_DEPTH_1_MM failed with\n")
      + OpenNI::getExtendedError());
  }

  rc = _stream.start();
  if(rc != STATUS_OK) {
    _stream.destroy();
    _device.close();

    throw Exception(std::string("Starting the depth stream failed with\n")
      + OpenNI::getExtendedError());
  }

  rc = _stream.addNewFrameListener(this);
  if(rc != STATUS_OK) {
    _stream.stop();
    _stream.destroy();
    _device.close();

    throw Exception(std::string("Adding the frame listener failed with\n")
      + OpenNI::getExtendedError());
  }
}
Example #5
DepthResolution XtionDepthDriverImpl::depthCameraResolution() const
{
  const VideoMode mode = _stream.getVideoMode();

  int res_x = mode.getResolutionX();
  int res_y = mode.getResolutionY();

  if((res_x == 320) && (res_y == 240)) {
    return DEPTH_RESOLUTION_320_240;
  } else if((res_x == 640) && (res_y == 480)) {
    return DEPTH_RESOLUTION_640_480;
  }
  
  return DEPTH_INVALID_RESOLUTION;
}
Example #6
File: avform.cpp Project: Pik-9/qTox
void AVForm::open(const QString &devName, const VideoMode &mode)
{
    QRect rect = mode.toRect();
    Settings::getInstance().setCamVideoRes(rect);
    Settings::getInstance().setCamVideoFPS(static_cast<quint16>(mode.FPS));
    camera.open(devName, mode);
}
Example #7
void Window::create(VideoMode mode, const String &title, uint32 style, const ContextSettings &settings) {
    close();

    if (style & WindowStyle::Fullscreen) {
        if (fullscreenWindow) {
            std::cerr << "Creating two fullscreen windows is not allowed, switching to windowed mode\n";
            style &= ~WindowStyle::Fullscreen;
        } else {
            if (!mode.isValid()) {
                std::cerr << "The requested video mode is not available, switching to a valid mode.\n";
                mode = VideoMode::getFullScreenModes()[0];
            }

            fullscreenWindow = this;
        }
    }

#	if defined (Q_IOS) || defined(Q_ANDROID)
    if (style & WindowStyle::Fullscreen) {
        style &= ~WindowStyle::Fullscreen;
    } else {
        style |= WindowStyle::Titlebar;
    }
#	else
    if ((style & WindowStyle::Close) || (style & WindowStyle::Resize)) {
        style |= WindowStyle::Titlebar;
    }
#	endif

    mBase = WindowBase::createWindow(mode, title, style, settings);
    mContext = GLContext::create(settings, mBase, mode.bpp);

    init(mode.maximize);
}
Example #8
void Renderer::initialize(bool fs, int mode) {
  fullscreen = fs;
  if (!renderThreadId)
    renderThreadId = currentThreadId();
  else
    CHECK(currentThreadId() == *renderThreadId);
  CHECK(!getResolutions().empty()) << sf::VideoMode::getFullscreenModes().size() << " " << int(sf::VideoMode::getDesktopMode().bitsPerPixel);
  CHECK(mode >= 0 && mode < getResolutions().size()) << mode << " " << getResolutions().size();
  VideoMode vMode = getResolutions()[mode];
  CHECK(vMode.isValid()) << "Video mode invalid: " << int(vMode.width) << " " << int(vMode.height) << " " << int(vMode.bitsPerPixel) << " " << fs;
  if (fullscreen)
    display.create(vMode, "KeeperRL", sf::Style::Fullscreen);
  else
    display.create(sf::VideoMode::getDesktopMode(), "KeeperRL");
  sfView = new sf::View(display.getDefaultView());
  display.setVerticalSyncEnabled(true);
}
Example #9
	Game() {
		getLog().addDevice( new StreamLogDevice(std::cout) );

		win_.reset		( new Window( *this ) );
		inp_.reset		( new Input( *this ) );
		scene_.reset	( new ChcSceneManager(*this) );
		asmg_.reset		( new AssetManager( *this ) );

		asmg_->addContentFactory< ImageContent >( "img" );
		auto vms = VideoMode::getAvailable();
		
		std::sort( vms.begin(), vms.end(),
			[]( VideoMode &a, VideoMode &b ) {
				return a.getWidth() < b.getWidth();
			}
		);
		VideoMode vm = vms.back();
		vm.setFullscreen( true );
		//vm.setSize( 1920, 800 );
		vm.setVSync(true);
		vm.setDecorated( true );
		vm.setBpp( 32 );
		
		win_->setVideoMode( vm );
		
		addTask( *win_ );
		addTask( *inp_ );

		win_->addTask( *scene_ );
		
		inp_->addInputListener(*this);

		setupGL();
		loadTexture();
		createCamera();
		createLevel();

		ctrl_.reset( new CameraControl( *this ) );
		ctrl_->connect();
		

		BlockPlacer *pl = new BlockPlacer( *world_ );
		
		inp_->addInputListener( *pl );
		scene_->getRoot().createChild().attachEntity( *pl );		
	}
Example #10
File: avform.cpp Project: Pik-9/qTox
void AVForm::on_videoModescomboBox_currentIndexChanged(int index)
{
    assert(0 <= index && index < videoModes.size());
    int devIndex = videoDevCombobox->currentIndex();
    assert(0 <= devIndex && devIndex < videoDeviceList.size());

    QString devName = videoDeviceList[devIndex].first;
    VideoMode mode = videoModes[index];

    if (CameraDevice::isScreen(devName) && mode == VideoMode())
    {
        if (Settings::getInstance().getScreenGrabbed())
        {
            VideoMode mode(Settings::getInstance().getScreenRegion());
            open(devName, mode);
            return;
        }

        // note: grabber is self-managed and will destroy itself when done
        ScreenshotGrabber* screenshotGrabber = new ScreenshotGrabber;

        auto onGrabbed = [screenshotGrabber, devName, this] (QRect region)
        {
            VideoMode mode(region);
            mode.width = mode.width / 2 * 2;
            mode.height = mode.height / 2 * 2;

            // Needed, if the virtual screen origin is the top left corner of the primary screen
            QRect screen = QApplication::primaryScreen()->virtualGeometry();
            mode.x += screen.x();
            mode.y += screen.y();

            Settings::getInstance().setScreenRegion(mode.toRect());
            Settings::getInstance().setScreenGrabbed(true);

            open(devName, mode);
        };

        connect(screenshotGrabber, &ScreenshotGrabber::regionChosen, this, onGrabbed, Qt::QueuedConnection);
        screenshotGrabber->showGrabber();
        return;
    }

    Settings::getInstance().setScreenGrabbed(false);
    open(devName, mode);
}
Example #11
File: Window.cpp Project: 42bottles/SFML
void Window::create(VideoMode mode, const String& title, Uint32 style, const ContextSettings& settings)
{
    // Destroy the previous window implementation
    close();

    // Fullscreen style requires some tests
    if (style & Style::Fullscreen)
    {
        // Make sure there's not already a fullscreen window (only one is allowed)
        if (fullscreenWindow)
        {
            err() << "Creating two fullscreen windows is not allowed, switching to windowed mode" << std::endl;
            style &= ~Style::Fullscreen;
        }
        else
        {
            // Make sure that the chosen video mode is compatible
            if (!mode.isValid())
            {
                err() << "The requested video mode is not available, switching to a valid mode" << std::endl;
                mode = VideoMode::getFullscreenModes()[0];
            }

            // Update the fullscreen window
            fullscreenWindow = this;
        }
    }

    // Check validity of style according to the underlying platform
    #if defined(SFML_SYSTEM_IOS) || defined(SFML_SYSTEM_ANDROID)
        if (style & Style::Fullscreen)
            style &= ~Style::Titlebar;
        else
            style |= Style::Titlebar;
    #else
        if ((style & Style::Close) || (style & Style::Resize))
            style |= Style::Titlebar;
    #endif

    // Recreate the window implementation
    m_impl = priv::WindowImpl::create(mode, title, style, settings);

    // Recreate the context
    m_context = priv::GlContext::create(settings, m_impl, mode.bitsPerPixel);

    // Perform common initializations
    initialize();
}
Example #12
void CVideoSourceKinect::initPlayer(std::string& strPathFileName_){
	init();
	_nMode = PLAYING_BACK;

	PRINTSTR("Initialize OpenNI Player...");
	// initialization
	Status nRetVal = openni::OpenNI::initialize();
	printf("After initialization:\n%s\n", openni::OpenNI::getExtendedError());
	nRetVal = _device.open(strPathFileName_.c_str());		CHECK_RC_(nRetVal, "Open oni file");
	nRetVal = _depth.create(_device, openni::SENSOR_DEPTH);	CHECK_RC_(nRetVal, "Create depth stream");
	nRetVal = _color.create(_device, openni::SENSOR_COLOR);	CHECK_RC_(nRetVal, "Create color stream");

	nRetVal = _depth.start(); CHECK_RC_(nRetVal, "Start depth video stream");
	nRetVal = _color.start(); CHECK_RC_(nRetVal, "Start color video stream");

	if (_depth.isValid() && _color.isValid())
	{
		VideoMode depthVideoMode = _depth.getVideoMode();
		VideoMode colorVideoMode = _color.getVideoMode();

		int depthWidth = depthVideoMode.getResolutionX();
		int depthHeight = depthVideoMode.getResolutionY();
		int colorWidth = colorVideoMode.getResolutionX();
		int colorHeight = colorVideoMode.getResolutionY();

		if (depthWidth != colorWidth || depthHeight != colorHeight)
		{
			printf("Warning - expect color and depth to be in same resolution: D: %dx%d, C: %dx%d\n",
				depthWidth, depthHeight,
				colorWidth, colorHeight);
			//return ;
		}
	}

	_streams = new VideoStream*[2];
	_streams[0] = &_depth;
	_streams[1] = &_color;

	// set to the highest resolution (index 0 is 640x480)
	//register the depth generator with the image generator
	if ( _nRawDataProcessingMethod == BIFILTER_IN_ORIGINAL || _nRawDataProcessingMethod == BIFILTER_IN_DISPARITY ){
		nRetVal = _device.setImageRegistrationMode(IMAGE_REGISTRATION_DEPTH_TO_COLOR);

		//nRetVal = _depth.GetAlternativeViewPointCap().getGLModelViewMatrixPoint ( _color );	CHECK_RC_ ( nRetVal, "Getting and setting AlternativeViewPoint failed: " ); 
		// Set Hole Filter
		_device.setDepthColorSyncEnabled(TRUE);
	}//if (_bUseNIRegistration)
	PRINTSTR(" Done.");

	return;
}//initPlayer()
Example #13
void Window::create(VideoMode mode, const String& title, Uint32 style, const ContextSettings& settings)
{
    // Destroy the previous window implementation
    close();

    // Fullscreen style requires some tests
    if (style & Style::Fullscreen)
    {
        // Make sure there's not already a fullscreen window (only one is allowed)
        if (fullscreenWindow)
        {
            err() << "Creating two fullscreen windows is not allowed, switching to windowed mode" << std::endl;
            style &= ~Style::Fullscreen;
        }
        else
        {
            // Make sure that the chosen video mode is compatible
            if (!mode.isValid())
            {
                err() << "The requested video mode is not available, switching to a valid mode" << std::endl;
                mode = VideoMode::getFullscreenModes()[0];
            }

            // Update the fullscreen window
            fullscreenWindow = this;
        }
    }

    // Check validity of style
    if ((style & Style::Close) || (style & Style::Resize))
        style |= Style::Titlebar;

    // Recreate the window implementation
    m_impl = priv::WindowImpl::create(mode, title, style);
    m_impl->onDragDrop.connect(sigc::mem_fun(this, &Window::RedirectDragDrop));

    // Recreate the context
    m_context = priv::GlContext::create(settings, m_impl, mode.bitsPerPixel);

    // Perform common initializations
    initialize();
}
Example #14
void Window::Create(VideoMode mode, const std::string& title, unsigned long style, const ContextSettings& settings)
{
    // Destroy the previous window implementation
    Close();

    // Fullscreen style requires some tests
    if (style & Style::Fullscreen)
    {
        // Make sure there's not already a fullscreen window (only one is allowed)
        if (fullscreenWindow)
        {
            Err() << "Creating two fullscreen windows is not allowed, switching to windowed mode" << std::endl;
            style &= ~Style::Fullscreen;
        }
        else
        {
            // Make sure that the chosen video mode is compatible
            if (!mode.IsValid())
            {
                Err() << "The requested video mode is not available, switching to a valid mode" << std::endl;
                mode = VideoMode::GetFullscreenModes()[0];
            }

            // Update the fullscreen window
            fullscreenWindow = this;
        }
    }

    // Check validity of style
    if ((style & Style::Close) || (style & Style::Resize))
        style |= Style::Titlebar;

    // Recreate the window implementation
    myWindow = priv::WindowImpl::New(mode, title, style);

    // Recreate the context
    myContext = priv::GlContext::New(myWindow, mode.BitsPerPixel, settings);

    // Perform common initializations
    Initialize();
}
Example #15
void Window::create(VideoMode mode, const std::string& title, Uint32 style, const ContextSettings& settings)
{
    // Destroy the previous window implementation
    close();

    // Fullscreen style requires some tests
    if (style & Style::Fullscreen)
    {
        // Make sure there's not already a fullscreen window (only one is allowed)
        if (fullscreenWindow)
        {
            style &= ~Style::Fullscreen;
        }
        else
        {
            // Make sure that the chosen video mode is compatible
            if (!mode.isValid())
            {
                mode = VideoMode::getFullscreenModes()[0];
            }

            // Update the fullscreen window
            fullscreenWindow = this;
        }
    }

    // Check validity of style
    if ((style & Style::Close) || (style & Style::Resize))
        style |= Style::Titlebar;

    // Recreate the window implementation
    m_impl = priv::WindowImplWin32::create(mode, title, style, settings);

    // Recreate the context
    m_context = priv::GlContext::create(settings, m_impl, mode.bitsPerPixel);

    // Perform common initializations
    initialize();
}
Example #16
////////////////////////////////////////////////////////////
/// Create the window
////////////////////////////////////////////////////////////
void Window::Create(VideoMode Mode, const std::string& Title, unsigned long WindowStyle, const WindowSettings& Params)
{
    // Destroy the previous window implementation
    Close();

    // Fullscreen style requires some tests
    if (WindowStyle & Style::Fullscreen)
    {
        // Make sure there's not already a fullscreen window (only one is allowed)
        if (FullscreenWindow)
        {
            std::cerr << "Creating two fullscreen windows is not allowed, switching to windowed mode" << std::endl;
            WindowStyle &= ~Style::Fullscreen;
        }
        else
        {
            // Make sure the chosen video mode is compatible
            if (!Mode.IsValid())
            {
                std::cerr << "The requested video mode is not available, switching to a valid mode" << std::endl;
                Mode = VideoMode::GetMode(0);
            }

            // Update the fullscreen window
            FullscreenWindow = this;
        }
    }

    // Check validity of style
    if ((WindowStyle & Style::Close) || (WindowStyle & Style::Resize))
        WindowStyle |= Style::Titlebar;

    // Activate the global context
    Context::GetGlobal().SetActive(true);

    mySettings = Params;
    Initialize(priv::WindowImpl::New(Mode, Title, WindowStyle, mySettings));
}
Example #17
	void Window::setVideoMode( const VideoMode& mode ) {
		log << "Setting video mode " << mode << ".\n";

		if(!mode.isValid()) {
			log << "VideoModeNotSupportedException raised.\n";
			throw VideoModeNotSupportedException();
		}

		if( mode == mode_ ) {
			log << "No changes detected. Not doing anything.\n";
			return;
		}

		bool opOk;
		int w = mode.getWidth();
		int h = mode.getHeight();
		int bpp = mode.getBpp();
		int flags = SDL_OPENGL;

		if( mode.isFullscreen() )
			flags |= SDL_FULLSCREEN;

		if( !mode.isDecorated() )
			flags |= SDL_NOFRAME;

		_putenv(_strdup("SDL_VIDEO_CENTERED=1")); 
		SDL_GL_SetAttribute(SDL_GL_DOUBLEBUFFER,1);
		SDL_GL_SetAttribute(SDL_GL_SWAP_CONTROL,mode.hasVSync()?1:0);
		
		opOk = SDL_SetVideoMode(w,h,bpp,flags) != 0;
		if( !opOk ) {
			log << "Warning: Mode reported it was valid, but SDL_SetVideoMode() failed.\n";
			throw VideoModeNotSupportedException();
		}

		mode_ = mode;
		log << "Video mode set.\n";
	}
Example #18
int main()
{
	FILE *fptrI = fopen("C:\\Users\\Alan\\Documents\\ShapeFeatures.csv","w");
	fprintf(fptrI, "Classtype, Area, Perimeter, Circularity, Extent\n");
	fclose(fptrI);

	Mat input = imread("C:\\Users\\Alan\\Pictures\\Science Fair 2014\\SVM\\Shape Features\\Fingers.bmp", 1);
	Mat input2 = imread("C:\\Users\\Alan\\Pictures\\Science Fair 2014\\SVM\\Shape Features\\NotFingers.bmp", 1);
	Mat inputF = imread("C:\\Users\\Alan\\Pictures\\Science Fair 2014\\SVM\\Shape Features\\ImageFeaturesBinaryF.bmp", 1);
	Mat gray(input.rows, input.cols, CV_8UC3);
	Mat gray2(input.rows, input.cols, CV_8UC3);
	Mat grayF(input.rows, input.cols, CV_8UC3);
	cvtColor(input, gray, CV_BGR2GRAY);
	cvtColor(input2, gray2, CV_BGR2GRAY);
	cvtColor(inputF, grayF, CV_BGR2GRAY);
	shapeFeatures(gray, input, 1);
	shapeFeatures(gray2, input2, 2);
	namedWindow("Image");
	imshow("Image", input);
	namedWindow("Image2");
	imshow("Image2", input2);

	//------------------------------------------------------
	//--------[SVM]--------
	// Read input data from file created above
	double parameters[5];
	vector<double> svmI, svmA, svmP, svmC, svmE;
	int size = 1;
	double index = 0; double area = 0; double perimeter = 0; double circularity = 0;
	char buffer[1024];
	char *record, *line;
	FILE* fptrR = fopen("C:\\Users\\Alan\\Documents\\ShapeFeatures.csv", "r");
	fscanf(fptrR, "%*[^\n]\n");	// skip the CSV header line

	svmI.resize(size); svmA.resize(size); svmP.resize(size); svmC.resize(size); 

	while((line=fgets(buffer, sizeof(buffer), fptrR))!=NULL)
	{
		size++;
		svmI.resize(size);
		svmA.resize(size);
		svmP.resize(size);
		svmC.resize(size);
		svmE.resize(size);

		record = strtok(line, ";");
		for(int i = 0; i < 5; i++);
		{
			double value = atoi(record);
			record = strtok(line,";");
		}
		char *lineCopy = record;
		char *pch;

		pch = strtok(lineCopy, ",");
		parameters[0] = atoi(pch);
		
		int j = 1;
		while( j < 5 )
		{
			pch = strtok (NULL, ",");
			parameters[j] = atof(pch);
			j++;
		}
		svmI[size-1] = parameters[0];
		svmA[size-1] = parameters[1];
		svmP[size-1] = parameters[2];
		svmC[size-1] = parameters[3];
		svmE[size-1] = parameters[4];
	}
	fclose(fptrR);
	//---------------------
	// Data for visual representation
    int width = 512, height = 512;
    Mat image = Mat::zeros(height, width, CV_8UC3);

    // Set up training data
    //float labels[8] = {1.0, -1.0, -1.0, -1.0};
	float labels[1000];
	for(int i = 0; i < svmI.size()-1; i++)
	{
		labels[i] = svmI[i+1];
	}
    Mat labelsMat(1000, 1, CV_32FC1, labels);

    float trainingData[1000][4];
	for(int i = 0; i < svmE.size()-1; i++)
	{
		trainingData[i][0] = svmE[i+1];
		trainingData[i][1] = svmC[i+1];
		trainingData[i][2] = svmA[i+1];
		trainingData[i][3] = svmP[i+1];
	}
    Mat trainingDataMat(1000, 4, CV_32FC1, trainingData);

    // Set up SVM's parameters
    CvSVMParams params;
	params = SVMFinger.get_params();
    //params.svm_type    = CvSVM::C_SVC;
    //params.kernel_type = CvSVM::LINEAR;
    //params.term_crit   = cvTermCriteria(CV_TERMCRIT_ITER, 100, 1e-6);

    // Train the SVM
    SVMFinger.train_auto(trainingDataMat, labelsMat, Mat(), Mat(), params);

//	Mat sampleMat = (Mat_<float>(1,2) << 138.5, 57);
//	float response = SVMFinger.predict(sampleMat);

	waitKey();
	destroyWindow("Image");
	destroyWindow("Image2");

	//------------------------------------------
	OpenNI::initialize();

	Device devAnyDevice;
    devAnyDevice.open(ANY_DEVICE);

	//----------------[Define Video Settings]-------------------
	//Set Properties of Depth Stream
	VideoMode mModeDepth;
	mModeDepth.setResolution( 640, 480 );
	mModeDepth.setFps( 30 );
	mModeDepth.setPixelFormat( PIXEL_FORMAT_DEPTH_100_UM );

	//Set Properties of Color Stream
	VideoMode mModeColor;
    mModeColor.setResolution( 640, 480 );
    mModeColor.setFps( 30 );
    mModeColor.setPixelFormat( PIXEL_FORMAT_RGB888 );
	//----------------------------------------------------------
	//----------------------[Initial Streams]---------------------
	VideoStream streamInitDepth;
    streamInitDepth.create( devAnyDevice, SENSOR_DEPTH );

	VideoStream streamInitColor;
    streamInitColor.create( devAnyDevice, SENSOR_COLOR );

	streamInitDepth.setVideoMode( mModeDepth );
	streamInitColor.setVideoMode( mModeColor );

	namedWindow( "Depth Image (Init)",  CV_WINDOW_AUTOSIZE );
    namedWindow( "Color Image (Init)",  CV_WINDOW_AUTOSIZE );
	//namedWindow( "Thresholded Image (Init)", CV_WINDOW_AUTOSIZE );

	VideoFrameRef  frameDepthInit;
    VideoFrameRef  frameColorInit;

	streamInitDepth.start();
	streamInitColor.start();
	cv::Mat BackgroundFrame;

	int avgDist = 0;
	int iMaxDepthInit = streamInitDepth.getMaxPixelValue();
	
	OutX.clear();
	OutY.clear();

	vector<int> OldOutX, OldOutY;
	OldOutX.clear();
	OldOutY.clear();
	//------------------------------------------------------------
	//--------------------[Initiation Process]--------------------
	while( true )
	{
		streamInitDepth.readFrame( &frameDepthInit );
		streamInitColor.readFrame( &frameColorInit );

		const cv::Mat mImageDepth( frameDepthInit.getHeight(), frameDepthInit.getWidth(), CV_16UC1, (void*)frameDepthInit.getData());

        cv::Mat mScaledDepth;
        mImageDepth.convertTo( mScaledDepth, CV_8U, 255.0 / iMaxDepthInit );

        cv::imshow( "Depth Image (Init)", mScaledDepth );

        const cv::Mat mImageRGB(frameColorInit.getHeight(), frameColorInit.getWidth(), CV_8UC3, (void*)frameColorInit.getData());

        cv::Mat cImageBGR;
        cv::cvtColor( mImageRGB, cImageBGR, CV_RGB2BGR );

		//--------------------[Get Average Distance]---------------------
		long long depthVal = 0;	// summed over the whole frame; a plain int could overflow
		int frameHeight = frameDepthInit.getHeight();
		int frameWidth = frameDepthInit.getWidth();
		//------------
		//backgroundDepth.resize(frameHeight * frameWidth);
		//---------------------------------------------------------------
		
		int initCount = 0;
		for(int i = 0; i < frameHeight; i++)
		{
			for(int j = 0; j < frameWidth; j++)
			{
				depthVal = mImageDepth.at<unsigned short>(i, j) + depthVal;
				initCount++;
			}
		}
		avgDist = depthVal / ((frameHeight) * (frameWidth));

		cout << "Average Distance: " << avgDist << endl;
		cv::imshow( "Color Image (Init)", cImageBGR );

		if( cv::waitKey(1) == 'q')
		{
			mImageDepth.copyTo(BackgroundFrame);
            break;
		}
	}

	streamInitDepth.destroy();
	streamInitColor.destroy();

	destroyWindow( "Depth Image (Init)" );
	destroyWindow( "Color Image (Init)" );

	VideoStream streamDepth;
    streamDepth.create( devAnyDevice, SENSOR_DEPTH );

	VideoStream streamColor;
    streamColor.create( devAnyDevice, SENSOR_COLOR );

	streamDepth.setVideoMode( mModeDepth );
	streamColor.setVideoMode( mModeColor );

	streamDepth.start();
    streamColor.start();

	namedWindow( "Depth Image",  CV_WINDOW_AUTOSIZE );
    namedWindow( "Color Image",  CV_WINDOW_AUTOSIZE );
	namedWindow( "Thresholded Image", CV_WINDOW_AUTOSIZE );

	int iMaxDepth = streamDepth.getMaxPixelValue();

    VideoFrameRef  frameColor;
	VideoFrameRef  frameDepth;

	OutX.clear();
	OutY.clear();
	//------------------------------------------------------------

	//------------------------------------------------------------
	//-----------------------[Main Process]-----------------------
	while( true ) 
    {
        streamDepth.readFrame( &frameDepth );
        streamColor.readFrame( &frameColor );

        const cv::Mat mImageDepth( frameDepth.getHeight(), frameDepth.getWidth(), CV_16UC1, (void*)frameDepth.getData());

        cv::Mat mScaledDepth;
        mImageDepth.convertTo( mScaledDepth, CV_8U, 255.0 / iMaxDepth );

		////////////////////////////////////////////////////////////////////////////////////////////
		//---------------------[Downsampling]-------------------------------------------------------
		double min;
		double max;
		cv::minMaxIdx(mImageDepth, &min, &max);
		cv::Mat adjMap;
		// expand your range to 0..255. Similar to histEq();
		float scale = 255 / (max-min);
		mImageDepth.convertTo(adjMap,CV_8UC1, scale, -min*scale); 

		// this is great. It converts your grayscale image into a tone-mapped one, 
		// much more pleasing for the eye
		// function is found in contrib module, so include contrib.hpp 
		// and link accordingly
		cv::Mat falseColorsMap;
		applyColorMap(adjMap, falseColorsMap, cv::COLORMAP_AUTUMN);

		cv::imshow("Out", falseColorsMap);
		//------------------------------------------------------------------------------------------
		////////////////////////////////////////////////////////////////////////////////////////////

        cv::imshow( "Depth Image", mScaledDepth );
		cv::imshow( "Depth Image2", adjMap );

        const cv::Mat mImageRGB(frameColor.getHeight(), frameColor.getWidth(), CV_8UC3, (void*)frameColor.getData());

        cv::Mat cImageBGR;
        cv::cvtColor( mImageRGB, cImageBGR, CV_RGB2BGR );
		
		//-------------[Threshold]-----------------
		cv::Mat mImageThres( frameDepth.getHeight(), frameDepth.getWidth(), CV_8UC1 );

		int backgroundPxlCount = 0;
		for(int i = 0; i < 480; i++)
		{
			for(int j = 0; j < 640; j++)
			{
				int depthVal = mImageDepth.at<unsigned short>(i, j);

				avgDist = BackgroundFrame.at<unsigned short>(i, j)-2;

				if((depthVal > (avgDist-14)) && (depthVal <= (avgDist-7)))
				{
					//mImageThres.data[mImageThres.step[0]*i + mImageThres.step[1]*j] = 255;
					mImageThres.at<uchar>(i, j) = 255;
				}
				else
				{
					//mImageThres.data[mImageThres.step[0]*i + mImageThres.step[1]*j] = 0;
					mImageThres.at<uchar>(i, j) = 0;
				}

				backgroundPxlCount++;
			}
		}
		GaussianBlur( mImageThres, mImageThres, Size(3,3), 0, 0 );
		
		fingerDetection( mImageThres, cImageBGR, OldOutX, OldOutY);

		cv::imshow("Thresholded Image", mImageThres);
		//----------------------------------------
        if( cv::waitKey(1) == 'q')
		{
            break;
		}
		//------------------------------------------------
		cv::imshow( "Color Image", cImageBGR );
		//----------------------------------
		OldOutX.clear();
		OldOutY.clear();
		OldOutX = OutX;
		OldOutY = OutY;
		OutX.clear();
		OutY.clear();
    }

	return 0;
}
Example #19
void AVForm::updateVideoModes(int curIndex)
{
    if (curIndex<0 || curIndex>=videoDeviceList.size())
    {
        qWarning() << "Invalid index";
        return;
    }
    QString devName = videoDeviceList[curIndex].first;
    QVector<VideoMode> allVideoModes = CameraDevice::getVideoModes(devName);
    std::sort(allVideoModes.begin(), allVideoModes.end(),
        [](const VideoMode& a, const VideoMode& b)
            {return a.width!=b.width ? a.width>b.width :
                    a.height!=b.height ? a.height>b.height :
                    a.FPS>b.FPS;});
    bool previouslyBlocked = bodyUI->videoModescomboBox->blockSignals(true);
    bodyUI->videoModescomboBox->clear();

    // Identify the best resolutions available for the supposed XXXXp resolutions.
    std::map<int, VideoMode> idealModes;
    idealModes[120] = {160,120,0,0};
    idealModes[240] = {460,240,0,0};
    idealModes[360] = {640,360,0,0};
    idealModes[480] = {854,480,0,0};
    idealModes[720] = {1280,720,0,0};
    idealModes[1080] = {1920,1080,0,0};
    std::map<int, int> bestModeInds;

    qDebug("available Modes:");
    for (int i=0; i<allVideoModes.size(); ++i)
    {
        VideoMode mode = allVideoModes[i];
        qDebug("width: %d, height: %d, FPS: %f, pixel format: %s", mode.width, mode.height, mode.FPS, CameraDevice::getPixelFormatString(mode.pixel_format).toStdString().c_str());

        // PS3-Cam protection, everything above 60fps makes no sense
        if(mode.FPS > 60)
            continue;

        for(auto iter = idealModes.begin(); iter != idealModes.end(); ++iter)
        {
            int res = iter->first;
            VideoMode idealMode = iter->second;
            // don't take approximately correct resolutions unless they really
            // are close
            if (mode.norm(idealMode) > 300)
                continue;

            if (bestModeInds.find(res) == bestModeInds.end())
            {
                bestModeInds[res] = i;
                continue;
            }
            int ind = bestModeInds[res];
            if (mode.norm(idealMode) < allVideoModes[ind].norm(idealMode))
            {
                bestModeInds[res] = i;
            }
            else if (mode.norm(idealMode) == allVideoModes[ind].norm(idealMode))
            {
                // prefer higher FPS and "better" pixel formats
                if (mode.FPS > allVideoModes[ind].FPS)
                {
                    bestModeInds[res] = i;
                }
                else if (mode.FPS == allVideoModes[ind].FPS &&
                        CameraDevice::betterPixelFormat(mode.pixel_format, allVideoModes[ind].pixel_format))
                {
                    bestModeInds[res] = i;
                }
            }
        }
    }
    qDebug("selected Modes:");
    int prefResIndex = -1;
    QSize prefRes = Settings::getInstance().getCamVideoRes();
    unsigned short prefFPS = Settings::getInstance().getCamVideoFPS();
    // Iterate backwards to show highest resolution first.
    videoModes.clear();
    for(auto iter = bestModeInds.rbegin(); iter != bestModeInds.rend(); ++iter)
    {
        int i = iter->second;
        VideoMode mode = allVideoModes[i];

        if (videoModes.contains(mode))
            continue;

        videoModes.append(mode);
        if (mode.width==prefRes.width() && mode.height==prefRes.height() && mode.FPS == prefFPS && prefResIndex==-1)
            prefResIndex = videoModes.size() - 1;
        QString str;
        qDebug("width: %d, height: %d, FPS: %f, pixel format: %s\n", mode.width, mode.height, mode.FPS, CameraDevice::getPixelFormatString(mode.pixel_format).toStdString().c_str());
        if (mode.height && mode.width)
            str += tr("%1p").arg(iter->first);
        else
            str += tr("Default resolution");
        bodyUI->videoModescomboBox->addItem(str);
    }
    if (videoModes.isEmpty())
        bodyUI->videoModescomboBox->addItem(tr("Default resolution"));
    bodyUI->videoModescomboBox->blockSignals(previouslyBlocked);
    if (prefResIndex != -1)
    {
        bodyUI->videoModescomboBox->setCurrentIndex(prefResIndex);
    }
    else
    {
        // If the user hasn't set a preferred resolution yet,
        // we'll pick the resolution in the middle of the list,
        // and the best FPS for that resolution.
        // If we picked the lowest resolution, the quality would be awful
        // but if we picked the largest, FPS would be bad and thus quality bad too.
        int numRes=0;
        QSize lastSize;
        for (int i=0; i<videoModes.size(); i++)
        {
            if (lastSize != QSize{videoModes[i].width, videoModes[i].height})
            {
                numRes++;
                lastSize = {videoModes[i].width, videoModes[i].height};
            }
        }
        int target = numRes/2;
        numRes=0;
        for (int i=0; i<videoModes.size(); i++)
        {
            if (lastSize != QSize{videoModes[i].width, videoModes[i].height})
            {
                numRes++;
                lastSize = {videoModes[i].width, videoModes[i].height};
            }
            if (numRes==target)
            {
                bodyUI->videoModescomboBox->setCurrentIndex(i);
                break;
            }
        }

        if (videoModes.size())
        {
            bodyUI->videoModescomboBox->setUpdatesEnabled(false);
            bodyUI->videoModescomboBox->setCurrentIndex(-1);
            bodyUI->videoModescomboBox->setUpdatesEnabled(true);
            bodyUI->videoModescomboBox->setCurrentIndex(0);
        }
        else
        {
            // We don't have any video modes, open it with the default mode
            camera.open(devName);
        }
    }
}
Example #20
File: avform.cpp Project: Pik-9/qTox
void AVForm::selectBestModes(QVector<VideoMode> &allVideoModes)
{
    // Identify the best resolutions available for the supposed XXXXp resolutions.
    std::map<int, VideoMode> idealModes;
    idealModes[120] = VideoMode(160, 120);
    idealModes[240] = VideoMode(430, 240);
    idealModes[360] = VideoMode(640, 360);
    idealModes[480] = VideoMode(854, 480);
    idealModes[720] = VideoMode(1280, 720);
    idealModes[1080] = VideoMode(1920, 1080);

    std::map<int, int> bestModeInds;
    for (int i = 0; i < allVideoModes.size(); ++i)
    {
        VideoMode mode = allVideoModes[i];

        // PS3-Cam protection, everything above 60fps makes no sense
        if (mode.FPS > 60)
            continue;

        for (auto iter = idealModes.begin(); iter != idealModes.end(); ++iter)
        {
            int res = iter->first;
            VideoMode idealMode = iter->second;
            // don't take approximately correct resolutions unless they really
            // are close
            if (mode.norm(idealMode) > 300)
                continue;

            if (bestModeInds.find(res) == bestModeInds.end())
            {
                bestModeInds[res] = i;
                continue;
            }

            int index = bestModeInds[res];
            VideoMode best = allVideoModes[index];
            if (mode.norm(idealMode) < best.norm(idealMode))
            {
                bestModeInds[res] = i;
                continue;
            }

            if (mode.norm(idealMode) == best.norm(idealMode))
            {
                // prefer higher FPS and "better" pixel formats
                if (mode.FPS > best.FPS)
                {
                    bestModeInds[res] = i;
                    continue;
                }

                bool better = CameraDevice::betterPixelFormat(mode.pixel_format, best.pixel_format);
                if (mode.FPS >= best.FPS && better)
                    bestModeInds[res] = i;
            }
        }
    }

    QVector<VideoMode> newVideoModes;
    for (auto it = bestModeInds.rbegin(); it != bestModeInds.rend(); ++it)
    {
        VideoMode mode = allVideoModes[it->second];

        if (newVideoModes.empty())
        {
            newVideoModes.push_back(mode);
        }
        else
        {
            int size = getModeSize(mode);
            auto result = std::find_if(newVideoModes.cbegin(), newVideoModes.cend(),
                                       [size](VideoMode mode) { return getModeSize(mode) == size; });

            if (result == newVideoModes.end())
                newVideoModes.push_back(mode);
        }
    }
    allVideoModes = newVideoModes;
}
Example #21
	void Window::setDecorated( bool decorated ) {
		log << "Decoration change to " << decorated << ".\n";
		VideoMode newMode = mode_;
		newMode.setDecorated(decorated);
		setVideoMode(newMode);
	}
Example #22
	void Window::setFullscreen( bool fullscreen ) {
		log << "Fullscreen change to " << fullscreen << ".\n";
		VideoMode newMode = mode_;
		newMode.setFullscreen(fullscreen);
		setVideoMode(newMode);
	}
Example #23
    Bool RenderWindowLinux::Open( const VideoMode & p_VideoMode, const std::string & p_Title, const Uint32 p_Style )
	{
	    // open a connection with X server
	    if( ( m_pDisplay = XOpenDisplay( NULL ) ) == NULL )
        {
            std::cout << "[RenderWindowLinux::Create] Can not connect to X server." << std::endl;
            return false;
        }

        // Initialize the X thread
        // Should we?!?!
        XInitThreads( );

	    // Get the screen
	    m_Screen = DefaultScreen( m_pDisplay );

        // Create the window attributes
        XSetWindowAttributes WindowAttributes;
        WindowAttributes.colormap = DefaultColormap( m_pDisplay, m_Screen );
        WindowAttributes.event_mask =   KeyPressMask | KeyReleaseMask |
                                        ButtonPressMask | ButtonReleaseMask | ButtonMotionMask | PointerMotionMask |
                                        EnterWindowMask | LeaveWindowMask | VisibilityChangeMask |
                                        FocusChangeMask | ExposureMask | StructureNotifyMask;

        // Create the window
        m_Window = XCreateWindow( m_pDisplay,
                                 DefaultRootWindow( m_pDisplay ),
                                 0, 0, p_VideoMode.GetSize( ).x, p_VideoMode.GetSize( ).y,
                                 0,
                                 DefaultDepth( m_pDisplay, m_Screen ),
                                 InputOutput,
                                 DefaultVisual( m_pDisplay, m_Screen ),
                                 CWBorderPixel | CWEventMask | CWColormap,
                                 &WindowAttributes );



        // It's very important to set the delete message. Otherwise we won't be able to close the window.
        ::Atom wmDeleteMessage = XInternAtom( m_pDisplay, "WM_DELETE_WINDOW", false );
        XSetWMProtocols( m_pDisplay, m_Window, &wmDeleteMessage, 1 );

        // Set the window title
        SetTitle( p_Title.c_str( ) );


        // Let's set up the window decoration and the functionality
        ::Atom PropertyAtom = XInternAtom( m_pDisplay, "_MOTIF_WM_HINTS", false );
        if( PropertyAtom )
        {
            struct HintsStruct
            {
                Uint32 Flags;
                Uint32 Functions;
                Uint32 Decorations;
                Int32 InputMode;
                Uint32 State;
            };

            HintsStruct Hints;
            Hints.Flags = MWM_HINTS_FUNCTIONS | MWM_HINTS_DECORATIONS;
            Hints.Functions = 0;
            Hints.Decorations = 0;


            // Go through all the styles we want to apply to the window
            if( p_Style == Bit::Style::Default )
            {
                Hints.Functions |= MWM_FUNC_ALL;
                Hints.Decorations |= MWM_DECOR_ALL;
            }
            else
            {
                // Always set the resize and maximize functions and decorations.
                // Some window managers seems to require this.
                // Resizing can be disabled.
                Hints.Functions |= MWM_FUNC_RESIZE | MWM_FUNC_MAXIMIZE;
                Hints.Decorations |= MWM_DECOR_RESIZEH | MWM_DECOR_MAXIMIZE;


                if( p_Style & Bit::Style::Close )
                {
                     Hints.Functions |= MWM_FUNC_CLOSE;
                }

                if( p_Style & Bit::Style::Minimize )
                {
                     Hints.Functions |= MWM_FUNC_MINIMIZE;
                     Hints.Decorations |= MWM_DECOR_MINIMIZE;
                }

                if( p_Style & Bit::Style::TitleBar )
                {
                     Hints.Functions |= MWM_FUNC_MOVE | MWM_FUNC_MINIMIZE;
                     Hints.Decorations |= MWM_DECOR_BORDER | MWM_DECOR_TITLE | MWM_DECOR_MENU | MWM_DECOR_MINIMIZE;
                }
            }

            // Apply the changes
            XChangeProperty( m_pDisplay, m_Window, PropertyAtom, PropertyAtom, 32, PropModeReplace, (unsigned char *) &Hints, 5 );

            // Force x server to disable window resizing
            if (!( p_Style & Bit::Style::Resize ) )
            {
                XSizeHints * SizeHints = XAllocSizeHints( );
                SizeHints->flags = PMinSize | PMaxSize;
                SizeHints->min_width = p_VideoMode.GetSize( ).x;
                SizeHints->min_height = p_VideoMode.GetSize( ).y;
                SizeHints->max_width = p_VideoMode.GetSize( ).x;
                SizeHints->max_height = p_VideoMode.GetSize( ).y;

                // Set the hints
                XSetWMNormalHints( m_pDisplay, m_Window, SizeHints);

                // Free the size hints
                XFree(SizeHints);
            }
        }
        else
        {
            std::cout << "[RenderWindowLinux::Open] Can not get the property atom \"_MOTIF_WM_HINTS\"." << std::endl;
        }

        // Display the window.
        XMapWindow( m_pDisplay, m_Window );
        XFlush( m_pDisplay );


		// Set the rest of the member variables
		m_VideoMode = p_VideoMode;
		m_Title = p_Title;
		m_Style = p_Style;
		m_Size = p_VideoMode.GetSize( );
		m_Open = true;
		m_Focused = true;
		return true;
	}
Example #24
void VideoSourceKinect::initKinect()
{
	init();
	cout<<("Initialize RGBD camera...");
	// initialization
	Status nRetVal = openni::OpenNI::initialize();
	printf("After initialization:\n%s\n", openni::OpenNI::getExtendedError());
	nRetVal = _device.open(openni::ANY_DEVICE);		

	nRetVal = _depth.create(_device, openni::SENSOR_DEPTH);
	_depth.setMirroringEnabled(false);
	nRetVal = _color.create(_device, openni::SENSOR_COLOR); 
	_color.setMirroringEnabled(false);
	_colorSensorInfo = _device.getSensorInfo(openni::SENSOR_COLOR);

	if( setVideoMode(_uResolution) == STATUS_OK )
	{
		nRetVal = _depth.start(); 
		nRetVal = _color.start(); 
	}

	if (_depth.isValid() && _color.isValid())
	{
		VideoMode depthVideoMode = _depth.getVideoMode();
		VideoMode colorVideoMode = _color.getVideoMode();

		int depthWidth = depthVideoMode.getResolutionX();
		int depthHeight = depthVideoMode.getResolutionY();
		int colorWidth = colorVideoMode.getResolutionX();
		int colorHeight = colorVideoMode.getResolutionY();

		if (depthWidth != colorWidth || depthHeight != colorHeight)
		{
			printf("Warning - expect color and depth to be in same resolution: D: %dx%d, C: %dx%d\n",
				depthWidth, depthHeight,
				colorWidth, colorHeight);
			//return ;
		}
	}

	_streams = new VideoStream*[2];
	_streams[0] = &_depth;
	_streams[1] = &_color;

	// set to the highest resolution (index 0 is 640x480)

	char _serial[100];
	int size = sizeof(_serial);
	_device.getProperty(openni::DEVICE_PROPERTY_SERIAL_NUMBER, &_serial, &size);
	_serial_number = string(_serial);
	cout << _serial_number << endl;

	boost::filesystem::path dir("..//" + _serial_number);
	if (boost::filesystem::create_directory(dir))
		std::cout << "Success" << "\n";
	else
		std::cout << "Fail" << "\n";

	boost::filesystem::path dir_dep("..//" + _serial_number +"//depth//");
	if (boost::filesystem::create_directory(dir_dep))
		std::cout << "Success" << "\n";
	else
		std::cout << "Fail" << "\n";

	Mat cpuClibXYxZ0, cpuMask0;

	if (true){
		_color.getCameraSettings()->setAutoExposureEnabled(false);
		_color.getCameraSettings()->setAutoWhiteBalanceEnabled(false);
		_color.getCameraSettings()->setExposure(_exposure);
		_color.getCameraSettings()->setGain(_gain);
	}
	cout<<(" Done.");

	return;
}
Example #25
	void Window::setVSync( bool vsync ) {
		log << "VSync change to " << vsync << ".\n";
		VideoMode newMode = mode_;
		newMode.setVSync(vsync);
		setVideoMode(newMode);
	}
Example #26
	void Window::setSize(unsigned width, unsigned height) {
		log << "Resizing to (" << width << ", " << height << ").\n";
		VideoMode newMode = mode_;
		newMode.setSize(width,height);
		setVideoMode(newMode);		
	}
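
The Window setters in Examples #21, #22, #25, and #26 all follow one pattern: copy mode_, mutate one field, reapply. A consolidation sketch (illustrative; changeMode is a hypothetical helper assuming the same Window and VideoMode interfaces):

	// Hypothetical helper factoring out the shared copy-mutate-apply pattern.
	template <typename Mutator>
	void Window::changeMode( Mutator mutate ) {
		VideoMode newMode = mode_;
		mutate( newMode );
		setVideoMode( newMode );
	}
	// e.g. setVSync(bool vsync) reduces to: changeMode( [&]( VideoMode& m ) { m.setVSync(vsync); } );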
Example #27
int _tmain(int argc, _TCHAR* argv[])
{
	DepthDetector detector(ThresholdMin, ThresholdMax);
	ScanLineSegmenter segmenter;

	OpenNI::initialize();

	Device device;
	if (device.open(ANY_DEVICE) != STATUS_OK)
	{
		std::cout << "could not open any device\r\n";
		return 1;
	}

	if (device.hasSensor(SENSOR_DEPTH))
	{
		auto info = device.getSensorInfo(SENSOR_DEPTH);
		auto& modes = info->getSupportedVideoModes();
		std::cout << "depth sensor supported modes:\r\n";
		for (int i = 0; i < modes.getSize(); ++i)
		{
			auto& mode = modes[i];
			std::cout << "pixel format: " << mode.getPixelFormat() << "\t with: " << mode.getResolutionX() << "x" << mode.getResolutionY() << "@" << mode.getFps() << " fps\r\n";
		}
	}

	VideoStream stream;
	stream.create(device, SENSOR_DEPTH);
	VideoMode mode;
	mode.setFps(25);
	mode.setPixelFormat(PIXEL_FORMAT_DEPTH_1_MM);
	mode.setResolution(320, 240);
	stream.setMirroringEnabled(true);
	stream.setVideoMode(mode);
	stream.start();

	std::cout << "press any key to capture background\r\n";
	std::cin.get();

	VideoFrameRef frame;
	stream.readFrame(&frame);

	DepthImage image(320, 240);
	copyFrameToImage(frame, image);

	detector.background(image);

	std::cout << "starting capture loop\r\n";

	CenterPointExtractor centerPointExtractor(MinBlobSize);
	std::chrono::high_resolution_clock timer;
	auto startTime = timer.now();
	int frameId = 0;
	while (true)
	{
		stream.readFrame(&frame);

		copyFrameToImage(frame, image);

		detector.detect(image);

		std::vector<LineSegment> segments;
		segmenter.segment(detector.mask(), segments);


		std::vector<std::pair<float, float>> centerPoints;
		centerPointExtractor.extract(segments, centerPoints);

		if (centerPoints.size())
		{
			std::cout << "point count: " << centerPoints.size();
		
			std::cout << "\t points: ";
		
			for (auto& point : centerPoints)
			{
				std::cout << "(" << point.first << ", " << point.second << ")  ";
			}
			std::cout << "\r\n";
		}

		++frameId;

		
		
		if (frameId % 64 == 0)
		{
			auto stopTime = timer.now();
			
			auto elapsedTime = stopTime - startTime;
			auto elapsedMilliseconds = std::chrono::duration_cast<std::chrono::milliseconds>(elapsedTime).count();

			std::cout << "\t total frames: " << frameId << "\t fps: " << elapsedMilliseconds / 64 << std::endl;

			startTime = stopTime;
		}
		
	}
	
	openni::OpenNI::shutdown();
	return 0;
}
Example #28
int _tmain(int argc, _TCHAR* argv[])
{
	sdl::Application app;

	DepthDetector detector(ThresholdMin, ThresholdMax);
	ScanLineSegmenter segmenter;

	OpenNI::initialize();

	Device device;
	if (device.open(ANY_DEVICE) != STATUS_OK)
	{
		std::cout << "could not open any device\r\n";
		return 1;
	}

	if (device.hasSensor(SENSOR_DEPTH))
	{
		auto info = device.getSensorInfo(SENSOR_DEPTH);
		auto& modes = info->getSupportedVideoModes();
		std::cout << "depth sensor supported modes:\r\n";
		for (int i = 0; i < modes.getSize(); ++i)
		{
			auto& mode = modes[i];
			std::cout << "pixel format: " << mode.getPixelFormat() << "\t with: " << mode.getResolutionX() << "x" << mode.getResolutionY() << "@" << mode.getFps() << " fps\r\n";
		}
	}

	VideoStream stream;
	stream.create(device, SENSOR_DEPTH);
	VideoMode mode;
	mode.setFps(25);
	mode.setPixelFormat(PIXEL_FORMAT_DEPTH_1_MM);
	mode.setResolution(320, 240);
	stream.setMirroringEnabled(true);
	stream.setVideoMode(mode);
	stream.start();

	std::cout << "press any key to capture background\r\n";
	std::cin.get();

	VideoFrameRef frame;
	stream.readFrame(&frame);

	DepthImage image(320, 240);
	copyFrameToImage(frame, image);

	detector.background(image);

	std::cout << "starting capture loop\r\n";

	sdl::GLContext::setVersion(4, 3);

	ImageViewer viewer;
	viewer.add(0, 0, 320, 240);
	viewer.add(320, 0, 320, 240);
	viewer.add(0, 240, 320, 240);
	viewer.add(320, 240, 320, 240);
	
	CenterPointExtractor centerPointExtractor(MinBlobSize);
	MotionRecorder recorder;

	while (true)
	{
		stream.readFrame(&frame);

		copyFrameToImage(frame, image);
		
		detector.detect(image);

		std::vector<LineSegment> segments;
		segmenter.segment(detector.mask(), segments);


		std::vector<std::pair<float, float>> centerPoints;
		centerPointExtractor.extract(segments, centerPoints);

		recorder.track(centerPoints);
		
		viewer.crosses.clear();
		std::transform(begin(centerPoints), end(centerPoints), std::back_inserter(viewer.crosses), [](std::pair<float, float>& coord) {
			return Cross{ coord.first, coord.second };
		});

		viewer.lines.clear();
		std::transform(begin(recorder.motions()), end(recorder.motions()), std::back_inserter(viewer.lines), [](const Motion& motion) {
			return Lines{ motion.points };
		});
		
		viewer[0].update(detector.mask());
		viewer[1].update(image);
		viewer[2].update(detector.background());
		viewer[3].update(detector.difference());
		
		viewer.update();
	}
	
	openni::OpenNI::shutdown();
	return 0;
}
Example #29
int initializeOpenNIDevice(int deviceID, const char * deviceName, Device &device, VideoStream &color, VideoStream &depth, unsigned int width, unsigned int height, unsigned int fps)
{
   unsigned int openMode=OPENNI2_OPEN_REGULAR_ENUM; /* 0 = regular deviceID and enumeration*/
   if (deviceName!=0)
   {
      //If our deviceName contains a .oni we assume that we have an oni file to open
      if (strstr(deviceName,".oni")!=0)
         {
           fprintf(stderr,"Found an .ONI filename , trying to open it..\n");
           openMode=OPENNI2_OPEN_USING_STRING;
         } else
      if (strlen(deviceName)>7)
        {
           fprintf(stderr,"deviceName is too long (%lu chars) , assuming it is a Device URI ..\n",strlen(deviceName));
           openMode=OPENNI2_OPEN_USING_STRING;
        }

   }

   switch (openMode)
   {
     //-------------------------------------------------------------------------------------
     //If we have an ONI file to open just pass it as an argument to device.open(deviceName)
     case OPENNI2_OPEN_USING_STRING :
      if (device.open(deviceName) != STATUS_OK)
      {
        fprintf(stderr,"Could not open using given string ( %s ) : %s \n",deviceName,OpenNI::getExtendedError());
        return 0;
      }
     break;
     //-------------------------------------------------------------------------------------
     //If we don't have a deviceName we assume deviceID points to the device we want to open so we will try to use
     //the openNI enumerator to get the specific device URI for device with number deviceID and use this to device.open( devURI )
     case OPENNI2_OPEN_REGULAR_ENUM :
     default :
      //We have to supply our own buffer to hold the uri device string , so we make one here
      char devURIBuffer[512]={0};
      if (device.open(getURIForDeviceNumber(deviceID,devURIBuffer,512)) != STATUS_OK)
      {
        fprintf(stderr,"Could not open an OpenNI device : %s \n",OpenNI::getExtendedError());
        return 0;
      }
     break;
   }

if (device.getSensorInfo(SENSOR_DEPTH)  != NULL)
    {
        Status rc = depth.create(device, SENSOR_DEPTH);
        if (rc == STATUS_OK)
        {
            VideoMode depthMode = depth.getVideoMode();
            depthMode.setResolution(width,height);
            depthMode.setFps(fps);
            rc = depth.setVideoMode(depthMode);
            if (rc != STATUS_OK) { fprintf(stderr,"Error setting depth video mode requested %u x %u @ %u fps\n%s\n",width,height,fps,OpenNI::getExtendedError()); }

            if(depth.start()!= STATUS_OK)
            {
                fprintf(stderr,"Couldn't start the color stream: %s \n",OpenNI::getExtendedError());
                return 0;
            }
        }
        else
        {
            fprintf(stderr,"Couldn't create depth stream: %s \n",OpenNI::getExtendedError());
            return 0;
        }
    }

    if (device.getSensorInfo(SENSOR_COLOR) != NULL)
    {
        Status rc = color.create(device, SENSOR_COLOR);
        if (rc == STATUS_OK)
        {
            VideoMode colorMode = color.getVideoMode();
            colorMode.setResolution(width,height);
            colorMode.setFps(fps);
            rc = color.setVideoMode(colorMode);
            if (rc != STATUS_OK) { fprintf(stderr,"Error setting color video mode requested %u x %u @ %u fps\n%s\n",width,height,fps,OpenNI::getExtendedError()); }

            if(color.start() != STATUS_OK)
            {
                fprintf(stderr,"Couldn't start the color stream: %s \n",OpenNI::getExtendedError());
                return 0;
            }
        }
        else
        {
            fprintf(stderr,"Couldn't create depth stream: %s \n",OpenNI::getExtendedError());
            OpenNI::getExtendedError();
            return 0;
        }
    }


  #if MOD_IR
    if(device.getSensorInfo(SENSOR_IR) != NULL)
    {
        Status rc = ir.create(device, SENSOR_IR);    // Create the VideoStream for IR
        if (rc == STATUS_OK)
        {
          rc = ir.start();                      // Start the IR VideoStream
        }
         else
        {
            fprintf(stderr,"Couldn't create IR stream: %s \n",OpenNI::getExtendedError());
            OpenNI::getExtendedError();
            return 0;
        }
    }
  #endif // MOD_IR

    //Mirroring is disabled
    depth.setMirroringEnabled (false);
    color.setMirroringEnabled (false);


    fprintf(stdout,"Device Initialization Requested %u x %u @ %u fps \n",width,height,fps);
   return 1;
}
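
A call-site sketch for the initializer above (illustrative; it assumes OpenNI::initialize() has already run and that device index 0 exists):

   Device device;
   VideoStream color, depth;
   // A null deviceName selects the regular enumeration path shown above.
   if (!initializeOpenNIDevice(0, 0, device, color, depth, 640, 480, 30))
   {
     fprintf(stderr, "OpenNI device initialization failed\n");
   }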
Example #30
 void createVideoMode(VideoMode& m, int x, int y, int fps, PixelFormat format)
 {
   m.setResolution(x, y);
   m.setFps(fps);
   m.setPixelFormat(format);
 }
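
A usage sketch for the helper above (illustrative; it assumes an already-created openni::VideoStream named stream):

 VideoMode m;
 createVideoMode(m, 640, 480, 30, PIXEL_FORMAT_DEPTH_1_MM);
 if (stream.setVideoMode(m) != STATUS_OK)
   printf("setVideoMode failed: %s\n", OpenNI::getExtendedError());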