Example #1
/// <summary>
/// Grabs the next color frame from the Kinect sensor, copies the 32-bit BGRA
/// buffer into a 24-bit BGR cv::Mat, and optionally displays and/or saves it.
/// </summary>
void ProcessColor(){
	HRESULT hr;
	NUI_IMAGE_FRAME imageFrame;

	hr = m_pNuiSensor->NuiImageStreamGetNextFrame(m_pStreamColorHandle, 0, &imageFrame);
	if (FAILED(hr))	{
		return;
	}

	INuiFrameTexture * pTexture = imageFrame.pFrameTexture;
    NUI_LOCKED_RECT LockedRect;

    // Lock the frame data so the Kinect knows not to modify it while we're reading it
    pTexture->LockRect(0, &LockedRect, NULL, 0);

    // Make sure we've received valid data
    if (LockedRect.Pitch != 0) {
		cv::Mat colorFrame(cHeight, cWidth, CV_8UC3);

		// Copy the 32-bit BGRA Kinect buffer into the 24-bit BGR Mat, dropping alpha
		for(int i = 0; i < cHeight; i++) {
			uchar *ptr = colorFrame.ptr<uchar>(i);
			uchar *pBuffer = (uchar*)(LockedRect.pBits) + i * LockedRect.Pitch;

			for(int j = 0; j < cWidth; j++)	{
				ptr[3*j]   = pBuffer[4*j];		// blue
				ptr[3*j+1] = pBuffer[4*j+1];	// green
				ptr[3*j+2] = pBuffer[4*j+2];	// red
			}
		}

		// Draw image
		if (m_bShow) {
			cv::imshow("Color", colorFrame);
			cv::waitKey(1);
		}

		// Save the frame to disk if recording is enabled
		if (m_bRecord) {
			// Build the screenshot file path (e.g. under the user's Pictures folder)
			WCHAR screenshotPath[MAX_PATH];
			GetScreenshotFileName(screenshotPath, _countof(screenshotPath), COLOR);

			// Narrow the wide path for cv::imwrite; this is only safe for ASCII paths
			std::wstring screenshotPathWStr(screenshotPath);
			std::string screenshotPathStr(screenshotPathWStr.begin(), screenshotPathWStr.end());
			
			cv::imwrite(screenshotPathStr, colorFrame);
		}
	}

	pTexture->UnlockRect(0);
	m_pNuiSensor->NuiImageStreamReleaseFrame(m_pStreamColorHandle, &imageFrame);
}
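
For context, a minimal sketch of how ProcessColor is typically driven, assuming the Kinect v1 SDK's event-based pattern. The m_hNextColorFrameEvent member, the resolution, and both function names are illustrative assumptions; only m_pNuiSensor and m_pStreamColorHandle come from the code above.

// Hypothetical setup and polling for ProcessColor (Kinect SDK v1 pattern).
// Requires <Windows.h> and <NuiApi.h>; both are meant as members of the same class.
HRESULT InitColorStream()
{
	// m_hNextColorFrameEvent is an assumed member; it is signaled when a frame is ready
	m_hNextColorFrameEvent = CreateEvent(NULL, TRUE, FALSE, NULL);
	return m_pNuiSensor->NuiImageStreamOpen(
		NUI_IMAGE_TYPE_COLOR,			// color stream, BGRA pixels
		NUI_IMAGE_RESOLUTION_640x480,	// must match cWidth x cHeight
		0, 2,							// frame flags, number of buffered frames
		m_hNextColorFrameEvent,
		&m_pStreamColorHandle);
}

void Update()
{
	// Only call ProcessColor once the sensor signals a new frame; otherwise
	// NuiImageStreamGetNextFrame fails and ProcessColor returns early
	if (WaitForSingleObject(m_hNextColorFrameEvent, 0) == WAIT_OBJECT_0)
		ProcessColor();
}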
Example #2

vector<vector<cv::Point2f>> CalibrationEngine::GrabCameraImagePoints( shared_ptr<lens::ICamera> capture, int poses2Capture )
{
	int successes = 0;
	bool found = false;
	vector< vector< cv::Point2f > > imagePoints;
	vector< cv::Point2f > pointBuffer;

	// Create a display to give the user some feedback
	Display display("Calibration");
		
	// While we have boards to grab, grab 'em
	while ( successes < poses2Capture )
	{
	  // Let the user know how many more images we need and how to capture
	  std::stringstream message;
	  message << "Press <Enter> to capture pose\n" << successes << "/" << poses2Capture;
	  display.OverlayText( message.str() );

	  while ( m_userWaitKey != cvWaitKey( 15 ) )
	  {
		// Just display to the user. They are setting up the calibration board
		cv::Mat frame( capture->getFrame() );
		cv::drawChessboardCorners( frame, m_boardSize, cv::Mat( pointBuffer ), found );
		display.ShowImage( frame );
	  }

	  // User is ready, try and find the circles
	  pointBuffer.clear();
	  cv::Mat colorFrame( capture->getFrame( ) );
	  cv::Mat gray;
	  cv::cvtColor( colorFrame, gray, CV_BGR2GRAY);
	  found = cv::findCirclesGrid( gray, m_boardSize, pointBuffer, cv::CALIB_CB_ASYMMETRIC_GRID );

	  // Make sure we found it, and that we found all the points
	  if(found && pointBuffer.size() == m_boardMarkerCount)
	  {
		imagePoints.push_back(pointBuffer);
		++successes;
	  }
	} // End collection while loop

	return imagePoints;
}
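
The collected image points are ready for intrinsic calibration. Below is a hedged sketch of that next step, assuming the standard OpenCV object-point convention for an asymmetric circle grid; BuildObjectPoints, CalibrateIntrinsics, spacing, and imageSize are illustrative names and parameters, not part of CalibrationEngine.

// Sketch: feed the grabbed points into cv::calibrateCamera (assumed parameters).
static vector<vector<cv::Point3f>> BuildObjectPoints( cv::Size boardSize, float spacing, size_t views )
{
	vector<cv::Point3f> board;
	for( int i = 0; i < boardSize.height; ++i )
		for( int j = 0; j < boardSize.width; ++j )
			// Asymmetric circle grid: every other row is offset by half a period
			board.push_back( cv::Point3f( ( 2 * j + i % 2 ) * spacing, i * spacing, 0.0f ) );
	return vector<vector<cv::Point3f>>( views, board );
}

double CalibrateIntrinsics( const vector<vector<cv::Point2f>>& imagePoints,
							cv::Size boardSize, float spacing, cv::Size imageSize )
{
	auto objectPoints = BuildObjectPoints( boardSize, spacing, imagePoints.size() );
	cv::Mat cameraMatrix, distCoeffs;
	vector<cv::Mat> rvecs, tvecs;
	// Returns the RMS reprojection error in pixels
	return cv::calibrateCamera( objectPoints, imagePoints, imageSize,
								cameraMatrix, distCoeffs, rvecs, tvecs );
}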
Example #3
bool ParticleEffect::Load(const XMLElement& source)
{
    // Reset to defaults first so that parameters missing from the XML behave as expected after a live reload
    material_.Reset();
    numParticles_ = DEFAULT_NUM_PARTICLES;
    updateInvisible_ = false;
    relative_ = true;
    scaled_ = true;
    sorted_ = false;
    fixedScreenSize_ = false;
    animationLodBias_ = 0.0f;
    emitterType_ = EMITTER_SPHERE;
    emitterSize_ = Vector3::ZERO;
    directionMin_ = DEFAULT_DIRECTION_MIN;
    directionMax_ = DEFAULT_DIRECTION_MAX;
    constantForce_ = Vector3::ZERO;
    dampingForce_ = 0.0f;
    activeTime_ = 0.0f;
    inactiveTime_ = 0.0f;
    emissionRateMin_ = DEFAULT_EMISSION_RATE;
    emissionRateMax_ = DEFAULT_EMISSION_RATE;
    sizeMin_ = DEFAULT_PARTICLE_SIZE;
    sizeMax_ = DEFAULT_PARTICLE_SIZE;
    timeToLiveMin_ = DEFAULT_TIME_TO_LIVE;
    timeToLiveMax_ = DEFAULT_TIME_TO_LIVE;
    velocityMin_ = DEFAULT_VELOCITY;
    velocityMax_ = DEFAULT_VELOCITY;
    rotationMin_ = 0.0f;
    rotationMax_ = 0.0f;
    rotationSpeedMin_ = 0.0f;
    rotationSpeedMax_ = 0.0f;
    sizeAdd_ = 0.0f;
    sizeMul_ = 1.0f;
    colorFrames_.Clear();
    textureFrames_.Clear();
    faceCameraMode_ = FC_ROTATE_XYZ;

    if (source.IsNull())
    {
        URHO3D_LOGERROR("Can not load particle effect from null XML element");
        return false;
    }

    if (source.HasChild("material"))
    {
        loadMaterialName_ = source.GetChild("material").GetAttribute("name");
        // While async loading we can not GetResource() the material, but we can request a background load for it
        if (GetAsyncLoadState() == ASYNC_LOADING)
            GetSubsystem<ResourceCache>()->BackgroundLoadResource<Material>(loadMaterialName_, true, this);
    }

    if (source.HasChild("numparticles"))
        SetNumParticles((unsigned)source.GetChild("numparticles").GetInt("value"));

    if (source.HasChild("updateinvisible"))
        updateInvisible_ = source.GetChild("updateinvisible").GetBool("enable");

    if (source.HasChild("relative"))
        relative_ = source.GetChild("relative").GetBool("enable");

    if (source.HasChild("scaled"))
        scaled_ = source.GetChild("scaled").GetBool("enable");

    if (source.HasChild("sorted"))
        sorted_ = source.GetChild("sorted").GetBool("enable");

    if (source.HasChild("fixedscreensize"))
        fixedScreenSize_ = source.GetChild("fixedscreensize").GetBool("enable");

    if (source.HasChild("animlodbias"))
        SetAnimationLodBias(source.GetChild("animlodbias").GetFloat("value"));

    if (source.HasChild("emittertype"))
    {
        String type = source.GetChild("emittertype").GetAttributeLower("value");
        if (type == "point")
        {
            // Point emitter type is deprecated, handled as zero sized sphere
            emitterType_ = EMITTER_SPHERE;
            emitterSize_ = Vector3::ZERO;
        }
        else
            emitterType_ = (EmitterType)GetStringListIndex(type.CString(), emitterTypeNames, EMITTER_SPHERE);
    }

    if (source.HasChild("emittersize"))
        emitterSize_ = source.GetChild("emittersize").GetVector3("value");

    if (source.HasChild("emitterradius"))
        emitterSize_.x_ = emitterSize_.y_ = emitterSize_.z_ = source.GetChild("emitterradius").GetFloat("value");

    if (source.HasChild("direction"))
        GetVector3MinMax(source.GetChild("direction"), directionMin_, directionMax_);

    if (source.HasChild("constantforce"))
        constantForce_ = source.GetChild("constantforce").GetVector3("value");

    if (source.HasChild("dampingforce"))
        dampingForce_ = source.GetChild("dampingforce").GetFloat("value");

    if (source.HasChild("activetime"))
        activeTime_ = source.GetChild("activetime").GetFloat("value");
    if (activeTime_ < 0.0f)
        activeTime_ = M_INFINITY;

    if (source.HasChild("inactivetime"))
        inactiveTime_ = source.GetChild("inactivetime").GetFloat("value");
    if (inactiveTime_ < 0.0f)
        inactiveTime_ = M_INFINITY;

    if (source.HasChild("emissionrate"))
        GetFloatMinMax(source.GetChild("emissionrate"), emissionRateMin_, emissionRateMax_);

    if (source.HasChild("interval"))
    {
        float intervalMin = 0.0f;
        float intervalMax = 0.0f;
        GetFloatMinMax(source.GetChild("interval"), intervalMin, intervalMax);
        emissionRateMax_ = 1.0f / intervalMin;
        emissionRateMin_ = 1.0f / intervalMax;
    }

    if (source.HasChild("particlesize"))
        GetVector2MinMax(source.GetChild("particlesize"), sizeMin_, sizeMax_);

    if (source.HasChild("timetolive"))
        GetFloatMinMax(source.GetChild("timetolive"), timeToLiveMin_, timeToLiveMax_);

    if (source.HasChild("velocity"))
        GetFloatMinMax(source.GetChild("velocity"), velocityMin_, velocityMax_);

    if (source.HasChild("rotation"))
        GetFloatMinMax(source.GetChild("rotation"), rotationMin_, rotationMax_);

    if (source.HasChild("rotationspeed"))
        GetFloatMinMax(source.GetChild("rotationspeed"), rotationSpeedMin_, rotationSpeedMax_);

    if (source.HasChild("faceCameraMode"))
    {
        String type = source.GetChild("faceCameraMode").GetAttributeLower("value");
        faceCameraMode_ = (FaceCameraMode)GetStringListIndex(type.CString(), faceCameraModeNames, FC_ROTATE_XYZ);
    }

    if (source.HasChild("sizedelta"))
    {
        XMLElement deltaElem = source.GetChild("sizedelta");
        if (deltaElem.HasAttribute("add"))
            sizeAdd_ = deltaElem.GetFloat("add");
        if (deltaElem.HasAttribute("mul"))
            sizeMul_ = deltaElem.GetFloat("mul");
    }

    if (source.HasChild("color"))
    {
        ColorFrame colorFrame(source.GetChild("color").GetColor("value"));
        SetColorFrame(0, colorFrame);
    }

    if (source.HasChild("colorfade"))
    {
        Vector<ColorFrame> fades;
        for (XMLElement colorFadeElem = source.GetChild("colorfade"); colorFadeElem;
             colorFadeElem = colorFadeElem.GetNext("colorfade"))
            fades.Push(ColorFrame(colorFadeElem.GetColor("color"), colorFadeElem.GetFloat("time")));

        SetColorFrames(fades);
    }

    if (colorFrames_.Empty())
        colorFrames_.Push(ColorFrame(Color::WHITE));

    if (source.HasChild("texanim"))
    {
        Vector<TextureFrame> animations;
        for (XMLElement animElem = source.GetChild("texanim"); animElem; animElem = animElem.GetNext("texanim"))
        {
            TextureFrame animation;
            animation.uv_ = animElem.GetRect("uv");
            animation.time_ = animElem.GetFloat("time");
            animations.Push(animation);
        }

        SetTextureFrames(animations);
    }

    return true;
}
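
For context, a hedged sketch of how this loader is normally reached in Urho3D: the resource cache parses the effect's XML file and Load() above receives the root element. The function name, asset path, and node setup here are illustrative, not taken from the code above.

// Sketch of typical Urho3D usage (SpawnEffect and the asset path are hypothetical).
#include <Urho3D/Resource/ResourceCache.h>
#include <Urho3D/Graphics/ParticleEffect.h>
#include <Urho3D/Graphics/ParticleEmitter.h>

using namespace Urho3D;

void SpawnEffect(Context* context, Node* node)
{
    auto* cache = context->GetSubsystem<ResourceCache>();
    // GetResource loads and parses the XML file, which ends up invoking
    // ParticleEffect::Load() shown above with the file's root element
    auto* effect = cache->GetResource<ParticleEffect>("Particle/Fire.xml");
    if (!effect)
        return;

    auto* emitter = node->CreateComponent<ParticleEmitter>();
    emitter->SetEffect(effect);
}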
Example #4

vector<vector<cv::Point2f>> CalibrationEngine::GrabProjectorImagePoints(shared_ptr<lens::ICamera> capture, int poses2Capture )
{
	int successes = 0;
	bool found = false;
	vector< vector< cv::Point2f > > imagePoints;
	vector< cv::Point2f > pointBuffer;

	// Create a display to give the user some feedback
	Display display("Calibration");

	// While we have boards to grab, grab 'em
	while ( successes < poses2Capture )
	{
	  // Let the user know how many more images we need and how to capture
	  std::stringstream message;
	  message << "Press <Enter> to capture pose\n" << successes << "/" << poses2Capture;
	  display.OverlayText( message.str() );

	  while ( m_userWaitKey != cvWaitKey( 15 ) )
	  {
		// Just display to the user. They are setting up the calibration board
		cv::Mat frame( capture->getFrame() );
		cv::drawChessboardCorners( frame, m_boardSize, cv::Mat( pointBuffer ), found );
		display.ShowImage( frame );
	  }

	  // User is ready, try and find the circles
	  pointBuffer.clear();

	  // TODO - make the projector project a white image
	  cv::Mat colorFrame( capture->getFrame( ) );
	  cv::Mat gray;
	  cv::cvtColor( colorFrame, gray, CV_BGR2GRAY);
	  found = cv::findCirclesGrid( gray, m_boardSize, pointBuffer, cv::CALIB_CB_ASYMMETRIC_GRID );

	  // Make sure we found it, and that we found all the points
	  if(found && pointBuffer.size() == m_boardMarkerCount)
	  {
		// We found all the markers in the camera view. Now we need to image with the projector
		vector<cv::Mat> wrappedPhase;
		NFringeStructuredLight fringeGenerator(5);
		TwoWavelengthPhaseUnwrapper phaseUnwrapper;

		// Horizontal set --------------------------
		auto smallWavelength = fringeGenerator.GenerateFringe(gray.size(), 70, IStructuredLight::Horizontal);
		wrappedPhase.push_back( ProjectAndCaptureWrappedPhase( capture, smallWavelength ) );
		auto largerWavelength = fringeGenerator.GenerateFringe(gray.size(), 75, IStructuredLight::Horizontal);
		wrappedPhase.push_back( ProjectAndCaptureWrappedPhase( capture, largerWavelength ) );
		auto horizontalUnwrappedPhase = phaseUnwrapper.UnwrapPhase(wrappedPhase);
		
		// Vertical set ----------------------------
		wrappedPhase.clear();	// don't carry the horizontal wrapped phase into the vertical unwrap
		smallWavelength = fringeGenerator.GenerateFringe(gray.size(), 70, IStructuredLight::Vertical);
		wrappedPhase.push_back( ProjectAndCaptureWrappedPhase( capture, smallWavelength ) );
		largerWavelength = fringeGenerator.GenerateFringe(gray.size(), 75, IStructuredLight::Vertical);
		wrappedPhase.push_back( ProjectAndCaptureWrappedPhase( capture, largerWavelength ) );
		auto verticalUnwrappedPhase = phaseUnwrapper.UnwrapPhase(wrappedPhase);

		vector< cv::Point2f > projectorPointBuffer;
		// TODO - interpolate projector pixels from phase

		imagePoints.push_back(projectorPointBuffer);
		++successes;
	  }
	} // End collection while loop

	return imagePoints;
}
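
The phase-to-pixel interpolation is left as a TODO above. For orientation only, here is a hedged sketch of one common structured-light convention for mapping absolute unwrapped phase to projector coordinates; the helper name, the CV_32F phase maps, and the wavelength parameter are all assumptions, not this project's API.

// Hypothetical helper: map absolute unwrapped phase at a detected circle center
// to a projector pixel. One common convention; not necessarily this project's.
cv::Point2f PhaseToProjectorPixel( const cv::Mat& horizontalPhase,	// CV_32F, radians
								   const cv::Mat& verticalPhase,	// CV_32F, radians
								   cv::Point2f cameraPoint,
								   float wavelength )				// fringe period in projector pixels
{
	// Nearest-neighbor sampling; bilinear interpolation would be more accurate
	int r = cvRound( cameraPoint.y );
	int c = cvRound( cameraPoint.x );
	float phiH = horizontalPhase.at<float>( r, c );
	float phiV = verticalPhase.at<float>( r, c );

	// Phase grows linearly across the fringe direction: coord = phi * lambda / (2*pi).
	// Horizontal fringes encode the projector row, vertical fringes the column
	float projY = phiH * wavelength / ( 2.0f * (float)CV_PI );
	float projX = phiV * wavelength / ( 2.0f * (float)CV_PI );
	return cv::Point2f( projX, projY );
}

Under that convention, each entry of the camera-side pointBuffer would be run through such a helper to fill projectorPointBuffer before it is pushed into imagePoints.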