XnStatus XnSensorDepthStream::ConfigureStreamImpl()
{
	XnStatus nRetVal = XN_STATUS_OK;

	xnUSBShutdownReadThread(GetHelper()->GetPrivateData()->pSpecificDepthUsb->pUsbConnection->UsbEp);

	nRetVal = SetActualRead(TRUE);
	XN_IS_STATUS_OK(nRetVal);

	XN_IS_STATUS_OK(m_Helper.ConfigureFirmware(m_InputFormat));
	XN_IS_STATUS_OK(m_Helper.ConfigureFirmware(ResolutionProperty()));
	XN_IS_STATUS_OK(m_Helper.ConfigureFirmware(FPSProperty()));
	XN_IS_STATUS_OK(m_Helper.ConfigureFirmware(m_HoleFilter));
	XN_IS_STATUS_OK(m_Helper.ConfigureFirmware(m_Gain));

	// we need to turn decimation on when resolution is QVGA, and FPS is different than 60
	// NOTE: this is ugly as hell. This logic should be moved to firmware.
	XnBool bDecimation = (GetResolution() == XN_RESOLUTION_QVGA && GetFPS() != 60);
	nRetVal = GetFirmwareParams()->m_DepthDecimation.SetValue(bDecimation);
	XN_IS_STATUS_OK(nRetVal);

	XN_IS_STATUS_OK(m_Helper.ConfigureFirmware(m_FirmwareRegistration));
	XN_IS_STATUS_OK(m_Helper.ConfigureFirmware(m_FirmwareMirror));
	XN_IS_STATUS_OK(m_Helper.ConfigureFirmware(m_GMCMode));
	XN_IS_STATUS_OK(m_Helper.ConfigureFirmware(m_WhiteBalance));

	nRetVal = m_Helper.GetCmosInfo()->SetCmosConfig(XN_CMOS_TYPE_DEPTH, GetResolution(), GetFPS());
	XN_IS_STATUS_OK(nRetVal);

	return XN_STATUS_OK;
}
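In other words, the decimation rule above turns depth decimation on only for QVGA at frame rates other than 60 (e.g. 25 or 30 FPS); QVGA at 60 FPS and every VGA mode leave it off.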
Example #2
void ezRendererTestBasics::RenderObjects(ezBitflags<ezShaderBindFlags> ShaderBindFlags)
{
  ezCamera cam;
  cam.SetCameraMode(ezCameraMode::PerspectiveFixedFovX, 90, 0.5f, 1000.0f);
  cam.LookAt(ezVec3(0, 0, 0), ezVec3(0, 0, -1), ezVec3(0, 1, 0));
  ezMat4 mProj;
  cam.GetProjectionMatrix((float)GetResolution().width / (float)GetResolution().height, mProj);
  ezMat4 mView = cam.GetViewMatrix();

  ezMat4 mTransform, mOther, mRot;

  mRot.SetRotationMatrixX(ezAngle::Degree(-90));

  mOther.SetScalingMatrix(ezVec3(1.0f, 1.0f, 1.0f));
  mTransform.SetTranslationMatrix(ezVec3(-0.3f, -0.3f, 0.0f));
  RenderObject(m_hLongBox, mProj * mView * mTransform * mOther, ezColor(1, 0, 1, 0.25f), ShaderBindFlags);

  mOther.SetRotationMatrixX(ezAngle::Degree(80.0f));
  mTransform.SetTranslationMatrix(ezVec3(0.75f, 0, -1.8f));
  RenderObject(m_hTorus, mProj * mView * mTransform * mOther * mRot, ezColor(1, 0, 0, 0.5f), ShaderBindFlags);

  mOther.SetIdentity();
  mTransform.SetTranslationMatrix(ezVec3(0, 0.1f, -2.0f));
  RenderObject(m_hSphere, mProj * mView * mTransform * mOther, ezColor(0, 1, 0, 0.75f), ShaderBindFlags);

  mOther.SetScalingMatrix(ezVec3(1.5f, 1.0f, 1.0f));
  mTransform.SetTranslationMatrix(ezVec3(-0.6f, -0.2f, -2.2f));
  RenderObject(m_hSphere2, mProj * mView * mTransform * mOther * mRot, ezColor(0, 0, 1, 1), ShaderBindFlags);
}
XnStatus XnSensorImageStream::ConfigureStreamImpl()
{
	XnStatus nRetVal = XN_STATUS_OK;

	xnUSBShutdownReadThread(GetHelper()->GetPrivateData()->pSpecificImageUsb->pUsbConnection->UsbEp);

	nRetVal = SetActualRead(TRUE);
	XN_IS_STATUS_OK(nRetVal);

	nRetVal = ValidateMode();
	XN_IS_STATUS_OK(nRetVal);

	XN_IS_STATUS_OK(m_Helper.ConfigureFirmware(m_InputFormat));
	XN_IS_STATUS_OK(m_Helper.ConfigureFirmware(ResolutionProperty()));
	XN_IS_STATUS_OK(m_Helper.ConfigureFirmware(FPSProperty()));
	XN_IS_STATUS_OK(m_Helper.ConfigureFirmware(m_AntiFlicker));

	// image quality is only relevant for JPEG
	if (m_InputFormat.GetValue() == XN_IO_IMAGE_FORMAT_JPEG)
	{
		XN_IS_STATUS_OK(m_Helper.ConfigureFirmware(m_ImageQuality));
	}

	XN_IS_STATUS_OK(m_Helper.ConfigureFirmware(m_FirmwareMirror));

	if (GetResolution() != XN_RESOLUTION_UXGA && GetResolution() != XN_RESOLUTION_SXGA)
	{
		nRetVal = m_Helper.GetCmosInfo()->SetCmosConfig(XN_CMOS_TYPE_IMAGE, GetResolution(), GetFPS());
		XN_IS_STATUS_OK(nRetVal);
	}

	return (XN_STATUS_OK);
}
Example #4
void ImageChunk::ApplyData() {
	mImage.create(GetResolution(), GetResolution(), sf::Color::Black);

	const size_t ThreadCount = 16;
	std::thread ImageThreads[ThreadCount];

	int IncrementX = (int)std::ceil((float)GetResolution() / ThreadCount); // float division, otherwise the integer division truncates before the ceil

	for (int i = 0; i < ThreadCount; i++) {
		size_t MinX = i * IncrementX;
		size_t MaxX = (size_t)std::fminf((1 + i) * IncrementX, GetResolution());
		size_t MinY = 0;
		size_t MaxY = GetResolution();

		//ApplyArea(MinX, MinY, MaxX, MaxY);

		ImageThreads[i] = std::thread(
			ApplyArea, 
			this,
			MinX, 
			MinY, 
			MaxX, 
			MaxY
		);
	}
	for (int i = 0; i < ThreadCount; i++) {
		ImageThreads[i].join();
	}

	mTexture.loadFromImage(mImage);
	mDrawRectangle.setTexture(&mTexture, true);
	mDrawRectangle.setSize((sf::Vector2f)mTexture.getSize());
	mDrawRectangle.setPosition(GetX(), GetY());
	mDrawRectangle.setScale(sf::Vector2f(GetSize() / mTexture.getSize().x, GetSize() / mTexture.getSize().y));
}
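The column partitioning above depends on ceiling division so that the slices cover the whole image; a self-contained sketch of the same pattern follows (the FillColumns worker and the 100-pixel resolution are made-up stand-ins, not part of ImageChunk):

#include <algorithm>
#include <cstdio>
#include <functional>
#include <thread>
#include <vector>

// Hypothetical worker: fills columns [minX, maxX) of a width x width image.
static void FillColumns(std::vector<int>& image, int width, int minX, int maxX) {
	for (int x = minX; x < maxX; ++x)
		for (int y = 0; y < width; ++y)
			image[y * width + x] = x + y; // placeholder per-pixel work
}

int main() {
	const int width = 100;      // assumed resolution
	const int threadCount = 16;
	std::vector<int> image(width * width, 0);

	// Ceiling division so threadCount slices cover all columns.
	const int increment = (width + threadCount - 1) / threadCount;

	std::vector<std::thread> workers;
	for (int i = 0; i < threadCount; ++i) {
		const int minX = i * increment;
		const int maxX = std::min((i + 1) * increment, width);
		if (minX >= maxX) break; // nothing left for the remaining threads
		workers.emplace_back(FillColumns, std::ref(image), width, minX, maxX);
	}
	for (auto& t : workers) t.join();

	std::printf("last pixel = %d\n", image.back());
	return 0;
}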
Example #5
XnStatus XnSensorIRStream::ConfigureStreamImpl()
{
	XnStatus nRetVal = XN_STATUS_OK;

	xnUSBShutdownReadThread(GetHelper()->GetPrivateData()->pSpecificImageUsb->pUsbConnection->UsbEp);

	nRetVal = SetActualRead(TRUE);
	XN_IS_STATUS_OK(nRetVal);

	nRetVal = m_Helper.ConfigureFirmware(ResolutionProperty());
	XN_IS_STATUS_OK(nRetVal);
	nRetVal = m_Helper.ConfigureFirmware(FPSProperty());
	XN_IS_STATUS_OK(nRetVal);

	// IR mirror is always off in firmware
	nRetVal = GetFirmwareParams()->m_IRMirror.SetValue(FALSE);
	XN_IS_STATUS_OK(nRetVal);

	// CMOS
	if (GetResolution() != XN_RESOLUTION_SXGA)
	{
		nRetVal = m_Helper.GetCmosInfo()->SetCmosConfig(XN_CMOS_TYPE_DEPTH, GetResolution(), GetFPS());
		XN_IS_STATUS_OK(nRetVal);
	}

	return (XN_STATUS_OK);
}
XnStatus XnSensorImageStream::ConfigureStreamImpl()
{
	XnStatus nRetVal = XN_STATUS_OK;

	xnUSBShutdownReadThread(GetHelper()->GetPrivateData()->pSpecificImageUsb->pUsbConnection->UsbEp);

	nRetVal = SetActualRead(TRUE);
	XN_IS_STATUS_OK(nRetVal);

	nRetVal = ValidateMode();
	XN_IS_STATUS_OK(nRetVal);

	nRetVal = m_Helper.ConfigureFirmware(m_InputFormat);
	XN_IS_STATUS_OK(nRetVal);
	nRetVal = m_Helper.ConfigureFirmware(ResolutionProperty());
	XN_IS_STATUS_OK(nRetVal);
	nRetVal = m_Helper.ConfigureFirmware(FPSProperty());
	XN_IS_STATUS_OK(nRetVal);
	nRetVal = m_Helper.ConfigureFirmware(m_AntiFlicker);
	XN_IS_STATUS_OK(nRetVal);

	// image quality is only relevant for JPEG
	if (m_InputFormat.GetValue() == XN_IO_IMAGE_FORMAT_JPEG)
	{
		nRetVal = m_Helper.ConfigureFirmware(m_ImageQuality);
		XN_IS_STATUS_OK(nRetVal);
	}

	nRetVal = m_Helper.ConfigureFirmware(m_FirmwareMirror);
	XN_IS_STATUS_OK(nRetVal);

	if (GetResolution() != XN_RESOLUTION_UXGA && GetResolution() != XN_RESOLUTION_SXGA)
	{
		nRetVal = m_Helper.GetCmosInfo()->SetCmosConfig(XN_CMOS_TYPE_IMAGE, GetResolution(), GetFPS());
		XN_IS_STATUS_OK(nRetVal);
	}

	if (m_Helper.GetFirmwareVersion() >= XN_SENSOR_FW_VER_5_4)
	{
		nRetVal = m_Helper.ConfigureFirmware(m_Sharpness);
		XN_IS_STATUS_OK(nRetVal);
		nRetVal = m_Helper.ConfigureFirmware(m_FirmwareColorTemperature);
		XN_IS_STATUS_OK(nRetVal);
		nRetVal = m_Helper.ConfigureFirmware(m_FirmwareAutoWhiteBalance);
		XN_IS_STATUS_OK(nRetVal);
		nRetVal = m_Helper.ConfigureFirmware(m_FirmwareExposure);
		XN_IS_STATUS_OK(nRetVal);
		nRetVal = m_Helper.ConfigureFirmware(m_FirmwareAutoExposure);
		XN_IS_STATUS_OK(nRetVal);
		nRetVal = m_Helper.ConfigureFirmware(m_BackLightCompensation);
		XN_IS_STATUS_OK(nRetVal);
		nRetVal = m_Helper.ConfigureFirmware(m_Gain);
		XN_IS_STATUS_OK(nRetVal);
		nRetVal = m_Helper.ConfigureFirmware(m_LowLightCompensation);
		XN_IS_STATUS_OK(nRetVal);
	}

	return (XN_STATUS_OK);
}
void OSVRHMDDescription::GetMonitorInfo(IHeadMountedDisplay::MonitorInfo& MonitorDesc) const
{
	MonitorDesc.MonitorName = "OSVR-Display"; //@TODO
	MonitorDesc.MonitorId = 0;				  //@TODO
	MonitorDesc.DesktopX = GetDisplayOrigin(OSVRHMDDescription::LEFT_EYE).X;
	MonitorDesc.DesktopY = GetDisplayOrigin(OSVRHMDDescription::LEFT_EYE).Y;
	MonitorDesc.ResolutionX = GetResolution().X;
	MonitorDesc.ResolutionY = GetResolution().Y;
}
void CWinsys::Init () {
	Uint32 sdl_flags = SDL_INIT_VIDEO | SDL_INIT_NOPARACHUTE | SDL_INIT_TIMER;
	if (SDL_Init (sdl_flags) < 0) Message ("Could not initialize SDL");

	SDL_GL_SetAttribute(SDL_GL_CONTEXT_EGL, 1);
	SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 1); 
	SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, 1); 

	SDL_GL_SetAttribute (SDL_GL_DOUBLEBUFFER, 1);
#if defined (USE_STENCIL_BUFFER)
	SDL_GL_SetAttribute (SDL_GL_STENCIL_SIZE, 8);
#endif
#ifdef USE_GLES
	glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_FASTEST);
	glHint(GL_LINE_SMOOTH_HINT, GL_FASTEST);
#endif

	SDL_GL_SetAttribute(SDL_GL_RED_SIZE, 8);
	SDL_GL_SetAttribute(SDL_GL_GREEN_SIZE, 8);
	SDL_GL_SetAttribute(SDL_GL_BLUE_SIZE, 8);
	SDL_GL_SetAttribute(SDL_GL_DEPTH_SIZE, 24);

	SetupVideoMode (GetResolution (param.res_type));
	context = SDL_GL_CreateContext(window);
	SetOrient(param.orient >= 0 ? param.orient : resolution.width < resolution.height);
	Reshape (resolution.width, resolution.height);

	//SDL_WM_SetCaption (WINDOW_TITLE, WINDOW_TITLE);
	KeyRepeat (false);
	if (USE_JOYSTICK) InitJoystick ();
//	SDL_EnableUNICODE (1);
}
void CLinuxRenderer::CalcNormalDisplayRect(float fOffsetX1, float fOffsetY1, float fScreenWidth, float fScreenHeight, float fInputFrameRatio, float fZoomAmount)
{
  // scale up image as much as possible
  // and keep the aspect ratio (introducing black bars where needed)
  // calculate the correct output frame ratio (using the user's pixel ratio setting
  // and the output pixel ratio setting)

  float fOutputFrameRatio = fInputFrameRatio / g_settings.m_ResInfo[GetResolution()].fPixelRatio;

  // maximize the movie width
  float fNewWidth = fScreenWidth;
  float fNewHeight = fNewWidth / fOutputFrameRatio;

  if (fNewHeight > fScreenHeight)
  {
    fNewHeight = fScreenHeight;
    fNewWidth = fNewHeight * fOutputFrameRatio;
  }

  // Scale the movie up by set zoom amount
  fNewWidth *= fZoomAmount;
  fNewHeight *= fZoomAmount;

  // Centre the movie
  float fPosY = (fScreenHeight - fNewHeight) / 2;
  float fPosX = (fScreenWidth - fNewWidth) / 2;

  rd.left = (int)(fPosX + fOffsetX1);
  rd.right = (int)(rd.left + fNewWidth + 0.5f);
  rd.top = (int)(fPosY + fOffsetY1);
  rd.bottom = (int)(rd.top + fNewHeight + 0.5f);
}
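For reference, the width-first aspect fit used by CalcNormalDisplayRect can be exercised in isolation; the sketch below uses made-up screen and frame ratios and omits the pixel-ratio and zoom handling:

#include <cstdio>

// Width-first aspect fit with centring, as in CalcNormalDisplayRect above.
int main()
{
  const float fScreenWidth = 1280.0f, fScreenHeight = 720.0f; // assumed output size
  const float fOutputFrameRatio = 4.0f / 3.0f;                // assumed frame ratio

  float fNewWidth = fScreenWidth;                 // maximize the width
  float fNewHeight = fNewWidth / fOutputFrameRatio;
  if (fNewHeight > fScreenHeight)                 // too tall: clamp height, shrink width
  {
    fNewHeight = fScreenHeight;
    fNewWidth = fNewHeight * fOutputFrameRatio;
  }

  // Centre the image; the leftover space becomes the black bars.
  const float fPosX = (fScreenWidth - fNewWidth) / 2;
  const float fPosY = (fScreenHeight - fNewHeight) / 2;
  std::printf("rect %.0fx%.0f at (%.0f, %.0f)\n", fNewWidth, fNewHeight, fPosX, fPosY);
  return 0;
}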
Example #10
 inline VoxelGrid::GRID_INDEX GetNextFromGradient(const VoxelGrid::GRID_INDEX& index, const Eigen::Vector3d& gradient) const
 {
     // Given the gradient, pick the "best fit" of the 26 neighboring points
     VoxelGrid::GRID_INDEX next_index = index;
     double half_resolution = GetResolution() * 0.5;
     if (gradient.x() > half_resolution)
     {
         next_index.x++;
     }
     else if (gradient.x() < -half_resolution)
     {
         next_index.x--;
     }
     if (gradient.y() > half_resolution)
     {
         next_index.y++;
     }
     else if (gradient.y() < -half_resolution)
     {
         next_index.y--;
     }
     if (gradient.z() > half_resolution)
     {
         next_index.z++;
     }
     else if (gradient.z() < -half_resolution)
     {
         next_index.z--;
     }
     return next_index;
 }
Example #11
void
DviDoc::CreateDocument (/*[in]*/ const char * lpszPathName)
{
  fileStatus = DVIFILE_NOT_LOADED;
  modificationTime = File::GetLastWriteTime(lpszPathName);
  MIKTEXMFMODE mfmode;
  if (! pSession->GetMETAFONTMode(GetMetafontMode(), &mfmode))
    {
      UNEXPECTED_CONDITION ("DviDoc::CreateDocument");
    }
  MIKTEX_ASSERT (pDvi == 0);
  pDvi =
    Dvi::Create(lpszPathName,
		mfmode.szMnemonic,
		GetResolution(),
		GetShrinkFactor(),
		(IsPrintContext()
		 ? DviAccess::Sequential
		 : DviAccess::Random),
		(IsPrintContext()
		 ? DviPageMode::Dvips
		 : dviPageMode),
		pSession->GetPaperSizeInfo(dvipsPaperName.c_str()),
		landscape,
		this);
  pDvi->Scan ();
  fileStatus = DVIFILE_LOADED;
}
Example #12
void CComboRenderer::ManageDisplay()
{
  const RECT& rv = g_graphicsContext.GetViewWindow();
  float fScreenWidth = (float)rv.right - rv.left;
  float fScreenHeight = (float)rv.bottom - rv.top;
  float fOffsetX1 = (float)rv.left;
  float fOffsetY1 = (float)rv.top;
  float fPixelRatio = CDisplaySettings::Get().GetPixelRatio();
  float fMaxScreenWidth = (float)CDisplaySettings::Get().GetResolutionInfo(g_graphicsContext.GetVideoResolution()).iWidth;
  float fMaxScreenHeight = (float)CDisplaySettings::Get().GetResolutionInfo(g_graphicsContext.GetVideoResolution()).iHeight;
  if (fOffsetX1 < 0) fOffsetX1 = 0;
  if (fOffsetY1 < 0) fOffsetY1 = 0;
  if (fScreenWidth + fOffsetX1 > fMaxScreenWidth) fScreenWidth = fMaxScreenWidth - fOffsetX1;
  if (fScreenHeight + fOffsetY1 > fMaxScreenHeight) fScreenHeight = fMaxScreenHeight - fOffsetY1;

  // Correct for HDTV_1080i -> 540p
  if (GetResolution() == HDTV_1080i)
  {
    fOffsetY1 /= 2;
    fScreenHeight /= 2;
    fPixelRatio *= 2;
  }

  // source rect
  rs.left = CMediaSettings::Get().GetCurrentVideoSettings().m_CropLeft;
  rs.top = CMediaSettings::Get().GetCurrentVideoSettings().m_CropTop;
  rs.right = m_iSourceWidth - CMediaSettings::Get().GetCurrentVideoSettings().m_CropRight;
  rs.bottom = m_iSourceHeight - CMediaSettings::Get().GetCurrentVideoSettings().m_CropBottom;

  CalcNormalDisplayRect(fOffsetX1, fOffsetY1, fScreenWidth, fScreenHeight, GetAspectRatio() * fPixelRatio, CDisplaySettings::Get().GetZoomAmount());

  // check whether we need to alter our source rect
  if (rd.left < fOffsetX1 || rd.right > fOffsetX1 + fScreenWidth)
  {
    // wants to be wider than we allow, so fix
    float fRequiredWidth = (float)rd.right - rd.left;
    if (rs.right <= rs.left) rs.right = rs.left+1;
    float fHorizScale = fRequiredWidth / (float)(rs.right - rs.left);
    float fNewWidth = fScreenWidth / fHorizScale;
    rs.left = (rs.right - rs.left - (int)fNewWidth) / 2;
    rs.right = rs.left + (int)fNewWidth;
    rd.left = (int)fOffsetX1;
    rd.right = (int)(fOffsetX1 + fScreenWidth);
  }
  if (rd.top < fOffsetY1 || rd.bottom > fOffsetY1 + fScreenHeight)
  {
    // wants to be taller than we allow, so fix
    float fRequiredHeight = (float)rd.bottom - rd.top;
    if (rs.bottom <= rs.top) rs.bottom = rs.top+1;
    float fVertScale = fRequiredHeight / (float)(rs.bottom - rs.top);
    float fNewHeight = fScreenHeight / fVertScale;
    rs.top = (rs.bottom - rs.top - (int)fNewHeight) / 2;
    rs.bottom = rs.top + (int)fNewHeight;
    rd.top = (int)fOffsetY1;
    rd.bottom = (int)(fOffsetY1 + fScreenHeight);
  }
}
XnStatus XnSensorImageStream::ValidateMode()
{
	XnStatus nRetVal = XN_STATUS_OK;
	
	// validity checks
	XnIOImageFormats nInputFormat = (XnIOImageFormats)m_InputFormat.GetValue();
	XnOutputFormats nOutputFormat = GetOutputFormat();
	XnResolutions nResolution = GetResolution();
	XnUInt32 nFPS = GetFPS();

	// check that input format matches output format
	switch (nOutputFormat)
	{
	case XN_OUTPUT_FORMAT_RGB24:
		if (nInputFormat != XN_IO_IMAGE_FORMAT_YUV422 &&
			nInputFormat != XN_IO_IMAGE_FORMAT_UNCOMPRESSED_YUV422 &&
			nInputFormat != XN_IO_IMAGE_FORMAT_BAYER)
		{
			// --avin mod--
			//XN_LOG_WARNING_RETURN(XN_STATUS_DEVICE_BAD_PARAM, XN_MASK_DEVICE_SENSOR, "Input format %d cannot be converted to RGB24!", nInputFormat);
		}
		break;
	case XN_OUTPUT_FORMAT_YUV422:
		if (nInputFormat != XN_IO_IMAGE_FORMAT_YUV422 &&
			nInputFormat != XN_IO_IMAGE_FORMAT_UNCOMPRESSED_YUV422)
		{
			XN_LOG_WARNING_RETURN(XN_STATUS_DEVICE_BAD_PARAM, XN_MASK_DEVICE_SENSOR, "Input format %d cannot be converted to YUV422!", nInputFormat);
		}
		break;
	case XN_OUTPUT_FORMAT_JPEG:
		if (nInputFormat != XN_IO_IMAGE_FORMAT_JPEG)
		{
			XN_LOG_WARNING_RETURN(XN_STATUS_DEVICE_BAD_PARAM, XN_MASK_DEVICE_SENSOR, "Input format %d cannot be converted to JPEG!", nInputFormat);
		}
		break;
	case XN_OUTPUT_FORMAT_GRAYSCALE8:
		if (nInputFormat != XN_IO_IMAGE_FORMAT_UNCOMPRESSED_GRAY8 &&
			nInputFormat != XN_IO_IMAGE_FORMAT_UNCOMPRESSED_BAYER &&
			nInputFormat != XN_IO_IMAGE_FORMAT_BAYER)
		{
			XN_LOG_WARNING_RETURN(XN_STATUS_DEVICE_BAD_PARAM, XN_MASK_DEVICE_SENSOR, "Input format %d cannot be converted to Gray8!", nInputFormat);
		}
		break;
	default:
		// we shouldn't have reached here. There's a check at SetOutputFormat.
		XN_ASSERT(FALSE);
		XN_LOG_WARNING_RETURN(XN_STATUS_DEVICE_BAD_PARAM, XN_MASK_DEVICE_SENSOR, "Unsupported image output format: %d!", nOutputFormat);
	}

	// now check that mode exists
	XnCmosPreset preset = { (XnUInt16)nInputFormat, (XnUInt16)nResolution, (XnUInt16)nFPS };
	nRetVal = ValidateSupportedMode(preset);
	XN_IS_STATUS_OK(nRetVal);

	return (XN_STATUS_OK);
}
Example #14
        inline std::pair<double, bool> EstimateDistance(const Eigen::Vector3d& location) const
        {
            const std::vector<int64_t> indices = LocationToGridIndex(location);
            if (indices.size() == 3)
            {
                const Eigen::Vector3d gradient = EigenHelpers::StdVectorDoubleToEigenVector3d(GetGradient(indices[0], indices[1], indices[2], true));
                const std::vector<double> cell_location = GridIndexToLocation(indices[0], indices[1], indices[2]);
                const Eigen::Vector3d cell_location_to_our_location(location.x() - cell_location[0], location.y() - cell_location[1], location.z() - cell_location[2]);
                const double nominal_distance = (double)distance_field_.GetImmutable(indices[0], indices[1], indices[2]).first;
                const double corrected_nominal_distance = (nominal_distance >= 0.0) ? nominal_distance - (GetResolution() * 0.5) : nominal_distance + (GetResolution() * 0.5);
                const double cell_location_to_our_location_dot_gradient = cell_location_to_our_location.dot(gradient);
                //const double gradient_dot_gradient = gradient.dot(gradient); // == squared norm of gradient
                //const Eigen::Vector3d cell_location_to_our_location_projected_on_gradient = (cell_location_to_our_location_dot_gradient / gradient.dot(gradient)) * gradient;
                //const double distance_adjustment = cell_location_to_our_location_projected_on_gradient.norm();
                const double distance_adjustment = cell_location_to_our_location_dot_gradient / gradient.norm();
                const double distance_estimate = corrected_nominal_distance + distance_adjustment;
                if ((corrected_nominal_distance >= 0.0) == (distance_estimate >= 0.0))
                {
                    return std::make_pair(distance_estimate, true);
                }
                else if (corrected_nominal_distance >= 0.0)
                {
                    const double fudge_distance = GetResolution() * 0.0625;
                    return std::make_pair(fudge_distance, true);
                }
                else
                {
                    const double fudge_distance = GetResolution() * -0.0625;
                    return std::make_pair(fudge_distance, true);
                }
//                else
//                {
//                    const double real_distance_adjustment = GetResolution() * 0.20710678118654757;
//                    const double revised_corrected_nominal_distance = (nominal_distance >= 0.0) ? nominal_distance - real_distance_adjustment : nominal_distance + real_distance_adjustment;
//                    const double revised_distance_estimate = revised_corrected_nominal_distance + distance_adjustment;
//                    return std::make_pair(revised_distance_estimate, true);
//                }
            }
            else
            {
                return std::make_pair((double)distance_field_.GetOOBValue(), false);
            }
        }
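The distance_adjustment term above is the offset from the cell centre projected onto the gradient direction, i.e. (p - c) . g / |g|. A minimal numeric sketch of that step, with made-up values and assuming Eigen is available as in the example:

#include <Eigen/Dense>
#include <cstdio>

int main()
{
    const Eigen::Vector3d offset(0.02, 0.01, 0.0);  // query point minus cell centre
    const Eigen::Vector3d gradient(1.0, 0.0, 0.0);  // SDF gradient at that cell
    const double nominal_distance = 0.10;           // value stored at the cell centre
    const double resolution = 0.05;                 // assumed SDF resolution

    // Same centre-of-cell correction as above (positive-distance branch only).
    const double corrected = nominal_distance - (resolution * 0.5);
    // Project the offset onto the gradient direction: (p - c) . g / |g|
    const double adjustment = offset.dot(gradient) / gradient.norm();
    std::printf("estimated distance = %f\n", corrected + adjustment); // 0.075 + 0.02 = 0.095
    return 0;
}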
Example #15
uint32 MultimediaClock::SetResolution(uint32 ms)
{
	if(m_CurrentPeriod == ms)
	{
		return m_CurrentPeriod;
	}
	Cleanup();
	SetPeriod(ms);
	return GetResolution();
}
Example #16
XnStatus XnSensorIRStream::CalcRequiredSize(XnUInt32* pnRequiredSize) const
{
	// in IR, in all resolutions except SXGA, we get an additional 8 lines
	XnUInt32 nYRes = GetYRes();
	if (GetResolution() != XN_RESOLUTION_SXGA)
	{
		nYRes += 8;
	}

	*pnRequiredSize = GetXRes() * nYRes * GetBytesPerPixel();
	return XN_STATUS_OK;
}
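As a worked example (assuming VGA IR at 2 bytes per pixel), the required buffer would be 640 * (480 + 8) * 2 = 624,640 bytes; only SXGA skips the extra 8 lines and uses the plain XRes * YRes * bytes-per-pixel product.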
Example #17
 inline bool GradientIsEffectiveFlat(const Eigen::Vector3d& gradient) const
 {
     // The gradient is effectively flat (e.g. at a local extremum) if the absolute value of every component (x,y,z) is less than 1/2 the SDF resolution
     double half_resolution = GetResolution() * 0.5;
     if (fabs(gradient.x()) <= half_resolution && fabs(gradient.y()) <= half_resolution && fabs(gradient.z()) <= half_resolution)
     {
         return true;
     }
     else
     {
         return false;
     }
 }
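Combined with GetNextFromGradient from Example #10, this flatness test lets one walk cell by cell along the gradient until it levels off. A free-standing sketch of that loop follows; the GridIndex struct, the synthetic Gradient field, and the 0.05 resolution are stand-ins rather than the SDF class API, and Eigen is assumed to be available as in the examples:

#include <Eigen/Dense>
#include <cmath>
#include <cstdio>

struct GridIndex { int x, y, z; };  // stand-in for VoxelGrid::GRID_INDEX

// Synthetic gradient field pointing towards the cell (10, 10, 10).
static Eigen::Vector3d Gradient(const GridIndex& i)
{
    return Eigen::Vector3d(10 - i.x, 10 - i.y, 10 - i.z) * 0.05;
}

int main()
{
    const double half_resolution = 0.05 * 0.5;  // assumed resolution * 0.5
    GridIndex index{0, 0, 0};
    for (int step = 0; step < 100; ++step)
    {
        const Eigen::Vector3d g = Gradient(index);
        // Same "effectively flat" test as above: every component within half a cell.
        if (std::fabs(g.x()) <= half_resolution && std::fabs(g.y()) <= half_resolution && std::fabs(g.z()) <= half_resolution)
            break;
        // Same stepping rule as GetNextFromGradient (best fit of the 26 neighbors).
        if (g.x() > half_resolution) index.x++; else if (g.x() < -half_resolution) index.x--;
        if (g.y() > half_resolution) index.y++; else if (g.y() < -half_resolution) index.y--;
        if (g.z() > half_resolution) index.z++; else if (g.z() < -half_resolution) index.z--;
    }
    std::printf("stopped at (%d, %d, %d)\n", index.x, index.y, index.z);
    return 0;
}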
Example #18
void CXBMCRenderManager::UpdateResolution()
{
  if (m_bReconfigured)
  {
    CRetakeLock<CExclusiveLock> lock(m_sharedSection);
    if (g_graphicsContext.IsFullScreenVideo() && g_graphicsContext.IsFullScreenRoot())
    {
      RESOLUTION res = GetResolution();
      g_graphicsContext.SetVideoResolution(res);
    }
    m_bReconfigured = false;
  }
}
Example #19
XnStatus XnSensorDepthStream::DecideFirmwareRegistration(XnBool bRegistration, XnProcessingType registrationType, XnResolutions nRes)
{
	XnStatus nRetVal = XN_STATUS_OK;
	
	// start with request
	XnBool bFirmwareRegistration = bRegistration;

	if (bFirmwareRegistration)
	{
		// old chip (PS1000) does not support registration for VGA
		XnBool bHardwareRegistrationSupported = 
			m_Helper.GetPrivateData()->ChipInfo.nChipVer != XN_SENSOR_CHIP_VER_PS1000 || nRes == XN_RESOLUTION_QVGA;

		switch (registrationType)
		{
		case XN_PROCESSING_HARDWARE:
			if (!bHardwareRegistrationSupported)
			{
				XN_LOG_WARNING_RETURN(XN_STATUS_DEVICE_BAD_PARAM, XN_MASK_DEVICE_SENSOR, "Sensor does not support hardware registration for current configuration!");
			}
			break;
		case XN_PROCESSING_SOFTWARE:
			if (GetResolution() != XN_RESOLUTION_VGA)
			{
				XN_LOG_WARNING_RETURN(XN_STATUS_DEVICE_BAD_PARAM, XN_MASK_DEVICE_SENSOR, "Software registration is only supported for VGA resolution!");
			}
			bFirmwareRegistration = FALSE;
			break;
		case XN_PROCESSING_DONT_CARE:
			bFirmwareRegistration = bHardwareRegistrationSupported;
			break;
		default:
			XN_LOG_ERROR_RETURN(XN_STATUS_DEVICE_BAD_PARAM, XN_MASK_DEVICE_SENSOR, "Unknown registration type: %d", registrationType);
		}
	}

	if (bRegistration && !bFirmwareRegistration)
	{
		// make sure software registration is initialized
		if (!m_Registration.IsInitialized())
		{
			nRetVal = m_Registration.Init(m_Helper.GetPrivateData(), this, GetDepthToShiftTable());
			XN_IS_STATUS_OK(nRetVal);
		}
	}

	nRetVal = m_Helper.SimpleSetFirmwareParam(m_FirmwareRegistration, (XnUInt16)bFirmwareRegistration);
	XN_IS_STATUS_OK(nRetVal);

	return (XN_STATUS_OK);
}
Example #20
float Tile::GetMaxHeight() const {
  // lowest() (not min(), which is the smallest positive float) acts as the "not yet computed" sentinel
  static float max = std::numeric_limits<float>::lowest();
  if (max == std::numeric_limits<float>::lowest()) {
    const int res = GetResolution();
    for (int i = 1; i < res; ++i) {
      max = std::max(max, vertices_[i].y);
    }
    if (num_lod_ > 0) {
      for (int dir = 0; dir < 4; ++dir) {
        max = std::max(max, children_[dir]->GetMaxHeight());
      }
    }
  }
  return max;
}
XnStatus XnSensorDepthStream::ConfigureStreamImpl()
{
	XnStatus nRetVal = XN_STATUS_OK;

	xnUSBShutdownReadThread(GetHelper()->GetPrivateData()->pSpecificDepthUsb->pUsbConnection->UsbEp);

	nRetVal = SetActualRead(TRUE);
	XN_IS_STATUS_OK(nRetVal);

	XN_IS_STATUS_OK(m_Helper.ConfigureFirmware(m_InputFormat));
	XN_IS_STATUS_OK(m_Helper.ConfigureFirmware(ResolutionProperty()));
	XN_IS_STATUS_OK(m_Helper.ConfigureFirmware(FPSProperty()));
	XN_IS_STATUS_OK(m_Helper.ConfigureFirmware(m_HoleFilter));
	XN_IS_STATUS_OK(m_Helper.ConfigureFirmware(m_Gain));

	// we need to turn decimation on when resolution is QVGA, and FPS is different than 60
	// NOTE: this is ugly as hell. This logic should be moved to firmware.
	XnBool bDecimation = (GetResolution() == XN_RESOLUTION_QVGA && GetFPS() != 60);
	nRetVal = GetFirmwareParams()->m_DepthDecimation.SetValue(bDecimation);
	XN_IS_STATUS_OK(nRetVal);

	XN_IS_STATUS_OK(m_Helper.ConfigureFirmware(m_FirmwareRegistration));
	XN_IS_STATUS_OK(m_Helper.ConfigureFirmware(m_FirmwareMirror));
	XN_IS_STATUS_OK(m_Helper.ConfigureFirmware(m_GMCMode));
	XN_IS_STATUS_OK(m_Helper.ConfigureFirmware(m_WhiteBalance));

	nRetVal = m_Helper.GetCmosInfo()->SetCmosConfig(XN_CMOS_TYPE_DEPTH, GetResolution(), GetFPS());
	XN_IS_STATUS_OK(nRetVal);

	// --avin mod--
	//Turn off the IR projector anti-cover thingy. I find it annoying and it's off on the XBox360 so it must be safe :-)
	//This is probably not the best way to do it, but adding it as a real param is too much work for me at the moment...
	XnHostProtocolSetParam(GetHelper()->GetPrivateData(), 0x105, 0);
	
	return XN_STATUS_OK;
}
Example #22
XnStatus XnSensorDepthStream::SetRegistrationType(XnProcessingType type)
{
	XnStatus nRetVal = XN_STATUS_OK;

	if (type != m_RegistrationType.GetValue())
	{
		nRetVal = DecideFirmwareRegistration((XnBool)m_DepthRegistration.GetValue(), type, GetResolution());
		XN_IS_STATUS_OK(nRetVal);

		nRetVal = m_RegistrationType.UnsafeUpdateValue(type);
		XN_IS_STATUS_OK(nRetVal);
	}

	return (XN_STATUS_OK);
}
Example #23
    void
    ImageLoader::removeUnusedAlpha() {
        if (!_isI60 && GetBitsPerPixel() == 32 && !HasAlpha()) {
            // Compress unused fourth channel
            AC_DEBUG << "ImageLoader removing unused fourth channel";

            unsigned myHeight = GetHeight();
            unsigned myWidth  = GetWidth();
            //unsigned myLineStride = GetBytesPerLine();

            asl::Ptr<Block> myDestinationBlock = asl::Ptr<Block>(new Block());
            myDestinationBlock->resize(myHeight * myWidth * 3);

            unsigned char ** myLineArray   = GetLineArray();
            unsigned char *  myDestination = myDestinationBlock->begin();

            for (unsigned i = 0; i < myHeight; ++i) {
                unsigned char * mySource = *myLineArray;
                for (unsigned j = 0; j < myWidth; ++j) {
                    myDestination[0]  = mySource[0];
                    myDestination[1]  = mySource[1];
                    myDestination[2]  = mySource[2];

                    mySource      += 4;
                    myDestination += 3;
                }
                myLineArray++;
            }

            // Update internal representation
            switch (_myEncoding) {
                case RGBA :
                    _myEncoding = RGB;
                    break;
                case BGRA :
                    _myEncoding = BGR;
                    break;
                default:
                    throw ImageLoaderException("Unsupported pixel encoding", PLUS_FILE_LINE);
            }
            _myData = myDestinationBlock;
            PLPixelFormat myPixelFormat;
            mapPixelEncodingToFormat(_myEncoding, myPixelFormat);
            SetBmpInfo(GetSize(), GetResolution(), myPixelFormat);
            updateLineArray();
        }
    }
XnStatus XnSensorImageStream::ValidateMode()
{
	XnStatus nRetVal = XN_STATUS_OK;
	
	// validity checks
	XnIOImageFormats nInputFormat = (XnIOImageFormats)m_InputFormat.GetValue();
	XnOutputFormats nOutputFormat = GetOutputFormat();
	XnResolutions nResolution = GetResolution();

	// Avin: Removed to enable 1280x1024 Image
	// check resolution
	/*
	if ((nResolution == XN_RESOLUTION_UXGA || nResolution == XN_RESOLUTION_SXGA) && nInputFormat != XN_IO_IMAGE_FORMAT_BAYER)
	{
		XN_LOG_WARNING_RETURN(XN_STATUS_DEVICE_BAD_PARAM, XN_MASK_DEVICE_SENSOR, "UXGA resolution is only supported with BAYER input!");
	}
	*/

	// check output format
	if (nOutputFormat == XN_OUTPUT_FORMAT_GRAYSCALE8 && nInputFormat != XN_IO_IMAGE_FORMAT_BAYER)
	{
		XN_LOG_WARNING_RETURN(XN_STATUS_DEVICE_BAD_PARAM, XN_MASK_DEVICE_SENSOR, "Grayscale8 output requires BAYER input!");
	}
	else if (nOutputFormat == XN_OUTPUT_FORMAT_YUV422 && nInputFormat != XN_IO_IMAGE_FORMAT_YUV422 && nInputFormat != XN_IO_IMAGE_FORMAT_UNCOMPRESSED_YUV422)
	{
		XN_LOG_WARNING_RETURN(XN_STATUS_DEVICE_BAD_PARAM, XN_MASK_DEVICE_SENSOR, "YUV output requires YUV input!");
	}

	// check input format
	if (nInputFormat == XN_IO_IMAGE_FORMAT_BAYER && nResolution != XN_RESOLUTION_UXGA && nResolution != XN_RESOLUTION_SXGA)
	{
		XN_LOG_WARNING_RETURN(XN_STATUS_DEVICE_BAD_PARAM, XN_MASK_DEVICE_SENSOR, "BAYER input requires SXGA/UXGA resolution!");
	}
	else if (nInputFormat == XN_IO_IMAGE_FORMAT_JPEG && nOutputFormat != XN_OUTPUT_FORMAT_RGB24)
	{
		XN_LOG_WARNING_RETURN(XN_STATUS_DEVICE_BAD_PARAM, XN_MASK_DEVICE_SENSOR, "Jpeg input is only supported for RGB24 output!");
	}

	return (XN_STATUS_OK);
}
Example #25
    void
    ImageLoader::loadI60File(asl::Ptr<ReadableBlockHandle> theImageBlock) {
        _myImageMatrix.makeIdentity();
        I60Header myHeader;
        theImageBlock->getBlock().readData(myHeader, 0);

        if (!myHeader.checkMagicNumber()) {
            throw ImageLoaderException(string("Image ") + _myFilename +
                " has a wrong magic number. '" + I60_MAGIC_NUMBER + "' expected.", PLUS_FILE_LINE);
        }
        if (!myHeader.checkVersion()) {
            throw ImageLoaderException(string("Image ") + _myFilename + " file format version: " +
                as_string(myHeader.version) + " does not match current reader version: " +
                as_string(CURRENT_IMAGE_FORMAT_VERSION), PLUS_FILE_LINE);
        }

        _myHeaderSize = myHeader.headersize;
        _myEncoding   = PixelEncoding(myHeader.encoding);
        _isI60        = true;

        unsigned myWidthPowerOfTwo  = asl::nextPowerOfTwo(myHeader.width);
        unsigned myHeightPowerOfTwo = asl::nextPowerOfTwo(myHeader.height);

        _myImageMatrix.scale(Vector3f(float(myHeader.width) / myWidthPowerOfTwo,
                                        float(myHeader.height) / myHeightPowerOfTwo,
                                        1.0f));
        // maybe we should cut off the i60 header here?
        unsigned myBlockSize = theImageBlock->getBlock().size();
        _myData = asl::Ptr<Block>(new Block());
        _myData->resize(myBlockSize - sizeof(I60Header));
        std::copy(theImageBlock->getBlock().begin()+sizeof(I60Header),
                  theImageBlock->getBlock().end(), _myData->begin());

        // TODO: Add support for other compression formats
        SetBmpInfo(PLPoint(myWidthPowerOfTwo, myHeightPowerOfTwo * myHeader.layercount),
            GetResolution(), PLPixelFormat::L1 /* only correct for DXT5 */);
    }
Example #26
XnStatus XnSensorDepthStream::Init()
{
	XnStatus nRetVal = XN_STATUS_OK;

	nRetVal = SetBufferPool(&m_BufferPool);
	XN_IS_STATUS_OK(nRetVal);

	// init base
	nRetVal = XnDepthStream::Init();
	XN_IS_STATUS_OK(nRetVal);

	m_InputFormat.UpdateSetCallback(SetInputFormatCallback, this);
	m_DepthRegistration.UpdateSetCallback(SetRegistrationCallback, this);
	m_HoleFilter.UpdateSetCallback(SetHoleFilterCallback, this);
	m_WhiteBalance.UpdateSetCallback(SetWhiteBalanceCallback, this);
	m_Gain.UpdateSetCallback(SetGainCallback, this);
	m_RegistrationType.UpdateSetCallback(SetRegistrationTypeCallback, this);
	m_AGCBin.UpdateSetCallback(SetAGCBinCallback, this);
	m_AGCBin.UpdateGetCallback(GetAGCBinCallback, this);
	m_GMCMode.UpdateSetCallback(SetGMCModeCallback, this);


	XN_VALIDATE_ADD_PROPERTIES(this, &m_InputFormat, &m_DepthRegistration, &m_HoleFilter, 
		&m_WhiteBalance, &m_Gain, &m_AGCBin, &m_SharedBufferName, &m_ActualRead, &m_GMCMode, 
		&m_RegistrationType);


	if (m_Helper.GetPrivateData()->pSensor->IsLowBandwidth())
	{
		nRetVal = m_InputFormat.UnsafeUpdateValue(XN_IO_DEPTH_FORMAT_COMPRESSED_PS);
		XN_IS_STATUS_OK(nRetVal);
	}

	// set base properties default values
	nRetVal = ResolutionProperty().UnsafeUpdateValue(XN_DEPTH_STREAM_DEFAULT_RESOLUTION);
	XN_IS_STATUS_OK(nRetVal);

	nRetVal = FPSProperty().UnsafeUpdateValue(XN_DEPTH_STREAM_DEFAULT_FPS);
	XN_IS_STATUS_OK(nRetVal);

	nRetVal = OutputFormatProperty().UnsafeUpdateValue(XN_DEPTH_STREAM_DEFAULT_OUTPUT_FORMAT);
	XN_IS_STATUS_OK(nRetVal);

	nRetVal = ParamCoefficientProperty().UnsafeUpdateValue(XN_SHIFTS_PARAM_COEFFICIENT);
	XN_IS_STATUS_OK(nRetVal);

	nRetVal = ShiftScaleProperty().UnsafeUpdateValue(XN_SHIFTS_SHIFT_SCALE);
	XN_IS_STATUS_OK(nRetVal);

	// read some data from firmware
	XnDepthInformation DepthInformation;
	nRetVal = XnHostProtocolAlgorithmParams(m_Helper.GetPrivateData(), XN_HOST_PROTOCOL_ALGORITHM_DEPTH_INFO, &DepthInformation, sizeof(XnDepthInformation), XN_RESOLUTION_VGA, 30);
	XN_IS_STATUS_OK(nRetVal);

	nRetVal = ConstShiftProperty().UnsafeUpdateValue(DepthInformation.nConstShift);
	XN_IS_STATUS_OK(nRetVal);

	nRetVal = ZeroPlaneDistanceProperty().UnsafeUpdateValue(m_Helper.GetFixedParams()->GetZeroPlaneDistance());
	XN_IS_STATUS_OK(nRetVal);

	nRetVal = ZeroPlanePixelSizeProperty().UnsafeUpdateValue(m_Helper.GetFixedParams()->GetZeroPlanePixelSize());
	XN_IS_STATUS_OK(nRetVal);

	nRetVal = EmitterDCmosDistanceProperty().UnsafeUpdateValue(m_Helper.GetFixedParams()->GetEmitterDCmosDistance());
	XN_IS_STATUS_OK(nRetVal);

	nRetVal = GetDCmosRCmosDistanceProperty().UnsafeUpdateValue(m_Helper.GetFixedParams()->GetDCmosRCmosDistance());
	XN_IS_STATUS_OK(nRetVal);

	// init helper
	nRetVal = m_Helper.Init(this, this);
	XN_IS_STATUS_OK(nRetVal);

	if (m_Helper.GetFirmwareVersion() < XN_SENSOR_FW_VER_3_0)
	{
		nRetVal = m_GMCMode.UnsafeUpdateValue(FALSE);
		XN_IS_STATUS_OK(nRetVal);
	}


	if (m_Helper.GetFirmwareVersion() < XN_SENSOR_FW_VER_4_0)
	{
		nRetVal = m_WhiteBalance.UnsafeUpdateValue(FALSE);
		XN_IS_STATUS_OK(nRetVal);
	}

	// on old firmwares, the host decides on the default gain. On new firmwares, we read it from firmware
	if (m_Helper.GetFirmware()->GetInfo()->nFWVer > XN_SENSOR_FW_VER_1_2)
	{
		nRetVal = m_Gain.UnsafeUpdateValue(GetFirmwareParams()->m_DepthGain.GetValue());
		XN_IS_STATUS_OK(nRetVal);
	}

	// registration
	XnCallbackHandle hCallbackDummy;	
	nRetVal = ResolutionProperty().OnChangeEvent().Register(DecideFirmwareRegistrationCallback, this, &hCallbackDummy);
	XN_IS_STATUS_OK(nRetVal);

	nRetVal = DecideFirmwareRegistration((XnBool)m_DepthRegistration.GetValue(), (XnProcessingType)m_RegistrationType.GetValue(), GetResolution());
	XN_IS_STATUS_OK(nRetVal);

	// data processor
	nRetVal = m_Helper.RegisterDataProcessorProperty(m_InputFormat);
	XN_IS_STATUS_OK(nRetVal);

	nRetVal = m_Helper.RegisterDataProcessorProperty(ResolutionProperty());
	XN_IS_STATUS_OK(nRetVal);

	// pixel size factor
	nRetVal = GetFirmwareParams()->m_ReferenceResolution.OnChangeEvent().Register(DecidePixelSizeFactorCallback, this, &m_hReferenceSizeChangedCallback);
	XN_IS_STATUS_OK(nRetVal);

	nRetVal = DecidePixelSizeFactor();
	XN_IS_STATUS_OK(nRetVal);


	// register supported modes
	XnCmosPreset aSupportedModes[] = 
	{
		{ XN_IO_DEPTH_FORMAT_COMPRESSED_PS, XN_RESOLUTION_QVGA, 30 },
		{ XN_IO_DEPTH_FORMAT_COMPRESSED_PS, XN_RESOLUTION_QVGA, 60 },
		{ XN_IO_DEPTH_FORMAT_COMPRESSED_PS, XN_RESOLUTION_VGA, 30 },
		{ XN_IO_DEPTH_FORMAT_UNCOMPRESSED_11_BIT, XN_RESOLUTION_QVGA, 30 },
		{ XN_IO_DEPTH_FORMAT_UNCOMPRESSED_11_BIT, XN_RESOLUTION_QVGA, 60 },
		{ XN_IO_DEPTH_FORMAT_UNCOMPRESSED_11_BIT, XN_RESOLUTION_VGA, 30 },
		{ XN_IO_DEPTH_FORMAT_UNCOMPRESSED_16_BIT, XN_RESOLUTION_QVGA, 30 },
		{ XN_IO_DEPTH_FORMAT_UNCOMPRESSED_16_BIT, XN_RESOLUTION_QVGA, 60 },
		{ XN_IO_DEPTH_FORMAT_UNCOMPRESSED_16_BIT, XN_RESOLUTION_VGA, 30 },
	};
	nRetVal = AddSupportedModes(aSupportedModes, sizeof(aSupportedModes)/sizeof(aSupportedModes[0]));
	XN_IS_STATUS_OK(nRetVal);

	if (m_Helper.GetFirmwareVersion() >= XN_SENSOR_FW_VER_5_2)
	{
		XnCmosPreset aSupportedModes25[] = 
		{
			{ XN_IO_DEPTH_FORMAT_COMPRESSED_PS, XN_RESOLUTION_QVGA, 25 },
			{ XN_IO_DEPTH_FORMAT_COMPRESSED_PS, XN_RESOLUTION_VGA, 25 },
			{ XN_IO_DEPTH_FORMAT_UNCOMPRESSED_11_BIT, XN_RESOLUTION_QVGA, 25 },
			{ XN_IO_DEPTH_FORMAT_UNCOMPRESSED_11_BIT, XN_RESOLUTION_VGA, 25 },
			{ XN_IO_DEPTH_FORMAT_UNCOMPRESSED_16_BIT, XN_RESOLUTION_QVGA, 25 },
			{ XN_IO_DEPTH_FORMAT_UNCOMPRESSED_16_BIT, XN_RESOLUTION_VGA, 25 },
		};
		nRetVal = AddSupportedModes(aSupportedModes25, sizeof(aSupportedModes25)/sizeof(aSupportedModes25[0]));
		XN_IS_STATUS_OK(nRetVal);
	}

	return (XN_STATUS_OK);
}
//--------------------------------------------------------------------------
void VeRenderWindowD3D12::SetupCompositorList(const VeChar8** ppcList,
	VeSizeT stNum, VeUInt32 u32ThreadNum, const VeChar8* pcHint) noexcept
{
	VeRendererD3D12& kRenderer = *VeMemberCast(
		&VeRendererD3D12::m_kRenderWindowList, m_kNode.get_list());

	VeVector<VeRenderer::FrameTechnique*> kTechList;
	{		
		for (VeSizeT i(0); i < stNum; ++i)
		{
			auto it = kRenderer.m_kCompositorMap.find(ppcList[i]);
			if (it != kRenderer.m_kCompositorMap.end())
			{
				VeRenderer::FrameTechnique* pkTech = nullptr;
				if (pcHint)
				{
					VeFixedString kHint = pcHint;
					for (auto& tech : it->second->m_kTechniqueList)
					{
						if (tech.m_kName == kHint)
						{
							pkTech = &tech;
							break;
						}
					}
				}
				if (pkTech)
				{
					kTechList.push_back(pkTech);
				}
				else if(it->second->m_kTechniqueList.size())
				{
					kTechList.push_back(&(it->second->m_kTechniqueList.front()));
				}
			}
		}
	}

	{
		m_kResourceMap.clear();
		VeStringMap<VeFloat64> kValueMap;
		kValueMap["screen_w"] = (VeFloat64)GetWidth();
		kValueMap["screen_h"] = (VeFloat64)GetHeight();

		for (auto pTech : kTechList)
		{
			for (auto& res : pTech->m_kResourceList)
			{
				auto it = m_kResourceMap.find(res.m_kName);
				if (it == m_kResourceMap.end())
				{
					VeUInt32 w = (VeUInt32)ve_parser.CalculateExpression(
						kValueMap, res.m_kWidth, 0);
					VeUInt32 h = (VeUInt32)ve_parser.CalculateExpression(
						kValueMap, res.m_kHeight, 0);
					VeUInt16 d = (VeUInt16)ve_parser.CalculateExpression(
						kValueMap, res.m_kDepth, 0);
					VeUInt16 m = (VeUInt16)ve_parser.CalculateExpression(
						kValueMap, res.m_kMipLevels, 0);
					VeUInt16 c = (VeUInt16)ve_parser.CalculateExpression(
						kValueMap, res.m_kCount, 0);
					VeUInt16 q = (VeUInt16)ve_parser.CalculateExpression(
						kValueMap, res.m_kQuality, 0);
					VeUInt32 u32Use(0);
					if (res.m_kSRVList.size()) u32Use |= VeRenderTexture::USEAGE_SRV;
					if (res.m_kRTVList.size()) u32Use |= VeRenderTexture::USEAGE_RTV;
					if (res.m_kDSVList.size()) u32Use |= VeRenderTexture::USEAGE_DSV;

					VeRenderTextureD3D12* pkTexture = VE_NEW VeRenderTextureD3D12(
						res.m_eDimension, (VeRenderTexture::Useage)u32Use,
						res.m_eFormat, w, h, d, m, c, q);
					pkTexture->Init(kRenderer);
					if (res.m_kSRVList.size())
					{
						pkTexture->SetSRVNum((VeUInt32)res.m_kSRVList.size());
						for (VeUInt32 i(0); i < res.m_kSRVList.size(); ++i)
						{
							auto& srv = res.m_kSRVList[i];
							pkTexture->SetSRV(i, srv.m_eType, srv.m_eFormat,
								srv.m_u32Param0, srv.m_u32Param1);
						}
					}
					if (res.m_kRTVList.size())
					{
						pkTexture->SetRTVNum((VeUInt32)res.m_kRTVList.size());
						for (VeUInt32 i(0); i < res.m_kRTVList.size(); ++i)
						{
							auto& rtv = res.m_kRTVList[i];
							pkTexture->SetRTV(i, rtv.m_eType, rtv.m_eFormat,
								rtv.m_u32Param0, rtv.m_u32Param1);
						}
					}
					if (res.m_kDSVList.size())
					{
						pkTexture->SetDSVNum((VeUInt32)res.m_kDSVList.size());
						for (VeUInt32 i(0); i < res.m_kDSVList.size(); ++i)
						{
							auto& dsv = res.m_kDSVList[i];
							pkTexture->SetDSV(i, dsv.m_eType, dsv.m_eFormat,
								dsv.m_u32Param0, dsv.m_u32Param1);
						}
					}
					m_kResourceMap[res.m_kName] = pkTexture;
				}
			}
		}
	}
	
	VeVector<VeStringMap<TargetCache>> kTargets;

	kTargets.resize(kTechList.size());
	for (VeSizeT i(kTechList.size() - 1); i < kTechList.size(); --i)
	{
		VeRenderer::FrameTechnique& kTech = *kTechList[i];
		for (VeSizeT j(0); j < kTech.m_kTargetList.size(); ++j)
		{
			VeRenderer::FrameTarget& kTarget = kTech.m_kTargetList[j];
			RecordRenderTargetPtr spTarget;
			VeVector<ViewData> kRTVList;
			ViewData kDSV;
			if (kTarget.m_eType == VeRenderer::TARGET_OUTPUT)
			{
				if (i == (kTechList.size() - 1))
				{
					spTarget = VE_NEW RecordRenderTarget();
					for (VeSizeT k(0); k < VeRendererD3D12::FRAME_COUNT; ++k)
					{
						spTarget->m_akRTVList[k].push_back(m_akFrameCache[k].m_hHandle);
						spTarget->m_ahDSV[k].ptr = 0;
					}
					kRTVList.resize(kRTVList.size() + 1);
					kRTVList.back().m_u32Width = GetWidth();
					kRTVList.back().m_u32Height = GetHeight();
					kRTVList.back().m_u32Depth = 1;
					kRTVList.back().m_u32SubResource = D3D12_RESOURCE_BARRIER_ALL_SUBRESOURCES;
				}
				else
				{
					auto it = kTargets[i + 1].find(kTarget.m_kName);
					if (it != kTargets[i + 1].end() && it->second.m_pkConfig->m_eType == VeRenderer::TARGET_INPUT)
					{
						spTarget = it->second.m_spRecorder;
					}
				}
			}
			else
			{
				spTarget = VE_NEW RecordRenderTarget();
				for (auto& rtv : kTarget.m_kRTVList)
				{
					auto it = m_kResourceMap.find(rtv.m_kResName);
					if (it != m_kResourceMap.end())
					{
						if (rtv.m_u32Index < it->second->m_kRTVList.size())
						{
							auto& kView = it->second->m_kRTVList[rtv.m_u32Index];
							for (VeSizeT k(0); k < VeRendererD3D12::FRAME_COUNT; ++k)
							{
								spTarget->m_akRTVList[k].push_back(kView.m_hCPUHandle);
							}
							kRTVList.resize(kRTVList.size() + 1);
							kRTVList.back().m_pkResource = it->second->m_pkResource;
							kRTVList.back().m_u32Width = kView.m_u32Width;
							kRTVList.back().m_u32Height = kView.m_u32Height;
							kRTVList.back().m_u32Depth = kView.m_u32Depth;
							kRTVList.back().m_u32SubResource = kView.m_u32SubResource;
						}
					}
				}
				auto it = m_kResourceMap.find(kTarget.m_kDSV.m_kResName);
				if (it != m_kResourceMap.end())
				{
					if (kTarget.m_kDSV.m_u32Index < it->second->m_kDSVList.size())
					{
						auto& kView = it->second->m_kDSVList[kTarget.m_kDSV.m_u32Index];
						for (VeSizeT k(0); k < VeRendererD3D12::FRAME_COUNT; ++k)
						{
							spTarget->m_ahDSV[k] = kView.m_hCPUHandle;
						}						
						kDSV.m_pkResource = it->second->m_pkResource;
						kDSV.m_u32Width = kView.m_u32Width;
						kDSV.m_u32Height = kView.m_u32Height;
						kDSV.m_u32Depth = kView.m_u32Depth;
						kDSV.m_u32SubResource = kView.m_u32SubResource;
					}
				}
			}

			if (spTarget)
			{
				auto& tar = kTargets[i][kTarget.m_kName];
				VE_ASSERT(!tar.m_pkConfig);
				tar.m_pkConfig = &kTarget;
				tar.m_spRecorder = spTarget;
				tar.m_kRTVList = std::move(kRTVList);
				tar.m_kDSV = kDSV;
			}
		}
	}

	RecordBarrierMap kBarriers;
	kBarriers.resize(kTechList.size());
	
	BarrierPathCache kResPathCache;
	VeMap<ID3D12Resource*, VeMap<VeUInt32, D3D12_RESOURCE_STATES>> kClickCache;

	for (VeSizeT i(0); i < kTechList.size(); ++i)
	{
		auto& kMap = kTargets[i];
		VeRenderer::FrameTechnique& kTech = *kTechList[i];
		kBarriers[i].resize(kTech.m_kClickList.size());
		for (VeSizeT j(0); j < kTech.m_kClickList.size(); ++j)
		{			
			VeRenderer::FrameClick& kClick = kTech.m_kClickList[j];
			auto it = kMap.find(kClick.m_kTarget);
			if (it == kMap.end()) continue;
			auto& tar = it->second;
			kClickCache.clear();
			for (auto rtv : tar.m_kRTVList)
			{
				kClickCache[rtv.m_pkResource][rtv.m_u32SubResource] = D3D12_RESOURCE_STATE_RENDER_TARGET;
			}
			if(tar.m_kDSV.m_pkResource)
			{
				kClickCache[tar.m_kDSV.m_pkResource][tar.m_kDSV.m_u32SubResource] = D3D12_RESOURCE_STATE_DEPTH_WRITE;
			}
			for (auto& con : kClick.m_kContextList)
			{
				auto itRes = m_kResourceMap.find(con);
				if (itRes != m_kResourceMap.end())
				{					
					auto& cache = kClickCache[itRes->second->m_pkResource];
					cache.clear();
					cache[D3D12_RESOURCE_BARRIER_ALL_SUBRESOURCES] = D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE;
				}
			}
			for (auto& cache : kClickCache)
			{
				for (auto& state : cache.second)
				{
					BarrierEvent(kBarriers, kResPathCache, i, j,
						cache.first, state.first, state.second);
				}
			}
		}
	}

	VeUInt32 u32GCLIndex(0);
	bool bScene = false;

	m_kRecorderList.resize(1);
	m_kRecorderList.back().m_u32CommandIndex = 0;
	m_kRecorderList.back().m_kTaskList.clear();

	m_kProcessList.resize(1);
	m_kProcessList.back().m_eType = TYPE_EXCUTE;
	m_kProcessList.back().m_u16Start = 0;
	m_kProcessList.back().m_u16Num = 1;

	m_kCameraList.clear();

	RectMap kRectMap;

	RecordRenderTargetPtr spCurrent;
	RecordViewportPtr spViewport;
	RecordScissorRectPtr spScissorRect;
	
	for (VeSizeT i(0); i < kTechList.size(); ++i)
	{
		VeRenderer::FrameTechnique& kTech = *kTechList[i];
		auto& kMap = kTargets[i];
		for (VeSizeT j(0); j < kTech.m_kClickList.size(); ++j)
		{
			VeRenderer::FrameClick& kClick = kTech.m_kClickList[j];
			auto it = kMap.find(kClick.m_kTarget);
			if(it == kMap.end()) continue;
			TargetCache& kCache = it->second;

			auto& bar = kBarriers[i][j];
			if(bar.first)
			{
				if (bScene)
				{
					if (m_kProcessList.back().m_eType == TYPE_EXCUTE)
					{
						m_kProcessList.back().m_u16Num += 1;
					}
					else
					{
						m_kProcessList.resize(m_kProcessList.size() + 1);
						m_kProcessList.back().m_eType = TYPE_EXCUTE;
						m_kProcessList.back().m_u16Start = u32GCLIndex + 1;
						m_kProcessList.back().m_u16Num = 1;
					}
					++u32GCLIndex;
					m_kRecorderList.resize(m_kRecorderList.size() + 1);
					m_kRecorderList.back().m_u32CommandIndex = u32GCLIndex;
					m_kRecorderList.back().m_kTaskList.clear();
					bScene = false;
				}
				m_kRecorderList.back().m_kTaskList.push_back(bar.first);
			}			
			for (VeSizeT k(0); k < kClick.m_kPassList.size(); ++k)
			{
				switch (kClick.m_kPassList[k]->m_eType)
				{
				case VeRenderer::PASS_CLEAR:
				{
					if (bScene)
					{
						if (m_kProcessList.back().m_eType == TYPE_EXCUTE)
						{
							m_kProcessList.back().m_u16Num += 1;
						}
						else
						{
							m_kProcessList.resize(m_kProcessList.size() + 1);
							m_kProcessList.back().m_eType = TYPE_EXCUTE;
							m_kProcessList.back().m_u16Start = u32GCLIndex + 1;
							m_kProcessList.back().m_u16Num = 1;
						}
						++u32GCLIndex;
						m_kRecorderList.resize(m_kRecorderList.size() + 1);
						m_kRecorderList.back().m_u32CommandIndex = u32GCLIndex;
						m_kRecorderList.back().m_kTaskList.clear();
						bScene = false;
					}
					VeRenderer::FrameClear& kClear = (VeRenderer::FrameClear&)*kClick.m_kPassList[k];
					if (VE_MASK_HAS_ANY(kClear.m_u32Flags, VeRenderer::CLEAR_COLOR)
						&& kClear.m_kColorArray.size())
					{
						VeSizeT stColorNum = kCache.m_spRecorder->m_akRTVList->size();
						for (VeSizeT l(0); l < stColorNum; ++l)
						{
							RecordClearRTV* pkTask = VE_NEW RecordClearRTV();
							for (VeSizeT m(0); m < VeRendererD3D12::FRAME_COUNT; ++m)
							{
								pkTask->m_ahHandle[m] = kCache.m_spRecorder->m_akRTVList[m][l];
							}
							if (l < kClear.m_kColorArray.size())
							{
								pkTask->m_kColor = kClear.m_kColorArray[l];
							}
							else
							{
								pkTask->m_kColor = kClear.m_kColorArray.back();
							}
							m_kRecorderList.back().m_kTaskList.push_back(pkTask);
						}
					}
					if (VE_MASK_HAS_ANY(kClear.m_u32Flags, VeRenderer::CLEAR_DEPTH)
						|| VE_MASK_HAS_ANY(kClear.m_u32Flags, VeRenderer::CLEAR_STENCIL))
					{
						if (kCache.m_spRecorder->m_ahDSV->ptr)
						{
							RecordClearDSV* pkTask = VE_NEW RecordClearDSV();
							pkTask->m_eFlags = VE_TMIN(D3D12_CLEAR_FLAGS);
							if (VE_MASK_HAS_ANY(kClear.m_u32Flags, VeRenderer::CLEAR_DEPTH))
							{
								pkTask->m_eFlags |= D3D12_CLEAR_FLAG_DEPTH;
							}
							if (VE_MASK_HAS_ANY(kClear.m_u32Flags, VeRenderer::CLEAR_STENCIL))
							{
								pkTask->m_eFlags |= D3D12_CLEAR_FLAG_STENCIL;
							}
							pkTask->m_f32Depth = kClear.m_f32Depth;
							pkTask->m_u8Stencil = kClear.m_u8Stencil;
							for (VeSizeT m(0); m < VeRendererD3D12::FRAME_COUNT; ++m)
							{
								pkTask->m_ahHandle[m] = kCache.m_spRecorder->m_ahDSV[m];
							}							
							m_kRecorderList.back().m_kTaskList.push_back(pkTask);
						}
					}
				}
				break;
				case VeRenderer::PASS_QUAD:
				{
					if (bScene)
					{
						if (m_kProcessList.back().m_eType == TYPE_EXCUTE)
						{
							m_kProcessList.back().m_u16Num += 1;
						}
						else
						{
							m_kProcessList.resize(m_kProcessList.size() + 1);
							m_kProcessList.back().m_eType = TYPE_EXCUTE;
							m_kProcessList.back().m_u16Start = u32GCLIndex + 1;
							m_kProcessList.back().m_u16Num = 1;
						}
						++u32GCLIndex;
						m_kRecorderList.resize(m_kRecorderList.size() + 1);
						m_kRecorderList.back().m_u32CommandIndex = u32GCLIndex;
						m_kRecorderList.back().m_kTaskList.clear();
						bScene = false;
					}
					if (spCurrent != kCache.m_spRecorder)
					{
						m_kRecorderList.back().m_kTaskList.push_back(kCache.m_spRecorder);
						spCurrent = kCache.m_spRecorder;
					}
					auto& rect = Get(kRectMap, GetResolution(kCache));
					if (spViewport != rect.first)
					{
						m_kRecorderList.back().m_kTaskList.push_back(rect.first);
						spViewport = rect.first;
					}
					if (spScissorRect != rect.second)
					{
						m_kRecorderList.back().m_kTaskList.push_back(rect.second);
						spScissorRect = rect.second;
					}
					VeRenderer::FrameQuad& kQuad = (VeRenderer::FrameQuad&)*kClick.m_kPassList[k];					
					ID3D12RootSignature* pkRootSignature = nullptr;
					ID3D12PipelineState* pkPipelineState = nullptr;
					auto itRoot = kRenderer.m_kRootSignatureMap.find(kQuad.m_kRootSignature);
					if(itRoot != kRenderer.m_kRootSignatureMap.end())
					{
						pkRootSignature = VeSmartPointerCast(VeRendererD3D12::RootSignatureD3D12,
							itRoot->second)->m_pkRootSignature;
					}
					auto itPSO = kRenderer.m_kPipelineStateMap.find(kQuad.m_kPipelineState);
					if (itPSO != kRenderer.m_kPipelineStateMap.end())
					{
						pkPipelineState = VeSmartPointerCast(VeRendererD3D12::PipelineStateD3D12,
							itPSO->second)->m_pkPipelineState;
					}
					if (pkRootSignature && pkPipelineState)
					{
						RecordRenderQuad* pkQuad = VE_NEW RecordRenderQuad();
						pkQuad->m_pkRootSignature = pkRootSignature;
						pkQuad->m_pkPipelineState = pkPipelineState;
						for (auto& itTab : kQuad.m_kTable)
						{
							bool bFinished = false;
							for (auto context : kClick.m_kContextList)
							{
								if (context == itTab.second)
								{
									auto itRTRes = m_kResourceMap.find(itTab.second);
									if (itRTRes != m_kResourceMap.end())
									{
										VeRenderTextureD3D12* pkTex = itRTRes->second;
										if (pkTex->m_kSRVList.size())
										{
											pkQuad->m_kTable.push_back(std::make_pair(itTab.first, pkTex->m_kSRVList.front().m_hGPUHandle));
											bFinished = true;
										}										
										break;
									}
								}
							}
							if(!bFinished)
							{
								auto itRes = kRenderer.m_kResourceMap.find(itTab.second);
								if (itRes != kRenderer.m_kResourceMap.end())
								{
									switch (itRes->second->GetDimension())
									{
									case VeRenderResource::DIMENSION_TEXTURE1D:
									case VeRenderResource::DIMENSION_TEXTURE2D:
									case VeRenderResource::DIMENSION_TEXTURE3D:
									{
										VeRenderTextureD3D12* pkTex = VeDynamicCast(VeRenderTextureD3D12, itRes->second.p());
										if (pkTex && pkTex->m_kSRVList.size())
										{
											pkQuad->m_kTable.push_back(std::make_pair(itTab.first, pkTex->m_kSRVList.front().m_hGPUHandle));
										}
									}
									break;
									default:
										break;
									}
								}
							}							
						}
						m_kRecorderList.back().m_kTaskList.push_back(pkQuad);
					}
				}
				break;
				case VeRenderer::PASS_SCENE:
				{
					VeRenderer::FrameScene& kScene = (VeRenderer::FrameScene&)*kClick.m_kPassList[k];
					if (!bScene)
					{
						spCurrent = nullptr;
						spViewport = nullptr;
						spScissorRect = nullptr;
						bScene = true;
					}
					m_kCameraList.resize(m_kCameraList.size() + 1);
					m_kCameraList.back().m_u32CameraMask = VE_MASK(kScene.m_u32Camera);
					m_kCameraList.back().m_kStageList.resize(kScene.m_u32Stage);
					for (auto& stage : m_kCameraList.back().m_kStageList)
					{
						stage = u32GCLIndex + 1;
						if (m_kProcessList.back().m_eType == TYPE_EXCUTE)
						{
							m_kProcessList.back().m_u16Num += u32ThreadNum;
						}
						else
						{
							m_kProcessList.resize(m_kProcessList.size() + 1);
							m_kProcessList.back().m_eType = TYPE_EXCUTE;
							m_kProcessList.back().m_u16Start = stage;
							m_kProcessList.back().m_u16Num = u32ThreadNum;
						}
						u32GCLIndex += u32ThreadNum;
					}		
				}
				break;
				default:
					break;
				}
			}		
			if (bar.second)
			{
				if (bScene)
				{
					if (m_kProcessList.back().m_eType == TYPE_EXCUTE)
					{
						m_kProcessList.back().m_u16Num += 1;
					}
					else
					{
						m_kProcessList.resize(m_kProcessList.size() + 1);
						m_kProcessList.back().m_eType = TYPE_EXCUTE;
						m_kProcessList.back().m_u16Start = u32GCLIndex + 1;
						m_kProcessList.back().m_u16Num = 1;
					}
					++u32GCLIndex;
					m_kRecorderList.resize(m_kRecorderList.size() + 1);
					m_kRecorderList.back().m_u32CommandIndex = u32GCLIndex;
					m_kRecorderList.back().m_kTaskList.clear();
					bScene = false;
				}
				m_kRecorderList.back().m_kTaskList.push_back(bar.second);
			}
		}
	}

	auto it = kResPathCache.find(nullptr);
	if (it != kResPathCache.end())
	{
		if (it->second.size())
		{
			D3D12_RESOURCE_STATES eResPresent = GetState(it->second.back(), D3D12_RESOURCE_BARRIER_ALL_SUBRESOURCES);
			RecordBarrier* pkBarrier = VE_NEW RecordBarrier();
			for (VeSizeT i(0); i < VeRendererD3D12::FRAME_COUNT; ++i)
			{
				pkBarrier->m_akBarrierList[i].resize(pkBarrier->m_akBarrierList[i].size() + 1);
				pkBarrier->m_akBarrierList[i].back().Type = D3D12_RESOURCE_BARRIER_TYPE_TRANSITION;
				pkBarrier->m_akBarrierList[i].back().Flags = D3D12_RESOURCE_BARRIER_FLAG_NONE;
				pkBarrier->m_akBarrierList[i].back().Transition.pResource = m_akFrameCache[i].m_pkBufferResource;
				pkBarrier->m_akBarrierList[i].back().Transition.Subresource = D3D12_RESOURCE_BARRIER_ALL_SUBRESOURCES;
				pkBarrier->m_akBarrierList[i].back().Transition.StateBefore = eResPresent;
				pkBarrier->m_akBarrierList[i].back().Transition.StateAfter = D3D12_RESOURCE_STATE_PRESENT;
			}
			if (bScene)
			{
				if (m_kProcessList.back().m_eType == TYPE_EXCUTE)
				{
					m_kProcessList.back().m_u16Num += 1;
				}
				else
				{
					m_kProcessList.resize(m_kProcessList.size() + 1);
					m_kProcessList.back().m_eType = TYPE_EXCUTE;
					m_kProcessList.back().m_u16Start = u32GCLIndex + 1;
					m_kProcessList.back().m_u16Num = 1;
				}
				++u32GCLIndex;
				m_kRecorderList.resize(m_kRecorderList.size() + 1);
				m_kRecorderList.back().m_u32CommandIndex = u32GCLIndex;
				m_kRecorderList.back().m_kTaskList.clear();
				bScene = false;
			}
			m_kRecorderList.back().m_kTaskList.push_back(pkBarrier);
		}
	}

	m_u32ThreadNum = u32ThreadNum;
	ResizeDirectList(u32GCLIndex + 1);
}
Example #28
 inline std::vector<double> GetGradient(const int64_t x_index, const int64_t y_index, const int64_t z_index, const bool enable_edge_gradients=false) const
 {
     // Make sure the index is inside bounds
     if ((x_index >= 0) && (y_index >= 0) && (z_index >= 0) && (x_index < GetNumXCells()) && (y_index < GetNumYCells()) && (z_index < GetNumZCells()))
     {
         // Make sure the index we're trying to query is one cell in from the edge
         if ((x_index > 0) && (y_index > 0) && (z_index > 0) && (x_index < (GetNumXCells() - 1)) && (y_index < (GetNumYCells() - 1)) && (z_index < (GetNumZCells() - 1)))
         {
             double inv_twice_resolution = 1.0 / (2.0 * GetResolution());
             double gx = (Get(x_index + 1, y_index, z_index) - Get(x_index - 1, y_index, z_index)) * inv_twice_resolution;
             double gy = (Get(x_index, y_index + 1, z_index) - Get(x_index, y_index - 1, z_index)) * inv_twice_resolution;
             double gz = (Get(x_index, y_index, z_index + 1) - Get(x_index, y_index, z_index - 1)) * inv_twice_resolution;
             return std::vector<double>{gx, gy, gz};
         }
         // If we're on the edge, handle it specially
         else if (enable_edge_gradients)
         {
             // Get the "best" indices we can use
             int64_t low_x_index = std::max((int64_t)0, x_index - 1);
             int64_t high_x_index = std::min(GetNumXCells() - 1, x_index + 1);
             int64_t low_y_index = std::max((int64_t)0, y_index - 1);
             int64_t high_y_index = std::min(GetNumYCells() - 1, y_index + 1);
             int64_t low_z_index = std::max((int64_t)0, z_index - 1);
             int64_t high_z_index = std::min(GetNumZCells() - 1, z_index + 1);
             // Compute the axis increments
             double x_increment = (high_x_index - low_x_index) * GetResolution();
             double y_increment = (high_y_index - low_y_index) * GetResolution();
             double z_increment = (high_z_index - low_z_index) * GetResolution();
             // Compute the gradients for each axis - by default these are zero
             double gx = 0.0;
             double gy = 0.0;
             double gz = 0.0;
             // Only if the increments are non-zero do we compute the gradient of an axis
             if (x_increment > 0.0)
             {
                 double inv_x_increment = 1.0 / x_increment;
                 double high_x_value = Get(high_x_index, y_index, z_index);
                 double low_x_value = Get(low_x_index, y_index, z_index);
                 // Compute the gradient
                 gx = (high_x_value - low_x_value) * inv_x_increment;
             }
             if (y_increment > 0.0)
             {
                 double inv_y_increment = 1.0 / y_increment;
                 double high_y_value = Get(x_index, high_y_index, z_index);
                 double low_y_value = Get(x_index, low_y_index, z_index);
                 // Compute the gradient
                 gy = (high_y_value - low_y_value) * inv_y_increment;
             }
             if (z_increment > 0.0)
             {
                 double inv_z_increment = 1.0 / z_increment;
                 double high_z_value = Get(x_index, y_index, high_z_index);
                 double low_z_value = Get(x_index, y_index, low_z_index);
                 // Compute the gradient
                 gz = (high_z_value - low_z_value) * inv_z_increment;
             }
             // Assemble and return the computed gradient
             return std::vector<double>{gx, gy, gz};
         }
         // Edge gradients disabled, return no gradient
         else
         {
             return std::vector<double>();
         }
     }
     // If we're out of bounds, return no gradient
     else
     {
         return std::vector<double>();
     }
 }
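// For an interior cell GetGradient returns the central difference
// (value[i+1] - value[i-1]) / (2 * resolution) along each axis, and falls back to
// one-sided differences at the edges when enable_edge_gradients is set. A minimal
// usage sketch, assuming a grid object exposing the interface above and a
// hypothetical helper name GetUnitGradient:
#include <cmath>
#include <cstdint>
#include <vector>

template <typename Grid>
std::vector<double> GetUnitGradient(const Grid& grid, int64_t x, int64_t y, int64_t z)
{
    // an empty vector means no gradient was available (out of bounds, or an edge
    // cell with edge gradients disabled)
    std::vector<double> g = grid.GetGradient(x, y, z, true);
    if (g.size() != 3)
    {
        return std::vector<double>();
    }
    const double norm = std::sqrt(g[0] * g[0] + g[1] * g[1] + g[2] * g[2]);
    if (norm > 0.0)
    {
        // normalize to a unit vector pointing in the direction of increasing field value
        g[0] /= norm;
        g[1] /= norm;
        g[2] /= norm;
    }
    return g;
}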
void CLinuxRenderer::DrawAlpha(int x0, int y0, int w, int h, unsigned char *src, unsigned char *srca, int stride)
{
  // OSD is drawn after draw_slice / put_image
  // this means that the buffer has already been handed off to the RGB converter
  // solution: have separate OSD textures

  // if it's down at the bottom, use sub alpha blending
  //  m_SubsOnOSD = (y0 > (int)(rs.bottom - rs.top) * 4 / 5);

  //Sometimes happens when switching between fullscreen and small window
  if( w == 0 || h == 0 )
  {
    CLog::Log(LOGINFO, "Zero dimensions specified to DrawAlpha, skipping");
    return;
  }

  //use temporary rect for calculation to avoid messing with module-rect while other functions might be using it.
  DRAWRECT osdRect;
  RESOLUTION res = GetResolution();

  if (w > m_iOSDTextureWidth)
  {
    //delete osdtextures so they will be recreated with the correct width
    for (int i = 0; i < 2; ++i)
    {
      DeleteOSDTextures(i);
    }
    m_iOSDTextureWidth = w;
  }
  else
  {
    // clip to buffer
    if (w > m_iOSDTextureWidth) w = m_iOSDTextureWidth;
    if (h > g_settings.m_ResInfo[res].Overscan.bottom - g_settings.m_ResInfo[res].Overscan.top)
    {
      h = g_settings.m_ResInfo[res].Overscan.bottom - g_settings.m_ResInfo[res].Overscan.top;
    }
  }

  // scale to fit screen
  const RECT& rv = g_graphicsContext.GetViewWindow();

  // Vobsubs are defined to be 720 wide.
  // NOTE: This will not work nicely if we are allowing mplayer to render text based subs
  //       as it'll want to render within the pixel width it is outputting.

  float xscale;
  float yscale;

  if(true /*isvobsub*/) // xbox_video.cpp is fixed to 720x576 osd, so this should be fine
  { // vobsubs are given to us unscaled
    // scale them up to the full output, assuming vobsubs have the same
    // pixel aspect ratio as the movie, and are 720 pixels wide

    float pixelaspect = m_fSourceFrameRatio * m_iSourceHeight / m_iSourceWidth;
    xscale = (rv.right - rv.left) / 720.0f;
    yscale = xscale * g_settings.m_ResInfo[res].fPixelRatio / pixelaspect;
  }
  else
  { // text subs/osd assume square pixels, but will render to full size of view window
    // if mplayer could be fixed to use monitorpixelaspect when rendering its osd
    // this would give perfect output; however, monitorpixelaspect currently doesn't
    // work that way
    xscale = 1.0f;
    yscale = 1.0f;
  }
  
  // horizontal centering, and align to bottom of subtitles line
  osdRect.left = (float)rv.left + (float)(rv.right - rv.left - (float)w * xscale) / 2.0f;
  osdRect.right = osdRect.left + (float)w * xscale;
  float relbottom = ((float)(g_settings.m_ResInfo[res].iSubtitles - g_settings.m_ResInfo[res].Overscan.top)) / (g_settings.m_ResInfo[res].Overscan.bottom - g_settings.m_ResInfo[res].Overscan.top);
  osdRect.bottom = (float)rv.top + (float)(rv.bottom - rv.top) * relbottom;
  osdRect.top = osdRect.bottom - (float)h * yscale;

  int iOSDBuffer = (m_iOSDRenderBuffer + 1) % m_NumOSDBuffers;

  // if the new height is greater than the current osd-texture height, recreate the textures with the new height
  if (h > m_iOSDTextureHeight[iOSDBuffer])
  {
    CSingleLock lock(g_graphicsContext);

    DeleteOSDTextures(iOSDBuffer);
    m_iOSDTextureHeight[iOSDBuffer] = h;
    // Create osd textures for this buffer with new size
#if defined(HAS_SDL_OPENGL)
    m_pOSDYTexture[iOSDBuffer] = new CGLTexture(SDL_CreateRGBSurface(SDL_HWSURFACE, m_iOSDTextureWidth, m_iOSDTextureHeight[iOSDBuffer], 32, RMASK, GMASK, BMASK, AMASK),false,true);

    m_pOSDATexture[iOSDBuffer] = new CGLTexture(SDL_CreateRGBSurface(SDL_HWSURFACE, m_iOSDTextureWidth, m_iOSDTextureHeight[iOSDBuffer], 32, RMASK, GMASK, BMASK, AMASK),false,true);

    if (m_pOSDYTexture[iOSDBuffer] == NULL || m_pOSDATexture[iOSDBuffer] == NULL) 
#else
    m_pOSDYTexture[iOSDBuffer] = SDL_CreateRGBSurface(SDL_HWSURFACE, m_iOSDTextureWidth, m_iOSDTextureHeight[iOSDBuffer], 
		32, RMASK, GMASK, BMASK, AMASK);

    m_pOSDATexture[iOSDBuffer] = SDL_CreateRGBSurface(SDL_HWSURFACE, m_iOSDTextureWidth, m_iOSDTextureHeight[iOSDBuffer], 
		32, RMASK, GMASK, BMASK, AMASK);

    if (m_pOSDYTexture[iOSDBuffer] == NULL || m_pOSDATexture[iOSDBuffer] == NULL) 
#endif
    {
      CLog::Log(LOGERROR, "Could not create OSD/Sub textures");
      DeleteOSDTextures(iOSDBuffer);
      return;
    }
    else
    {
      CLog::Log(LOGDEBUG, "Created OSD textures (%i)", iOSDBuffer);
    }
  }

  // We know the resources have been used at this point (or they are the second buffer, which means they aren't in use anyway)
  //reset these so the gpu doesn't try to block on these
#if defined(HAS_SDL_OPENGL)

  int textureBytesSize = m_pOSDYTexture[iOSDBuffer]->textureWidth * m_pOSDYTexture[iOSDBuffer]->textureHeight * 4; 
  unsigned char *dst = new unsigned char[textureBytesSize];
  unsigned char *dsta = new unsigned char[textureBytesSize];

  //clear the textures
  memset(dst, 0, textureBytesSize);
  memset(dsta, 0, textureBytesSize);
  
   //draw the osd/subs
  int dstPitch = m_pOSDYTexture[iOSDBuffer]->textureWidth * 4;
  CopyAlpha(w, h, src, srca, stride, dst, dsta, dstPitch);

  m_pOSDYTexture[iOSDBuffer]->Update(m_pOSDYTexture[iOSDBuffer]->textureWidth,
				     m_pOSDYTexture[iOSDBuffer]->textureHeight,
				     dstPitch,
				     dst,
				     false);

  m_pOSDATexture[iOSDBuffer]->Update(m_pOSDATexture[iOSDBuffer]->textureWidth,
				     m_pOSDATexture[iOSDBuffer]->textureHeight,
				     dstPitch,
				     dsta, // alpha plane produced by CopyAlpha
				     false);
  delete [] dst;
  delete [] dsta;

#else
  if (SDL_LockSurface(m_pOSDYTexture[iOSDBuffer]) == 0 &&
      SDL_LockSurface(m_pOSDATexture[iOSDBuffer]) == 0) 
  {
    //clear the textures
    memset(m_pOSDYTexture[iOSDBuffer]->pixels, 0, m_pOSDYTexture[iOSDBuffer]->pitch*m_iOSDTextureHeight[iOSDBuffer]);
    memset(m_pOSDATexture[iOSDBuffer]->pixels, 0, m_pOSDATexture[iOSDBuffer]->pitch*m_iOSDTextureHeight[iOSDBuffer]);

    //draw the osd/subs
    CopyAlpha(w, h, src, srca, stride, (BYTE*)m_pOSDYTexture[iOSDBuffer]->pixels, (BYTE*)m_pOSDATexture[iOSDBuffer]->pixels, m_pOSDYTexture[iOSDBuffer]->pitch);
  }
  SDL_UnlockSurface(m_pOSDYTexture[iOSDBuffer]);
  SDL_UnlockSurface(m_pOSDATexture[iOSDBuffer]);
#endif

  //set module variables to calculated values
  m_OSDRect = osdRect;
  m_OSDWidth = (float)w;
  m_OSDHeight = (float)h;
  m_OSDRendered = true;
}
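// The vobsub branch above derives xscale from the fixed 720-pixel subtitle width and
// yscale from the display pixel ratio divided by the movie's pixel aspect. A minimal
// sketch of that arithmetic with hypothetical plain parameters (same formulas as the
// code above):
struct OsdScale { float x; float y; };

inline OsdScale ComputeVobsubScale(float fViewWidth,         // rv.right - rv.left
                                   float fDisplayPixelRatio, // g_settings.m_ResInfo[res].fPixelRatio
                                   float fSourceFrameRatio,  // m_fSourceFrameRatio
                                   float fSourceWidth,
                                   float fSourceHeight)
{
  // vobsubs are authored 720 pixels wide and share the movie's pixel aspect ratio
  const float fPixelAspect = fSourceFrameRatio * fSourceHeight / fSourceWidth;
  OsdScale kScale;
  kScale.x = fViewWidth / 720.0f;
  kScale.y = kScale.x * fDisplayPixelRatio / fPixelAspect;
  return kScale;
}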
Beispiel #30
0
void Context::init()
{
    gHMDCTX = ohmd_ctx_create();
    int num_devices = ohmd_ctx_probe(gHMDCTX);
    if(num_devices < 0){
        printf("failed to probe devices: %s\n", ohmd_ctx_get_error(gHMDCTX));
        exit(-1);
    }

    if( num_devices==0 )
    {
        printf( "no HMD present!, goodbye!\n");
        exit(-1);
    }

    printf( "num_devices<%d>\n", num_devices );

    for( int i=0; i<num_devices; i++ )
    {
        auto vendor = GetDevString(i,OHMD_VENDOR);
        auto product = GetDevString(i,OHMD_PRODUCT);
        auto path = GetDevString(i,OHMD_PATH);
        printf("hmd<%d> vendor<%s>\n", i, vendor.c_str() );
        printf("hmd<%d> product<%s>\n", i, product.c_str() );
        printf("hmd<%d> path<%s>\n", i, path.c_str() );
    }

    static const int khmddevno = 0;

    auto vendor = GetDevString(khmddevno,OHMD_VENDOR);
    auto product = GetDevString(khmddevno,OHMD_PRODUCT);
    auto path = GetDevString(khmddevno,OHMD_PATH);

    printf("hmd vendor<%s>\n", vendor.c_str() );
    printf("hmd product<%s>\n", product.c_str() );
    printf("hmd path<%s>\n", path.c_str() );


    gHMDDEV = ohmd_list_open_device(gHMDCTX, khmddevno);
        
    if(!gHMDDEV){
        printf("failed to open device: %s\n", ohmd_ctx_get_error(gHMDCTX));
        exit(-1);
    }

    Resolution res = GetResolution();
    DistortionK dk = GetDistortionK();

    float lens_sep = GetFloat(OHMD_LENS_HORIZONTAL_SEPARATION);
    float lens_vctr = GetFloat(OHMD_LENS_VERTICAL_POSITION);
    float fov_l = GetFloat(OHMD_LEFT_EYE_FOV);
    float fov_r = GetFloat(OHMD_RIGHT_EYE_FOV);
    float asp_l = GetFloat(OHMD_LEFT_EYE_ASPECT_RATIO);
    float asp_r = GetFloat(OHMD_RIGHT_EYE_ASPECT_RATIO);

    printf("hmd resolution<%d %d>\n", res.x, res.y );
    printf("hmd lens separation<%f>\n", lens_sep );
    printf("hmd lens vcenter<%f>\n", lens_vctr );
    printf("hmd left FOV<%f>\n", fov_l );
    printf("hmd left ASPECT<%f>\n", asp_l );
    printf("hmd right FOV<%f>\n", fov_r );
    printf("hmd right ASPECT<%f>\n", asp_r );

    const auto& k = dk.k;

    printf("hmd distortion K<%f %f %f %f %f %f>\n", k[0], k[1], k[2], k[3], k[4], k[5] );

    ohmd_device_getf(gHMDDEV, OHMD_EYE_IPD, &gIPD);

    // NOTE: the IPD just read from the device is overridden with a fixed value here
    gIPD = 0.043f;
    printf( "IPD<%f>\n", gIPD );

}
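// Once the device is open, pose data is typically pulled each frame with
// ohmd_ctx_update() followed by ohmd_device_getf(). A minimal sketch using the same
// globals as above and the standard OpenHMD C API (hypothetical helper name
// UpdatePose; openhmd.h is assumed to be included by this file already):
void UpdatePose()
{
    // pump the context; OpenHMD requires this before reading current values
    ohmd_ctx_update(gHMDCTX);

    // head orientation as a quaternion (x, y, z, w)
    float quat[4] = { 0.0f, 0.0f, 0.0f, 1.0f };
    ohmd_device_getf(gHMDDEV, OHMD_ROTATION_QUAT, quat);
    printf("hmd rotation quat<%f %f %f %f>\n", quat[0], quat[1], quat[2], quat[3]);
}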