// I appreciate this is not an ideal place for this function, but it didn't seem to be
// linked properly when it lived in OVR_CAPI.cpp.
// Please relocate it if you know of a better place.
ovrBool ovrHmd_CreateDistortionMeshInternal( ovrHmdStruct *  hmd,
                                             ovrEyeType eyeType, ovrFovPort fov,
                                             unsigned int distortionCaps,
                                             ovrDistortionMesh *meshData,
                                             float overrideEyeReliefIfNonZero )
{
    // Nothing to fill in without an output structure.
    if (!meshData)
        return 0;

    HMDState* hmdState = (HMDState*)hmd;

    // Not consulted today; retained so flags (e.g. chromatic correction) can be
    // honored here in the future without an interface change.
    OVR_UNUSED1(distortionCaps);

#if defined (OVR_CC_MSVC)
    static_assert(sizeof(DistortionMeshVertexData) == sizeof(ovrDistortionVertex), "DistortionMeshVertexData size mismatch");
#endif

    // *** Derive the subset of "StereoParams" the mesh generator needs.
    //
    // Mesh distortion generation is invariant of render-target UVs, so the
    // render target's size and placement may be changed afterwards without
    // regenerating the mesh. eyeToSourceNDC is computed here purely for
    // convenience, sparing callers a separate ovrHmd_GetRenderScaleAndOffset
    // call unless they change the RT dynamically.

    const HmdRenderInfo& renderInfo = hmdState->RenderState.RenderInfo;
    StereoEye            whichEye   = (eyeType == ovrEye_Left) ? StereoEye_Left
                                                               : StereoEye_Right;

    DistortionRenderDesc& distortion = hmdState->RenderState.Distortion[eyeType];
    if (overrideEyeReliefIfNonZero)
    {
        // Caller supplied an explicit eye relief; rebuild the lens config for it.
        distortion.Lens = GenerateLensConfigFromEyeRelief(overrideEyeReliefIfNonZero, renderInfo);
    }

    // Mapping from TanAngle space to target NDC space.
    ScaleAndOffset2D eyeToSourceNDC = CreateNDCScaleAndOffsetFromFov(fov);

    int numTriangles = 0;
    int numVertices  = 0;

    DistortionMeshCreate((DistortionMeshVertexData**)&meshData->pVertexData,
                         (uint16_t**)&meshData->pIndexData,
                         &numVertices, &numTriangles,
                         (whichEye == StereoEye_Right),
                         renderInfo, distortion, eyeToSourceNDC);

    // A null vertex pointer after creation signals failure.
    if (!meshData->pVertexData)
        return 0;

    meshData->IndexCount  = numTriangles * 3;   // three indices per triangle
    meshData->VertexCount = numVertices;
    return 1;
}
// ----------------------------------------------------------------------------
// Code example #2 (score: 0) — File: OVR_CAPI.cpp, Project: DJoser/minko
// ----------------------------------------------------------------------------
// Maps a point from Fov tan-angle units into [-1,1] render-target NDC space.
Vector2f FovPort::TanAngleToRendertargetNDC(Vector2f const &tanEyeAngle)
{
    // The mapping is affine: scale then offset, both derived from this FovPort.
    const ScaleAndOffset2D ndcMap = CreateNDCScaleAndOffsetFromFov(*this);
    Vector2f scaled = tanEyeAngle * ndcMap.Scale;
    return scaled + ndcMap.Offset;
}
// ----------------------------------------------------------------------------
// Code example #3 (score: 0) — File: OVR_StereoProjection.cpp, Project: Borf/VrLib
// ----------------------------------------------------------------------------
// Builds an eye projection matrix from a tan-half-FOV description.
// rightHanded selects the handedness convention; isOpenGL selects the
// [-w,+w] (GL) versus [0,+w] (D3D) clip-range convention; flipZ reverses
// the depth range; farAtInfinity pushes the far clip to infinity (requires
// flipZ). The eye parameter is unused — both eyes share the same form.
Matrix4f CreateProjection( bool rightHanded, bool isOpenGL, FovPort tanHalfFov, StereoEye /*eye*/, 
                           float zNear /*= 0.01f*/, float zFar /*= 10000.0f*/,
                           bool flipZ /*= false*/, bool farAtInfinity /*= false*/)
{
    // An infinite far plane is only supported with a reversed Z range.
    if(!flipZ && farAtInfinity)
    {
        //OVR_ASSERT_M(false, "Cannot push Far Clip to Infinity when Z-order is not flipped"); Assertion disabled because this code no longer has access to LibOVRKernel assertion functionality.
        farAtInfinity = false;
    }

    // A projection matrix is very like a scaling from NDC, so start from that.
    const ScaleAndOffset2D scaleAndOffset = CreateNDCScaleAndOffsetFromFov ( tanHalfFov );
    const float handednessScale = rightHanded ? -1.0f : 1.0f;

    // Z-row terms depend on the API convention and the requested depth range.
    // The app needs to fill the Z buffer with whatever range it wants; these
    // are just sensible defaults.
    float zz, zw;
    if (farAtInfinity)
    {
        if (isOpenGL)
        {
            // It's not clear this makes sense for OpenGL - you don't get the
            // same precision benefits you do in D3D.
            zz = -handednessScale;
            zw = 2.0f * zNear;
        }
        else
        {
            zz = 0.0f;
            zw = zNear;
        }
    }
    else if (isOpenGL)
    {
        // Clip range is [-w,+w], so 0 is at the middle of the range.
        zz = -handednessScale * (flipZ ? -1.0f : 1.0f) * (zNear + zFar) / (zNear - zFar);
        zw =                    2.0f * ((flipZ ? -zFar : zFar) * zNear) / (zNear - zFar);
    }
    else
    {
        // Clip range is [0,+w], so 0 is at the start of the range.
        zz = -handednessScale * (flipZ ? -zNear : zFar)                 / (zNear - zFar);
        zw =                           ((flipZ ? -zFar : zFar) * zNear) / (zNear - zFar);
    }

    Matrix4f projection;

    // Row 0 — X result, mapping clip edges to [-w,+w].
    projection.M[0][0] = scaleAndOffset.Scale.x;
    projection.M[0][1] = 0.0f;
    projection.M[0][2] = handednessScale * scaleAndOffset.Offset.x;
    projection.M[0][3] = 0.0f;

    // Row 1 — Y result, mapping clip edges to [-w,+w]. The Y offset is
    // negated because a projection transforms from world coords with Y=up,
    // whereas the NDC scaling it derives from is Y=down.
    projection.M[1][0] = 0.0f;
    projection.M[1][1] = scaleAndOffset.Scale.y;
    projection.M[1][2] = handednessScale * -scaleAndOffset.Offset.y;
    projection.M[1][3] = 0.0f;

    // Row 2 — Z-buffer result, computed above.
    projection.M[2][0] = 0.0f;
    projection.M[2][1] = 0.0f;
    projection.M[2][2] = zz;
    projection.M[2][3] = zw;

    // Row 3 — W result (= incoming Z).
    projection.M[3][0] = 0.0f;
    projection.M[3][1] = 0.0f;
    projection.M[3][2] = handednessScale;
    projection.M[3][3] = 0.0f;

    return projection;
}