Example #1
// Called to draw scene
void RenderScene(void)
{
	// Clear the window with current clearing color
	//glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);

	RenderToTexture();

	// Bind the default FBO (the window framebuffer has ID 0)
	glBindFramebuffer(GL_FRAMEBUFFER, 0);
	glBindTexture(GL_TEXTURE_2D, textureId);
	glViewport(0, 0, SCREEN_WIDTH, SCREEN_HEIGHT);

	// Render
	glClearColor(0, 0, 0, 0);
	glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

	glMatrixMode(GL_MODELVIEW);
	glLoadIdentity();

	// Make sure fixed-function texturing is enabled for the quad
	glEnable(GL_TEXTURE_2D);

	// Matrix calls are not allowed between glBegin/glEnd, so the modelview
	// reset above must happen before the quad is started
	glBegin(GL_POLYGON);

	glColor3f(1, 1, 1);

	// Map the texture onto a full-screen quad
	glTexCoord2f(1, 1);
	glVertex3d(1, 1, 0);

	glTexCoord2f(0, 1);
	glVertex3d(-1, 1, 0);

	glTexCoord2f(0, 0);
	glVertex3d(-1, -1, 0);

	glTexCoord2f(1, 0);
	glVertex3d(1, -1, 0);

	glEnd();

	// Perform the buffer swap to display back buffer
	glutSwapBuffers();
}
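The example above assumes RenderToTexture() has already drawn the scene into textureId through a framebuffer object; that setup code is not part of this listing. The following is only a minimal sketch of what such an FBO/texture pair could look like, assuming a loader such as GLEW exposes the FBO entry points; CreateRenderTarget is an illustrative name.

// Hypothetical setup for the FBO/texture pair that RenderToTexture() renders
// into; fboId and textureId mirror the names used by the example above.
#include <GL/glew.h> // assumption: any loader exposing the FBO functions works

GLuint fboId = 0;
GLuint textureId = 0;

void CreateRenderTarget(int width, int height)
{
	// Color texture that receives the off-screen rendering
	glGenTextures(1, &textureId);
	glBindTexture(GL_TEXTURE_2D, textureId);
	glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, width, height, 0,
	             GL_RGBA, GL_UNSIGNED_BYTE, NULL);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);

	// Framebuffer object with the texture as color attachment 0
	glGenFramebuffers(1, &fboId);
	glBindFramebuffer(GL_FRAMEBUFFER, fboId);
	glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
	                       GL_TEXTURE_2D, textureId, 0);

	// Switch back to the default (window) framebuffer
	glBindFramebuffer(GL_FRAMEBUFFER, 0);
}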
Example #2
void GrayFilter::PerformSteps(Ptr<Texture> output)
{
    /*List<VertexPosition> vertices(2);
    vertices[0].Position = Vector3(10, 10, 0);
    vertices[1].Position = Vector3(500, 400, 0);
    auto vb = New<VertexBuffer>();
    vb->SetData(vertices);
    GraphicsDevice::SetBuffers(vb, nullptr);
    
    auto program = LoadProgram("VertexTexture");
    program->Use();
    program->Uniforms["Texture"].SetValue(*Input);
    program->Uniforms["Color"].SetValue(cv::Vec4f(1, 1, 1, 1));
    
    glClearColor(0, 0, 0, 1);
    RenderToTexture(output, PrimitiveType::Lines, GL_COLOR_BUFFER_BIT);
    GraphicsDevice::UseDefaultBuffers();*/
    
    grayscale->Use();
    grayscale->Uniforms["Texture"].SetValue(*Input);
    RenderToTexture(output, PrimitiveType::Unspecified, GL_COLOR_BUFFER_BIT);
}
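The "grayscale" program used above is not part of this excerpt. As a rough illustration of the per-pixel work such a shader typically performs, here is a hedged CPU equivalent using the common Rec. 601 luma weights; the struct and function names are made up for the sketch.

// Hypothetical CPU equivalent of a typical grayscale shader's per-pixel
// computation (Rec. 601 luma weights); the actual "grayscale" program is not
// shown in the example above.
struct Rgb { float r, g, b; };

inline float ToGray( const Rgb& c )
{
    return 0.299f * c.r + 0.587f * c.g + 0.114f * c.b;
}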
Example #3
void CannyFilter::PerformSteps(Ptr<Texture> output)
{
    ReserveColorBuffers(2);

    glBlendEquation(GL_FUNC_ADD);
    glBlendFunc(GL_ONE, GL_ONE);
    glEnable(GL_BLEND);
    GraphicsDevice::SetBuffers(PerPixelVertices, nullptr);
    histogram->Use();
    histogram->Uniforms["Texture"].SetValue(*Input);
    RenderToTexture(ColorBuffers[1], PrimitiveType::Points, GL_COLOR_BUFFER_BIT);
    glDisable(GL_BLEND);
    GraphicsDevice::UseDefaultBuffers();
    
    // todo: maybe the histogram generation can be done in the gray filter. Slower because of the per pixel vertices, but probably faster than a whole extra pass
    auto pixels = FrameBuffer::GetCurrentlyBound()->ReadPixels<float>(0, 0, 255, 1, GL_RED, GL_FLOAT);
    // Estimate median from frequencies
    float count = Input->GetWidth() * Input->GetHeight();
    float percentile = 0;
    int i;
    for(i = 0; i < 255 && percentile < 0.5; ++i)
        percentile += pixels[i] / count;
    float median = i / 255.0f;
    
    gaussian->Input = Input;
    ApplyFilter(*gaussian, ColorBuffers[0]);
    
    ScharrAveraging(*ColorBuffers[0], output);
    Differentiation(*output, ColorBuffers[0]);
    
    //glEnable(GL_STENCIL_TEST);
    // todo: why / 2 ? Would it benefit from contrast stretch? Or should I use the 0.33rd and 0.66th percentile?? That would actually make a lot more sense...
    DetectEdges(*ColorBuffers[0], 0.33f * median, 0.66f * median, output); // Buffer0 contains gradients
    DEBUG_FB("Edges");
    //glDisable(GL_STENCIL_TEST);
}
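The median estimation in the middle of this example (accumulate histogram frequencies until half of the pixels are covered) can be read as a small standalone helper. The sketch below restates that idea under the assumption that the histogram arrives as a std::vector<float> of per-intensity counts; it is an illustration, not code from the filter.

#include <cstddef>
#include <vector>

// Illustrative helper: estimate the median intensity from a histogram of
// per-bin counts by accumulating frequencies until 50% of the pixels are
// covered. Bin i maps to the normalized intensity i / (bins - 1).
float EstimateMedianFromHistogram( const std::vector<float>& bins, float pixelCount )
{
    float cumulative = 0.0f;
    for( std::size_t i = 0; i < bins.size(); ++i )
    {
        cumulative += bins[i] / pixelCount;
        if( cumulative >= 0.5f )
            return static_cast<float>( i ) / static_cast<float>( bins.size() - 1 );
    }
    return 1.0f; // median falls in the last bin
}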
Example #4
void GLVideo::UpdateScreen (DCanvas *canvas)
{
   if(palettechanged)
   {
	   extern void UpdateShaderPalette(SDL_Color *colors);
	   UpdateShaderPalette(newPalette);
	   palettechanged = false;
   }

	SDL_GL_SwapBuffers();
	// viewheight or viewwidth not the same as screen?
	if(screenw != realviewwidth || screenh != realviewheight)
		glViewport(viewwindowx, screenh - (realviewheight + viewwindowy), realviewwidth, realviewheight);

	glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);

	// make textures transparent where they are black?
    glEnable(GL_BLEND);
    glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
    glEnable(GL_ALPHA_TEST);
    glAlphaFunc(GL_GREATER, 0);

	// Switch to 3D
	glMatrixMode(GL_PROJECTION);
	glLoadIdentity();

	float aspect = (float)realviewwidth/realviewheight;
	float fov = 70.0;

	gluPerspective(fov, aspect, 0.08f, 100.0f);

	glMatrixMode(GL_MODELVIEW);
	glLoadIdentity();

	// Render doom level
	if(gamestate == GS_LEVEL && !automapactive)
		RenderWorld();

	// Switch to 2D
	glDisable(GL_DEPTH_TEST);

	glMatrixMode(GL_PROJECTION);
	glLoadIdentity();

	glOrtho(0.0f, 1, 1, 0.0f, -1.0f, 1.0f);

	glMatrixMode(GL_MODELVIEW);
	glLoadIdentity();

	if(gamestate == GS_LEVEL && !automapactive)
	{
		// Draw player weapon
		DrawScreenSprites();

		// Draw crosshair
		DrawCrosshair();
	}

	// Render menu/console/automap over whole screen
	if(screenw != realviewwidth || screenh != realviewheight)
	{
		glViewport(0, 0, screenw, screenh);
	}

	// Draw automap
	if(automapactive)
		RenderAutomap();

	// Render view border
	extern void R_DrawViewBorder();
	R_DrawViewBorder();

	// Draw status bar
	if(gamestate == GS_LEVEL && (screenw != realviewwidth || screenh != realviewheight))
	{
		glColor4f(0,1,0,1);
		glDisable(GL_TEXTURE_2D);
		extern int ST_X, ST_Y, ST_WIDTH, ST_HEIGHT;
		DrawQuad((float)ST_X/screenw, (float)ST_Y/screenh, (float)ST_WIDTH/screenw, (float)ST_HEIGHT/screenh);
	}

	// Draw status bar border

	// Draw player redscreen, berserk, pickup
//	DrawTint();

	// Render the grayscale+colormap result to a texture
	extern void RenderToTexture(size_t screenw, size_t screenh);
	RenderToTexture(screenw, screenh);

	// Shader will convert this into RGB output
	extern void RenderedTextureToScreen(size_t screenw, size_t screenh);
	RenderedTextureToScreen(screenw, screenh);

	// Console
	DrawConsole(screenw, screenh);

	// Menu
	DrawMenu();
}
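DrawQuad() is only declared elsewhere in this source. Given the glOrtho(0, 1, 1, 0, -1, 1) projection set up above, a plausible fixed-function implementation would take normalized coordinates and emit one untextured quad; the following is a sketch under that assumption, not the real implementation.

// Hypothetical DrawQuad() consistent with the normalized glOrtho(0, 1, 1, 0,
// -1, 1) projection used above; the actual helper may differ.
static void DrawQuad(float x, float y, float w, float h)
{
	glBegin(GL_QUADS);
	glVertex2f(x,     y);
	glVertex2f(x + w, y);
	glVertex2f(x + w, y + h);
	glVertex2f(x,     y + h);
	glEnd();
}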
Example #5
//--------------------------------------------------------------------------------------
//  PerformPostProcessing( )
//
//      DESC:
//          This is the core function for this module: it takes the raw HDR image
//          generated by the 'HDRScene' component and puts it through four
//          post-processing stages to produce a bloom effect on the over-exposed
//          (HDR) parts of the image.
//
//      PARAMS:
//          pDevice : The device that will be rendered to
//
//      NOTES:
//          n/a
//
//--------------------------------------------------------------------------------------
HRESULT PerformPostProcessing( IDirect3DDevice9* pDevice )
{

    // [ 0 ] BRIGHT PASS
    //------------------
    LPDIRECT3DTEXTURE9 pHDRSource = NULL;
    if( FAILED( HDRScene::GetOutputTexture( &pHDRSource ) ) )
    {
        // Couldn't get the input - means that none of the subsequent
        // work is worth attempting!
        OutputDebugString( L"PostProcess::PerformPostProcessing() - Unable to retrieve source HDR information!\n" );
        return E_FAIL;

    }

    LPDIRECT3DSURFACE9 pBrightPassSurf = NULL;
    if( FAILED( PostProcess::g_pBrightPassTex->GetSurfaceLevel( 0, &pBrightPassSurf ) ) )
    {

        // Can't get the render target. Not good news!
        OutputDebugString(
            L"PostProcess::PerformPostProcessing() - Couldn't retrieve top level surface for bright pass render target.\n" );
        return E_FAIL;

    }

    pDevice->SetRenderTarget( 0, pBrightPassSurf );         // Configure the output of this stage
    pDevice->SetTexture( 0, pHDRSource );                   // Configure the input..
    pDevice->SetPixelShader( PostProcess::g_pBrightPassPS );
    PostProcess::g_pBrightPassConstants->SetFloat( pDevice, "fBrightPassThreshold", PostProcess::g_BrightThreshold );

    // We need to compute the sampling offsets used for this pass.
    // A 2x2 sampling pattern is used, so we need to generate 4 offsets
    D3DXVECTOR4 offsets[4];

    // Find the dimensions for the source data
    D3DSURFACE_DESC srcDesc;
    pHDRSource->GetLevelDesc( 0, &srcDesc );

    // Because the source and destination are NOT the same sizes, we
    // need to provide offsets to correctly map between them.
    float sU = ( 1.0f / static_cast< float >( srcDesc.Width ) );
    float sV = ( 1.0f / static_cast< float >( srcDesc.Height ) );

    // The last two components (z,w) are unused. This makes for simpler code, but if
    // constant-storage is limited then it is possible to pack 4 offsets into 2 float4's
    offsets[0] = D3DXVECTOR4( -0.5f * sU, 0.5f * sV, 0.0f, 0.0f );
    offsets[1] = D3DXVECTOR4( 0.5f * sU, 0.5f * sV, 0.0f, 0.0f );
    offsets[2] = D3DXVECTOR4( -0.5f * sU, -0.5f * sV, 0.0f, 0.0f );
    offsets[3] = D3DXVECTOR4( 0.5f * sU, -0.5f * sV, 0.0f, 0.0f );

    PostProcess::g_pBrightPassConstants->SetVectorArray( pDevice, "tcDownSampleOffsets", offsets, 4 );

    RenderToTexture( pDevice );



    // [ 1 ] DOWN SAMPLE
    //------------------
    LPDIRECT3DSURFACE9 pDownSampleSurf = NULL;
    if( FAILED( PostProcess::g_pDownSampledTex->GetSurfaceLevel( 0, &pDownSampleSurf ) ) )
    {

        // Can't get the render target. Not good news!
        OutputDebugString(
            L"PostProcess::PerformPostProcessing() - Couldn't retrieve top level surface for down sample render target.\n" );
        return E_FAIL;

    }

    pDevice->SetRenderTarget( 0, pDownSampleSurf );
    pDevice->SetTexture( 0, PostProcess::g_pBrightPassTex );
    pDevice->SetPixelShader( PostProcess::g_pDownSamplePS );

    // We need to compute the sampling offsets used for this pass.
    // A 4x4 sampling pattern is used, so we need to generate 16 offsets

    // Find the dimensions for the source data
    PostProcess::g_pBrightPassTex->GetLevelDesc( 0, &srcDesc );

    // Find the dimensions for the destination data
    D3DSURFACE_DESC destDesc;
    pDownSampleSurf->GetDesc( &destDesc );

    // Compute the offsets required for down-sampling. If constant-storage space
    // is important then this code could be packed into 8xFloat4's. The code here
    // is intentionally less efficient to aid readability...
    D3DXVECTOR4 dsOffsets[16];
    int idx = 0;
    for( int i = -2; i < 2; i++ )
    {
        for( int j = -2; j < 2; j++ )
        {
            dsOffsets[idx++] = D3DXVECTOR4(
                ( static_cast< float >( i ) + 0.5f ) * ( 1.0f / static_cast< float >( destDesc.Width ) ),
                ( static_cast< float >( j ) + 0.5f ) * ( 1.0f / static_cast< float >( destDesc.Height ) ),
                0.0f, // unused 
                0.0f  // unused
                );
        }
    }

    PostProcess::g_pDownSampleConstants->SetVectorArray( pDevice, "tcDownSampleOffsets", dsOffsets, 16 );

    RenderToTexture( pDevice );



    // [ 2 ] BLUR HORIZONTALLY
    //------------------------
    LPDIRECT3DSURFACE9 pHBloomSurf = NULL;
    if( FAILED( PostProcess::g_pBloomHorizontal->GetSurfaceLevel( 0, &pHBloomSurf ) ) )
    {

        // Can't get the render target. Not good news!
        OutputDebugString(
            L"PostProcess::PerformPostProcessing() - Couldn't retrieve top level surface for horizontal bloom render target.\n" );
        return E_FAIL;

    }

    pDevice->SetRenderTarget( 0, pHBloomSurf );
    pDevice->SetTexture( 0, PostProcess::g_pDownSampledTex );
    pDevice->SetPixelShader( PostProcess::g_pHBloomPS );

    // Configure the sampling offsets and their weights
    float HBloomWeights[9];
    float HBloomOffsets[9];

    for( int i = 0; i < 9; i++ )
    {
        // Compute the offsets. We take 9 samples - 4 either side and one in the middle:
        //     i =  0,  1,  2,  3, 4,  5,  6,  7,  8
        //Offset = -4, -3, -2, -1, 0, +1, +2, +3, +4
        HBloomOffsets[i] = ( static_cast< float >( i ) - 4.0f ) * ( 1.0f / static_cast< float >( destDesc.Width ) );

        // 'x' is just a simple alias to map the [0,8] range down to a [-1,+1]
        float x = ( static_cast< float >( i ) - 4.0f ) / 4.0f;

        // Use a gaussian distribution. Changing the standard-deviation
        // (second parameter) as well as the amplitude (multiplier) gives
        // distinctly different results.
        HBloomWeights[i] = g_GaussMultiplier * ComputeGaussianValue( x, g_GaussMean, g_GaussStdDev );
    }

    // Commit both arrays to the device:
    PostProcess::g_pHBloomConstants->SetFloatArray( pDevice, "HBloomWeights", HBloomWeights, 9 );
    PostProcess::g_pHBloomConstants->SetFloatArray( pDevice, "HBloomOffsets", HBloomOffsets, 9 );

    RenderToTexture( pDevice );



    // [ 3 ] BLUR VERTICALLY
    //----------------------
    LPDIRECT3DSURFACE9 pVBloomSurf = NULL;
    if( FAILED( PostProcess::g_pBloomVertical->GetSurfaceLevel( 0, &pVBloomSurf ) ) )
    {

        // Can't get the render target. Not good news!
        OutputDebugString(
            L"PostProcess::PerformPostProcessing() - Couldn't retrieve top level surface for vertical bloom render target.\n" );
        return E_FAIL;

    }

    pDevice->SetRenderTarget( 0, pVBloomSurf );
    pDevice->SetTexture( 0, PostProcess::g_pBloomHorizontal );
    pDevice->SetPixelShader( PostProcess::g_pVBloomPS );

    // Configure the sampling offsets and their weights

    // Although this code is almost identical to the previous section ('H'
    // weights, above), there is an important difference: destDesc.Height.
    // The bloom render targets are *not* square, so the same offsets can't be
    // re-used in both directions.
    float VBloomWeights[9];
    float VBloomOffsets[9];

    for( int i = 0; i < 9; i++ )
    {
        // Compute the offsets. We take 9 samples - 4 either side and one in the middle:
        //     i =  0,  1,  2,  3, 4,  5,  6,  7,  8
        //Offset = -4, -3, -2, -1, 0, +1, +2, +3, +4
        VBloomOffsets[i] = ( static_cast< float >( i ) - 4.0f ) * ( 1.0f / static_cast< float >( destDesc.Height ) );

        // 'x' is just a simple alias to map the [0,8] range down to a [-1,+1]
        float x = ( static_cast< float >( i ) - 4.0f ) / 4.0f;

        // Use a gaussian distribution. Changing the standard-deviation
        // (second parameter) as well as the amplitude (multiplier) gives
        // distinctly different results.
        VBloomWeights[i] = g_GaussMultiplier * ComputeGaussianValue( x, g_GaussMean, g_GaussStdDev );
    }

    // Commit both arrays to the device:
    PostProcess::g_pVBloomConstants->SetFloatArray( pDevice, "VBloomWeights", VBloomWeights, 9 );
    PostProcess::g_pVBloomConstants->SetFloatArray( pDevice, "VBloomOffsets", VBloomOffsets, 9 );

    RenderToTexture( pDevice );



    // [ 4 ] CLEAN UP
    //---------------
    SAFE_RELEASE( pHDRSource );
    SAFE_RELEASE( pBrightPassSurf );
    SAFE_RELEASE( pDownSampleSurf );
    SAFE_RELEASE( pHBloomSurf );
    SAFE_RELEASE( pVBloomSurf );

    return S_OK;

}
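ComputeGaussianValue() and the g_Gauss* globals are referenced above but not defined in this excerpt. A plausible implementation of the helper is a normalized 1D Gaussian evaluated at x; treat the sketch below as an assumption about what the sample computes, not its actual code.

#include <cmath>

// Plausible implementation of the ComputeGaussianValue() helper referenced
// above: a normalized 1D Gaussian. The mean and standard deviation come from
// globals configured elsewhere in the sample.
float ComputeGaussianValue( float x, float mean, float stdDev )
{
    const float kPi = 3.14159265358979f;
    const float d = x - mean;
    return ( 1.0f / std::sqrt( 2.0f * kPi * stdDev * stdDev ) )
           * std::exp( -( d * d ) / ( 2.0f * stdDev * stdDev ) );
}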
Example #6
void CannyFilter::Differentiation(const Texture &input, Ptr<Texture> output)
{
    diffCanny->Use();
    diffCanny->Uniforms["Texture"].SetValue(input);
    RenderToTexture(output);
}
Example #7
void CannyFilter::ScharrAveraging(const Texture &input, Ptr<Texture> output)
{
    scharr->Use();
    scharr->Uniforms["Texture"].SetValue(input);
    RenderToTexture(output);
}
Example #8
//--------------------------------------------------------------------------------------
//  MeasureLuminance( )
//
//      DESC:
//          This is the core function for this particular part of the application; its
//          job is to take the previously rendered (in the 'HDRScene' namespace) HDR
//          image and compute the overall luminance for the scene. This is done by
//          repeatedly downsampling the image until it is only 1x1 in size. Doing it
//          this way (pixel shaders and render targets) keeps as much of the work on
//          the GPU as possible, consequently avoiding any resource transfers, locking
//          and modification.
//
//      PARAMS:
//          pDevice : The currently active device that will be used for rendering.
//
//      NOTES:
//          The results from this function will eventually be used to compose the final
//          image. See OnFrameRender() in 'HDRDemo.cpp'.
//
//--------------------------------------------------------------------------------------
HRESULT MeasureLuminance( IDirect3DDevice9* pDevice )
{

    //[ 0 ] DECLARE VARIABLES AND ALIASES
    //-----------------------------------
    LPDIRECT3DTEXTURE9 pSourceTex = NULL;     // We use this texture as the input
    LPDIRECT3DTEXTURE9 pDestTex = NULL;     // We render to this texture...
    LPDIRECT3DSURFACE9 pDestSurf = NULL;     // ... Using this ptr to its top-level surface


    //[ 1 ] SET THE DEVICE TO RENDER TO THE HIGHEST
    //      RESOLUTION LUMINANCE MAP.
    //---------------------------------------------
    HDRScene::GetOutputTexture( &pSourceTex );
    pDestTex = Luminance::g_pTexLuminance[ Luminance::g_dwLumTextures - 1 ];
    if( FAILED( pDestTex->GetSurfaceLevel( 0, &pDestSurf ) ) )
    {

        // Couldn't acquire this surface level. Odd!
        OutputDebugString(
            L"Luminance::MeasureLuminance( ) : Couldn't acquire surface level for hi-res luminance map!\n" );
        return E_FAIL;

    }

    pDevice->SetRenderTarget( 0, pDestSurf );
    pDevice->SetTexture( 0, pSourceTex );

    pDevice->SetSamplerState( 0, D3DSAMP_MAGFILTER, D3DTEXF_LINEAR );
    pDevice->SetSamplerState( 0, D3DSAMP_MINFILTER, D3DTEXF_LINEAR );


    //[ 2 ] RENDER AND DOWNSAMPLE THE HDR TEXTURE
    //      TO THE LUMINANCE MAP.
    //-------------------------------------------

    // Set which shader we're going to use. g_pLum1PS corresponds
    // to the 'GreyScaleDownSample' entry point in 'Luminance.psh'.
    pDevice->SetPixelShader( Luminance::g_pLum1PS );

    // We need to compute the sampling offsets used for this pass.
    // A 2x2 sampling pattern is used, so we need to generate 4 offsets.
    //
    // NOTE: It is worth noting that some information will likely be lost
    //       due to the luminance map being less than 1/2 the size of the
    //       original render-target. This mis-match does not have a particularly
    //       big impact on the final luminance measurement. If necessary,
    //       the same process could be used - but with many more samples, so as
    //       to correctly map from HDR->Luminance without losing information.
    D3DXVECTOR4 offsets[4];

    // Find the dimensions for the source data
    D3DSURFACE_DESC srcDesc;
    pSourceTex->GetLevelDesc( 0, &srcDesc );

    // Because the source and destination are NOT the same sizes, we
    // need to provide offsets to correctly map between them.
    float sU = ( 1.0f / static_cast< float >( srcDesc.Width ) );
    float sV = ( 1.0f / static_cast< float >( srcDesc.Height ) );

    // The last two components (z,w) are unused. This makes for simpler code, but if
    // constant-storage is limited then it is possible to pack 4 offsets into 2 float4's
    offsets[0] = D3DXVECTOR4( -0.5f * sU, 0.5f * sV, 0.0f, 0.0f );
    offsets[1] = D3DXVECTOR4( 0.5f * sU, 0.5f * sV, 0.0f, 0.0f );
    offsets[2] = D3DXVECTOR4( -0.5f * sU, -0.5f * sV, 0.0f, 0.0f );
    offsets[3] = D3DXVECTOR4( 0.5f * sU, -0.5f * sV, 0.0f, 0.0f );

    // Set the offsets to the constant table
    Luminance::g_pLum1PSConsts->SetVectorArray( pDevice, "tcLumOffsets", offsets, 4 );

    // With everything configured we can now render the first, initial, pass
    // to the luminance textures.
    RenderToTexture( pDevice );

    // Make sure we clean up the remaining reference
    SAFE_RELEASE( pDestSurf );
    SAFE_RELEASE( pSourceTex );


    //[ 3 ] SCALE EACH RENDER TARGET DOWN
    //      The results ("dest") of each pass feeds into the next ("src")
    //-------------------------------------------------------------------
    for( int i = ( Luminance::g_dwLumTextures - 1 ); i > 0; i-- )
    {

        // Configure the render targets for this iteration
        pSourceTex = Luminance::g_pTexLuminance[ i ];
        pDestTex = Luminance::g_pTexLuminance[ i - 1 ];
        if( FAILED( pDestTex->GetSurfaceLevel( 0, &pDestSurf ) ) )
        {

            // Couldn't acquire this surface level. Odd!
            OutputDebugString( L"Luminance::MeasureLuminance( ) : Couldn't acquire surface level for luminance map!\n"
                               );
            return E_FAIL;

        }

        pDevice->SetRenderTarget( 0, pDestSurf );
        pDevice->SetTexture( 0, pSourceTex );

        // Bilinear filtering is used when sampling the source for this pass
        pDevice->SetSamplerState( 0, D3DSAMP_MAGFILTER, D3DTEXF_LINEAR );
        pDevice->SetSamplerState( 0, D3DSAMP_MINFILTER, D3DTEXF_LINEAR );

        // Because each of these textures is a factor of 3
        // different in dimension, we use a 3x3 set of sampling
        // points to downscale.
        D3DSURFACE_DESC srcTexDesc;
        pSourceTex->GetLevelDesc( 0, &srcTexDesc );

        // Create the 3x3 grid of offsets
        D3DXVECTOR4 DSoffsets[9];
        int idx = 0;
        for( int x = -1; x < 2; x++ )
        {
            for( int y = -1; y < 2; y++ )
            {
                DSoffsets[idx++] = D3DXVECTOR4(
                    static_cast< float >( x ) / static_cast< float >( srcTexDesc.Width ),
                    static_cast< float >( y ) / static_cast< float >( srcTexDesc.Height ),
                    0.0f,   //unused
                    0.0f    //unused
                    );
            }
        }

        // Set them to the current pixel shader
        pDevice->SetPixelShader( Luminance::g_pLum3x3DSPS );
        Luminance::g_pLum3x3DSPSConsts->SetVectorArray( pDevice, "tcDSOffsets", DSoffsets, 9 );

        // Render the display to this texture
        RenderToTexture( pDevice );

        // Clean-up by releasing the level-0 surface
        SAFE_RELEASE( pDestSurf );

    }


    // =============================================================
    //    At this point, the g_pTexLuminance[0] texture will contain
    //    a 1x1 texture that has the downsampled luminance for the
    //    scene as it has currently been rendered.
    // =============================================================

    return S_OK;

}
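The 1x1 result in g_pTexLuminance[0] normally stays on the GPU and feeds the final tone-mapping pass. If the averaged luminance were ever needed on the CPU (for debugging, say), a readback along the following lines would work; the surface format and the helper name are assumptions, not part of the sample.

// Hypothetical CPU readback of the 1x1 luminance texture; the demo itself
// keeps the value on the GPU, so this is only a sketch.
HRESULT ReadAverageLuminance( IDirect3DDevice9* pDevice, float* pOut )
{
    LPDIRECT3DSURFACE9 pLumSurf = NULL;
    LPDIRECT3DSURFACE9 pSysMemSurf = NULL;

    if( FAILED( Luminance::g_pTexLuminance[0]->GetSurfaceLevel( 0, &pLumSurf ) ) )
        return E_FAIL;

    D3DSURFACE_DESC desc;
    pLumSurf->GetDesc( &desc );

    // System-memory surface of the same format to copy the render target into
    if( FAILED( pDevice->CreateOffscreenPlainSurface( 1, 1, desc.Format,
                                                      D3DPOOL_SYSTEMMEM,
                                                      &pSysMemSurf, NULL ) ) )
    {
        SAFE_RELEASE( pLumSurf );
        return E_FAIL;
    }

    HRESULT hr = pDevice->GetRenderTargetData( pLumSurf, pSysMemSurf );
    if( SUCCEEDED( hr ) )
    {
        D3DLOCKED_RECT lr;
        if( SUCCEEDED( pSysMemSurf->LockRect( &lr, NULL, D3DLOCK_READONLY ) ) )
        {
            // Assumes a single-channel float format (e.g. D3DFMT_R32F)
            *pOut = *reinterpret_cast< float* >( lr.pBits );
            pSysMemSurf->UnlockRect();
        }
    }

    SAFE_RELEASE( pSysMemSurf );
    SAFE_RELEASE( pLumSurf );
    return hr;
}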
Example #9
HRESULT MPMadPresenter::RenderOsd(LPCSTR name, REFERENCE_TIME frameStart, RECT* fullOutputRect, RECT* activeVideoRect)
{
  HRESULT hr = E_UNEXPECTED;

  if (m_pShutdown)
  {
    Log("MPMadPresenter::RenderOsd() shutdown");
    return hr;
  }

  // Lock madVR thread while Shutdown()
  //CAutoLock lock(&m_dsLock);

  WORD videoHeight = (WORD)activeVideoRect->bottom - (WORD)activeVideoRect->top;
  WORD videoWidth = (WORD)activeVideoRect->right - (WORD)activeVideoRect->left;

  CAutoLock cAutoLock(this);

  ReinitOSD();

  //// Ugly hack to avoid flickering (most occurs on Intel GPU)
  //bool isFullScreen = m_pCallback->IsFullScreen();
  //bool isUiVisible = m_pCallback->IsUiVisible();
  //if (isUiVisible)
  //{
  //  // Disabled for now (see http://forum.kodi.tv/showthread.php?tid=154534&pid=1964715#pid1964715)
  //  // Present frame in advance option lead to GUI lag and/or stuttering for Intel GPU
  //  //int pRefreshrate = static_cast<int>(m_pRefreshrate);
  //  //Sleep(100 / m_pRefreshrate);
  //  int CountPass = uiVisible ? 3 : 6;
  //  //Log("MPMadPresenter::RenderOsd() uiVisible %x", CountPass);
  //  for (int x = 0; x < CountPass; ++x) // need to let in a loop to slow down why ???
  //  {
  //    // commented out (it slown down video on GPU Nvidia)
  //    //m_pDevice->PresentEx(nullptr, nullptr, nullptr, nullptr, D3DPRESENT_FORCEIMMEDIATE);
  //  }
  //  //m_mpWait.Unlock();
  //  //m_dsLock.Unlock();
  //  //return uiVisible ? CALLBACK_USER_INTERFACE : CALLBACK_INFO_DISPLAY;
  //}

  uiVisible = false;

  //Log("MPMadPresenter::RenderOsd()");

  if (!m_pMPTextureOsd || !m_pMadOsdVertexBuffer || !m_pRenderTextureOsd || !m_pCallback)
    return CALLBACK_INFO_DISPLAY;

  IDirect3DSurface9* SurfaceMadVr = nullptr; // This will be released by C# side

  m_dwHeight = (WORD)fullOutputRect->bottom - (WORD)fullOutputRect->top;
  m_dwWidth = (WORD)fullOutputRect->right - (WORD)fullOutputRect->left;

  // Handle GetBackBuffer to be done only 2 frames
  //countFrame++;
  //if (countFrame == firstFrame || countFrame == secondFrame)
  {
    if (SUCCEEDED(hr = m_pMadD3DDev->GetBackBuffer(0, 0, D3DBACKBUFFER_TYPE_MONO, &SurfaceMadVr)))
    {
      if (SUCCEEDED(hr = m_pCallback->RenderFrame(videoWidth, videoHeight, videoWidth, videoHeight, reinterpret_cast<DWORD>(SurfaceMadVr))))
      {
        SurfaceMadVr->Release();
      }
      //if (countFrame == secondFrame)
      //{
      //  countFrame = resetFrame;
      //}
    }
  }

  RenderToTexture(m_pMPTextureOsd);

  if (SUCCEEDED(hr = m_deviceState.Store()))
  {
    hr = m_pCallback->RenderOverlay(videoWidth, videoHeight, videoWidth, videoHeight);
    if (m_pCallback->IsUiVisible())
    {
      for (int x = 0; x < m_pMadVRFrameCount; ++x) // needs to run in a loop to slow things down (reason unclear)
      {
        if (x <= 3)
        {
          // previously commented out (it slowed down video on Nvidia GPUs)
          m_pDevice->PresentEx(nullptr, nullptr, nullptr, nullptr, D3DPRESENT_FORCEIMMEDIATE);
          //Log("MPMadPresenter::RenderOsd() IsUiVisible");
        }
      }
    }
  }

  uiVisible = (hr == S_OK);

  //Log("RenderOsd() hr: 0x%08x - 2", hr);

  if (SUCCEEDED(hr = m_pDevice->PresentEx(nullptr, nullptr, nullptr, nullptr, D3DPRESENT_FORCEIMMEDIATE)))
    if (SUCCEEDED(hr = SetupMadDeviceState()))
      if (SUCCEEDED(hr = SetupOSDVertex(m_pMadOsdVertexBuffer)))
        // Draw MP texture on madVR device's side
        RenderTexture(m_pMadOsdVertexBuffer, m_pRenderTextureOsd);

  // For 3D
  if (m_madVr3DEnable)
  {
    if (SUCCEEDED(hr = SetupOSDVertex3D(m_pMadOsdVertexBuffer)))
      // Draw MP texture on madVR device's side
      RenderTexture(m_pMadOsdVertexBuffer, m_pRenderTextureOsd);
  }

  m_deviceState.Restore();

  //// if we don't unlock, OSD will be slow because it will reach the timeout set in SetOSDCallback()
  //m_mpWait.Unlock();
  //m_dsLock.Unlock();

  return uiVisible ? CALLBACK_USER_INTERFACE : CALLBACK_INFO_DISPLAY;
}
Example #10
HRESULT MPMadPresenter::ClearBackground(LPCSTR name, REFERENCE_TIME frameStart, RECT* fullOutputRect, RECT* activeVideoRect)
{
  HRESULT hr = E_UNEXPECTED;

  if (m_pShutdown)
  {
    Log("MPMadPresenter::ClearBackground() shutdown or init OSD");
    return hr;
  }

  // Lock madVR thread while Shutdown()
  //CAutoLock lock(&m_dsLock);

  WORD videoHeight = (WORD)activeVideoRect->bottom - (WORD)activeVideoRect->top;
  WORD videoWidth = (WORD)activeVideoRect->right - (WORD)activeVideoRect->left;

  CAutoLock cAutoLock(this);

  ReinitOSD();

  //// Ugly hack to avoid flickering (most occurs on Intel GPU)
  //bool isFullScreen = m_pCallback->IsFullScreen();
  //bool isUiVisible = m_pCallback->IsUiVisible();
  //if (isFullScreen)
  //{
  //  if (isUiVisible)
  //  {
  //    //int pRefreshrate = static_cast<int>(m_pRefreshrate);
  //    //Sleep(100 / m_pRefreshrate);
  //    int CountPass = uiVisible ? 1 : 3;
  //    //Log("MPMadPresenter::ClearBackground() uiVisible %x", CountPass);
  //    for (int x = 0; x < CountPass; ++x) // need to let in a loop to slow down why ???
  //    {
  //      // commented out (it slown down video on GPU Nvidia)
  //      //m_pDevice->PresentEx(nullptr, nullptr, nullptr, nullptr, D3DPRESENT_FORCEIMMEDIATE);
  //    }
  //  }
  //  //m_mpWait.Unlock();
  //  //m_dsLock.Unlock();
  //  return uiVisible ? CALLBACK_USER_INTERFACE : CALLBACK_INFO_DISPLAY;
  //}

  uiVisible = false;

  //Log("MPMadPresenter::ClearBackground()");

  if (!m_pMPTextureGui || !m_pMadGuiVertexBuffer || !m_pRenderTextureGui || !m_pCallback)
    return CALLBACK_INFO_DISPLAY;

  m_dwHeight = (WORD)fullOutputRect->bottom - (WORD)fullOutputRect->top; // added back
  m_dwWidth = (WORD)fullOutputRect->right - (WORD)fullOutputRect->left;

  RenderToTexture(m_pMPTextureGui);

  if (SUCCEEDED(hr = m_deviceState.Store()))
  {
    hr = m_pCallback->RenderGui(videoWidth, videoHeight, videoWidth, videoHeight);
    if (m_pCallback->IsUiVisible())
    {
      for (int x = 0; x < m_pMadVRFrameCount; ++x) // needs to run in a loop to slow things down (reason unclear)
      {
        if (x <= 3)
        {
          // previously commented out (it slowed down video on Nvidia GPUs)
          m_pDevice->PresentEx(nullptr, nullptr, nullptr, nullptr, D3DPRESENT_FORCEIMMEDIATE);
          //Log("MPMadPresenter::ClearBackground() IsUiVisible");
        }
      }
    }
  }

  uiVisible = (hr == S_OK);

  //Log("ClearBackground() hr: 0x%08x - 2", hr);

  if (SUCCEEDED(hr = m_pDevice->PresentEx(nullptr, nullptr, nullptr, nullptr, D3DPRESENT_FORCEIMMEDIATE)))
    if (SUCCEEDED(hr = SetupMadDeviceState()))
      if (SUCCEEDED(hr = SetupOSDVertex(m_pMadGuiVertexBuffer)))
        // Draw MP texture on madVR device's side
        RenderTexture(m_pMadGuiVertexBuffer, m_pRenderTextureGui);

  // For 3D
  if (m_madVr3DEnable)
  {
    if (SUCCEEDED(hr = SetupOSDVertex3D(m_pMadGuiVertexBuffer)))
      // Draw MP texture on madVR device's side
      RenderTexture(m_pMadGuiVertexBuffer, m_pRenderTextureGui);
  }

  m_deviceState.Restore();

  //// if we don't unlock, OSD will be slow because it will reach the timeout set in SetOSDCallback()
  //m_mpWait.Unlock();
  //m_dsLock.Unlock();

  return uiVisible ? CALLBACK_USER_INTERFACE : CALLBACK_INFO_DISPLAY;
}
Example #11
//--------------------------------------------------------------------------------------
void CreateSpectrogram( ID3D10Device* pd3dDevice )
{
    //store the original rt and ds buffer views
    ID3D10RenderTargetView* apOldRTVs[1] = { NULL };
    ID3D10DepthStencilView* pOldDS = NULL;
    pd3dDevice->OMGetRenderTargets( 1, apOldRTVs, &pOldDS );

    // Bit-reverse the sample indices (input ordering required by the FFT passes below)
    RenderToTexture( pd3dDevice, g_pDestRTV, g_pSourceTexRV, false, g_pReverse );
    pd3dDevice->OMSetRenderTargets( 1, apOldRTVs, pOldDS );

    // Danielson-Lanczos routine
    UINT iterations = 0;
    float wtemp, wr, wpr, wpi, wi, theta;
    UINT n = g_uiTexX;
    UINT mmax = 1;
    while( n > mmax )
    {
        UINT istep = mmax << 1;
        theta = 6.28318530717959f / ( ( float )mmax * 2.0f );
        wtemp = sinf( 0.5f * theta );
        wpr = -2.0f * wtemp * wtemp;
        wpi = sinf( theta );
        wr = 1.0f;
        wi = 0.0f;

        for( UINT m = 0; m < mmax; m++ )
        {
            // Inner loop is handled on the GPU
            {
                g_pWR->SetFloat( wr );
                g_pWI->SetFloat( wi );
                g_pMMAX->SetInt( mmax );
                g_pM->SetInt( m );
                g_pISTEP->SetInt( istep );

                // Make sure we immediately unbind the previous RT from the shader pipeline
                ID3D10ShaderResourceView* const pSRV[1] = {NULL};
                pd3dDevice->PSSetShaderResources( 0, 1, pSRV );

                if( 0 == iterations % 2 )
                {
                    RenderToTexture( pd3dDevice, g_pSourceRTV, g_pDestTexRV, false, g_pFFTInner );
                }
                else
                {
                    RenderToTexture( pd3dDevice, g_pDestRTV, g_pSourceTexRV, false, g_pFFTInner );
                }
                pd3dDevice->OMSetRenderTargets( 1, apOldRTVs, pOldDS );

                iterations++;
            }

            wtemp = wr;
            wr = wtemp * wpr - wi * wpi + wr;
            wi = wi * wpr + wtemp * wpi + wi;
        }
        mmax = istep;
    }

    // Restore the original RT and DS
    pd3dDevice->OMSetRenderTargets( 1, apOldRTVs, pOldDS );
    SAFE_RELEASE( apOldRTVs[0] );
    SAFE_RELEASE( pOldDS );

    return;
}
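For reference, the GPU passes above mirror the classic Danielson-Lanczos section of an iterative radix-2 FFT: the outer while/for loops and the wr/wi twiddle recurrence are identical, and the full-screen RenderToTexture() call stands in for the innermost butterfly loop. A hedged CPU equivalent over std::complex data (already bit-reversed, length a power of two) looks like this; it is a sketch for comparison, not code from the sample.

#include <cmath>
#include <complex>
#include <cstddef>
#include <vector>

// CPU reference of the Danielson-Lanczos recurrence driven by the GPU loop
// above. 'data' must already be in bit-reversed order (the index-reversal
// pass) and its length must be a power of two.
void DanielsonLanczos( std::vector< std::complex<float> >& data )
{
    const std::size_t n = data.size();
    std::size_t mmax = 1;
    while( n > mmax )
    {
        const std::size_t istep = mmax << 1;
        const float theta = 6.28318530717959f / ( ( float )mmax * 2.0f );
        float wtemp = std::sin( 0.5f * theta );
        float wpr = -2.0f * wtemp * wtemp;
        float wpi = std::sin( theta );
        float wr = 1.0f;
        float wi = 0.0f;

        for( std::size_t m = 0; m < mmax; m++ )
        {
            // This butterfly loop is what each RenderToTexture() pass
            // evaluates in parallel across the texture
            for( std::size_t i = m; i < n; i += istep )
            {
                const std::size_t j = i + mmax;
                const std::complex<float> w( wr, wi );
                const std::complex<float> temp = w * data[j];
                data[j] = data[i] - temp;
                data[i] += temp;
            }

            // Twiddle-factor recurrence, identical to the wr/wi updates above
            wtemp = wr;
            wr = wtemp * wpr - wi * wpi + wr;
            wi = wi * wpr + wtemp * wpi + wi;
        }
        mmax = istep;
    }
}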