//-----------------------------------------------------------------------------
// Purpose: Render current view into specified rectangle
// Input  : *rect - is computed by CVideoMode_Common::GetClientViewRect()
//-----------------------------------------------------------------------------
void CViewRender::Render( vrect_t *rect )
{
    Assert( s_DbgSetupOrigin == m_View.origin );
    Assert( s_DbgSetupAngles == m_View.angles );

    VPROF_BUDGET( "CViewRender::Render", "CViewRender::Render" );
    tmZone( TELEMETRY_LEVEL0, TMZF_NONE, "%s", __FUNCTION__ );

    vrect_t vr = *rect;

    // Stub out the material system if necessary.
    CMatStubHandler matStub;

    engine->EngineStats_BeginFrame();

    // Assume normal vis
    m_bForceNoVis = false;

    C_BasePlayer *pPlayer = C_BasePlayer::GetLocalPlayer();

    // Set for console commands, etc.
    render->SetMainView( m_View.origin, m_View.angles );

    for( StereoEye_t eEye = GetFirstEye(); eEye <= GetLastEye(); eEye = (StereoEye_t)(eEye+1) )
    {
        CViewSetup &view = GetView( eEye );

#if 0 && defined( CSTRIKE_DLL )
        const bool bPlayingBackReplay = g_pEngineClientReplay && g_pEngineClientReplay->IsPlayingReplayDemo();
        if ( pPlayer && !bPlayingBackReplay )
        {
            C_BasePlayer *pViewTarget = pPlayer;

            if ( pPlayer->IsObserver() && pPlayer->GetObserverMode() == OBS_MODE_IN_EYE )
            {
                pViewTarget = dynamic_cast<C_BasePlayer*>( pPlayer->GetObserverTarget() );
            }

            if ( pViewTarget )
            {
                float targetFOV = (float)pViewTarget->m_iFOV;

                if ( targetFOV == 0 )
                {
                    // FOV of 0 means use the default FOV
                    targetFOV = g_pGameRules->DefaultFOV();
                }

                float deltaFOV = view.fov - m_flLastFOV;
                float FOVDirection = targetFOV - pViewTarget->m_iFOVStart;

                // Clamp FOV changes to stop FOV oscillation
                if ( ( deltaFOV < 0.0f && FOVDirection > 0.0f ) ||
                     ( deltaFOV > 0.0f && FOVDirection < 0.0f ) )
                {
                    view.fov = m_flLastFOV;
                }

                // Catch case where FOV overshoots its target FOV
                if ( ( view.fov < targetFOV && FOVDirection <= 0.0f ) ||
                     ( view.fov > targetFOV && FOVDirection >= 0.0f ) )
                {
                    view.fov = targetFOV;
                }

                m_flLastFOV = view.fov;
            }
        }
#endif

        static ConVarRef sv_restrict_aspect_ratio_fov( "sv_restrict_aspect_ratio_fov" );
        float aspectRatio = engine->GetScreenAspectRatio() * 0.75f;	 // / (4/3)
        float limitedAspectRatio = aspectRatio;
        if ( ( sv_restrict_aspect_ratio_fov.GetInt() > 0 && engine->IsWindowedMode() && gpGlobals->maxClients > 1 ) ||
            sv_restrict_aspect_ratio_fov.GetInt() == 2 )
        {
            // Cap out the FOV advantage at a 1.85:1 ratio (about the widest any legit user should be)
            limitedAspectRatio = MIN( aspectRatio, 1.85f * 0.75f );
        }

        view.fov = ScaleFOVByWidthRatio( view.fov, limitedAspectRatio );
        view.fovViewmodel = ScaleFOVByWidthRatio( view.fovViewmodel, aspectRatio );

        // Let the client mode hook stuff.
        g_pClientMode->PreRender( &view );
        g_pClientMode->AdjustEngineViewport( vr.x, vr.y, vr.width, vr.height );
        ToolFramework_AdjustEngineViewport( vr.x, vr.y, vr.width, vr.height );

        float flViewportScale = mat_viewportscale.GetFloat();

        view.m_nUnscaledX = vr.x;
        view.m_nUnscaledY = vr.y;
        view.m_nUnscaledWidth = vr.width;
        view.m_nUnscaledHeight = vr.height;

        switch( eEye )
        {
            case STEREO_EYE_MONO:
            {
#if 0
                // Good test mode for debugging viewports that are not full-size.
                view.width = vr.width * flViewportScale * 0.75f;
                view.height = vr.height * flViewportScale * 0.75f;
                view.x = vr.x + view.width * 0.10f;
                view.y = vr.y + view.height * 0.20f;
#else
                view.x = vr.x * flViewportScale;
                view.y = vr.y * flViewportScale;
                view.width = vr.width * flViewportScale;
                view.height = vr.height * flViewportScale;
#endif
                float engineAspectRatio = engine->GetScreenAspectRatio();
                view.m_flAspectRatio = ( engineAspectRatio > 0.0f ) ? engineAspectRatio : ( (float)view.width / (float)view.height );
            }
            break;

            case STEREO_EYE_RIGHT:
            case STEREO_EYE_LEFT:
            {
                g_pSourceVR->GetViewportBounds( (ISourceVirtualReality::VREye)(eEye - 1), &view.x, &view.y, &view.width, &view.height );
                view.m_nUnscaledWidth = view.width;
                view.m_nUnscaledHeight = view.height;
                view.m_nUnscaledX = view.x;
                view.m_nUnscaledY = view.y;
            }
            break;

            default:
                Assert( false );
                break;
        }

        // If we still don't have an aspect ratio, compute it from the view size
        if ( view.m_flAspectRatio <= 0.f )
            view.m_flAspectRatio = (float)view.width / (float)view.height;

        int nClearFlags = VIEW_CLEAR_DEPTH | VIEW_CLEAR_STENCIL;

        if ( gl_clear_randomcolor.GetBool() )
        {
            CMatRenderContextPtr pRenderContext( materials );
            pRenderContext->ClearColor3ub( rand()%256, rand()%256, rand()%256 );
            pRenderContext->ClearBuffers( true, false, false );
            pRenderContext->Release();
        }
        else if ( gl_clear.GetBool() )
        {
            nClearFlags |= VIEW_CLEAR_COLOR;
        }
        else if ( IsPosix() )
        {
            MaterialAdapterInfo_t adapterInfo;
            materials->GetDisplayAdapterInfo( materials->GetCurrentAdapter(), adapterInfo );

            // On Posix, on ATI, we always clear color if we're antialiasing
            if ( adapterInfo.m_VendorID == 0x1002 )
            {
                if ( g_pMaterialSystem->GetCurrentConfigForVideoCard().m_nAASamples > 0 )
                {
                    nClearFlags |= VIEW_CLEAR_COLOR;
                }
            }
        }

        // Determine if we should draw view model ( client mode override )
        bool drawViewModel = g_pClientMode->ShouldDrawViewModel();

        if ( cl_leveloverview.GetFloat() > 0 )
        {
            SetUpOverView();
            nClearFlags |= VIEW_CLEAR_COLOR;
            drawViewModel = false;
        }

        // Apply any player specific overrides
        if ( pPlayer )
        {
            // Override view model if necessary
            if ( !pPlayer->m_Local.m_bDrawViewmodel )
            {
                drawViewModel = false;
            }
        }

        int flags = 0;
        if ( eEye == STEREO_EYE_MONO || eEye == STEREO_EYE_LEFT || g_ClientVirtualReality.ShouldRenderHUDInWorld() )
        {
            flags = RENDERVIEW_DRAWHUD;
        }
        if ( drawViewModel )
        {
            flags |= RENDERVIEW_DRAWVIEWMODEL;
        }
        if ( eEye == STEREO_EYE_RIGHT )
        {
            // We should use the monitor view from the left eye for both eyes
            flags |= RENDERVIEW_SUPPRESSMONITORRENDERING;
        }

        RenderView( view, nClearFlags, flags );

        if ( UseVR() )
        {
            bool bDoUndistort = !engine->IsTakingScreenshot();

            if ( bDoUndistort )
            {
                g_ClientVirtualReality.PostProcessFrame( eEye );
            }

            // Logic here all cloned from code in viewrender.cpp around RenderHUDQuad:

            // Figure out if we really want to draw the HUD based on freeze cam
            bool bInFreezeCam = ( pPlayer && pPlayer->GetObserverMode() == OBS_MODE_FREEZECAM );

            // Draw the HUD after the view model so its "I'm closer" depth cues work right.
            if ( !bInFreezeCam && g_ClientVirtualReality.ShouldRenderHUDInWorld() )
            {
                // TODO - a bit of a shonky test - basically trying to catch the main menu,
                // the briefing screen, the loadout screen, etc.
                bool bTranslucent = !g_pMatSystemSurface->IsCursorVisible();
                g_ClientVirtualReality.OverlayHUDQuadWithUndistort( view, bDoUndistort, g_pClientMode->ShouldBlackoutAroundHUD(), bTranslucent );
            }
        }
    }

    // TODO: should these be inside or outside the stereo eye stuff?
    g_pClientMode->PostRender();

    engine->EngineStats_EndFrame();

#if !defined( _X360 )
    // Stop stubbing the material system so we can see the budget panel
    matStub.End();
#endif

    // Draw all of the UI stuff "fullscreen".
    // (This is not health, ammo, etc. Nor is it pre-game briefing interface stuff -
    // this is the stuff that appears when you hit Esc in-game.)
    // In stereo mode this is rendered inside of RenderView so it goes into the render target.
    if ( !g_ClientVirtualReality.ShouldRenderHUDInWorld() )
    {
        CViewSetup view2d;
        view2d.x      = rect->x;
        view2d.y      = rect->y;
        view2d.width  = rect->width;
        view2d.height = rect->height;

        render->Push2DView( view2d, 0, NULL, GetFrustum() );
        render->VGui_Paint( PAINT_UIPANELS | PAINT_CURSOR );
        render->PopView( GetFrustum() );
    }
}
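// Render() above folds the aspect ratio into the FOV via
// ScaleFOVByWidthRatio(). A minimal sketch of the usual Source-style
// tangent-space scaling it performs, assuming M_PI/tanf/atanf from the math
// headers (the engine supplies the real definition elsewhere; this name is
// illustrative only):
static float ScaleFOVByWidthRatio_Sketch( float fovDegrees, float ratio )
{
    // Convert the half-FOV to a view-plane half-width, widen it by the
    // width ratio, then convert back to an angle.
    float halfAngle = fovDegrees * 0.5f * (float)M_PI / 180.0f;
    float t = tanf( halfAngle ) * ratio;
    return atanf( t ) * 2.0f * 180.0f / (float)M_PI;
}
// For example, a 90-degree FOV at 16:9 (ratio 16/9 * 0.75 = 4/3) widens to
// about 106.26 degrees; the callers multiply the screen aspect by 0.75f
// first so that a 4:3 screen yields ratio 1.0 and leaves the FOV unchanged.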
void CViewRender::WriteSaveGameScreenshotOfSize( const char *pFilename, int width, int height, bool bCreatePowerOf2Padded/*=false*/, bool bWriteVTF/*=false*/ )
{
#ifndef _X360
    CMatRenderContextPtr pRenderContext( materials );
    pRenderContext->MatrixMode( MATERIAL_PROJECTION );
    pRenderContext->PushMatrix();

    pRenderContext->MatrixMode( MATERIAL_VIEW );
    pRenderContext->PushMatrix();

    g_bRenderingScreenshot = true;

    // Push back buffer on the stack with small viewport
    pRenderContext->PushRenderTargetAndViewport( NULL, 0, 0, width, height );

    // Render out to the backbuffer
    CViewSetup viewSetup = GetView( STEREO_EYE_MONO );
    viewSetup.x = 0;
    viewSetup.y = 0;
    viewSetup.width = width;
    viewSetup.height = height;
    viewSetup.fov = ScaleFOVByWidthRatio( viewSetup.fov, ( (float)width / (float)height ) / ( 4.0f / 3.0f ) );
    viewSetup.m_bRenderToSubrectOfLargerScreen = true;

    // Draw out the scene
    // Don't draw the HUD or the viewmodel
    RenderView( viewSetup, VIEW_CLEAR_DEPTH | VIEW_CLEAR_COLOR, 0 );

    // Get the data from the backbuffer and save to disk
    // Bitmap bits
    unsigned char *pImage = (unsigned char *)malloc( width * height * 3 );

    // Get bits from the material system
    pRenderContext->ReadPixels( 0, 0, width, height, pImage, IMAGE_FORMAT_RGB888 );

    // Some stuff to be set up dependent on padded vs. not padded
    int nSrcWidth, nSrcHeight;
    unsigned char *pSrcImage;

    // Create a padded version if necessary
    unsigned char *pPaddedImage = NULL;
    if ( bCreatePowerOf2Padded )
    {
        // Set up dimensions as needed
        int nPaddedWidth = SmallestPowerOfTwoGreaterOrEqual( width );
        int nPaddedHeight = SmallestPowerOfTwoGreaterOrEqual( height );

        // Allocate
        int nPaddedImageSize = nPaddedWidth * nPaddedHeight * 3;
        pPaddedImage = (unsigned char *)malloc( nPaddedImageSize );

        // Fill the entire padded image with white (255), so the padding
        // region isn't black
        V_memset( pPaddedImage, 255, nPaddedImageSize );

        // Copy over each row individually
        for ( int nRow = 0; nRow < height; ++nRow )
        {
            unsigned char *pDst = pPaddedImage + 3 * ( nRow * nPaddedWidth );
            const unsigned char *pSrc = pImage + 3 * ( nRow * width );
            V_memcpy( pDst, pSrc, 3 * width );
        }

        // Set up source data
        nSrcWidth = nPaddedWidth;
        nSrcHeight = nPaddedHeight;
        pSrcImage = pPaddedImage;
    }
    else
    {
        // Use non-padded info
        nSrcWidth = width;
        nSrcHeight = height;
        pSrcImage = pImage;
    }

    // Allocate a buffer to write the tga into
    CUtlBuffer buffer;
    bool bWriteResult;
    if ( bWriteVTF )
    {
        // Create and initialize a VTF texture
        IVTFTexture *pVTFTexture = CreateVTFTexture();
        const int nFlags = TEXTUREFLAGS_NOMIP | TEXTUREFLAGS_NOLOD | TEXTUREFLAGS_SRGB;
        if ( pVTFTexture->Init( nSrcWidth, nSrcHeight, 1, IMAGE_FORMAT_RGB888, nFlags, 1, 1 ) )
        {
            // Copy the image data over to the VTF
            unsigned char *pDestBits = pVTFTexture->ImageData();
            int nDstSize = nSrcWidth * nSrcHeight * 3;
            V_memcpy( pDestBits, pSrcImage, nDstSize );

            // Allocate output buffer
            int iMaxVTFSize = 1024 + ( nSrcWidth * nSrcHeight * 3 );
            void *pVTF = malloc( iMaxVTFSize );
            buffer.SetExternalBuffer( pVTF, iMaxVTFSize, 0 );

            // Serialize to the buffer
            bWriteResult = pVTFTexture->Serialize( buffer );

            // Free the VTF texture
            DestroyVTFTexture( pVTFTexture );
        }
        else
        {
            bWriteResult = false;
        }
    }
    else
    {
        // Write TGA format to buffer
        int iMaxTGASize = 1024 + ( nSrcWidth * nSrcHeight * 4 );
        void *pTGA = malloc( iMaxTGASize );
        buffer.SetExternalBuffer( pTGA, iMaxTGASize, 0 );

        bWriteResult = TGAWriter::WriteToBuffer( pSrcImage, buffer, nSrcWidth, nSrcHeight, IMAGE_FORMAT_RGB888, IMAGE_FORMAT_RGB888 );
    }

    if ( !bWriteResult )
    {
        Error( "Couldn't write bitmap data snapshot.\n" );
    }

    free( pImage );
    free( pPaddedImage );

    // Async write to disk (this will take ownership of the memory)
    char szPathedFileName[_MAX_PATH];
    Q_snprintf( szPathedFileName, sizeof(szPathedFileName), "//MOD/%s", pFilename );
    filesystem->AsyncWrite( szPathedFileName, buffer.Base(), buffer.TellPut(), true );

    // Restore our previous state
    pRenderContext->PopRenderTargetAndViewport();

    pRenderContext->MatrixMode( MATERIAL_PROJECTION );
    pRenderContext->PopMatrix();

    pRenderContext->MatrixMode( MATERIAL_VIEW );
    pRenderContext->PopMatrix();

    g_bRenderingScreenshot = false;
#endif
}
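// The padded path above rounds each dimension up with
// SmallestPowerOfTwoGreaterOrEqual(). A minimal sketch of one common way to
// compute that for positive sizes (the tier libraries provide the real
// version; this name is illustrative only):
static int SmallestPowerOfTwoGreaterOrEqual_Sketch( int n )
{
    // Smear the highest set bit of (n - 1) into every lower bit, then add
    // one: 800 -> 1024, 512 -> 512, 1 -> 1.
    unsigned v = (unsigned)( n - 1 );
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    return (int)( v + 1 );
}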
void CViewRender::WriteSaveGameScreenshotOfSize( const char *pFilename, int width, int height )
{
    CMatRenderContextPtr pRenderContext( materials );
    pRenderContext->MatrixMode( MATERIAL_PROJECTION );
    pRenderContext->PushMatrix();

    pRenderContext->MatrixMode( MATERIAL_VIEW );
    pRenderContext->PushMatrix();

    g_bRenderingScreenshot = true;

    // Push back buffer on the stack with small viewport
    pRenderContext->PushRenderTargetAndViewport( NULL, 0, 0, width, height );

    // Render out to the backbuffer
    CViewSetup viewSetup = m_View;
    viewSetup.x = 0;
    viewSetup.y = 0;
    viewSetup.width = width;
    viewSetup.height = height;
    viewSetup.fov = ScaleFOVByWidthRatio( m_View.fov, ( (float)width / (float)height ) / ( 4.0f / 3.0f ) );
    viewSetup.m_bRenderToSubrectOfLargerScreen = true;

    // Draw out the scene
    // Don't draw the HUD or the viewmodel
    RenderView( viewSetup, VIEW_CLEAR_DEPTH | VIEW_CLEAR_COLOR, 0 );

    // Get the data from the backbuffer and save to disk
    // Bitmap bits
    unsigned char *pImage = (unsigned char *)malloc( width * 3 * height );

    // Get bits from the material system
    pRenderContext->ReadPixels( 0, 0, width, height, pImage, IMAGE_FORMAT_RGB888 );

    // Allocate a buffer to write the tga into
    int iMaxTGASize = 1024 + ( width * height * 4 );
    void *pTGA = malloc( iMaxTGASize );
    CUtlBuffer buffer( pTGA, iMaxTGASize );

    if ( !TGAWriter::WriteToBuffer( pImage, buffer, width, height, IMAGE_FORMAT_RGB888, IMAGE_FORMAT_RGB888 ) )
    {
        Error( "Couldn't write bitmap data snapshot.\n" );
    }

    free( pImage );

    // Async write to disk (this will take ownership of the memory)
    char szPathedFileName[_MAX_PATH];
    Q_snprintf( szPathedFileName, sizeof(szPathedFileName), "//MOD/%s", pFilename );
    filesystem->AsyncWrite( szPathedFileName, buffer.Base(), buffer.TellPut(), true );

    // Restore our previous state
    pRenderContext->PopRenderTargetAndViewport();

    pRenderContext->MatrixMode( MATERIAL_PROJECTION );
    pRenderContext->PopMatrix();

    pRenderContext->MatrixMode( MATERIAL_VIEW );
    pRenderContext->PopMatrix();

    g_bRenderingScreenshot = false;
}
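// A note on the buffer sizing in both screenshot paths: the TGA buffer is a
// conservative upper bound rather than an exact size -- 1024 bytes of slack
// for the file header (a TGA header is only 18 bytes) plus 4 bytes per pixel,
// even though an IMAGE_FORMAT_RGB888 payload only needs 3 bytes per pixel.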
//-----------------------------------------------------------------------------
// Purpose: Render current view into specified rectangle
// Input  : *rect -
//-----------------------------------------------------------------------------
void CViewRender::Render( vrect_t *rect )
{
    Assert( s_DbgSetupOrigin == m_View.origin );
    Assert( s_DbgSetupAngles == m_View.angles );

    VPROF_BUDGET( "CViewRender::Render", "CViewRender::Render" );

    vrect_t vr = *rect;

    // Stub out the material system if necessary.
    CMatStubHandler matStub;
    bool drawViewModel;

    engine->EngineStats_BeginFrame();

    // Assume normal vis
    m_bForceNoVis = false;

    float aspectRatio = engine->GetScreenAspectRatio() * 0.75f;	// / (4/3)

    m_View.fov = ScaleFOVByWidthRatio( m_View.fov, aspectRatio );
    m_View.fovViewmodel = ScaleFOVByWidthRatio( m_View.fovViewmodel, aspectRatio );

    // Let the client mode hook stuff.
    g_pClientMode->PreRender( &m_View );
    g_pClientMode->AdjustEngineViewport( vr.x, vr.y, vr.width, vr.height );
    ToolFramework_AdjustEngineViewport( vr.x, vr.y, vr.width, vr.height );

    float flViewportScale = mat_viewportscale.GetFloat();
    float engineAspectRatio = engine->GetScreenAspectRatio();

    m_View.x = vr.x;
    m_View.y = vr.y;
    m_View.width = vr.width * flViewportScale;
    m_View.height = vr.height * flViewportScale;
    m_View.m_flAspectRatio = ( engineAspectRatio > 0.0f ) ? engineAspectRatio : ( (float)m_View.width / (float)m_View.height );

    int nClearFlags = VIEW_CLEAR_DEPTH | VIEW_CLEAR_STENCIL;

    if ( gl_clear_randomcolor.GetBool() )
    {
        CMatRenderContextPtr pRenderContext( materials );
        pRenderContext->ClearColor3ub( rand()%256, rand()%256, rand()%256 );
        pRenderContext->ClearBuffers( true, false, false );
        pRenderContext->Release();
    }
    else if ( gl_clear.GetBool() )
    {
        nClearFlags |= VIEW_CLEAR_COLOR;
    }

    // Determine if we should draw view model ( client mode override )
    drawViewModel = g_pClientMode->ShouldDrawViewModel();

    if ( cl_leveloverview.GetFloat() > 0 )
    {
        SetUpOverView();
        nClearFlags |= VIEW_CLEAR_COLOR;
        drawViewModel = false;
    }

    // Apply any player specific overrides
    C_BasePlayer *pPlayer = C_BasePlayer::GetLocalPlayer();
    if ( pPlayer )
    {
        // Override view model if necessary
        if ( !pPlayer->m_Local.m_bDrawViewmodel )
        {
            drawViewModel = false;
        }
    }

    ApplyHeadShake( &m_View );	// (torbensko)

    render->SetMainView( m_View.origin, m_View.angles );

    int flags = RENDERVIEW_DRAWHUD;
    if ( drawViewModel )
    {
        flags |= RENDERVIEW_DRAWVIEWMODEL;
    }

    RenderView( m_View, nClearFlags, flags );

    g_pClientMode->PostRender();

    engine->EngineStats_EndFrame();

#if !defined( _X360 )
    // Stop stubbing the material system so we can see the budget panel
    matStub.End();
#endif

    // Draw all of the UI stuff "fullscreen"
    CViewSetup view2d;
    view2d.x      = rect->x;
    view2d.y      = rect->y;
    view2d.width  = rect->width;
    view2d.height = rect->height;
    render->Push2DView( view2d, 0, NULL, GetFrustum() );
    render->VGui_Paint( PAINT_UIPANELS );
    render->PopView( GetFrustum() );
}
//-----------------------------------------------------------------------------
// Purpose: Render current view into specified rectangle
// Input  : *rect -
//-----------------------------------------------------------------------------
void CViewRender::Render( vrect_t *rect )
{
    /*static*/ std::vector<smCoord3f> view_head_pos;
    /*static*/ std::vector<smRotEuler> view_head_rot;

    static float learnt_x = 0.0;
    static float learnt_y = 0.0;
    static float learnt_z = fa_default_depth;
    static float learnt_xRot = fa_default_pitch;
    static float learnt_yRot = 0.0;
    static float learnt_zRot = 0.0;

    int i = 0;

    fa_fov_min = ( fov_desired.GetInt() + fov_fapi_window_adj_amount.GetInt() );

    Assert( s_DbgSetupOrigin == m_View.origin );
    Assert( s_DbgSetupAngles == m_View.angles );

    VPROF_BUDGET( "CViewRender::Render", "CViewRender::Render" );

    vrect_t vr = *rect;

    // Stub out the material system if necessary.
    CMatStubHandler matStub;
    bool drawViewModel;

    engine->EngineStats_BeginFrame();

    // Assume normal vis
    m_bForceNoVis = false;

    C_BasePlayer *pPlayer = C_BasePlayer::GetLocalPlayer();

    // IMPORTANT: Please acknowledge the author Torben Sko ([email protected],
    // torbensko.com/software/head_tracking), if you:
    //   1.1 Use or replicate any of the code pertaining to the utilisation of
    //       the head tracking data.
    //   1.2 Use any of the custom assets, including the modified crossbow and
    //       the human character model.

    float aspectRatio = engine->GetScreenAspectRatio() * 0.75f;	// / (4/3)

    if ( pPlayer && pPlayer->IsAlive() && face_api.m_bFaceAPIHasCamera )
    {
        int head_pos_size = 0;

        if ( !fa_paused )
        {
            // Warning: this code does not take parallel operations into account
            if ( head_confidence > 0.0f )
            {
                view_head_pos.push_back( latest_head_pos );
                view_head_rot.push_back( latest_head_rot );
            }

            // Restore to a neutral position on loss of the head by
            // scaling down the last received head position
            static float lost_time = 0.0f;
            if ( fa_lost )
            {
                if ( head_confidence == 0.0f && view_head_pos.size() > 0 )
                {
                    if ( lost_time == 0.0f )
                    {
                        lost_time = engine->Time();
                    }
                    else if ( engine->Time() > lost_time + fa_lost_pause )
                    {
                        smCoord3f previous_offset = view_head_pos.back();
                        previous_offset.x *= fa_lost_scale;
                        previous_offset.y *= fa_lost_scale;
                        previous_offset.z = ((previous_offset.z - learnt_z) * fa_lost_scale) + learnt_z;

                        smRotEuler previous_rotation = view_head_rot.back();
                        previous_rotation.x_rads = ((previous_rotation.x_rads - learnt_xRot) * fa_lost_scale) + learnt_xRot;
                        previous_rotation.y_rads *= fa_lost_scale;
                        previous_rotation.z_rads *= fa_lost_scale;

                        view_head_pos.push_back( previous_offset );
                        view_head_rot.push_back( previous_rotation );
                    }
                }
                else
                {
                    if ( lost_time > 0.0f )
                    {
                        /*char log[40];
                        sprintf(log, "lost the head for %f seconds", engine->Time() - lost_time);
                        record(log);*/
                        lost_time = 0.0f;
                    }
                }
            }

            // Use a while statement in case the user has decreased the
            // smoothing rate since last time
            head_pos_size = view_head_pos.size();
            while ( head_pos_size > fa_smoothing )
            {
                view_head_pos.erase( view_head_pos.begin() );
                view_head_rot.erase( view_head_rot.begin() );
                head_pos_size--;	// keep the loop condition moving so the trim terminates
            }
        }

        x = 0.0f;
        y = 0.0f;
        z = 0.0f;
        float xRot = 0.0f;
        float yRot = 0.0f;
        float zRot = 0.0f;

        // Compute the smoothed head movements
        head_pos_size = view_head_pos.size();
        if ( head_pos_size > 0 )
        {
            for ( i = 0; i < head_pos_size; i++ )
            {
                x += view_head_pos[i].x;
                y += view_head_pos[i].y;
                z += view_head_pos[i].z;
                xRot += view_head_rot[i].x_rads;
                yRot += view_head_rot[i].y_rads;
                zRot += view_head_rot[i].z_rads;
            }
            x /= view_head_pos.size();
            y /= view_head_pos.size();
            z /= view_head_pos.size();
            xRot /= view_head_pos.size();
            yRot /= view_head_pos.size();
            zRot /= view_head_pos.size();
        }

        // Corrects the arching that occurs when moving towards the camera
        if ( fa_arcCorrection )
            y += (z - fa_default_depth) * fa_arcCorrection_scale;

        // Show the head data
        //if(fa_show_preHeadData) DevMsg("   pre: pos\tx:%f\ty:%f\tz:%f\n        rot\tx:%f\ty:%f\tz:%f\n", x, y, z, xRot, yRot, zRot);

        // IMPORTANT: Please acknowledge the author Torben Sko ([email protected],
        // torbensko.com/software/head_tracking), if you:
        //   1.1 Use or replicate any of the code pertaining to the utilisation of
        //       the head tracking data.
        //   1.2 Use any of the custom assets, including the modified crossbow and
        //       the human character model.

        // Learns the player's neutral position
        static bool reset_learning = false;
        if ( !fa_learning )
        {
            if ( reset_learning )
            {
                learnt_x = 0.0f;
                learnt_y = 0.0f;
                learnt_z = fa_default_depth;
                learnt_xRot = fa_default_pitch;
                learnt_yRot = 0.0f;
                learnt_zRot = 0.0f;
                reset_learning = false;	// only reset once after learning is turned off
            }
        }
        else if ( fa_learning && head_confidence > 0.0f && !fa_paused )
        {
            float diff, change;

            diff = learnt_x - x;
            change = (diff != 0.0f) ? (0.0000001 * fa_learning_influence) / diff : 0.0f;
            if ( fabs(change) < fabs(diff) )
                learnt_x -= change;
            else
                learnt_x = x;
            x -= learnt_x;

            diff = learnt_y - y;
            change = (diff != 0.0f) ? (0.0000001 * fa_learning_influence) / diff : 0.0f;
            if ( fabs(change) < fabs(diff) )
                learnt_y -= change;
            else
                learnt_y = y;
            y -= learnt_y;

            diff = learnt_z - z;
            change = (diff != 0.0f) ? (0.0000001 * fa_learning_influence) / diff : 0.0f;
            if ( fabs(change) < fabs(diff) )
                learnt_z -= change;
            else
                learnt_z = z;
            z = fa_default_depth + (z - learnt_z);

            diff = learnt_xRot - xRot;
            change = (diff != 0.0f) ? (0.0000001 * fa_learning_influence) / diff : 0.0f;
            if ( fabs(change) < fabs(diff) )
                learnt_xRot -= change;
            else
                learnt_xRot = xRot;
            xRot = fa_default_pitch + (xRot - learnt_xRot);

            diff = learnt_yRot - yRot;
            change = (diff != 0.0f) ? (0.0000001 * fa_learning_influence) / diff : 0.0f;
            if ( fabs(change) < fabs(diff) )
                learnt_yRot -= change;
            else
                learnt_yRot = yRot;
            yRot -= learnt_yRot;

            diff = learnt_zRot - zRot;
            change = (diff != 0.0f) ? (0.0000001 * fa_learning_influence) / diff : 0.0f;
            if ( fabs(change) < fabs(diff) )
                learnt_zRot -= change;
            else
                learnt_zRot = zRot;
            zRot -= learnt_zRot;

            reset_learning = true;
        }

        // IMPORTANT: Please acknowledge the author Torben Sko ([email protected],
        // torbensko.com/software/head_tracking), if you:
        //   1.1 Use or replicate any of the code pertaining to the utilisation of
        //       the head tracking data.
        //   1.2 Use any of the custom assets, including the modified crossbow and
        //       the human character model.

        // Resets the tracker on low confidence
        static float reset_time = 0.0f;
        static float waiting_for_reset = 0.0f;
        if ( fa_confidenceMinimum )
        {
            if ( waiting_for_reset > 0.0f )
            {
                if ( head_confidence > 0.0f )
                {
                    /*char log[40];
                    sprintf(log, "Reset FaceAPI engine and regained head after %f seconds", engine->Time() - waiting_for_reset);
                    record(log);*/
                    waiting_for_reset = 0.0f;
                }
            }
            else if ( head_confidence < fa_confidenceMinimum_threshold &&
                      learnt_x <= fabs(fa_confidenceMinimum_widthRange) &&
                      learnt_zRot <= fabs(fa_confidenceMinimum_yollRange) )
            {
                if ( reset_time == 0.0f )
                {
                    reset_time = engine->Time() + fa_confidenceMinimum_timeout;
                }
                else if ( engine->Time() > reset_time )
                {
                    //char logMsg[256];
                    reset_time = 0.0f;
                    face_api.reset();
                    waiting_for_reset = engine->Time();

                    // The learnt values were probably wrong, so reset them
                    learnt_x = 0.0f;
                    learnt_y = 0.0f;
                    learnt_z = fa_default_depth;
                    learnt_xRot = fa_default_pitch;
                    learnt_yRot = 0.0f;
                    learnt_zRot = 0.0f;

                    /*sprintf(logMsg, "confidence dropped below %.2f%% for %.2f seconds, whilst (learnt) head.width <= |%.2f| and (learnt) head.roll <= |%.2f|",
                        fa_confidenceMinimum_threshold, fa_confidenceMinimum_timeout, fa_confidenceMinimum_widthRange, fa_confidenceMinimum_yollRange);
                    record(logMsg);*/
                }
            }
            else
            {
                reset_time = 0.0f;
            }
        }

        // IMPORTANT: Please acknowledge the author Torben Sko ([email protected],
        // torbensko.com/software/head_tracking), if you:
        //   1.1 Use or replicate any of the code pertaining to the utilisation of
        //       the head tracking data.
        //   1.2 Use any of the custom assets, including the modified crossbow and
        //       the human character model.

        if ( faceapi_mode.GetInt() > 1 && !engine->IsPaused() )
        {
            // Alters the FOV based on the user's head position
            float forward = fa_fov_depthScale * (z + fa_fov_depthOffset);
            float head_fov = fa_fov_min +
                (1 - fa_fov_influence) * default_fov.GetFloat() +
                fa_fov_influence * (2 * radToDeg(atan((fa_fov_screenWidth / 2) / (forward))));
            m_View.fov = ScaleFOVByWidthRatio( head_fov, aspectRatio );
            m_View.fovViewmodel = m_View.fov * fa_fov_modelViewScale;

            // Rotate the camera based on the user's head offsets
            m_View.angles[YAW] += fa_camRotByHeadOff_globalScale * fa_camRotByHeadOff_yawScale * radToDeg(atan(x / z));
            m_View.angles[PITCH] += fa_camRotByHeadOff_globalScale * fa_camRotByHeadOff_pitchScale * radToDeg(atan(y / z));

            // Offset the camera based on the user's head offsets
            float depth, height, width;

            depth = fa_camOffByHeadOff_depthScale * fa_camOffByHeadOff_globalScale * (z - fa_default_depth);
            m_View.origin.x -= depth * cos(degToRad(m_View.angles[YAW]));
            m_View.origin.y -= depth * sin(degToRad(m_View.angles[YAW]));

            width = fa_camOffByHeadOff_widthScale * fa_camOffByHeadOff_globalScale * x;
            m_View.origin.y -= width * cos(degToRad(m_View.angles[YAW]));
            m_View.origin.x += width * sin(degToRad(m_View.angles[YAW]));

            height = fa_camOffByHeadOff_heightScale * fa_camOffByHeadOff_globalScale * y;
            m_View.origin.z += height;

            // Alters the vanishing point based on the user's head offset
            offHor = -fa_vanish_depth * (x / z);
            offVert = -fa_vanish_depth * (y / z);
            m_View.m_bOffCenter = true;
            m_View.m_flOffCenterTop = 1.0f - offVert;
            m_View.m_flOffCenterBottom = 0.0f - offVert;
            m_View.m_flOffCenterLeft = 0.0f - offHor;
            m_View.m_flOffCenterRight = 1.0f - offHor;
        }
        else
        {
            m_View.fov = ScaleFOVByWidthRatio( m_View.fov, aspectRatio );
            m_View.fovViewmodel = ScaleFOVByWidthRatio( m_View.fovViewmodel, aspectRatio );
            m_View.m_bOffCenter = false;
            offHor = 0.0f;
            offVert = 0.0f;
        }

        // Show the head data
        //if(fa_show_postHeadData) DevMsg("  post: pos\tx:%f\ty:%f\tz:%f\n        rot\tx:%f\ty:%f\tz:%f\n", x, y, z, xRot, yRot, zRot);

        // Show the learnt head data
        //if(fa_show_learntHeadData) DevMsg("learnt: pos\tx:%f\ty:%f\tz:%f\n        rot\tx:%f\ty:%f\tz:%f\n", learnt_x, learnt_y, learnt_z, learnt_xRot, learnt_yRot, learnt_zRot);

        // IMPORTANT: Please acknowledge the author Torben Sko ([email protected],
        // torbensko.com/software/head_tracking), if you:
        //   1.1 Use or replicate any of the code pertaining to the utilisation of
        //       the head tracking data.
        //   1.2 Use any of the custom assets, including the modified crossbow and
        //       the human character model.

        if ( (faceapi_mode.GetInt() == 1 || faceapi_mode.GetInt() == 3) && !engine->IsPaused() )
        {
            //float offPeer = 0.0f;
            float rollPeer = 0.0f;
            //float yawPeer = 0.0f;

            /*if(fa_peering_off)
                if(x > fa_peering_offStart)
                    offPeer = (x - fa_peering_offStart) / (fa_peering_offEnd - fa_peering_offStart);
                else if(x < -fa_peering_offStart)
                    offPeer = (x + fa_peering_offStart) / (fa_peering_offEnd - fa_peering_offStart);*/

            if ( fa_peering_roll )
            {
                if ( zRot > fa_peering_rollStart )
                    rollPeer = -(zRot - fa_peering_rollStart) / (fa_peering_rollEnd - fa_peering_rollStart);
                else if ( zRot < -fa_peering_rollStart )
                    rollPeer = -(zRot + fa_peering_rollStart) / (fa_peering_rollEnd - fa_peering_rollStart);
            }

            /*if(fa_peering_yaw)
                if(yRot > fa_peering_yawStart)
                    yawPeer = -(yRot - fa_peering_yawStart) / (fa_peering_yawEnd - fa_peering_yawStart);
                else if(yRot < -fa_peering_yawStart)
                    yawPeer = -(yRot + fa_peering_yawStart) / (fa_peering_yawEnd - fa_peering_yawStart);*/

            float peer = /*offPeer + */rollPeer/* + yawPeer*/;
            if ( peer > 1.0f )
                peer = 1.0f;
            if ( peer < -1.0f )
                peer = -1.0f;

            if ( peer != 0.0f )
            {
                peer = pow(fabs(peer), fa_peering_ease) * (fabs(peer) / peer);

                QAngle angles = pPlayer->GetViewModel()->GetAbsAngles();
                angles[PITCH] += fabs(peer) * fa_peering_gunTilt;
                pPlayer->GetViewModel()->SetAbsAngles(angles);

                m_View.angles[ROLL] += peer * fa_peering_headTilt;

                Vector eyes, eye_offset;
                eyes = pPlayer->EyePosition();
                float hor_move = peer * fa_peering_size;
                eye_offset.y = -hor_move * cos(degToRad(m_View.angles[YAW]));
                eye_offset.x = hor_move * sin(degToRad(m_View.angles[YAW]));
                eye_offset.z = 0.0f;

                // Don't allow peering through walls
                trace_t tr;
                UTIL_TraceHull(eyes, eyes + eye_offset, PEER_HULL_MIN, PEER_HULL_MAX, MASK_SOLID, pPlayer, COLLISION_GROUP_NONE, &tr);

                eye_offset.z = -fabs(peer) * fa_peering_headLower;
                m_View.origin += eye_offset * tr.fraction;

                static float peer_right = 0.0f;
                if ( peer_right == 0.0f && peer == 1.0f )
                {
                    peer_right = engine->Time();
                }
                else if ( peer_right != 0.0f && peer != 1.0f )
                {
                    /*char log[40];
                    sprintf(log, "peered right for %f seconds", engine->Time() - peer_right);
                    record(log);*/
                    peer_right = 0.0f;
                }

                static float peer_left = 0.0f;
                if ( peer_left == 0.0f && peer == -1.0f )
                {
                    peer_left = engine->Time();
                }
                else if ( peer_left != 0.0f && peer != -1.0f )
                {
                    /*char log[40];
                    sprintf(log, "peered left for %f seconds", engine->Time() - peer_left);
                    record(log);*/
                    peer_left = 0.0f;
                }
            }
        }

        // IMPORTANT: Please acknowledge the author Torben Sko ([email protected],
        // torbensko.com/software/head_tracking), if you:
        //   1.1 Use or replicate any of the code pertaining to the utilisation of
        //       the head tracking data.
        //   1.2 Use any of the custom assets, including the modified crossbow and
        //       the human character model.

        rotate_x = 0.0f;
        rotate_y = 0.0f;
        if ( fa_plyRotByHeadRot )
        {
            if ( fabs(yRot) > fa_plyRotByHeadRot_yawMin )
            {
                float n_yRot = (fabs(yRot) - fa_plyRotByHeadRot_yawMin) / (fa_plyRotByHeadRot_yawMax - fa_plyRotByHeadRot_yawMin);
                if ( n_yRot > 1.0f )
                    n_yRot = 1.0f;
                if ( n_yRot > 0.0f )
                    n_yRot = pow(n_yRot, fa_plyRotByHeadRot_ease);
                rotate_x = n_yRot * fa_plyRotByHeadRot_yawSpeed * (yRot / fabs(yRot));
            }

            float off_xRot = xRot - learnt_xRot;
            if ( fabs(off_xRot) > fa_plyRotByHeadRot_pitchMin )
            {
                float n_xRot = (fabs(off_xRot) - fa_plyRotByHeadRot_pitchMin) / (fa_plyRotByHeadRot_pitchMax - fa_plyRotByHeadRot_pitchMin);
                if ( n_xRot > 1.0f )
                    n_xRot = 1.0f;
                if ( n_xRot > 0.0f )
                    n_xRot = pow(n_xRot, fa_plyRotByHeadRot_ease);
                rotate_y = n_xRot * fa_plyRotByHeadRot_pitchSpeed * (off_xRot / fabs(off_xRot));
            }
        }
    }
    else
    {
        m_View.fov = ScaleFOVByWidthRatio( m_View.fov, aspectRatio );
        m_View.fovViewmodel = ScaleFOVByWidthRatio( m_View.fovViewmodel, aspectRatio );
    }

    //m_View.fov = ScaleFOVByWidthRatio( m_View.fov, aspectRatio );
    //m_View.fovViewmodel = ScaleFOVByWidthRatio( m_View.fovViewmodel, aspectRatio );

    // Let the client mode hook stuff.
    g_pClientMode->PreRender( &m_View );
    g_pClientMode->AdjustEngineViewport( vr.x, vr.y, vr.width, vr.height );
    ToolFramework_AdjustEngineViewport( vr.x, vr.y, vr.width, vr.height );

    float flViewportScale = mat_viewportscale.GetFloat();
    float engineAspectRatio = engine->GetScreenAspectRatio();

    m_View.x = vr.x;
    m_View.y = vr.y;
    m_View.width = vr.width * flViewportScale;
    m_View.height = vr.height * flViewportScale;
    m_View.m_flAspectRatio = ( engineAspectRatio > 0.0f ) ? engineAspectRatio : ( (float)m_View.width / (float)m_View.height );

    int nClearFlags = VIEW_CLEAR_DEPTH | VIEW_CLEAR_STENCIL;

    if ( gl_clear_randomcolor.GetBool() )
    {
        CMatRenderContextPtr pRenderContext( materials );
        pRenderContext->ClearColor3ub( rand()%256, rand()%256, rand()%256 );
        pRenderContext->ClearBuffers( true, false, false );
        pRenderContext->Release();
    }
    else if ( gl_clear.GetBool() )
    {
        nClearFlags |= VIEW_CLEAR_COLOR;
    }

    // Determine if we should draw view model ( client mode override )
    drawViewModel = g_pClientMode->ShouldDrawViewModel();

    if ( cl_leveloverview.GetFloat() > 0 )
    {
        SetUpOverView();
        nClearFlags |= VIEW_CLEAR_COLOR;
        drawViewModel = false;
    }

    // Apply any player specific overrides
    //C_BasePlayer *pPlayer = C_BasePlayer::GetLocalPlayer();
    if ( pPlayer )
    {
        // Override view model if necessary
        if ( !pPlayer->m_Local.m_bDrawViewmodel )
        {
            drawViewModel = false;
        }
    }

    if ( fa_weapon )
        drawViewModel = false;

    render->SetMainView( m_View.origin, m_View.angles );

    int flags = RENDERVIEW_DRAWHUD;
    if ( drawViewModel )
    {
        flags |= RENDERVIEW_DRAWVIEWMODEL;
    }

    RenderView( m_View, nClearFlags, flags );

    g_pClientMode->PostRender();

    engine->EngineStats_EndFrame();

#if !defined( _X360 )
    // Stop stubbing the material system so we can see the budget panel
    matStub.End();
#endif

    // Draw all of the UI stuff "fullscreen"
    CViewSetup view2d;
    view2d.x      = rect->x;
    view2d.y      = rect->y;
    view2d.width  = rect->width;
    view2d.height = rect->height;
    render->Push2DView( view2d, 0, NULL, GetFrustum() );
    render->VGui_Paint( PAINT_UIPANELS );
    render->PopView( GetFrustum() );
}
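// The vanishing-point block above maps a head offset (x, y) at depth z into
// off-center frustum bounds in the engine's normalized [0,1] convention. A
// minimal standalone sketch of the same mapping (names are illustrative only;
// the live code writes straight into m_View):
struct OffCenterBounds_Sketch
{
    float left, right, top, bottom;
};

static OffCenterBounds_Sketch ComputeOffCenter_Sketch( float headX, float headY, float headZ, float vanishDepth )
{
    // Project the head offset through the virtual screen plane; the shift
    // grows with lateral offset and shrinks with distance from the screen.
    float offHor  = -vanishDepth * ( headX / headZ );
    float offVert = -vanishDepth * ( headY / headZ );

    // Slide the unit frustum window opposite the head movement.
    OffCenterBounds_Sketch bounds;
    bounds.left   = 0.0f - offHor;
    bounds.right  = 1.0f - offHor;
    bounds.top    = 1.0f - offVert;
    bounds.bottom = 0.0f - offVert;
    return bounds;
}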
void CViewRender::Render( vrect_t *rect )
{
    VPROF_BUDGET( "CViewRender::Render", "CViewRender::Render" );

    m_bAllowViewAccess = true;

    CUtlVector< vgui::Panel * > roots;
    VGui_GetPanelList( roots );

    // Stub out the material system if necessary.
    CMatStubHandler matStub;

    engine->EngineStats_BeginFrame();

    // Assume normal vis
    m_bForceNoVis = false;

    float flViewportScale = mat_viewportscale.GetFloat();

    vrect_t engineRect = *rect;

    // The tool framework wants to adjust the entire 3d viewport, not the per-split screen one from below
    ToolFramework_AdjustEngineViewport( engineRect.x, engineRect.y, engineRect.width, engineRect.height );

    IterateRemoteSplitScreenViewSlots_Push( true );
    FOR_EACH_VALID_SPLITSCREEN_PLAYER( hh )
    {
        ACTIVE_SPLITSCREEN_PLAYER_GUARD_VGUI( hh );

        CViewSetup &view = GetView( hh );

        float engineAspectRatio = engine->GetScreenAspectRatio( view.width, view.height );

        Assert( s_DbgSetupOrigin[ hh ] == view.origin );
        Assert( s_DbgSetupAngles[ hh ] == view.angles );

        // Using this API gives us a chance to "inset" the 3d views as needed for splitscreen
        int insetX, insetY;
        VGui_GetEngineRenderBounds( hh, view.x, view.y, view.width, view.height, insetX, insetY );

        float aspectRatio = engineAspectRatio * 0.75f;	 // / (4/3)
        view.fov = ScaleFOVByWidthRatio( view.fov, aspectRatio );
        view.fovViewmodel = ScaleFOVByWidthRatio( view.fovViewmodel, aspectRatio );

        // Let the client mode hook stuff.
        GetClientMode()->PreRender( &view );
        GetClientMode()->AdjustEngineViewport( view.x, view.y, view.width, view.height );

        view.width *= flViewportScale;
        view.height *= flViewportScale;
        if ( IsX360() )
        {
            // View must be compliant to resolve restrictions
            view.width = AlignValue( view.width, GPU_RESOLVE_ALIGNMENT );
            view.height = AlignValue( view.height, GPU_RESOLVE_ALIGNMENT );
        }

        view.m_flAspectRatio = ( engineAspectRatio > 0.0f ) ? engineAspectRatio : ( (float)view.width / (float)view.height );

        int nClearFlags = VIEW_CLEAR_DEPTH | VIEW_CLEAR_STENCIL;

        if ( gl_clear_randomcolor.GetBool() )
        {
            CMatRenderContextPtr pRenderContext( materials );
            pRenderContext->ClearColor3ub( rand()%256, rand()%256, rand()%256 );
            pRenderContext->ClearBuffers( true, false, false );
            pRenderContext->Release();
        }
        else if ( gl_clear.GetBool() )
        {
            nClearFlags |= VIEW_CLEAR_COLOR;
        }

        // Determine if we should draw view model ( client mode override )
        bool drawViewModel = GetClientMode()->ShouldDrawViewModel();

        // Apply any player specific overrides
        C_BasePlayer *pPlayer = C_BasePlayer::GetLocalPlayer();
        if ( pPlayer )
        {
            // Override view model if necessary
            if ( !pPlayer->m_Local.m_bDrawViewmodel )
            {
                drawViewModel = false;
            }
        }

        if ( cl_leveloverview.GetFloat() > 0 )
        {
            SetUpOverView();
            nClearFlags |= VIEW_CLEAR_COLOR;
            drawViewModel = false;
        }

        render->SetMainView( view.origin, view.angles );

        int flags = ( pPlayer == NULL ) ? 0 : RENDERVIEW_DRAWHUD;
        if ( drawViewModel )
        {
            flags |= RENDERVIEW_DRAWVIEWMODEL;
        }

        // This is the hook for per-split screen player views
        C_BaseEntity::PreRenderEntities( hh );

        if ( ( ss_debug_draw_player.GetInt() < 0 ) || ( hh == ss_debug_draw_player.GetInt() ) )
        {
            CViewSetup hudViewSetup;
            VGui_GetHudBounds( hh, hudViewSetup.x, hudViewSetup.y, hudViewSetup.width, hudViewSetup.height );
            RenderView( view, hudViewSetup, nClearFlags, flags );
        }

        GetClientMode()->PostRender();
    }

    IterateRemoteSplitScreenViewSlots_Pop();

    engine->EngineStats_EndFrame();

#if !defined( _X360 )
    // Stop stubbing the material system so we can see the budget panel
    matStub.End();
#endif

    // Render the new-style embedded UI
    // TODO: when embedded UI is used for HUD, we will need it to maintain
    // a separate screen for HUD and a separate screen stack for pause menu & main menu.
    // For now only render embedded UI in pause menu & main menu.
#if defined( GAMEUI_UISYSTEM2_ENABLED ) && 0
    BaseModUI::CBaseModPanel *pBaseModPanel = BaseModUI::CBaseModPanel::GetSingletonPtr();
    // Render the new-style embedded UI only if base mod panel is not visible (game-hud),
    // otherwise base mod panel will render the embedded UI on top of video/productscreen
    if ( !pBaseModPanel || !pBaseModPanel->IsVisible() )
    {
        Rect_t uiViewport;
        uiViewport.x      = rect->x;
        uiViewport.y      = rect->y;
        uiViewport.width  = rect->width;
        uiViewport.height = rect->height;
        g_pGameUIGameSystem->Render( uiViewport, gpGlobals->curtime );
    }
#endif

    // Draw all of the UI stuff "fullscreen"
    if ( true ) // For PIXEVENT
    {
#if PIX_ENABLE
        {
            CMatRenderContextPtr pRenderContext( materials );
            PIXEVENT( pRenderContext, "VGui UI" );
        }
#endif
        CViewSetup view2d;
        view2d.x      = rect->x;
        view2d.y      = rect->y;
        view2d.width  = rect->width;
        view2d.height = rect->height;

        render->Push2DView( view2d, 0, NULL, GetFrustum() );
        render->VGui_Paint( PAINT_UIPANELS );
        {
            // The engine here is trying to access CurrentView() etc. which is bogus
            ACTIVE_SPLITSCREEN_PLAYER_GUARD( 0 );
            render->PopView( GetFrustum() );
        }
    }

    m_bAllowViewAccess = false;
}
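// The X360 path above snaps the viewport to GPU_RESOLVE_ALIGNMENT before
// rendering. A minimal sketch of the usual alignment helper it relies on
// (the real AlignValue lives in the tier headers; this assumes the alignment
// is a power of two and the name is illustrative only):
static inline int AlignValue_Sketch( int val, int alignment )
{
    // Round up to the next multiple of 'alignment' by adding (alignment - 1)
    // and masking off the low bits, e.g. AlignValue_Sketch( 637, 32 ) == 640.
    return ( val + alignment - 1 ) & ~( alignment - 1 );
}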
bool RMCompat::PrecacheView( iView* view )
{
    return RenderView( view );
}
void CharacterCast(void)
{
    Word Enemy, count, cycle;
    Word up;
    state_t *StatePtr;

    /* reload level and set things up */

    gamestate.mapon = 0;        /* First level again */
    PrepPlayLoop();             /* Prepare the system */
    viewx = actors[0].x;        /* Mark the starting x,y */
    viewy = actors[0].y;
    topspritescale = 32*2;

    /* go through the cast */

    Enemy = 0;
    cycle = 0;
    do {
        StatePtr = &states[caststate[Enemy]];   /* Init the state pointer */
        count = 1;              /* Force a fall through on first pass */
        up = FALSE;
        for (;;) {
            if (++cycle >= 60*4) {      /* Time up? */
                cycle = 0;              /* Reset the clock */
                if (++Enemy >= NUMCAST) {   /* Next bad guy */
                    Enemy = 0;          /* Reset the bad guy */
                }
                break;
            }
            if (!--count) {
                count = StatePtr->tictime;
                StatePtr = &states[StatePtr->next];
            }
            topspritenum = StatePtr->shapenum;  /* Set the foremost shape # */
            RenderView();               /* Show the 3d view */
            WaitTicks(1);               /* Limit to 15 frames a second */
            ReadSystemJoystick();       /* Read the joystick */
            if (!joystick1 && !up) {
                up = TRUE;
                continue;
            }
            if (!up) {
                continue;
            }
            if (!joystick1) {
                continue;
            }
            if (joystick1 & (JOYPAD_START|JOYPAD_A|JOYPAD_B|JOYPAD_X|JOYPAD_Y)) {
                Enemy = NUMCAST;
                break;
            }
            if ( (joystick1 & (JOYPAD_TL|JOYPAD_LFT)) && Enemy > 0) {
                Enemy--;
                break;
            }
            if ( (joystick1 & (JOYPAD_TR|JOYPAD_RGT)) && Enemy < NUMCAST-1) {
                Enemy++;
                break;
            }
        }
    } while (Enemy < NUMCAST);  /* Still able to show */
    StopSong();     /* Stop the music */
    FadeToBlack();  /* Fade out */
}
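/* A note on the animation loop above: "count = 1" forces the first
   "!--count" test to fire immediately, so each cast member starts on the
   first frame of its state table entry; after that, every state holds for
   its own tictime ticks before StatePtr follows states[StatePtr->next].
   The up/joystick1 checks wait for a release followed by a fresh press
   before paging between cast members, so a held button does not skip
   several entries at once. */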
int main(int argc, char* argv[]) {
    // Parse the command line
    int delayMilliSeconds = 0;
    int realParams = 0;
    for (int i = 1; i < argc; i++) {
        if (argv[i][0] == '-') {
            Usage(argv[0]);
        } else {
            switch (++realParams) {
            case 1:
                delayMilliSeconds = atoi(argv[i]);
                break;
            default:
                Usage(argv[0]);
            }
        }
    }
    if (realParams != 1) {
        Usage(argv[0]);
    }

    // Get an OSVR client context to use to access the devices
    // that we need.
    osvr::clientkit::ClientContext context(
        "com.osvr.renderManager.openGLExample");

    // Construct button devices and connect them to a callback
    // that will set the "quit" variable to true when it is
    // pressed. Use button "1" on the left-hand or
    // right-hand controller.
    osvr::clientkit::Interface leftButton1 =
        context.getInterface("/controller/left/1");
    leftButton1.registerCallback(&myButtonCallback, &quit);

    osvr::clientkit::Interface rightButton1 =
        context.getInterface("/controller/right/1");
    rightButton1.registerCallback(&myButtonCallback, &quit);

    // Open Direct3D and set up the context for rendering to
    // an HMD. Do this using the OSVR RenderManager interface,
    // which maps to the nVidia or other vendor direct mode
    // to reduce the latency.
    osvr::renderkit::RenderManager* render =
        osvr::renderkit::createRenderManager(context.get(), "OpenGL");
    if ((render == nullptr) || (!render->doingOkay())) {
        std::cerr << "Could not create RenderManager" << std::endl;
        return 1;
    }

    // Set up a handler to cause us to exit cleanly.
#ifdef _WIN32
    SetConsoleCtrlHandler((PHANDLER_ROUTINE)CtrlHandler, TRUE);
#endif

    // Open the display and make sure this worked.
    osvr::renderkit::RenderManager::OpenResults ret = render->OpenDisplay();
    if (ret.status == osvr::renderkit::RenderManager::OpenStatus::FAILURE) {
        std::cerr << "Could not open display" << std::endl;
        delete render;
        return 2;
    }

    // Set up the rendering state we need.
    if (!SetupRendering(ret.library)) {
        return 3;
    }

    // Do a call to get the information we need to construct our
    // color and depth render-to-texture buffers.
    std::vector<osvr::renderkit::RenderInfo> renderInfo;
    context.update();
    renderInfo = render->GetRenderInfo();
    std::vector<osvr::renderkit::RenderBuffer> colorBuffers;
    std::vector<GLuint> depthBuffers; //< Depth/stencil buffers to render into

    // Construct the buffers we're going to need for our render-to-texture
    // code.
    GLuint frameBuffer; //< Groups a color buffer and a depth buffer
    glGenFramebuffers(1, &frameBuffer);
    glBindFramebuffer(GL_FRAMEBUFFER, frameBuffer);

    for (size_t i = 0; i < renderInfo.size(); i++) {

        // The color buffer for this eye. We need to put this into
        // a generic structure for the Present function, but we only need
        // to fill in the OpenGL portion.
        //  Note that this must be used to generate a RenderBuffer, not just
        // a texture, if we want to be able to present it to be rendered
        // via Direct3D for DirectMode. This is selected based on the
        // config file value, so we want to be sure to use the more general
        // case.
        //  Note that this texture format must be RGBA and unsigned byte,
        // so that we can present it to Direct3D for DirectMode
        GLuint colorBufferName = 0;
        glGenTextures(1, &colorBufferName); // generate a texture name; it is
                                            // bound and sized as a texture
                                            // below and freed at shutdown
                                            // with glDeleteTextures
        osvr::renderkit::RenderBuffer rb;
        rb.OpenGL = new osvr::renderkit::RenderBufferOpenGL;
        rb.OpenGL->colorBufferName = colorBufferName;
        colorBuffers.push_back(rb);

        // "Bind" the newly created texture : all future texture
        // functions will modify this texture
        glActiveTexture(GL_TEXTURE0);
        glBindTexture(GL_TEXTURE_2D, colorBufferName);

        // Determine the appropriate size for the frame buffer to be used for
        // this eye.
        int width = static_cast<int>(renderInfo[i].viewport.width);
        int height = static_cast<int>(renderInfo[i].viewport.height);

        // Give an empty image to OpenGL ( the last "0" means "empty" )
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA,
                     GL_UNSIGNED_BYTE, 0);

        // Bilinear filtering
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);

        // The depth buffer
        GLuint depthrenderbuffer;
        glGenRenderbuffers(1, &depthrenderbuffer);
        glBindRenderbuffer(GL_RENDERBUFFER, depthrenderbuffer);
        glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT, width,
                              height);
        depthBuffers.push_back(depthrenderbuffer);
    }

    // Register our constructed buffers so that we can use them for
    // presentation.
    if (!render->RegisterRenderBuffers(colorBuffers)) {
        std::cerr << "RegisterRenderBuffers() returned false, cannot continue"
                  << std::endl;
        quit = true;
    }

    // Continue rendering until it is time to quit.
    while (!quit) {
        // Update the context so we get our callbacks called and
        // update tracker state.
        context.update();

        renderInfo = render->GetRenderInfo();

        // Render into each buffer using the specified information.
        for (size_t i = 0; i < renderInfo.size(); i++) {
            RenderView(renderInfo[i], frameBuffer,
                       colorBuffers[i].OpenGL->colorBufferName,
                       depthBuffers[i]);
        }

        // Delay the requested length of time.
        // Busy-wait so we don't get swapped out longer than we wanted.
        auto end = std::chrono::high_resolution_clock::now() +
                   std::chrono::milliseconds(delayMilliSeconds);
        do {
        } while (std::chrono::high_resolution_clock::now() < end);

        // Send the rendered results to the screen
        if (!render->PresentRenderBuffers(colorBuffers, renderInfo)) {
            std::cerr << "PresentRenderBuffers() returned false, maybe because "
                         "it was asked to quit"
                      << std::endl;
            quit = true;
        }
    }

    // Clean up after ourselves.
    glDeleteFramebuffers(1, &frameBuffer);
    for (size_t i = 0; i < renderInfo.size(); i++) {
        glDeleteTextures(1, &colorBuffers[i].OpenGL->colorBufferName);
        delete colorBuffers[i].OpenGL;
        glDeleteRenderbuffers(1, &depthBuffers[i]);
    }

    // Close the Renderer interface cleanly.
    delete render;

    return 0;
}
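// RenderView() here (defined elsewhere in the example) is expected to bind
// frameBuffer and attach the per-eye color texture and depth renderbuffer
// before drawing. A minimal sketch of just that attachment step, using
// standard OpenGL 3.x calls (illustrative only, not the example's actual
// implementation):
static bool BindEyeTarget_Sketch(GLuint frameBuffer, GLuint colorTex,
                                 GLuint depthRb) {
    glBindFramebuffer(GL_FRAMEBUFFER, frameBuffer);
    // The color attachment is a texture so RenderManager can sample and
    // present it...
    glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D,
                           colorTex, 0);
    // ...while depth is a plain renderbuffer, which is never sampled.
    glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT,
                              GL_RENDERBUFFER, depthRb);
    // Fail loudly if this combination is incomplete on the current driver.
    return glCheckFramebufferStatus(GL_FRAMEBUFFER) == GL_FRAMEBUFFER_COMPLETE;
}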
int main(int argc, char* argv[]) {
    // Get an OSVR client context to use to access the devices
    // that we need.
    osvr::clientkit::ClientContext context(
        "com.osvr.renderManager.openGLExample");

    // Construct button devices and connect them to a callback
    // that will set the "quit" variable to true when it is
    // pressed. Use button "1" on the left-hand or
    // right-hand controller.
    osvr::clientkit::Interface leftButton1 =
        context.getInterface("/controller/left/1");
    leftButton1.registerCallback(&myButtonCallback, &quit);

    osvr::clientkit::Interface rightButton1 =
        context.getInterface("/controller/right/1");
    rightButton1.registerCallback(&myButtonCallback, &quit);

    // Open OpenGL and set up the context for rendering to
    // an HMD. Do this using the OSVR RenderManager interface,
    // which maps to the nVidia or other vendor direct mode
    // to reduce the latency.
    OSVR_GraphicsLibraryOpenGL library;
    library.toolkit = nullptr;
    OSVR_RenderManager render;
    OSVR_RenderManagerOpenGL renderOGL;
    if (OSVR_RETURN_SUCCESS != osvrCreateRenderManagerOpenGL(
            context.get(), "OpenGL", library, &render, &renderOGL)) {
        std::cerr << "Could not create the RenderManager" << std::endl;
        return 1;
    }

    // Set up a handler to cause us to exit cleanly.
#ifdef _WIN32
    SetConsoleCtrlHandler((PHANDLER_ROUTINE)CtrlHandler, TRUE);
#endif

    // Open the display and make sure this worked.
    OSVR_OpenResultsOpenGL openResults;
    if ((OSVR_RETURN_SUCCESS != osvrRenderManagerOpenDisplayOpenGL(
             renderOGL, &openResults)) ||
        (openResults.status == OSVR_OPEN_STATUS_FAILURE)) {
        std::cerr << "Could not open display" << std::endl;
        // render is a C-style opaque handle, so destroy it through the API
        // rather than with delete.
        osvrDestroyRenderManager(render);
        return 2;
    }

    // Set up the rendering state we need.
    if (!SetupRendering()) {
        return 3;
    }

    // Do a call to get the information we need to construct our
    // color and depth render-to-texture buffers.
    context.update();

    OSVR_RenderParams renderParams;
    osvrRenderManagerGetDefaultRenderParams(&renderParams);

    OSVR_RenderInfoCollection renderInfoCollection;
    if ((OSVR_RETURN_SUCCESS != osvrRenderManagerGetRenderInfoCollection(
             render, renderParams, &renderInfoCollection))) {
        std::cerr << "Could not get render info" << std::endl;
        return 5;
    }

    OSVR_RenderInfoCount numRenderInfo;
    osvrRenderManagerGetNumRenderInfoInCollection(renderInfoCollection,
                                                  &numRenderInfo);

    std::vector<OSVR_RenderBufferOpenGL> colorBuffers;
    std::vector<GLuint> depthBuffers; //< Depth/stencil buffers to render into

    // Construct the buffers we're going to need for our render-to-texture
    // code.
    GLuint frameBuffer; //< Groups a color buffer and a depth buffer
    glGenFramebuffers(1, &frameBuffer);
    glBindFramebuffer(GL_FRAMEBUFFER, frameBuffer);

    OSVR_RenderManagerRegisterBufferState registerBufferState;
    if ((OSVR_RETURN_SUCCESS != osvrRenderManagerStartRegisterRenderBuffers(
             &registerBufferState))) {
        std::cerr << "Could not start registering render buffers" << std::endl;
        return -4;
    }

    for (size_t i = 0; i < numRenderInfo; i++) {
        // Get the current render info
        OSVR_RenderInfoOpenGL renderInfo = {0};
        if (OSVR_RETURN_SUCCESS !=
            osvrRenderManagerGetRenderInfoFromCollectionOpenGL(
                renderInfoCollection, i, &renderInfo)) {
            std::cerr << "Could not get render info " << i << std::endl;
            return 1;
        }

        // The color buffer for this eye. We need to put this into
        // a generic structure for the Present function, but we only need
        // to fill in the OpenGL portion.
        //  Note that this must be used to generate a RenderBuffer, not just
        // a texture, if we want to be able to present it to be rendered
        // via Direct3D for DirectMode. This is selected based on the
        // config file value, so we want to be sure to use the more general
        // case.
        //  Note that this texture format must be RGBA and unsigned byte,
        // so that we can present it to Direct3D for DirectMode
        GLuint colorBufferName = 0;
        glGenTextures(1, &colorBufferName); // generate a texture name; it is
                                            // bound and sized as a texture
                                            // below and freed at shutdown
                                            // with glDeleteTextures
        OSVR_RenderBufferOpenGL rb;
        rb.colorBufferName = colorBufferName;
        colorBuffers.push_back(rb);

        // "Bind" the newly created texture : all future texture
        // functions will modify this texture
        glActiveTexture(GL_TEXTURE0);
        glBindTexture(GL_TEXTURE_2D, colorBufferName);

        // Determine the appropriate size for the frame buffer to be used for
        // this eye.
        int width = static_cast<int>(renderInfo.viewport.width);
        int height = static_cast<int>(renderInfo.viewport.height);

        // Give an empty image to OpenGL ( the last "0" means "empty" )
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA,
                     GL_UNSIGNED_BYTE, 0);

        // Bilinear filtering
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);

        // The depth buffer
        GLuint depthrenderbuffer;
        glGenRenderbuffers(1, &depthrenderbuffer);
        glBindRenderbuffer(GL_RENDERBUFFER, depthrenderbuffer);
        glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT, width,
                              height);
        depthBuffers.push_back(depthrenderbuffer);

        if (OSVR_RETURN_SUCCESS != osvrRenderManagerRegisterRenderBufferOpenGL(
                                       registerBufferState, rb)) {
            std::cerr << "Could not register render buffer " << i << std::endl;
            return -5;
        }
    }

    // Finish registering our constructed buffers so that we can use them
    // for presentation.
    if ((OSVR_RETURN_SUCCESS != osvrRenderManagerFinishRegisterRenderBuffers(
             render, registerBufferState, false))) {
        std::cerr << "Could not finish registering render buffers"
                  << std::endl;
        quit = true;
    }

    // Continue rendering until it is time to quit.
    while (!quit) {
        // Update the context so we get our callbacks called and
        // update tracker state.
        context.update();

        //renderInfo = render->GetRenderInfo();
        OSVR_RenderInfoCollection renderInfoCollection = {0};
        if (OSVR_RETURN_SUCCESS != osvrRenderManagerGetRenderInfoCollection(
                render, renderParams, &renderInfoCollection)) {
            std::cerr << "Could not get render info in the main loop"
                      << std::endl;
            return -1;
        }
        osvrRenderManagerGetNumRenderInfoInCollection(renderInfoCollection,
                                                      &numRenderInfo);

        // Render into each buffer using the specified information.
        for (size_t i = 0; i < numRenderInfo; i++) {
            OSVR_RenderInfoOpenGL renderInfo = {0};
            osvrRenderManagerGetRenderInfoFromCollectionOpenGL(
                renderInfoCollection, i, &renderInfo);
            RenderView(renderInfo, frameBuffer,
                       colorBuffers[i].colorBufferName, depthBuffers[i]);
        }

        OSVR_RenderManagerPresentState presentState;
        if ((OSVR_RETURN_SUCCESS != osvrRenderManagerStartPresentRenderBuffers(
                &presentState))) {
            std::cerr << "Could not start presenting render buffers"
                      << std::endl;
            return 201;
        }
        OSVR_ViewportDescription fullView;
        fullView.left = fullView.lower = 0;
        fullView.width = fullView.height = 1;
        for (size_t i = 0; i < numRenderInfo; i++) {
            OSVR_RenderInfoOpenGL renderInfo = {0};
            osvrRenderManagerGetRenderInfoFromCollectionOpenGL(
                renderInfoCollection, i, &renderInfo);
            if ((OSVR_RETURN_SUCCESS !=
                 osvrRenderManagerPresentRenderBufferOpenGL(
                     presentState, colorBuffers[i], renderInfo, fullView))) {
                std::cerr << "Could not present render buffer " << i
                          << std::endl;
                return 202;
            }
        }
        if ((OSVR_RETURN_SUCCESS !=
             osvrRenderManagerFinishPresentRenderBuffers(
                 render, presentState, renderParams, false))) {
            std::cerr << "Could not finish presenting render buffers"
                      << std::endl;
            quit = true;
        }
    }

    // Clean up after ourselves.
    glDeleteFramebuffers(1, &frameBuffer);
    for (size_t i = 0; i < colorBuffers.size(); i++) {
        glDeleteTextures(1, &colorBuffers[i].colorBufferName);
        glDeleteRenderbuffers(1, &depthBuffers[i]);
    }

    // Close the Renderer interface cleanly.
    osvrDestroyRenderManager(render);

    return 0;
}
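// A note on the present loop above: OSVR_ViewportDescription here acts as a
// normalized cropping rectangle, so left/lower = 0 with width/height = 1
// presents each registered buffer across its whole eye viewport rather than
// a sub-rectangle; the per-eye pixel bounds still come from the render
// info's own viewport.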
int WINAPI WinMain(HINSTANCE hThisInst, HINSTANCE hPrevInst, LPSTR lpszArgs, int nWinMode)
{
    HWND hwnd;
    MSG msg;
    HDC hdc;
    char buffer[20];

    // Un-comment these variables if a frame rate is to be calculated
    // clock_t start, finish;
    // double duration;

    // Obtain parameters from command line arguments
    char height_file[40] = "";
    char colour_file[40] = "";
    int resolution;

    sscanf(lpszArgs, "%s %s %d %d %d %d", colour_file, height_file, &MAP_HEIGHT, &MAP_WIDTH, &MAX_STEPS, &resolution);

    // Set screen resolution based on command line argument
    switch (resolution)
    {
        case _320x240:
            SCREEN_WIDTH = 320;
            SCREEN_HEIGHT = 240;
            break;
        case _640x480:
            SCREEN_WIDTH = 640;
            SCREEN_HEIGHT = 480;
            break;
        case _800x600:
            SCREEN_WIDTH = 800;
            SCREEN_HEIGHT = 600;
            break;
        case _1024x768:
            SCREEN_WIDTH = 1024;
            SCREEN_HEIGHT = 768;
            break;
        case _1280x1024:
            SCREEN_WIDTH = 1280;
            SCREEN_HEIGHT = 1024;
            break;
        default:
            return 0;
    }

    // Check map size argument values
    if (!MAP_HEIGHT || !MAP_WIDTH)
        return 0;

    // Allocate memory for height and colour maps
    heights = new int*[MAP_HEIGHT];
    colours = new UCHAR*[MAP_HEIGHT];
    for (int index = 0; index < MAP_HEIGHT; index++)
    {
        heights[index] = new int[MAP_WIDTH];
        colours[index] = new UCHAR[MAP_WIDTH];
    }

    // Set initial map heights to zero
    for (int i = 0; i < MAP_HEIGHT; i++)
        for (int j = 0; j < MAP_WIDTH; j++)
            heights[i][j] = 0;

    // Load in colour map
    if (strlen(colour_file) == 0)
        return 0;
    else
        LoadBitmap(colour_file);

    // Load in height map
    if (strlen(height_file) == 0)
        return 0;
    else
        LoadDEM(height_file);

    // Set up initial viewing parameters
    HALF_SCREEN_WIDTH = SCREEN_WIDTH / 2;
    ANGLE_RANGE = (SCREEN_WIDTH * 360 / FOV);
    pitch = -(SCREEN_HEIGHT / 2);
    toRadians = 2.0 * PIE / (double)ANGLE_RANGE;

    // Initialise application window
    if (!(hwnd = InitWindows(hThisInst, hPrevInst, nWinMode, hwnd)))
        return 0;

    // Initialise DirectDraw
    if (!DD_Init(hwnd))
    {
        DestroyWindow(hwnd);
        return 0;
    }

    // Enter main loop
    //   1. Deal with windows events
    //   2. Do asynchronous processing
    while (1)
    {
        // Check windows messages
        if (PeekMessage(&msg, NULL, 0, 0, PM_REMOVE))
        {
            if (msg.message == WM_QUIT)
            {
                DD_Shutdown();
                break;
            }
            TranslateMessage(&msg);
            DispatchMessage(&msg);
        }
        else
        {
            // Main routines:
            // Obtain pointer to display buffer
            memset(&ddsd, 0, sizeof(ddsd));
            ddsd.dwSize = sizeof(ddsd);
            while (lpddsback->Lock(NULL, &ddsd, DDLOCK_SURFACEMEMORYPTR, NULL) != DD_OK)
                ;
            video_buffer = (UCHAR *)ddsd.lpSurface;

            // Clear screen
            memset(video_buffer, 250, SCREEN_WIDTH * SCREEN_HEIGHT);

            // start = clock();

            // Render current view of landscape
            RenderView();

            // finish = clock();

            /* Un-comment this section if screen shots are wanted.
               To save a screenshot press F12.
            if (KEY_DOWN(VK_F12))
            {
                SaveScreen();
            }
            */

            // Release pointer to display memory
            lpddsback->Unlock(NULL);

            // Flip back surface to front
            while (TRUE)
            {
                ddrval = lpddsprimary->Flip(NULL, 0);
                if (ddrval == DD_OK)
                    break;
                if (ddrval == DDERR_SURFACELOST)
                {
                    ddrval = lpddsprimary->Restore();
                    if (ddrval != DD_OK)
                        break;
                }
                if (ddrval != DDERR_WASSTILLDRAWING)
                    break;
            }

            // Check to see if user pressed Escape
            if (KEY_DOWN(VK_ESCAPE))
            {
                // Shut down DirectDraw
                DD_Shutdown();

                // Send Quit event to Windows event handler
                PostMessage(main_window_handle, WM_CLOSE, 0, 0);
            }
        }

        // Un-comment these for frame rates;
        // buffer contains the current frame rate
        // duration = (double)(finish - start);
        // sprintf(buffer, "%2.4f", (1000.0 / (float)(finish - start)));
    }

    // We should never get here
    return (msg.wParam);
}
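// The viewing parameters above imply a column-to-angle mapping: ANGLE_RANGE
// slices the full circle into units such that FOV degrees of it span
// SCREEN_WIDTH columns, and toRadians (2*PIE / ANGLE_RANGE) converts one
// unit back to radians. A minimal sketch of how a ray-casting loop could
// consume them (illustrative only; RenderView() owns the real traversal):
static double ColumnToAngleRadians_Sketch(int column, int viewAngleUnits)
{
    // Offset the column from the view centre, in angle units, then convert
    // to radians for the per-column sin/cos ray direction.
    return (viewAngleUnits + (column - HALF_SCREEN_WIDTH)) * toRadians;
}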
void CAM_Render(CEntity *pen, CDrawPort *pdp)
{
    if (cam_bRecord) {
        if (!_bInitialized) {
            _bInitialized = TRUE;
            SetSpeed(1.0f);
            _fStartTime = _pTimer->CurrentTick();
        }

        FLOATmatrix3D m;
        MakeRotationMatrixFast(m, _cp.cp_aRot);
        FLOAT3D vX, vY, vZ;
        vX(1) = m(1,1); vX(2) = m(2,1); vX(3) = m(3,1);
        vY(1) = m(1,2); vY(2) = m(2,2); vY(3) = m(3,2);
        vZ(1) = m(1,3); vZ(2) = m(2,3); vZ(3) = m(3,3);

        _cp.cp_aRot(1) -= _pInput->GetAxisValue(MOUSE_X_AXIS) * 0.5f;
        _cp.cp_aRot(2) -= _pInput->GetAxisValue(MOUSE_Y_AXIS) * 0.5f;

        if (cam_bMoveForward)      { _cp.cp_vPos -= vZ * cam_fSpeed; }
        if (cam_bMoveBackward)     { _cp.cp_vPos += vZ * cam_fSpeed; }
        if (cam_bMoveLeft)         { _cp.cp_vPos -= vX * cam_fSpeed; }
        if (cam_bMoveRight)        { _cp.cp_vPos += vX * cam_fSpeed; }
        if (cam_bMoveUp)           { _cp.cp_vPos += vY * cam_fSpeed; }
        if (cam_bMoveDown)         { _cp.cp_vPos -= vY * cam_fSpeed; }
        if (cam_bTurnBankingLeft)  { _cp.cp_aRot(3) += 10.0f; }
        if (cam_bTurnBankingRight) { _cp.cp_aRot(3) -= 10.0f; }
        if (cam_bZoomIn)           { _cp.cp_aFOV -= 1.0f; }
        if (cam_bZoomOut)          { _cp.cp_aFOV += 1.0f; }
        if (cam_bZoomDefault)      { _cp.cp_aFOV = 90.0f; }
        // Clamp returns the clamped value, so assign it back
        _cp.cp_aFOV = Clamp(_cp.cp_aFOV, 10.0f, 150.0f);

        if (cam_bResetToPlayer) {
            _cp.cp_vPos = pen->GetPlacement().pl_PositionVector;
            _cp.cp_aRot = pen->GetPlacement().pl_OrientationAngle;
        }

        if (cam_bSnapshot) {
            cam_bSnapshot = FALSE;
            WritePos(_cp);
        }

    } else {
        if (!_bInitialized) {
            _bInitialized = TRUE;
            ReadPos(_cp0);
            ReadPos(_cp1);
            SetSpeed(_cp0.cp_fSpeed);
            _fStartTime = _pTimer->CurrentTick();
        }
        TIME tmNow = _pTimer->GetLerpedCurrentTick() - _fStartTime;
        if (tmNow > _cp1.cp_tmTick) {
            _cp0 = _cp1;
            ReadPos(_cp1);
            SetSpeed(_cp0.cp_fSpeed);
        }
        FLOAT fRatio = (tmNow - _cp0.cp_tmTick) / (_cp1.cp_tmTick - _cp0.cp_tmTick);
        _cp.cp_vPos = Lerp(_cp0.cp_vPos, _cp1.cp_vPos, fRatio);
        _cp.cp_aRot = Lerp(_cp0.cp_aRot, _cp1.cp_aRot, fRatio);
        _cp.cp_aFOV = Lerp(_cp0.cp_aFOV, _cp1.cp_aFOV, fRatio);
    }

    CPlacement3D plCamera;
    plCamera.pl_PositionVector = _cp.cp_vPos;
    plCamera.pl_OrientationAngle = _cp.cp_aRot;

    // init projection parameters
    CPerspectiveProjection3D prPerspectiveProjection;
    prPerspectiveProjection.FOVL() = _cp.cp_aFOV;
    prPerspectiveProjection.ScreenBBoxL() = FLOATaabbox2D(
        FLOAT2D(0.0f, 0.0f),
        FLOAT2D((float)pdp->GetWidth(), (float)pdp->GetHeight()));
    prPerspectiveProjection.AspectRatioL() = 1.0f;
    prPerspectiveProjection.FrontClipDistanceL() = 0.3f;
    CAnyProjection3D prProjection;
    prProjection = prPerspectiveProjection;

    // set up viewer position
    prProjection->ViewerPlacementL() = plCamera;

    // render the view
    RenderView(*pen->en_pwoWorld, *(CEntity*)NULL, prProjection, *pdp);
}
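// The playback branch above is a keyframe interpolator: _cp0 and _cp1 are
// the bracketing camera positions read from the script, fRatio is the
// normalized time between their ticks, and position, rotation, and FOV are
// each blended with the same ratio. A minimal sketch of the scalar blend
// the FLOAT overload of Lerp() presumably performs (illustrative only):
static inline FLOAT Lerp_Sketch(FLOAT f0, FLOAT f1, FLOAT fRatio)
{
    // fRatio = 0 yields f0, fRatio = 1 yields f1, values between blend linearly.
    return f0 + (f1 - f0) * fRatio;
}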
int main(int argc, char* argv[])
{
    // Get an OSVR client context to use to access the devices
    // that we need.
    osvr::clientkit::ClientContext context(
        "com.osvr.renderManager.openGLExample");

    // Construct button devices and connect them to a callback
    // that will set the "quit" variable to true when it is
    // pressed. Use button "1" on the left-hand or
    // right-hand controller.
    osvr::clientkit::Interface leftButton1 =
        context.getInterface("/controller/left/1");
    leftButton1.registerCallback(&myButtonCallback, &quit);
    osvr::clientkit::Interface rightButton1 =
        context.getInterface("/controller/right/1");
    rightButton1.registerCallback(&myButtonCallback, &quit);

    // Use SDL to open a window and then get an OpenGL context for us.
    // Note: This window is not the one that will be used for rendering
    // the OSVR display, but one that will be cleared to a slowly-changing
    // constant color so we can see that we're able to render to both
    // contexts.
    if (!osvr::renderkit::SDLInitQuit()) {
        std::cerr << "Could not initialize SDL" << std::endl;
        return 100;
    }
    SDL_Window* myWindow = SDL_CreateWindow(
        "Test window, not used", 30, 30, 300, 100,
        SDL_WINDOW_OPENGL | SDL_WINDOW_RESIZABLE | SDL_WINDOW_SHOWN);
    if (myWindow == nullptr) {
        std::cerr << "SDL window open failed: Could not get window"
                  << std::endl;
        return 101;
    }
    SDL_GLContext myGLContext;
    myGLContext = SDL_GL_CreateContext(myWindow);
    if (myGLContext == 0) {
        std::cerr << "RenderManagerOpenGL::addOpenGLContext: Could not get "
                     "OpenGL context"
                  << std::endl;
        return 102;
    }

    // Open OpenGL and set up the context for rendering to
    // an HMD. Do this using the OSVR RenderManager interface,
    // which maps to the nVidia or other vendor direct mode
    // to reduce the latency.
    osvr::renderkit::RenderManager* render =
        osvr::renderkit::createRenderManager(context.get(), "OpenGL");
    if ((render == nullptr) || (!render->doingOkay())) {
        std::cerr << "Could not create RenderManager" << std::endl;
        return 1;
    }

    // Set up a handler to cause us to exit cleanly.
#ifdef _WIN32
    SetConsoleCtrlHandler((PHANDLER_ROUTINE)CtrlHandler, TRUE);
#endif

    // Open the display and make sure this worked.
    osvr::renderkit::RenderManager::OpenResults ret = render->OpenDisplay();
    if (ret.status == osvr::renderkit::RenderManager::OpenStatus::FAILURE) {
        std::cerr << "Could not open display" << std::endl;
        delete render;
        return 2;
    }

    // Set up the rendering state we need.
    if (!SetupRendering(ret.library)) {
        return 3;
    }

    // Do a call to get the information we need to construct our
    // color and depth render-to-texture buffers.
    std::vector<osvr::renderkit::RenderInfo> renderInfo;
    context.update();
    renderInfo = render->GetRenderInfo();
    std::vector<osvr::renderkit::RenderBuffer> colorBuffers;
    std::vector<GLuint> depthBuffers; //< Depth/stencil buffers to render into

    // Initialize the textures with our window's context open,
    // so that they will be associated with it.
    SDL_GL_MakeCurrent(myWindow, myGLContext);

    // Construct the buffers we're going to need for our render-to-texture
    // code.
    GLuint frameBuffer; //< Groups a color buffer and a depth buffer
    glGenFramebuffers(1, &frameBuffer);
    glBindFramebuffer(GL_FRAMEBUFFER, frameBuffer);

    for (size_t i = 0; i < renderInfo.size(); i++) {
        // The color buffer for this eye. We need to put this into
        // a generic structure for the Present function, but we only need
        // to fill in the OpenGL portion.
        // Note that this must be used to generate a RenderBuffer, not just
        // a texture, if we want to be able to present it to be rendered
        // via Direct3D for DirectMode.
        // This is selected based on the
        // config file value, so we want to be sure to use the more general
        // case.
        // Note that this texture format must be RGBA and unsigned byte,
        // so that we can present it to Direct3D for DirectMode.
        GLuint colorBufferName = 0;
        glGenTextures(1, &colorBufferName);
        osvr::renderkit::RenderBuffer rb;
        rb.OpenGL = new osvr::renderkit::RenderBufferOpenGL;
        rb.OpenGL->colorBufferName = colorBufferName;
        colorBuffers.push_back(rb);

        // "Bind" the newly created texture: all future texture
        // functions will modify this texture.
        glActiveTexture(GL_TEXTURE0);
        glBindTexture(GL_TEXTURE_2D, colorBufferName);

        // Determine the appropriate size for the frame buffer to be used for
        // this eye.
        int width = static_cast<int>(renderInfo[i].viewport.width);
        int height = static_cast<int>(renderInfo[i].viewport.height);

        // Give an empty image to OpenGL (the last "0" means "empty").
        // Note that whether or not the second GL_RGBA is turned into
        // GL_BGRA, the first one should remain GL_RGBA -- it is specifying
        // the size. If the second is changed to GL_RGB or GL_BGR, then
        // the first should become GL_RGB.
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA,
                     GL_UNSIGNED_BYTE, 0);

        // Bilinear filtering
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);

        // The depth buffer
        GLuint depthrenderbuffer;
        glGenRenderbuffers(1, &depthrenderbuffer);
        glBindRenderbuffer(GL_RENDERBUFFER, depthrenderbuffer);
        glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT,
                              width, height);
        depthBuffers.push_back(depthrenderbuffer);
    }

    // Register our constructed buffers so that we can use them for
    // presentation.
    if (!render->RegisterRenderBuffers(colorBuffers)) {
        std::cerr << "RegisterRenderBuffers() returned false, cannot continue"
                  << std::endl;
        quit = true;
    }

    // Continue rendering until it is time to quit.
    while (!quit) {
        // Update the context so we get our callbacks called and
        // update tracker state.
        context.update();

        renderInfo = render->GetRenderInfo();

        // Render into each buffer using the specified information.
        for (size_t i = 0; i < renderInfo.size(); i++) {
            RenderView(renderInfo[i], frameBuffer,
                       colorBuffers[i].OpenGL->colorBufferName,
                       depthBuffers[i]);
        }

        // Send the rendered results to the screen.
        if (!render->PresentRenderBuffers(colorBuffers, renderInfo)) {
            std::cerr << "PresentRenderBuffers() returned false, maybe because "
                         "it was asked to quit"
                      << std::endl;
            quit = true;
        }

        // Draw something in our window, just looping the background color.
        // Render to the standard framebuffer in our own window.
        // Because we bind a different frame buffer in our draw routine, we
        // need to put this back here.
        SDL_GL_MakeCurrent(myWindow, myGLContext);
        glBindFramebuffer(GL_FRAMEBUFFER, 0);
        static GLfloat bg = 0;
        glViewport(static_cast<GLint>(0), static_cast<GLint>(0),
                   static_cast<GLint>(300), static_cast<GLint>(100));
        glClearColor(bg, bg, bg, 1.0f);
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
        SDL_GL_SwapWindow(myWindow);
        bg += 0.003f;
        if (bg > 1) {
            bg = 0;
        }
    }

    // Clean up after ourselves.
    glDeleteFramebuffers(1, &frameBuffer);
    for (size_t i = 0; i < renderInfo.size(); i++) {
        glDeleteTextures(1, &colorBuffers[i].OpenGL->colorBufferName);
        delete colorBuffers[i].OpenGL;
        glDeleteRenderbuffers(1, &depthBuffers[i]);
    }

    // Close the Renderer interface cleanly.
    delete render;
    return 0;
}
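// The RenderView() called in the loop above is not shown in this excerpt. A
// sketch of the render-to-texture half of such a function, assuming the
// buffer names created above (RenderViewSketch is a hypothetical name; the
// per-eye projection/model-view setup and scene drawing are omitted):
void RenderViewSketch(GLuint frameBuffer, GLuint colorBufferName,
                      GLuint depthBufferName, int width, int height)
{
    // Attach this eye's color texture and depth renderbuffer to the FBO.
    glBindFramebuffer(GL_FRAMEBUFFER, frameBuffer);
    glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
                           GL_TEXTURE_2D, colorBufferName, 0);
    glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT,
                              GL_RENDERBUFFER, depthBufferName);

    // Always check that the framebuffer is complete before drawing into it.
    if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE) {
        std::cerr << "RenderViewSketch: framebuffer not complete" << std::endl;
        return;
    }

    // Restrict drawing to this eye's viewport and clear the previous frame.
    glViewport(0, 0, width, height);
    glClearColor(0, 0, 0, 1.0f);
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

    // ... set the projection and model-view transforms from this eye's
    // RenderInfo and draw the scene here ...
}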