bool VertexManager::ReserveConstantStorage()
{
  static constexpr u32 reserve_size =
      static_cast<u32>(std::max({sizeof(PixelShaderConstants), sizeof(VertexShaderConstants),
                                 sizeof(GeometryShaderConstants)}));
  if (m_uniform_stream_buffer.ReserveMemory(reserve_size,
                                            D3D12_CONSTANT_BUFFER_DATA_PLACEMENT_ALIGNMENT))
  {
    return true;
  }

  // The only places that call constant updates are safe to have state restored.
  WARN_LOG(VIDEO, "Executing command list while waiting for space in uniform buffer");
  Renderer::GetInstance()->ExecuteCommandList(false);

  // Since we are on a new command buffer, all constants have been invalidated, and we need
  // to reupload them. We may as well do this now, since we're issuing a draw anyway.
  UploadAllConstants();
  return false;
}
bool StateTracker::ReserveConstantStorage()
{
  // Since we invalidate all constants on command buffer execution, it doesn't matter if this
  // causes the stream buffer to be resized.
  if (m_uniform_stream_buffer->ReserveMemory(m_uniform_buffer_reserve_size,
                                             g_vulkan_context->GetUniformBufferAlignment(), false,
                                             false, false))
  {
    return true;
  }

  // The only places that call constant updates are safe to have state restored.
  WARN_LOG(VIDEO, "Executing command buffer while waiting for space in uniform buffer");
  Util::ExecuteCurrentCommandsAndRestoreState(false);

  // Since we are on a new command buffer, all constants have been invalidated, and we need
  // to reupload them. We may as well do this now, since we're issuing a draw anyway.
  UploadAllConstants();
  return false;
}
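// For context, a minimal sketch of how a constant-update path typically uses
// ReserveConstantStorage(): reserve space for one constant block, copy it into the mapped
// stream buffer, then commit only the bytes written. The UpdateVertexShaderConstants() name,
// the VertexShaderManager::dirty flag, and the GetCurrentHostPointer()/CommitMemory() helpers
// are assumed here for illustration and may differ from the real callers.
void StateTracker::UpdateVertexShaderConstants()
{
  // If ReserveConstantStorage() returned false, UploadAllConstants() has already re-uploaded
  // everything on the new command buffer, so the early return is safe.
  if (!VertexShaderManager::dirty || !ReserveConstantStorage())
    return;

  std::memcpy(m_uniform_stream_buffer->GetCurrentHostPointer(), &VertexShaderManager::constants,
              sizeof(VertexShaderConstants));
  m_uniform_stream_buffer->CommitMemory(sizeof(VertexShaderConstants));
  VertexShaderManager::dirty = false;
}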
bool VertexManager::Initialize()
{
  if (!m_vertex_stream_buffer.AllocateBuffer(VERTEX_STREAM_BUFFER_SIZE) ||
      !m_index_stream_buffer.AllocateBuffer(INDEX_STREAM_BUFFER_SIZE) ||
      !m_uniform_stream_buffer.AllocateBuffer(UNIFORM_STREAM_BUFFER_SIZE) ||
      !m_texel_stream_buffer.AllocateBuffer(TEXEL_STREAM_BUFFER_SIZE))
  {
    PanicAlert("Failed to allocate streaming buffers");
    return false;
  }

  static constexpr std::array<std::pair<TexelBufferFormat, DXGI_FORMAT>, NUM_TEXEL_BUFFER_FORMATS>
      format_mapping = {{
          {TEXEL_BUFFER_FORMAT_R8_UINT, DXGI_FORMAT_R8_UINT},
          {TEXEL_BUFFER_FORMAT_R16_UINT, DXGI_FORMAT_R16_UINT},
          {TEXEL_BUFFER_FORMAT_RGBA8_UINT, DXGI_FORMAT_R8G8B8A8_UINT},
          {TEXEL_BUFFER_FORMAT_R32G32_UINT, DXGI_FORMAT_R32G32_UINT},
      }};

  for (const auto& it : format_mapping)
  {
    DescriptorHandle& dh = m_texel_buffer_views[it.first];
    if (!g_dx_context->GetDescriptorHeapManager().Allocate(&dh))
    {
      PanicAlert("Failed to allocate descriptor for texel buffer");
      return false;
    }

    D3D12_SHADER_RESOURCE_VIEW_DESC srv_desc = {it.second, D3D12_SRV_DIMENSION_BUFFER,
                                                D3D12_DEFAULT_SHADER_4_COMPONENT_MAPPING};
    srv_desc.Buffer.NumElements =
        m_texel_stream_buffer.GetSize() / GetTexelBufferElementSize(it.first);
    g_dx_context->GetDevice()->CreateShaderResourceView(m_texel_stream_buffer.GetBuffer(),
                                                        &srv_desc, dh.cpu_handle);
  }

  UploadAllConstants();
  return true;
}
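// The NumElements computation above divides the texel buffer size by the per-format element
// size. A plausible implementation of GetTexelBufferElementSize(), assumed here for
// illustration, is a simple lookup keyed by format (in the same order as format_mapping):
u32 VertexManager::GetTexelBufferElementSize(TexelBufferFormat format)
{
  // R8 = 1 byte, R16 = 2 bytes, RGBA8 = 4 bytes, R32G32 = 8 bytes.
  static constexpr std::array<u32, NUM_TEXEL_BUFFER_FORMATS> element_sizes = {{1, 2, 4, 8}};
  return element_sizes[static_cast<u32>(format)];
}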
bool StateTracker::Initialize()
{
  // Set some sensible defaults
  m_pipeline_state.rasterization_state.cull_mode = VK_CULL_MODE_NONE;
  m_pipeline_state.rasterization_state.per_sample_shading = VK_FALSE;
  m_pipeline_state.rasterization_state.depth_clamp = VK_FALSE;
  m_pipeline_state.depth_stencil_state.test_enable = VK_TRUE;
  m_pipeline_state.depth_stencil_state.write_enable = VK_TRUE;
  m_pipeline_state.depth_stencil_state.compare_op = VK_COMPARE_OP_LESS;
  m_pipeline_state.blend_state.blend_enable = VK_FALSE;
  m_pipeline_state.blend_state.blend_op = VK_BLEND_OP_ADD;
  m_pipeline_state.blend_state.src_blend = VK_BLEND_FACTOR_ONE;
  m_pipeline_state.blend_state.dst_blend = VK_BLEND_FACTOR_ZERO;
  m_pipeline_state.blend_state.alpha_blend_op = VK_BLEND_OP_ADD;
  m_pipeline_state.blend_state.src_alpha_blend = VK_BLEND_FACTOR_ONE;
  m_pipeline_state.blend_state.dst_alpha_blend = VK_BLEND_FACTOR_ZERO;
  m_pipeline_state.blend_state.logic_op_enable = VK_FALSE;
  m_pipeline_state.blend_state.logic_op = VK_LOGIC_OP_CLEAR;
  m_pipeline_state.blend_state.write_mask = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT |
                                            VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT;

  // Enable depth clamping if supported by driver.
  if (g_ActiveConfig.backend_info.bSupportsDepthClamp)
    m_pipeline_state.rasterization_state.depth_clamp = VK_TRUE;

  // BBox is disabled by default.
  m_pipeline_state.pipeline_layout = g_object_cache->GetPipelineLayout(PIPELINE_LAYOUT_STANDARD);
  m_num_active_descriptor_sets = NUM_GX_DRAW_DESCRIPTOR_SETS;
  m_bbox_enabled = false;

  // Initialize all samplers to point by default
  for (size_t i = 0; i < NUM_PIXEL_SHADER_SAMPLERS; i++)
  {
    m_bindings.ps_samplers[i].imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
    m_bindings.ps_samplers[i].imageView = VK_NULL_HANDLE;
    m_bindings.ps_samplers[i].sampler = g_object_cache->GetPointSampler();
  }

  // Create the streaming uniform buffer
  m_uniform_stream_buffer =
      StreamBuffer::Create(VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, INITIAL_UNIFORM_STREAM_BUFFER_SIZE,
                           MAXIMUM_UNIFORM_STREAM_BUFFER_SIZE);
  if (!m_uniform_stream_buffer)
  {
    PanicAlert("Failed to create uniform stream buffer");
    return false;
  }

  // The validation layer complains if max(offsets) + max(ubo_ranges) >= ubo_size.
  // To work around this we reserve the maximum buffer size at all times, but only commit
  // as many bytes as we use.
  m_uniform_buffer_reserve_size = sizeof(PixelShaderConstants);
  m_uniform_buffer_reserve_size =
      Common::AlignUp(m_uniform_buffer_reserve_size,
                      g_vulkan_context->GetUniformBufferAlignment()) +
      sizeof(VertexShaderConstants);
  m_uniform_buffer_reserve_size =
      Common::AlignUp(m_uniform_buffer_reserve_size,
                      g_vulkan_context->GetUniformBufferAlignment()) +
      sizeof(GeometryShaderConstants);

  // Default dirty flags include all descriptors
  InvalidateDescriptorSets();
  SetPendingRebind();

  // Set default constants
  UploadAllConstants();
  return true;
}
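// A rough sketch, not the code above, of what UploadAllConstants() needs to do after a command
// buffer submission: reserve one region covering all three constant blocks (the same size that
// was computed into m_uniform_buffer_reserve_size), copy each block at an alignment-padded
// offset, and commit the whole range. The *ShaderManager::constants sources and the
// GetCurrentHostPointer()/CommitMemory() helpers are assumptions for illustration.
void StateTracker::UploadAllConstants()
{
  const size_t alignment = g_vulkan_context->GetUniformBufferAlignment();
  const size_t ps_offset = 0;
  const size_t vs_offset = Common::AlignUp(ps_offset + sizeof(PixelShaderConstants), alignment);
  const size_t gs_offset = Common::AlignUp(vs_offset + sizeof(VertexShaderConstants), alignment);
  const size_t total_size = gs_offset + sizeof(GeometryShaderConstants);

  // The stream buffer was just emptied by the submit, so this reservation should not fail.
  if (!m_uniform_stream_buffer->ReserveMemory(total_size, alignment, true, true, false))
  {
    PanicAlert("Failed to reserve space for constants");
    return;
  }

  u8* dst = m_uniform_stream_buffer->GetCurrentHostPointer();
  std::memcpy(dst + ps_offset, &PixelShaderManager::constants, sizeof(PixelShaderConstants));
  std::memcpy(dst + vs_offset, &VertexShaderManager::constants, sizeof(VertexShaderConstants));
  std::memcpy(dst + gs_offset, &GeometryShaderManager::constants, sizeof(GeometryShaderConstants));
  m_uniform_stream_buffer->CommitMemory(total_size);

  // The dynamic uniform buffer offsets for the three ranges would be updated here as well.
}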