// Uploads the CPU-side vertex and index data for the pending draw into a single
// shared D3D11 dynamic buffer, using the MAP_WRITE_NO_OVERWRITE / MAP_WRITE_DISCARD
// ring-buffer streaming pattern. Vertex data is placed first, index data packed
// immediately after it; the resulting offsets are stored in m_vertexDrawOffset /
// m_indexDrawOffset for the draw call.
// stride: size in bytes of one vertex; the write cursor is aligned up to it so the
//         base-vertex offset divides evenly by the stride.
void VertexManager::PrepareDrawBuffers(u32 stride)
{
  D3D11_MAPPED_SUBRESOURCE map;
  u32 vertexBufferSize = u32(s_pCurBufferPointer - s_pBaseBufferPointer);
  u32 indexBufferSize = IndexGenerator::GetIndexLen() * sizeof(u16);
  u32 totalBufferSize = vertexBufferSize + indexBufferSize;
  u32 cursor = m_bufferCursor;
  // Round the cursor up to the next multiple of the vertex stride.
  u32 padding = cursor % stride;
  if (padding)
  {
    cursor += stride - padding;
  }
  // NO_OVERWRITE promises the driver we won't touch data the GPU may still be
  // reading, so the Map() does not stall.
  D3D11_MAP MapType = D3D11_MAP_WRITE_NO_OVERWRITE;
  if (cursor + totalBufferSize >= MAX_BUFFER_SIZE)
  {
    // Wrap around: advance to the next buffer in the pool and DISCARD it, which
    // hands us a fresh allocation without waiting on in-flight GPU work.
    m_currentBuffer = (m_currentBuffer + 1) % MAX_BUFFER_COUNT;
    cursor = 0;
    MapType = D3D11_MAP_WRITE_DISCARD;
  }
  m_vertexDrawOffset = cursor;
  m_indexDrawOffset = cursor + vertexBufferSize;  // indices packed right after the vertices
  // NOTE(review): the Map() HRESULT is not checked; a failure here would write
  // through a null pointer — confirm this is acceptable for this backend.
  D3D::context->Map(m_buffers[m_currentBuffer].get(), 0, MapType, 0, &map);
  u8* mappedData = reinterpret_cast<u8*>(map.pData);
  memcpy(mappedData + m_vertexDrawOffset, s_pBaseBufferPointer, vertexBufferSize);
  memcpy(mappedData + m_indexDrawOffset, m_index_buffer_start, indexBufferSize);
  D3D::context->Unmap(m_buffers[m_currentBuffer].get(), 0);
  m_bufferCursor = cursor + totalBufferSize;
  ADDSTAT(stats.thisFrame.bytesVertexStreamed, vertexBufferSize);
  ADDSTAT(stats.thisFrame.bytesIndexStreamed, indexBufferSize);
}
// Streams the generated vertex and index data from the CPU staging buffers into
// the GPU stream buffers, recording the resulting base vertex and index offset
// for the upcoming draw.
void VertexManager::PrepareDrawBuffers(u32 stride)
{
  const u32 vtx_bytes = IndexGenerator::GetNumVerts() * stride;
  const u32 idx_bytes = IndexGenerator::GetIndexLen() * sizeof(u16);

  // Stream() returns the byte offset the data landed at; divide by stride to
  // turn the vertex offset into a base-vertex value.
  s_baseVertex = s_vertexBuffer->Stream(vtx_bytes, stride, m_cpu_v_buffer.data()) / stride;
  s_index_offset = s_indexBuffer->Stream(idx_bytes, m_cpu_i_buffer.data());

  ADDSTAT(stats.thisFrame.bytesVertexStreamed, vtx_bytes);
  ADDSTAT(stats.thisFrame.bytesIndexStreamed, idx_bytes);
}
// Records the draw offsets of the vertex/index data that was just written and
// updates the streaming statistics. No data is copied here; it is already in
// place in the buffers.
void VertexManager::PrepareDrawBuffers(u32 stride)
{
  const u32 streamed_vertex_bytes = u32(s_pCurBufferPointer - s_pBaseBufferPointer);
  s_lastIndexWriteSize = IndexGenerator::GetIndexLen() * sizeof(u16);

  // Offsets are measured from the start of each buffer to where this batch began.
  m_vertexDrawOffset = (u32)(s_pCurBufferPointerBeforeWrite - s_pBaseBufferPointer);
  m_indexDrawOffset = (u32)(s_pIndexBufferPointer - (u8*)m_indexBufferData);

  ADDSTAT(stats.thisFrame.bytesVertexStreamed, streamed_vertex_bytes);
  ADDSTAT(stats.thisFrame.bytesIndexStreamed, s_lastIndexWriteSize);
}
// Shrinks the previously reserved stream-buffer allocations to the sizes that
// were actually written, then updates the streaming statistics.
void VertexManager::PrepareDrawBuffers(u32 stride)
{
  const u32 vtx_bytes = IndexGenerator::GetNumVerts() * stride;
  const u32 idx_bytes = IndexGenerator::GetIndexLen() * sizeof(u16);

  m_vertex_stream_buffer->OverrideSizeOfPreviousAllocation(vtx_bytes);
  m_index_stream_buffer->OverrideSizeOfPreviousAllocation(idx_bytes);

  ADDSTAT(stats.thisFrame.bytesVertexStreamed, vtx_bytes);
  ADDSTAT(stats.thisFrame.bytesIndexStreamed, idx_bytes);
}
// Unmaps the vertex and index stream buffers, committing exactly the number of
// bytes that were written for this draw, and updates the streaming statistics.
void VertexManager::PrepareDrawBuffers(u32 stride)
{
  const u32 vtx_bytes = IndexGenerator::GetNumVerts() * stride;
  const u32 idx_bytes = IndexGenerator::GetIndexLen() * sizeof(u16);

  s_vertexBuffer->Unmap(vtx_bytes);
  s_indexBuffer->Unmap(idx_bytes);

  ADDSTAT(stats.thisFrame.bytesVertexStreamed, vtx_bytes);
  ADDSTAT(stats.thisFrame.bytesIndexStreamed, idx_bytes);
}
// Commits the streamed vertex/index data to the Vulkan stream buffers and
// points the state tracker at them for the upcoming draw.
void VertexManager::PrepareDrawBuffers(u32 stride)
{
  const size_t vtx_bytes = IndexGenerator::GetNumVerts() * stride;
  const size_t idx_bytes = IndexGenerator::GetIndexLen() * sizeof(u16);

  m_vertex_stream_buffer->CommitMemory(vtx_bytes);
  m_index_stream_buffer->CommitMemory(idx_bytes);

  ADDSTAT(stats.thisFrame.bytesVertexStreamed, static_cast<int>(vtx_bytes));
  ADDSTAT(stats.thisFrame.bytesIndexStreamed, static_cast<int>(idx_bytes));

  // Bind both buffers for the draw (u16 indices throughout).
  StateTracker::GetInstance()->SetVertexBuffer(m_vertex_stream_buffer->GetBuffer(), 0);
  StateTracker::GetInstance()->SetIndexBuffer(m_index_stream_buffer->GetBuffer(), 0, VK_INDEX_TYPE_UINT16);
}
// Finalizes the vertex/index uploads for a draw: reports the base vertex and
// base index implied by the current buffer offsets, verifies bindings, and
// unmaps (commits) both buffers.
// out_base_vertex / out_base_index: element-granular offsets into the buffers
// at which this draw's data begins.
void VertexManager::CommitBuffer(u32 num_vertices, u32 vertex_stride, u32 num_indices, u32* out_base_vertex, u32* out_base_index)
{
  const u32 vtx_bytes = num_vertices * vertex_stride;
  const u32 idx_bytes = num_indices * sizeof(u16);

  // The base offsets must be captured before Unmap() advances the buffers.
  if (vertex_stride > 0)
    *out_base_vertex = m_vertex_buffer->GetCurrentOffset() / vertex_stride;
  else
    *out_base_vertex = 0;
  *out_base_index = m_index_buffer->GetCurrentOffset() / sizeof(u16);

  CheckBufferBinding();

  m_vertex_buffer->Unmap(vtx_bytes);
  m_index_buffer->Unmap(idx_bytes);

  ADDSTAT(stats.thisFrame.bytesVertexStreamed, vtx_bytes);
  ADDSTAT(stats.thisFrame.bytesIndexStreamed, idx_bytes);
}
bool VertexManager::UploadTexelBuffer(const void* data, u32 data_size, TexelBufferFormat format, u32* out_offset) { if (data_size > m_texel_stream_buffer.GetSize()) return false; const u32 elem_size = GetTexelBufferElementSize(format); if (!m_texel_stream_buffer.ReserveMemory(data_size, elem_size)) { // Try submitting cmdbuffer. WARN_LOG(VIDEO, "Submitting command buffer while waiting for space in texel buffer"); Renderer::GetInstance()->ExecuteCommandList(false); if (!m_texel_stream_buffer.ReserveMemory(data_size, elem_size)) { PanicAlert("Failed to allocate %u bytes from texel buffer", data_size); return false; } } std::memcpy(m_texel_stream_buffer.GetCurrentHostPointer(), data, data_size); *out_offset = static_cast<u32>(m_texel_stream_buffer.GetCurrentOffset()) / elem_size; m_texel_stream_buffer.CommitMemory(data_size); ADDSTAT(stats.thisFrame.bytesUniformStreamed, data_size); Renderer::GetInstance()->SetTextureDescriptor(0, m_texel_buffer_views[format].cpu_handle); return true; }
void ProgramShaderCache::UploadConstants() { if (PixelShaderManager::IsDirty() || VertexShaderManager::IsDirty() || GeometryShaderManager::IsDirty()) { auto buffer = s_buffer->Map(s_ubo_buffer_size, s_ubo_align); u8* dst = buffer.first; size_t pixel_buffer_size = PixelShaderManager::ConstantBufferSize * sizeof(float); memcpy(buffer.first, PixelShaderManager::GetBuffer(), pixel_buffer_size); dst += ROUND_UP(pixel_buffer_size, s_ubo_align); size_t vertex_buffer_size = VertexShaderManager::ConstantBufferSize * sizeof(float); memcpy(dst, VertexShaderManager::GetBuffer(), vertex_buffer_size); dst += ROUND_UP(vertex_buffer_size, s_ubo_align); memcpy(dst, &GeometryShaderManager::constants, sizeof(GeometryShaderConstants)); s_buffer->Unmap(s_ubo_buffer_size); glBindBufferRange(GL_UNIFORM_BUFFER, 1, s_buffer->m_buffer, buffer.second, pixel_buffer_size); glBindBufferRange(GL_UNIFORM_BUFFER, 2, s_buffer->m_buffer, buffer.second + ROUND_UP(pixel_buffer_size, s_ubo_align), vertex_buffer_size); glBindBufferRange(GL_UNIFORM_BUFFER, 3, s_buffer->m_buffer, buffer.second + ROUND_UP(pixel_buffer_size, s_ubo_align) + ROUND_UP(vertex_buffer_size, s_ubo_align), sizeof(GeometryShaderConstants)); PixelShaderManager::Clear(); VertexShaderManager::Clear(); GeometryShaderManager::Clear(); ADDSTAT(stats.thisFrame.bytesUniformStreamed, s_ubo_buffer_size); } }
void ProgramShaderCache::UploadConstants() { if (PixelShaderManager::dirty || VertexShaderManager::dirty || GeometryShaderManager::dirty) { auto buffer = s_buffer->Map(s_ubo_buffer_size, s_ubo_align); memcpy(buffer.first, &PixelShaderManager::constants, sizeof(PixelShaderConstants)); memcpy(buffer.first + ROUND_UP(sizeof(PixelShaderConstants), s_ubo_align), &VertexShaderManager::constants, sizeof(VertexShaderConstants)); memcpy(buffer.first + ROUND_UP(sizeof(PixelShaderConstants), s_ubo_align) + ROUND_UP(sizeof(VertexShaderConstants), s_ubo_align), &GeometryShaderManager::constants, sizeof(GeometryShaderConstants)); s_buffer->Unmap(s_ubo_buffer_size); glBindBufferRange(GL_UNIFORM_BUFFER, 1, s_buffer->m_buffer, buffer.second, sizeof(PixelShaderConstants)); glBindBufferRange(GL_UNIFORM_BUFFER, 2, s_buffer->m_buffer, buffer.second + ROUND_UP(sizeof(PixelShaderConstants), s_ubo_align), sizeof(VertexShaderConstants)); glBindBufferRange(GL_UNIFORM_BUFFER, 3, s_buffer->m_buffer, buffer.second + ROUND_UP(sizeof(PixelShaderConstants), s_ubo_align) + ROUND_UP(sizeof(VertexShaderConstants), s_ubo_align), sizeof(GeometryShaderConstants)); PixelShaderManager::dirty = false; VertexShaderManager::dirty = false; GeometryShaderManager::dirty = false; ADDSTAT(stats.thisFrame.bytesUniformStreamed, s_ubo_buffer_size); } }
// Uploads the geometry-shader constants into the next slot of a CPU-writable
// ring buffer when dirty, and (re)binds the GS root constant-buffer view when
// the binding has been invalidated.
void GeometryShaderCache::GetConstantBuffer12()
{
  if (GeometryShaderManager::dirty)
  {
    // Advance to the next slot so we never overwrite constants that a
    // previous, possibly still in-flight, draw may be reading.
    currentGscbuf12 = (currentGscbuf12 + 1) % gscbuf12Slots;
    memcpy((u8*)gscbuf12data + gscbuf12paddedSize * currentGscbuf12,
           &GeometryShaderManager::constants, sizeof(GeometryShaderConstants));
    GeometryShaderManager::dirty = false;
    ADDSTAT(stats.thisFrame.bytesUniformStreamed, sizeof(GeometryShaderConstants));
    D3D::commandListMgr->dirtyGSCBV = true;
  }
  // The binding is refreshed whenever something dirtied it (new constants above,
  // or external invalidation such as a new command list), not only on upload.
  if (D3D::commandListMgr->dirtyGSCBV)
  {
    D3D::currentCommandList->SetGraphicsRootConstantBufferView(
        DESCRIPTOR_TABLE_GS_CBV,
        gscbuf12->GetGPUVirtualAddress() + gscbuf12paddedSize * currentGscbuf12
        );
    D3D::commandListMgr->dirtyGSCBV = false;
  }
}
// Streams the pixel-shader constant block into its dedicated stream buffer when
// dirty and (re)binds the PS root constant-buffer view if the binding state was
// invalidated.
// Returns true if allocating buffer space forced a command-list execution.
bool ShaderConstantsManager::LoadAndSetPixelShaderConstants()
{
  bool command_list_executed = false;
  if (PixelShaderManager::dirty)
  {
    command_list_executed = s_shader_constant_stream_buffers[SHADER_STAGE_PIXEL_SHADER]->AllocateSpaceInBuffer(
        s_shader_constant_buffer_padded_sizes[SHADER_STAGE_PIXEL_SHADER],
        0 // The padded sizes are already aligned to 256 bytes, so don't need to worry about manually aligning offset.
        );
    memcpy(
        s_shader_constant_stream_buffers[SHADER_STAGE_PIXEL_SHADER]->GetCPUAddressOfCurrentAllocation(),
        &PixelShaderManager::constants,
        sizeof(PixelShaderConstants));
    PixelShaderManager::dirty = false;
    ADDSTAT(stats.thisFrame.bytesUniformStreamed, sizeof(PixelShaderConstants));
    D3D::command_list_mgr->SetCommandListDirtyState(COMMAND_LIST_STATE_PS_CBV, true);
  }
  // Rebind whenever the CBV state is dirty — either new constants were uploaded
  // above, or something else (e.g. a fresh command list) invalidated the binding.
  if (D3D::command_list_mgr->GetCommandListDirtyState(COMMAND_LIST_STATE_PS_CBV))
  {
    D3D::current_command_list->SetGraphicsRootConstantBufferView(
        DESCRIPTOR_TABLE_PS_CBVONE,
        s_shader_constant_stream_buffers[SHADER_STAGE_PIXEL_SHADER]->GetGPUAddressOfCurrentAllocation()
        );
    D3D::command_list_mgr->SetCommandListDirtyState(COMMAND_LIST_STATE_PS_CBV, false);
  }
  return command_list_executed;
}
void VertexManager::UpdatePixelShaderConstants() { if (!PixelShaderManager::dirty || !ReserveConstantStorage()) return; Renderer::GetInstance()->SetConstantBuffer(0, m_uniform_stream_buffer.GetCurrentGPUPointer()); std::memcpy(m_uniform_stream_buffer.GetCurrentHostPointer(), &PixelShaderManager::constants, sizeof(PixelShaderConstants)); m_uniform_stream_buffer.CommitMemory(sizeof(PixelShaderConstants)); ADDSTAT(stats.thisFrame.bytesUniformStreamed, sizeof(PixelShaderConstants)); PixelShaderManager::dirty = false; }
// Finalizes the vertex/index uploads for a draw: reports the base vertex and
// base index implied by the current stream-buffer offsets, commits the written
// bytes, and binds both buffers on the renderer.
void VertexManager::CommitBuffer(u32 num_vertices, u32 vertex_stride, u32 num_indices, u32* out_base_vertex, u32* out_base_index)
{
  const u32 vtx_bytes = num_vertices * vertex_stride;
  const u32 idx_bytes = num_indices * sizeof(u16);

  // The base offsets must be read before CommitMemory() advances the buffers.
  if (vertex_stride > 0)
    *out_base_vertex = m_vertex_stream_buffer.GetCurrentOffset() / vertex_stride;
  else
    *out_base_vertex = 0;
  *out_base_index = m_index_stream_buffer.GetCurrentOffset() / sizeof(u16);

  m_vertex_stream_buffer.CommitMemory(vtx_bytes);
  m_index_stream_buffer.CommitMemory(idx_bytes);

  ADDSTAT(stats.thisFrame.bytesVertexStreamed, static_cast<int>(vtx_bytes));
  ADDSTAT(stats.thisFrame.bytesIndexStreamed, static_cast<int>(idx_bytes));

  Renderer::GetInstance()->SetVertexBuffer(m_vertex_stream_buffer.GetGPUPointer(), vertex_stride,
                                           m_vertex_stream_buffer.GetSize());
  Renderer::GetInstance()->SetIndexBuffer(m_index_stream_buffer.GetGPUPointer(),
                                          m_index_stream_buffer.GetSize(), DXGI_FORMAT_R16_UINT);
}
void ProgramShaderCache::UploadConstants() { if(s_ubo_dirty) { s_buffer->Alloc(s_ubo_buffer_size); size_t offset = s_buffer->Upload(s_ubo_buffer, s_ubo_buffer_size); glBindBufferRange(GL_UNIFORM_BUFFER, 1, s_buffer->getBuffer(), offset, s_ps_data_size); glBindBufferRange(GL_UNIFORM_BUFFER, 2, s_buffer->getBuffer(), offset + s_vs_data_offset, s_vs_data_size); s_ubo_dirty = false; ADDSTAT(stats.thisFrame.bytesUniformStreamed, s_ubo_buffer_size); } }
// Streams the pending vertex and index data into two separate D3D11 dynamic
// buffers using the NO_OVERWRITE / DISCARD ring pattern, recording the draw
// offsets for the subsequent draw call.
void VertexManager::PrepareDrawBuffers()
{
  D3D11_MAPPED_SUBRESOURCE map;

  // --- Vertex data (cursor counted in bytes) ---
  UINT vSize = UINT(s_pCurBufferPointer - s_pBaseBufferPointer);
  D3D11_MAP MapType = D3D11_MAP_WRITE_NO_OVERWRITE;
  if (m_vertex_buffer_cursor + vSize >= VBUFFER_SIZE)
  {
    // Wrap around: switch to the next buffer in the pool and DISCARD it so we
    // get a fresh allocation without stalling on the GPU.
    m_current_vertex_buffer = (m_current_vertex_buffer + 1) % MAX_VBUFFER_COUNT;
    m_vertex_buffer_cursor = 0;
    MapType = D3D11_MAP_WRITE_DISCARD;
  }
  D3D::context->Map(m_vertex_buffers[m_current_vertex_buffer], 0, MapType, 0, &map);
  memcpy((u8*)map.pData + m_vertex_buffer_cursor, s_pBaseBufferPointer, vSize);
  D3D::context->Unmap(m_vertex_buffers[m_current_vertex_buffer], 0);
  m_vertex_draw_offset = m_vertex_buffer_cursor;
  m_vertex_buffer_cursor += vSize;

  // --- Index data (cursor counted in u16 elements, not bytes) ---
  UINT iCount = IndexGenerator::GetIndexLen();
  MapType = D3D11_MAP_WRITE_NO_OVERWRITE;
  if (m_index_buffer_cursor + iCount >= (IBUFFER_SIZE / sizeof(u16)))
  {
    // Wrap around
    m_current_index_buffer = (m_current_index_buffer + 1) % MAX_VBUFFER_COUNT;
    m_index_buffer_cursor = 0;
    MapType = D3D11_MAP_WRITE_DISCARD;
  }
  D3D::context->Map(m_index_buffers[m_current_index_buffer], 0, MapType, 0, &map);
  memcpy((u16*)map.pData + m_index_buffer_cursor, GetIndexBuffer(), sizeof(u16) * IndexGenerator::GetIndexLen());
  D3D::context->Unmap(m_index_buffers[m_current_index_buffer], 0);
  m_index_draw_offset = m_index_buffer_cursor;
  m_index_buffer_cursor += iCount;

  ADDSTAT(stats.thisFrame.bytesVertexStreamed, vSize);
  ADDSTAT(stats.thisFrame.bytesIndexStreamed, iCount*sizeof(u16));
}
void VertexManager::UploadUtilityUniforms(const void* data, u32 data_size) { InvalidateConstants(); if (!m_uniform_stream_buffer.ReserveMemory(data_size, D3D12_CONSTANT_BUFFER_DATA_PLACEMENT_ALIGNMENT)) { WARN_LOG(VIDEO, "Executing command buffer while waiting for ext space in uniform buffer"); Renderer::GetInstance()->ExecuteCommandList(false); } Renderer::GetInstance()->SetConstantBuffer(0, m_uniform_stream_buffer.GetCurrentGPUPointer()); Renderer::GetInstance()->SetConstantBuffer(1, m_uniform_stream_buffer.GetCurrentGPUPointer()); Renderer::GetInstance()->SetConstantBuffer(2, m_uniform_stream_buffer.GetCurrentGPUPointer()); std::memcpy(m_uniform_stream_buffer.GetCurrentHostPointer(), data, data_size); m_uniform_stream_buffer.CommitMemory(data_size); ADDSTAT(stats.thisFrame.bytesUniformStreamed, data_size); }
// Uploads pixel, vertex and geometry shader constants in a single stream-buffer
// allocation and rebinds all three constant-buffer slots. Used when every
// constant block must be refreshed at once (e.g. after the buffer was reset).
void VertexManager::UploadAllConstants()
{
  // We are free to re-use parts of the buffer now since we're uploading all constants.
  // Lay the three blocks out back-to-back, each aligned to the CBV placement alignment.
  const u32 pixel_constants_offset = 0;
  const u32 vertex_constants_offset =
      Common::AlignUp(pixel_constants_offset + sizeof(PixelShaderConstants),
                      D3D12_CONSTANT_BUFFER_DATA_PLACEMENT_ALIGNMENT);
  const u32 geometry_constants_offset =
      Common::AlignUp(vertex_constants_offset + sizeof(VertexShaderConstants),
                      D3D12_CONSTANT_BUFFER_DATA_PLACEMENT_ALIGNMENT);
  const u32 allocation_size = geometry_constants_offset + sizeof(GeometryShaderConstants);

  // Allocate everything at once.
  // We should only be here if the buffer was full and a command buffer was submitted anyway.
  if (!m_uniform_stream_buffer.ReserveMemory(allocation_size, D3D12_CONSTANT_BUFFER_DATA_PLACEMENT_ALIGNMENT))
  {
    PanicAlert("Failed to allocate space for constants in streaming buffer");
    return;
  }

  // Update bindings
  Renderer::GetInstance()->SetConstantBuffer(0, m_uniform_stream_buffer.GetCurrentGPUPointer() + pixel_constants_offset);
  Renderer::GetInstance()->SetConstantBuffer(1, m_uniform_stream_buffer.GetCurrentGPUPointer() + vertex_constants_offset);
  Renderer::GetInstance()->SetConstantBuffer(2, m_uniform_stream_buffer.GetCurrentGPUPointer() + geometry_constants_offset);

  // Copy the actual data in
  std::memcpy(m_uniform_stream_buffer.GetCurrentHostPointer() + pixel_constants_offset, &PixelShaderManager::constants, sizeof(PixelShaderConstants));
  std::memcpy(m_uniform_stream_buffer.GetCurrentHostPointer() + vertex_constants_offset, &VertexShaderManager::constants, sizeof(VertexShaderConstants));
  std::memcpy(m_uniform_stream_buffer.GetCurrentHostPointer() + geometry_constants_offset, &GeometryShaderManager::constants, sizeof(GeometryShaderConstants));

  // Finally, flush buffer memory after copying
  m_uniform_stream_buffer.CommitMemory(allocation_size);
  ADDSTAT(stats.thisFrame.bytesUniformStreamed, allocation_size);

  // Clear dirty flags
  VertexShaderManager::dirty = false;
  GeometryShaderManager::dirty = false;
  PixelShaderManager::dirty = false;
}
// Streams the vertex-shader constant block into its dedicated stream buffer
// when dirty and (re)binds the VS root constant-buffer view — and, when pixel
// lighting is enabled, also binds the same block to the second PS CBV slot.
// Returns true if allocating buffer space forced a command-list execution.
bool ShaderConstantsManager::LoadAndSetVertexShaderConstants()
{
  bool command_list_executed = false;
  if (VertexShaderManager::dirty)
  {
    command_list_executed = s_shader_constant_stream_buffers[SHADER_STAGE_VERTEX_SHADER]->AllocateSpaceInBuffer(
        s_shader_constant_buffer_padded_sizes[SHADER_STAGE_VERTEX_SHADER],
        0 // The padded sizes are already aligned to 256 bytes, so don't need to worry about manually aligning offset.
        );
    memcpy(
        s_shader_constant_stream_buffers[SHADER_STAGE_VERTEX_SHADER]->GetCPUAddressOfCurrentAllocation(),
        &VertexShaderManager::constants,
        sizeof(VertexShaderConstants));
    VertexShaderManager::dirty = false;
    ADDSTAT(stats.thisFrame.bytesUniformStreamed, sizeof(VertexShaderConstants));
    D3D::command_list_mgr->SetCommandListDirtyState(COMMAND_LIST_STATE_VS_CBV, true);
  }
  // Rebind whenever the CBV state is dirty — new constants above, or an external
  // invalidation (e.g. a fresh command list).
  if (D3D::command_list_mgr->GetCommandListDirtyState(COMMAND_LIST_STATE_VS_CBV))
  {
    const D3D12_GPU_VIRTUAL_ADDRESS calculated_gpu_va =
        s_shader_constant_stream_buffers[SHADER_STAGE_VERTEX_SHADER]->GetGPUAddressOfCurrentAllocation();
    D3D::current_command_list->SetGraphicsRootConstantBufferView(
        DESCRIPTOR_TABLE_VS_CBV,
        calculated_gpu_va
        );
    // Pixel lighting reads the vertex constants from the pixel shader as well.
    if (g_ActiveConfig.bEnablePixelLighting)
      D3D::current_command_list->SetGraphicsRootConstantBufferView(
          DESCRIPTOR_TABLE_PS_CBVTWO,
          calculated_gpu_va
          );
    D3D::command_list_mgr->SetCommandListDirtyState(COMMAND_LIST_STATE_VS_CBV, false);
  }
  return command_list_executed;
}
int RunVertices(int vtx_attr_group, int primitive, int count, DataReader src, bool skip_drawing, bool is_preprocess) { if (!count) return 0; VertexLoaderBase* loader = RefreshLoader(vtx_attr_group, is_preprocess); int size = count * loader->m_VertexSize; if ((int)src.size() < size) return -1; if (skip_drawing || is_preprocess) return size; // If the native vertex format changed, force a flush. if (loader->m_native_vertex_format != s_current_vtx_fmt || loader->m_native_components != g_current_components) { VertexManagerBase::Flush(); } s_current_vtx_fmt = loader->m_native_vertex_format; g_current_components = loader->m_native_components; // if cull mode is CULL_ALL, tell VertexManager to skip triangles and quads. // They still need to go through vertex loading, because we need to calculate a zfreeze refrence // slope. bool cullall = (bpmem.genMode.cullmode == GenMode::CULL_ALL && primitive < 5); DataReader dst = VertexManagerBase::PrepareForAdditionalData( primitive, count, loader->m_native_vtx_decl.stride, cullall); count = loader->RunVertices(src, dst, count); IndexGenerator::AddIndices(primitive, count); VertexManagerBase::FlushData(count, loader->m_native_vtx_decl.stride); ADDSTAT(stats.thisFrame.numPrims, count); INCSTAT(stats.thisFrame.numPrimitiveJoins); return size; }
bool RunVertices(int vtx_attr_group, int primitive, int count, size_t buf_size, bool skip_drawing) { if (!count) return true; CPState* state = &g_main_cp_state; VertexLoader* loader = RefreshLoader(vtx_attr_group, state); size_t size = count * loader->GetVertexSize(); if (buf_size < size) return false; if (skip_drawing || (bpmem.genMode.cullmode == GenMode::CULL_ALL && primitive < 5)) { // if cull mode is CULL_ALL, ignore triangles and quads DataSkip((u32)size); return true; } NativeVertexFormat* native = loader->GetNativeVertexFormat(); // If the native vertex format changed, force a flush. if (native != s_current_vtx_fmt) VertexManager::Flush(); s_current_vtx_fmt = native; VertexManager::PrepareForAdditionalData(primitive, count, loader->GetNativeVertexDeclaration().stride); loader->RunVertices(state->vtx_attr[vtx_attr_group], primitive, count); IndexGenerator::AddIndices(primitive, count); ADDSTAT(stats.thisFrame.numPrims, count); INCSTAT(stats.thisFrame.numPrimitiveJoins); return true; }
// Streams the geometry-shader constants into the uniform stream buffer and
// refreshes the GS UBO descriptor binding and dynamic offset.
void StateTracker::UpdateGeometryShaderConstants()
{
  // Skip updating geometry shader constants if it's not in use.
  if (m_pipeline_state.gs == VK_NULL_HANDLE)
  {
    // However, if the buffer has changed, we can't skip the update, because then we'll
    // try to include the now non-existant buffer in the descriptor set.
    if (m_uniform_stream_buffer->GetBuffer() ==
        m_bindings.uniform_buffer_bindings[UBO_DESCRIPTOR_SET_BINDING_GS].buffer)
    {
      return;
    }
    // Force an upload so the descriptor is repointed at the new buffer below.
    GeometryShaderManager::Dirty();
  }

  if (!GeometryShaderManager::IsDirty() || !ReserveConstantStorage())
    return;

  // Buffer allocation changed?
  if (m_uniform_stream_buffer->GetBuffer() !=
      m_bindings.uniform_buffer_bindings[UBO_DESCRIPTOR_SET_BINDING_GS].buffer)
  {
    m_bindings.uniform_buffer_bindings[UBO_DESCRIPTOR_SET_BINDING_GS].buffer =
        m_uniform_stream_buffer->GetBuffer();
    m_dirty_flags |= DIRTY_FLAG_GS_UBO;
  }

  // The dynamic offset selects this draw's slice of the stream buffer without
  // rewriting the descriptor set.
  m_bindings.uniform_buffer_offsets[UBO_DESCRIPTOR_SET_BINDING_GS] =
      static_cast<uint32_t>(m_uniform_stream_buffer->GetCurrentOffset());
  m_dirty_flags |= DIRTY_FLAG_DYNAMIC_OFFSETS;

  memcpy(m_uniform_stream_buffer->GetCurrentHostPointer(), &GeometryShaderManager::constants,
         sizeof(GeometryShaderConstants));
  ADDSTAT(stats.thisFrame.bytesUniformStreamed, sizeof(GeometryShaderConstants));
  m_uniform_stream_buffer->CommitMemory(sizeof(GeometryShaderConstants));
  GeometryShaderManager::Clear();
}
bool ConvertVertices(VertexLoaderParameters ¶meters, u32 &readsize, u32 &writesize) { if (parameters.needloaderrefresh) { UpdateLoader(parameters); } auto loader = g_main_cp_state.vertex_loaders[parameters.vtx_attr_group]; if (!loader->EnvironmentIsSupported()) { loader = loader->GetFallback(); } readsize = parameters.count * loader->m_VertexSize; if (parameters.buf_size < readsize) return false; if (parameters.skip_draw) { return true; } // Lookup pointers for any vertex arrays. UpdateVertexArrayPointers(); NativeVertexFormat *nativefmt = loader->m_native_vertex_format; // Flush if our vertex format is different from the currently set. if (s_current_vtx_fmt != nullptr && s_current_vtx_fmt != nativefmt) { VertexManagerBase::Flush(); } s_current_vtx_fmt = nativefmt; g_current_components = loader->m_native_components; VertexManagerBase::PrepareForAdditionalData(parameters.primitive, parameters.count, loader->m_native_stride); parameters.destination = VertexManagerBase::s_pCurBufferPointer; s32 finalcount = loader->RunVertices(parameters); writesize = loader->m_native_stride * finalcount; IndexGenerator::AddIndices(parameters.primitive, finalcount); ADDSTAT(stats.thisFrame.numPrims, finalcount); INCSTAT(stats.thisFrame.numPrimitiveJoins); return true; }
void StateTracker::UpdatePixelShaderConstants() { if (!PixelShaderManager::IsDirty() || !ReserveConstantStorage()) return; // Buffer allocation changed? if (m_uniform_stream_buffer->GetBuffer() != m_bindings.uniform_buffer_bindings[UBO_DESCRIPTOR_SET_BINDING_PS].buffer) { m_bindings.uniform_buffer_bindings[UBO_DESCRIPTOR_SET_BINDING_PS].buffer = m_uniform_stream_buffer->GetBuffer(); m_dirty_flags |= DIRTY_FLAG_PS_UBO; } m_bindings.uniform_buffer_offsets[UBO_DESCRIPTOR_SET_BINDING_PS] = static_cast<uint32_t>(m_uniform_stream_buffer->GetCurrentOffset()); m_dirty_flags |= DIRTY_FLAG_DYNAMIC_OFFSETS; int size = PixelShaderManager::ConstantBufferSize * sizeof(float); memcpy(m_uniform_stream_buffer->GetCurrentHostPointer(), PixelShaderManager::GetBuffer(), size); ADDSTAT(stats.thisFrame.bytesUniformStreamed, size); m_uniform_stream_buffer->CommitMemory(size); PixelShaderManager::Clear(); }
// Streams the pending geometry into D3D9 vertex/index buffers using the
// NOOVERWRITE/DISCARD ring pattern. Lines and points are expanded on the CPU
// into screen-aligned triangle pairs: extra vertices are replicated after the
// original vertex data, and per-vertex blend indices are patched so the vertex
// shader can offset each corner.
//
// BUGFIX: bytesIndexStreamed previously accumulated m_total_index_len, which
// is a count of u16 indices — every other backend in this file adds bytes
// (index count * sizeof(u16)). Fixed the stat to report bytes.
void VertexManager::PrepareDrawBuffers(u32 stride)
{
  u8* p_vertices_base;
  u8* p_vertices;
  u16* p_indices;
  u16* indices = GetIndexBuffer();
  u32 total_data_size = m_total_num_verts * stride;      // includes CPU-expanded vertices
  u32 data_size = m_num_verts * stride;                  // original (unexpanded) vertices
  u16 current_index = m_num_verts;                       // first index for generated vertices
  if (m_index_len)
  {
    DWORD LockMode = D3DLOCK_NOOVERWRITE;
    // Round the cursor up to the next multiple of the vertex stride
    // (decrement first so a cursor already on a multiple stays put).
    m_vertex_buffer_cursor--;
    m_vertex_buffer_cursor = m_vertex_buffer_cursor - (m_vertex_buffer_cursor % stride) + stride;
    if (m_vertex_buffer_cursor > m_vertex_buffer_size - total_data_size)
    {
      // Out of space: rotate to the next buffer and DISCARD its old contents.
      LockMode = D3DLOCK_DISCARD;
      m_vertex_buffer_cursor = 0;
      m_current_vertex_buffer = (m_current_vertex_buffer + 1) % m_buffers_count;
    }
    if (FAILED(m_vertex_buffers[m_current_vertex_buffer]->Lock(m_vertex_buffer_cursor, total_data_size, (VOID**)(&p_vertices_base), LockMode)))
    {
      DestroyDeviceObjects();
      return;
    }
    LockMode = D3DLOCK_NOOVERWRITE;
    if (m_index_buffer_cursor > m_index_buffer_size - m_total_index_len)
    {
      LockMode = D3DLOCK_DISCARD;
      m_index_buffer_cursor = 0;
      m_current_index_buffer = (m_current_index_buffer + 1) % m_buffers_count;
    }
    if (FAILED(m_index_buffers[m_current_index_buffer]->Lock(m_index_buffer_cursor * sizeof(u16), m_total_index_len * sizeof(u16), (VOID**)(&p_indices), LockMode)))
    {
      DestroyDeviceObjects();
      return;
    }
    // Copy the original vertex data; generated vertices are appended after it.
    memcpy(p_vertices_base, s_pBaseBufferPointer, data_size);
    p_vertices = p_vertices_base + data_size;
    if (current_primitive_type == PRIMITIVE_TRIANGLES)
    {
      memcpy(p_indices, indices, m_index_len * sizeof(u16));
    }
    else if (current_primitive_type == PRIMITIVE_LINES)
    {
      for (u32 i = 0; i < (m_index_len - 1); i += 2)
      {
        // Get Line Indices
        u16 first_index = indices[i];
        u16 second_index = indices[i + 1];
        // Get the position in the stream of the first vertex
        u32 currentstride = first_index * stride;
        // Get the first vertex position data
        Float_2* base_vertex_0 = (Float_2*)(s_pBaseBufferPointer + currentstride);
        // Get the blendindices data
        U8_4* blendindices_vertex_0 = (U8_4*)(p_vertices_base + currentstride + stride - sizeof(U8_4));
        // Same for the second vertex
        currentstride = second_index * stride;
        Float_2* base_vertex_1 = (Float_2*)(s_pBaseBufferPointer + currentstride);
        U8_4* blendindices_vertex_1 = (U8_4*)(p_vertices_base + currentstride + stride - sizeof(U8_4));
        // Calculate line orientation
        // mostly a hack because we are in object space but is better than nothing
        float dx = base_vertex_1->x - base_vertex_0->x;
        float dy = base_vertex_1->y - base_vertex_0->y;
        bool horizontal = fabs(dx) > fabs(dy);
        bool positive = horizontal ? dx > 0 : dy > 0;
        // setup offset index according to line orientation
        u8 idx0 = horizontal ? (positive ? PLO_POS_LINE_NEGATIVE_Y : PLO_POS_LINE_POSITIVE_Y) : (positive ? PLO_POS_LINE_POSITIVE_X : PLO_POS_LINE_NEGATIVE_X);
        u8 idx1 = horizontal ? (positive ? PLO_POS_LINE_POSITIVE_Y : PLO_POS_LINE_NEGATIVE_Y) : (positive ? PLO_POS_LINE_NEGATIVE_X : PLO_POS_LINE_POSITIVE_X);
        // Replicate both endpoints for the opposite edge of the quad.
        memcpy(p_vertices, base_vertex_0, stride);
        p_vertices += stride;
        U8_4* blendindices_vertex_2 = (U8_4*)(p_vertices - sizeof(U8_4));
        memcpy(p_vertices, base_vertex_1, stride);
        p_vertices += stride;
        U8_4* blendindices_vertex_3 = (U8_4*)(p_vertices - sizeof(U8_4));
        // Setup Blend Indices
        blendindices_vertex_0->y = PLO_TEX_MASK_LINE_0_3;
        blendindices_vertex_0->z = idx0;
        blendindices_vertex_0->w = PLO_ZERO;
        blendindices_vertex_1->y = PLO_TEX_MASK_LINE_0_3;
        blendindices_vertex_1->z = idx0;
        blendindices_vertex_1->w = PLO_ZERO;
        blendindices_vertex_2->y = PLO_TEX_MASK_LINE_0_3;
        blendindices_vertex_2->z = idx1;
        blendindices_vertex_2->w = PLO_TEX_LINE;
        blendindices_vertex_3->y = PLO_TEX_MASK_LINE_0_3;
        blendindices_vertex_3->z = idx1;
        blendindices_vertex_3->w = PLO_TEX_LINE;
        // Setup new triangle indices (two triangles forming the line quad)
        *p_indices = first_index;
        p_indices++;
        *p_indices = current_index;
        current_index++;
        p_indices++;
        *p_indices = current_index;
        p_indices++;
        *p_indices = current_index;
        current_index++;
        p_indices++;
        *p_indices = second_index;
        p_indices++;
        *p_indices = first_index;
        p_indices++;
      }
    }
    else if (current_primitive_type == PRIMITIVE_POINTS)
    {
      for (u32 i = 0; i < m_index_len; i++)
      {
        // Get point index
        u16 pointindex = indices[i];
        // Calculate stream Position
        int currentstride = pointindex * stride;
        // Get data Pointer for vertex replication
        u8* base_vertex = s_pBaseBufferPointer + currentstride;
        U8_4* blendindices_vertex_0 = (U8_4*)(p_vertices_base + currentstride + stride - sizeof(U8_4));
        // Generate Extra vertices (three copies: the other quad corners)
        memcpy(p_vertices, base_vertex, stride);
        p_vertices += stride;
        U8_4* blendindices_vertex_1 = (U8_4*)(p_vertices - sizeof(U8_4));
        memcpy(p_vertices, base_vertex, stride);
        p_vertices += stride;
        U8_4* blendindices_vertex_2 = (U8_4*)(p_vertices - sizeof(U8_4));
        memcpy(p_vertices, base_vertex, stride);
        p_vertices += stride;
        U8_4* blendindices_vertex_3 = (U8_4*)(p_vertices - sizeof(U8_4));
        // Setup Blend Indices
        blendindices_vertex_0->y = PLO_TEX_MASK_POINT_0_3;
        blendindices_vertex_0->z = PLO_POS_POINT_LEFT_TOP;
        blendindices_vertex_0->w = PLO_ZERO;
        blendindices_vertex_1->y = PLO_TEX_MASK_POINT_0_3;
        blendindices_vertex_1->z = PLO_POS_POINT_LEFT_BOTTOM;
        blendindices_vertex_1->w = PLO_TEX_POINT_X;
        blendindices_vertex_2->y = PLO_TEX_MASK_POINT_0_3;
        blendindices_vertex_2->z = PLO_POS_POINT_RIGHT_TOP;
        blendindices_vertex_2->w = PLO_TEX_POINT_Y;
        blendindices_vertex_3->y = PLO_TEX_MASK_POINT_0_3;
        blendindices_vertex_3->z = PLO_POS_POINT_RIGHT_BOTTOM;
        blendindices_vertex_3->w = PLO_TEX_POINT_XY;
        // Setup new triangle indices (two triangles forming the point quad)
        *p_indices = pointindex; // Left Top
        p_indices++;
        *p_indices = current_index; // Left Bottom
        current_index++;
        p_indices++;
        *p_indices = current_index; // Right Top
        p_indices++;
        *p_indices = current_index; // Right Top
        p_indices++;
        *p_indices = current_index - 1; // Left Bottom
        p_indices++;
        current_index++;
        *p_indices = current_index; // Right Bottom
        p_indices++;
        current_index++;
      }
    }
    m_vertex_buffers[m_current_vertex_buffer]->Unlock();
    m_index_buffers[m_current_index_buffer]->Unlock();
  }
  // Rebind stream source / indices only when the stride changed or a buffer rotated.
  if (m_last_stride != stride || m_vertex_buffer_cursor == 0)
  {
    m_last_stride = stride;
    D3D::SetStreamSource(0, m_vertex_buffers[m_current_vertex_buffer], 0, m_last_stride);
  }
  if (m_index_buffer_cursor == 0)
  {
    D3D::SetIndices(m_index_buffers[m_current_index_buffer]);
  }
  ADDSTAT(stats.thisFrame.bytesVertexStreamed, total_data_size);
  // BUGFIX: report bytes, not u16 element count (see header comment).
  ADDSTAT(stats.thisFrame.bytesIndexStreamed, m_total_index_len * sizeof(u16));
}