// Decodes a CP register write. The high nibble of sub_cmd selects the
// register group, the low nibble the register within the group.
// During preprocessing we update a shadow copy of the CP state; only the
// main pass may touch global vertex-shader state.
void LoadCPReg(u32 sub_cmd, u32 value, bool is_preprocess)
{
  const bool update_global_state = !is_preprocess;
  CPState* const state = is_preprocess ? &g_preprocess_cp_state : &g_main_cp_state;
  const u32 reg_index = sub_cmd & 7;

  switch (sub_cmd & 0xF0)
  {
  case 0x30:
    if (update_global_state)
      VertexShaderManager::SetTexMatrixChangedA(value);
    break;

  case 0x40:
    if (update_global_state)
      VertexShaderManager::SetTexMatrixChangedB(value);
    break;

  case 0x50:
    // Replace the low 17 bits of the vertex descriptor, keeping the upper bits.
    state->vtx_desc.Hex &= ~0x1FFFF;
    state->vtx_desc.Hex |= value;
    // The descriptor affects every attribute group, so mark all 8 dirty.
    state->attr_dirty = BitSet32::AllTrue(8);
    state->bases_dirty = true;
    break;

  case 0x60:
    // Replace the upper bits of the vertex descriptor, keeping the lower 17 bits.
    state->vtx_desc.Hex &= 0x1FFFF;
    state->vtx_desc.Hex |= (u64)value << 17;
    state->attr_dirty = BitSet32::AllTrue(8);
    state->bases_dirty = true;
    break;

  case 0x70:
    _assert_((sub_cmd & 0x0F) < 8);
    state->vtx_attr[reg_index].g0.Hex = value;
    state->attr_dirty[reg_index] = true;
    break;

  case 0x80:
    _assert_((sub_cmd & 0x0F) < 8);
    state->vtx_attr[reg_index].g1.Hex = value;
    state->attr_dirty[reg_index] = true;
    break;

  case 0x90:
    _assert_((sub_cmd & 0x0F) < 8);
    state->vtx_attr[reg_index].g2.Hex = value;
    state->attr_dirty[reg_index] = true;
    break;

  // Pointers to vertex arrays in GC RAM
  case 0xA0:
    state->array_bases[sub_cmd & 0xF] = value;
    state->bases_dirty = true;
    break;

  case 0xB0:
    state->array_strides[sub_cmd & 0xF] = value & 0xFF;
    break;
  }
}
// Decodes a write to a CP (Command Processor) register and updates the main
// CP state. The high nibble of sub_cmd selects the register group, the low
// nibble the register within the group.
void LoadCPReg(u32 sub_cmd, u32 value)
{
  switch (sub_cmd & 0xF0)
  {
  // 0x30/0x40: texture matrix index registers; forwarded to the vertex
  // shader manager so dependent constants get re-uploaded.
  case 0x30:
    VertexShaderManager::SetTexMatrixChangedA(value);
    break;
  case 0x40:
    VertexShaderManager::SetTexMatrixChangedB(value);
    break;
  case 0x50:
    g_main_cp_state.vtx_desc.Hex &= ~0x1FFFF;  // keep the Upper bits
    g_main_cp_state.vtx_desc.Hex |= value;
    // The vertex descriptor affects every attribute group, so dirty all 8.
    g_main_cp_state.attr_dirty = 0xFF;
    g_main_cp_state.bases_dirty = true;
    break;
  case 0x60:
    g_main_cp_state.vtx_desc.Hex &= 0x1FFFF;  // keep the lower 17Bits
    g_main_cp_state.vtx_desc.Hex |= (u64)value << 17;
    g_main_cp_state.attr_dirty = 0xFF;
    g_main_cp_state.bases_dirty = true;
    break;
  // 0x70/0x80/0x90: the three vertex attribute groups, 8 entries each.
  case 0x70:
    _assert_((sub_cmd & 0x0F) < 8);
    g_main_cp_state.vtx_attr[sub_cmd & 7].g0.Hex = value;
    g_main_cp_state.attr_dirty |= 1 << (sub_cmd & 7);
    break;
  case 0x80:
    _assert_((sub_cmd & 0x0F) < 8);
    g_main_cp_state.vtx_attr[sub_cmd & 7].g1.Hex = value;
    g_main_cp_state.attr_dirty |= 1 << (sub_cmd & 7);
    break;
  case 0x90:
    _assert_((sub_cmd & 0x0F) < 8);
    g_main_cp_state.vtx_attr[sub_cmd & 7].g2.Hex = value;
    g_main_cp_state.attr_dirty |= 1 << (sub_cmd & 7);
    break;
  // Pointers to vertex arrays in GC RAM
  case 0xA0:
    g_main_cp_state.array_bases[sub_cmd & 0xF] = value;
    g_main_cp_state.bases_dirty = true;
    break;
  case 0xB0:
    g_main_cp_state.array_strides[sub_cmd & 0xF] = value & 0xFF;
    break;
  }
}
// Writes a single texel at (x, y) into the mapped staging buffer.
// data_size must be at least one texel; the write must fall entirely within
// the currently mapped range [m_map_offset, m_map_offset + m_map_size).
void StagingTexture2D::WriteTexel(u32 x, u32 y, const void* data, size_t data_size)
{
  _assert_(data_size >= m_texel_size);

  // Absolute offset of the texel within the buffer, and its offset relative
  // to the start of the mapped range.
  VkDeviceSize offset = y * m_row_stride + x * m_texel_size;
  VkDeviceSize map_offset = offset - m_map_offset;

  // BUGFIX: the upper-bound check must use the absolute offset. The previous
  // comparison (map_offset + m_texel_size <= m_map_offset + m_map_size) mixed
  // a map-relative offset with an absolute bound, making the check too
  // lenient by m_map_offset bytes and permitting writes past the mapping.
  _assert_(offset >= m_map_offset && (offset + m_texel_size) <= (m_map_offset + m_map_size));

  char* ptr = m_map_pointer + map_offset;
  memcpy(ptr, data, data_size);
}
// Converts a paletted (indexed) texture into the destination render target by
// drawing a screen quad with a palette-conversion fragment shader. The
// palette is uploaded to a texel buffer and indexed by the shader.
void TextureConverter::ConvertTexture(TextureCacheBase::TCacheEntry* dst_entry,
                                      TextureCacheBase::TCacheEntry* src_entry,
                                      VkRenderPass render_pass, const void* palette,
                                      TlutFormat palette_format)
{
  // Layout must match the push-constant block in the conversion shaders.
  struct PSUniformBlock
  {
    float multiplier;
    int texel_buffer_offset;
    int pad[2];
  };

  VKTexture* source_texture = static_cast<VKTexture*>(src_entry->texture.get());
  VKTexture* destination_texture = static_cast<VKTexture*>(dst_entry->texture.get());

  _assert_(static_cast<size_t>(palette_format) < NUM_PALETTE_CONVERSION_SHADERS);
  _assert_(destination_texture->GetConfig().rendertarget);

  // We want to align to 2 bytes (R16) or the device's texel buffer alignment,
  // whichever is greater.
  // I4 textures use 4-bit indices (16 entries * 2 bytes = 32 bytes); all
  // other formats use 8-bit indices (256 entries * 2 bytes = 512 bytes).
  size_t palette_size = (src_entry->format & 0xF) == GX_TF_I4 ? 32 : 512;
  if (!ReserveTexelBufferStorage(palette_size, sizeof(u16)))
    return;

  // Copy in palette to texel buffer.
  u32 palette_offset = static_cast<u32>(m_texel_buffer->GetCurrentOffset());
  memcpy(m_texel_buffer->GetCurrentHostPointer(), palette, palette_size);
  m_texel_buffer->CommitMemory(palette_size);

  VkCommandBuffer command_buffer = GetCommandBufferForTextureConversion(src_entry);

  // Source is sampled, destination is drawn into.
  source_texture->GetRawTexIdentifier()->TransitionToLayout(
      command_buffer, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
  destination_texture->GetRawTexIdentifier()->TransitionToLayout(
      command_buffer, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);

  // Bind and draw to the destination.
  UtilityShaderDraw draw(command_buffer,
                         g_object_cache->GetPipelineLayout(PIPELINE_LAYOUT_TEXTURE_CONVERSION),
                         render_pass, g_object_cache->GetScreenQuadVertexShader(), VK_NULL_HANDLE,
                         m_palette_conversion_shaders[palette_format]);

  VkRect2D region = {{0, 0}, {dst_entry->GetWidth(), dst_entry->GetHeight()}};
  draw.BeginRenderPass(destination_texture->GetFramebuffer(), region);

  // I4 sources store 4-bit values, so indices scale by 15 rather than 255.
  PSUniformBlock uniforms = {};
  uniforms.multiplier = (src_entry->format & 0xF) == GX_TF_I4 ? 15.0f : 255.0f;
  uniforms.texel_buffer_offset = static_cast<int>(palette_offset / sizeof(u16));
  draw.SetPushConstants(&uniforms, sizeof(uniforms));
  draw.SetPSSampler(0, source_texture->GetRawTexIdentifier()->GetView(),
                    g_object_cache->GetPointSampler());
  draw.SetPSTexelBuffer(m_texel_buffer_view_r16_uint);
  draw.SetViewportAndScissor(0, 0, dst_entry->GetWidth(), dst_entry->GetHeight());
  draw.DrawWithoutVertexBuffer(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP, 4);
  draw.EndRenderPass();
}
// Retrieves the swap chain's images and wraps each one in a texture object
// and a framebuffer so it can be used as a render target.
// Returns false on any Vulkan error.
bool SwapChain::SetupSwapChainImages()
{
  _assert_(m_swap_chain_images.empty());

  // Query how many images the swap chain was actually created with.
  uint32_t image_count;
  VkResult res =
      vkGetSwapchainImagesKHR(g_vulkan_context->GetDevice(), m_swap_chain, &image_count, nullptr);
  if (res != VK_SUCCESS)
  {
    LOG_VULKAN_ERROR(res, "vkGetSwapchainImagesKHR failed: ");
    return false;
  }

  std::vector<VkImage> images(image_count);
  res = vkGetSwapchainImagesKHR(g_vulkan_context->GetDevice(), m_swap_chain, &image_count,
                                images.data());
  _assert_(res == VK_SUCCESS);

  m_swap_chain_images.reserve(image_count);
  for (uint32_t i = 0; i < image_count; i++)
  {
    SwapChainImage image;
    image.image = images[i];

    // Create texture object, which creates a view of the backbuffer
    image.texture = Texture2D::CreateFromExistingImage(
        m_width, m_height, 1, 1, m_surface_format.format, VK_SAMPLE_COUNT_1_BIT,
        VK_IMAGE_VIEW_TYPE_2D, image.image);

    // One framebuffer per swap chain image, rendering into its view.
    VkImageView view = image.texture->GetView();
    VkFramebufferCreateInfo framebuffer_info = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
                                                nullptr,
                                                0,
                                                m_render_pass,
                                                1,
                                                &view,
                                                m_width,
                                                m_height,
                                                1};
    res = vkCreateFramebuffer(g_vulkan_context->GetDevice(), &framebuffer_info, nullptr,
                              &image.framebuffer);
    if (res != VK_SUCCESS)
    {
      LOG_VULKAN_ERROR(res, "vkCreateFramebuffer failed: ");
      return false;
    }

    m_swap_chain_images.emplace_back(std::move(image));
  }

  return true;
}
// Synchronously reads back a rectangle of 'src' (or the backbuffer when src
// is null) into 'pixels', converting from the framebuffer's format to
// destFormat. Blocks the CPU until the GPU has finished the readback.
// Returns false when the source format does not support readback.
bool VulkanRenderManager::CopyFramebufferToMemorySync(VKRFramebuffer *src, int aspectBits, int x, int y, int w, int h, Draw::DataFormat destFormat, uint8_t *pixels, int pixelStride) {
	// Queue a READBACK step; this also terminates the current render step.
	VKRStep *step = new VKRStep{ VKRStepType::READBACK };
	step->readback.aspectMask = aspectBits;
	step->readback.src = src;
	step->readback.srcRect.offset = { x, y };
	step->readback.srcRect.extent = { (uint32_t)w, (uint32_t)h };
	steps_.push_back(step);

	curRenderStep_ = nullptr;

	// Block until the GPU has executed everything queued so far, including
	// the readback step above.
	FlushSync();

	// Determine the source format so CopyReadbackBuffer can convert.
	// NOTE(review): if aspectBits contains none of COLOR/DEPTH/STENCIL and
	// asserts are compiled out, srcFormat is used uninitialized below —
	// confirm all call sites pass exactly one valid aspect.
	Draw::DataFormat srcFormat;
	if (aspectBits & VK_IMAGE_ASPECT_COLOR_BIT) {
		if (src) {
			switch (src->color.format) {
			case VK_FORMAT_R8G8B8A8_UNORM: srcFormat = Draw::DataFormat::R8G8B8A8_UNORM; break;
			default: _assert_(false);
			}
		} else {
			// Backbuffer.
			if (!(vulkan_->GetSurfaceCapabilities().supportedUsageFlags & VK_IMAGE_USAGE_TRANSFER_SRC_BIT)) {
				ELOG("Copying from backbuffer not supported, can't take screenshots");
				return false;
			}
			switch (vulkan_->GetSwapchainFormat()) {
			case VK_FORMAT_B8G8R8A8_UNORM: srcFormat = Draw::DataFormat::B8G8R8A8_UNORM; break;
			case VK_FORMAT_R8G8B8A8_UNORM: srcFormat = Draw::DataFormat::R8G8B8A8_UNORM; break;
			// NOTE: If you add supported formats here, make sure to also support them in VulkanQueueRunner::CopyReadbackBuffer.
			default:
				ELOG("Unsupported backbuffer format for screenshots");
				return false;
			}
		}
	} else if (aspectBits & VK_IMAGE_ASPECT_STENCIL_BIT) {
		// Copies from stencil are always S8.
		srcFormat = Draw::DataFormat::S8;
	} else if (aspectBits & VK_IMAGE_ASPECT_DEPTH_BIT) {
		switch (src->depth.format) {
		case VK_FORMAT_D24_UNORM_S8_UINT: srcFormat = Draw::DataFormat::D24_S8; break;
		case VK_FORMAT_D32_SFLOAT_S8_UINT: srcFormat = Draw::DataFormat::D32F; break;
		case VK_FORMAT_D16_UNORM_S8_UINT: srcFormat = Draw::DataFormat::D16; break;
		default: _assert_(false);
		}
	} else {
		_assert_(false);
	}

	// Need to call this after FlushSync so the pixels are guaranteed to be ready in CPU-accessible VRAM.
	queueRunner_.CopyReadbackBuffer(w, h, srcFormat, destFormat, pixelStride, pixels);
	return true;
}
// Stops the submission thread (if running) and drains all per-frame queues so
// the renderer can be torn down or restarted safely.
void VulkanRenderManager::StopThread() {
	if (useThread_ && run_) {
		run_ = false;
		// Stop the thread.
		// Wake the thread on both condition variables of every in-flight
		// frame so it observes run_ == false and exits its loop.
		for (int i = 0; i < vulkan_->GetInflightFrames(); i++) {
			auto &frameData = frameData_[i];
			{
				std::unique_lock<std::mutex> lock(frameData.push_mutex);
				frameData.push_condVar.notify_all();
			}
			{
				std::unique_lock<std::mutex> lock(frameData.pull_mutex);
				frameData.pull_condVar.notify_all();
			}
		}
		thread_.join();
		ILOG("Vulkan submission thread joined. Frame=%d", vulkan_->GetCurFrame());

		// Eat whatever has been queued up for this frame if anything.
		Wipe();

		// Wait for any fences to finish and be resignaled, so we don't have sync issues.
		// Also clean out any queued data, which might refer to things that might not be valid
		// when we restart...
		for (int i = 0; i < vulkan_->GetInflightFrames(); i++) {
			auto &frameData = frameData_[i];
			_assert_(!frameData.readyForRun);
			_assert_(frameData.steps.empty());
			if (frameData.hasInitCommands) {
				// Clear 'em out. This can happen on restart sometimes.
				vkEndCommandBuffer(frameData.initCmd);
				frameData.hasInitCommands = false;
			}
			frameData.readyForRun = false;
			// NOTE(review): steps was just asserted empty, so this delete loop
			// only matters in builds where _assert_ is compiled out.
			for (size_t i = 0; i < frameData.steps.size(); i++) {
				delete frameData.steps[i];
			}
			frameData.steps.clear();

			// Block until the frame's fence has been signaled and re-armed.
			std::unique_lock<std::mutex> lock(frameData.push_mutex);
			while (!frameData.readyForFence) {
				VLOG("PUSH: Waiting for frame[%d].readyForFence = 1 (stop)", i);
				frameData.push_condVar.wait(lock);
			}
		}
	} else {
		ILOG("Vulkan submission thread was already stopped.");
	}
}
// Finalizes the most recent allocation, making final_num_bytes visible to the
// GPU and advancing the stream buffer cursor.
void StreamBuffer::CommitMemory(size_t final_num_bytes)
{
  // The committed range must fit in the buffer and within the space that the
  // last allocation reserved.
  _assert_((m_current_offset + final_num_bytes) <= m_current_size);
  _assert_(final_num_bytes <= m_last_allocation_size);

  // Non-coherent mappings need an explicit flush before the device sees the
  // written data.
  if (!m_coherent_mapping)
  {
    VkMappedMemoryRange flush_range = {};
    flush_range.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
    flush_range.pNext = nullptr;
    flush_range.memory = m_memory;
    flush_range.offset = m_current_offset;
    flush_range.size = final_num_bytes;
    vkFlushMappedMemoryRanges(g_vulkan_context->GetDevice(), 1, &flush_range);
  }

  m_current_offset += final_num_bytes;
}
// Software-renderer variant of the CP register decoder: updates the main CP
// state directly, with no dirty tracking or shader-constant notification.
void SWLoadCPReg(u32 sub_cmd, u32 value)
{
  const u32 attr_index = sub_cmd & 7;
  const u32 array_index = sub_cmd & 0xF;

  switch (sub_cmd & 0xF0)
  {
  case 0x30:
    g_main_cp_state.matrix_index_a.Hex = value;
    break;

  case 0x40:
    g_main_cp_state.matrix_index_b.Hex = value;
    break;

  case 0x50:
    // Low 17 bits of the vertex descriptor; upper bits are preserved.
    g_main_cp_state.vtx_desc.Hex &= ~0x1FFFF;
    g_main_cp_state.vtx_desc.Hex |= value;
    break;

  case 0x60:
    // Upper bits of the vertex descriptor; lower 17 bits are preserved.
    g_main_cp_state.vtx_desc.Hex &= 0x1FFFF;
    g_main_cp_state.vtx_desc.Hex |= (u64)value << 17;
    break;

  case 0x70:
    _assert_((sub_cmd & 0x0F) < 8);
    g_main_cp_state.vtx_attr[attr_index].g0.Hex = value;
    break;

  case 0x80:
    _assert_((sub_cmd & 0x0F) < 8);
    g_main_cp_state.vtx_attr[attr_index].g1.Hex = value;
    break;

  case 0x90:
    _assert_((sub_cmd & 0x0F) < 8);
    g_main_cp_state.vtx_attr[attr_index].g2.Hex = value;
    break;

  // Pointers to vertex arrays in GC RAM
  case 0xA0:
    g_main_cp_state.array_bases[array_index] = value;
    cached_arraybases[array_index] = Memory::GetPointer(value);
    break;

  case 0xB0:
    g_main_cp_state.array_strides[array_index] = value & 0xFF;
    break;
  }
}
// Maps a (VarType, component count) pair to the corresponding Vulkan vertex
// attribute format. The non-integer path uses normalized formats; the
// integer path uses raw UINT/SINT formats.
static VkFormat VarToVkFormat(VarType t, uint32_t components, bool integer) {
	static const VkFormat float_type_lookup[][4] = {
		{VK_FORMAT_R8_UNORM, VK_FORMAT_R8G8_UNORM, VK_FORMAT_R8G8B8_UNORM, VK_FORMAT_R8G8B8A8_UNORM},  // VAR_UNSIGNED_BYTE
		{VK_FORMAT_R8_SNORM, VK_FORMAT_R8G8_SNORM, VK_FORMAT_R8G8B8_SNORM, VK_FORMAT_R8G8B8A8_SNORM},  // VAR_BYTE
		{VK_FORMAT_R16_UNORM, VK_FORMAT_R16G16_UNORM, VK_FORMAT_R16G16B16_UNORM, VK_FORMAT_R16G16B16A16_UNORM},  // VAR_UNSIGNED_SHORT
		{VK_FORMAT_R16_SNORM, VK_FORMAT_R16G16_SNORM, VK_FORMAT_R16G16B16_SNORM, VK_FORMAT_R16G16B16A16_SNORM},  // VAR_SHORT
		{VK_FORMAT_R32_SFLOAT, VK_FORMAT_R32G32_SFLOAT, VK_FORMAT_R32G32B32_SFLOAT, VK_FORMAT_R32G32B32A32_SFLOAT}  // VAR_FLOAT
	};
	// NOTE(review): the integer table maps VAR_FLOAT to the same SFLOAT
	// formats as the float table — presumably a deliberate fallback since
	// floats have no integer variant; confirm.
	static const VkFormat integer_type_lookup[][4] = {
		{VK_FORMAT_R8_UINT, VK_FORMAT_R8G8_UINT, VK_FORMAT_R8G8B8_UINT, VK_FORMAT_R8G8B8A8_UINT},  // VAR_UNSIGNED_BYTE
		{VK_FORMAT_R8_SINT, VK_FORMAT_R8G8_SINT, VK_FORMAT_R8G8B8_SINT, VK_FORMAT_R8G8B8A8_SINT},  // VAR_BYTE
		{VK_FORMAT_R16_UINT, VK_FORMAT_R16G16_UINT, VK_FORMAT_R16G16B16_UINT, VK_FORMAT_R16G16B16A16_UINT},  // VAR_UNSIGNED_SHORT
		{VK_FORMAT_R16_SINT, VK_FORMAT_R16G16_SINT, VK_FORMAT_R16G16B16_SINT, VK_FORMAT_R16G16B16A16_SINT},  // VAR_SHORT
		{VK_FORMAT_R32_SFLOAT, VK_FORMAT_R32G32_SFLOAT, VK_FORMAT_R32G32B32_SFLOAT, VK_FORMAT_R32G32B32A32_SFLOAT}  // VAR_FLOAT
	};
	// Only the component count is validated; 't' is trusted to be a valid
	// VarType row index.
	_assert_(components > 0 && components <= 4);
	return integer ? integer_type_lookup[t][components - 1] : float_type_lookup[t][components - 1];
}
// Parses the WAD header to compute the offsets of the ticket, TMD, data and
// opening-banner sections, then loads the TMD.
VolumeWAD::VolumeWAD(std::unique_ptr<BlobReader> reader) : m_reader(std::move(reader))
{
  _assert_(m_reader);

  // Source: http://wiibrew.org/wiki/WAD_files
  m_hdr_size = m_reader->ReadSwapped<u32>(0x00).value_or(0);
  m_cert_size = m_reader->ReadSwapped<u32>(0x08).value_or(0);
  m_tick_size = m_reader->ReadSwapped<u32>(0x10).value_or(0);
  m_tmd_size = m_reader->ReadSwapped<u32>(0x14).value_or(0);
  m_data_size = m_reader->ReadSwapped<u32>(0x18).value_or(0);

  // Each section in a WAD is aligned to a 0x40-byte boundary.
  m_offset = Common::AlignUp(m_hdr_size, 0x40) + Common::AlignUp(m_cert_size, 0x40);
  m_tmd_offset = Common::AlignUp(m_hdr_size, 0x40) + Common::AlignUp(m_cert_size, 0x40) +
                 Common::AlignUp(m_tick_size, 0x40);
  m_opening_bnr_offset =
      m_tmd_offset + Common::AlignUp(m_tmd_size, 0x40) + Common::AlignUp(m_data_size, 0x40);

  // Refuse to allocate a buffer for an implausibly large TMD.
  if (!IOS::ES::IsValidTMDSize(m_tmd_size))
  {
    ERROR_LOG(DISCIO, "TMD is too large: %u bytes", m_tmd_size);
    return;
  }

  std::vector<u8> tmd_buffer(m_tmd_size);
  Read(m_tmd_offset, m_tmd_size, tmd_buffer.data());
  m_tmd.SetBytes(std::move(tmd_buffer));
}
float Renderer::CalculateDrawAspectRatio(int target_width, int target_height) { // The dimensions are the sizes that are used to create the EFB/backbuffer textures, so // they should always be greater than zero. _assert_(target_width > 0 && target_height > 0); if (g_ActiveConfig.iAspectRatio == ASPECT_STRETCH) { // If stretch is enabled, we prefer the aspect ratio of the window. return (static_cast<float>(target_width) / static_cast<float>(target_height)) / (static_cast<float>(s_backbuffer_width) / static_cast<float>(s_backbuffer_height)); } float Ratio = static_cast<float>(target_width) / static_cast<float>(target_height); if (g_ActiveConfig.iAspectRatio == ASPECT_ANALOG_WIDE || (g_ActiveConfig.iAspectRatio != ASPECT_ANALOG && g_ActiveConfig.iAspectRatio < ASPECT_ANALOG_WIDE && Core::g_aspect_wide)) { Ratio /= AspectToWidescreen(VideoInterface::GetAspectRatio()); } else if (g_ActiveConfig.iAspectRatio == ASPECT_4_3) { Ratio /= (4.0f / 3.0f); } else if (g_ActiveConfig.iAspectRatio == ASPECT_16_9) { Ratio /= (16.0f / 9.0f); } else if (g_ActiveConfig.iAspectRatio == ASPECT_16_10) { Ratio /= (16.0f / 10.0f); } else { Ratio /= VideoInterface::GetAspectRatio(); } return Ratio; }
float Renderer::CalculateDrawAspectRatio(int target_width, int target_height) { // The dimensions are the sizes that are used to create the EFB/backbuffer textures, so // they should always be greater than zero. _assert_(target_width > 0 && target_height > 0); if (g_ActiveConfig.iAspectRatio == ASPECT_STRETCH) { // If stretch is enabled, we prefer the aspect ratio of the window. return (static_cast<float>(target_width) / static_cast<float>(target_height)) / (static_cast<float>(s_backbuffer_width) / static_cast<float>(s_backbuffer_height)); } // The rendering window aspect ratio as a proportion of the 4:3 or 16:9 ratio if (g_ActiveConfig.iAspectRatio == ASPECT_ANALOG_WIDE || (g_ActiveConfig.iAspectRatio != ASPECT_ANALOG && Core::g_aspect_wide)) { return (static_cast<float>(target_width) / static_cast<float>(target_height)) / AspectToWidescreen(VideoInterface::GetAspectRatio()); } else { return (static_cast<float>(target_width) / static_cast<float>(target_height)) / VideoInterface::GetAspectRatio(); } }
// Builds a read request, hands it to the DVD thread's queue, and schedules
// the completion event for when the emulated drive would finish.
static void StartReadInternal(bool copy_to_ram, u32 output_address, u64 dvd_offset, u32 length,
                              const DiscIO::Partition& partition,
                              DVDInterface::ReplyType reply_type, s64 ticks_until_completion)
{
  // Requests may only be issued from the CPU thread.
  _assert_(Core::IsCPUThread());

  const u64 id = s_next_id++;

  ReadRequest request;
  request.copy_to_ram = copy_to_ram;
  request.output_address = output_address;
  request.dvd_offset = dvd_offset;
  request.length = length;
  request.partition = partition;
  request.reply_type = reply_type;
  request.id = id;
  request.time_started_ticks = CoreTiming::GetTicks();
  request.realtime_started_us = Common::Timer::GetTimeUs();

  s_request_queue.Push(std::move(request));
  s_request_queue_expanded.Set();

  // Fire the finish callback once the emulated read time has elapsed.
  CoreTiming::ScheduleEvent(ticks_until_completion, s_finish_read, id);
}
// Records the framebuffer and render area to use for the next render pass.
void StateTracker::SetFramebuffer(VkFramebuffer framebuffer, const VkRect2D& render_area)
{
  // Vulkan forbids changing the framebuffer while a render pass is active.
  _assert_(!InRenderPass());

  m_framebuffer = framebuffer;
  m_framebuffer_size = render_area;
}
// Applies the VFPU destination prefix (per-lane write mask and saturation)
// to the result registers after an operation. Emits ARM VFP code.
void Jit::ApplyPrefixD(const u8 *vregs, VectorSize sz) {
	_assert_(js.prefixDFlag & ArmJitState::PREFIX_KNOWN);
	if (!js.prefixD)
		return;

	int n = GetNumVectorElements(sz);
	for (int i = 0; i < n; i++) {
		// Lanes masked by the write mask are left untouched.
		if (js.VfpuWriteMask(i))
			continue;

		// Bits [2i+1:2i] of prefixD select the saturation mode for lane i.
		int sat = (js.prefixD >> (i * 2)) & 3;
		if (sat == 1) {
			// Branch-free saturation to [0, 1]:
			// clamped = fabs(x) - fabs(x-0.5f) + 0.5f;
			fpr.MapRegV(vregs[i], MAP_DIRTY);
			MOVI2F(S0, 0.5, R0);
			VABS(S1, fpr.V(vregs[i]));      // S1 = fabs(x)
			VSUB(S2, fpr.V(vregs[i]), S0);  // S2 = fabs(x-0.5f) {VABD}
			VABS(S2, S2);
			VSUB(fpr.V(vregs[i]), S1, S2);  // v[i] = S1 - S2 + 0.5f
			VADD(fpr.V(vregs[i]), fpr.V(vregs[i]), S0);
		} else if (sat == 3) {
			// Branch-free saturation to [-1, 1]:
			// clamped = fabs(x) - fabs(x-1.0f);
			fpr.MapRegV(vregs[i], MAP_DIRTY);
			MOVI2F(S0, 1.0, R0);
			VABS(S1, fpr.V(vregs[i]));      // S1 = fabs(x)
			VSUB(S2, fpr.V(vregs[i]), S0);  // S2 = fabs(x-1.0f) {VABD}
			VABS(S2, S2);
			VSUB(fpr.V(vregs[i]), S1, S2);  // v[i] = S1 - S2
		}
	}
}
void VKTexture::Bind(unsigned int stage) { // Texture should always be in SHADER_READ_ONLY layout prior to use. // This is so we don't need to transition during render passes. _assert_(m_texture->GetLayout() == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL); StateTracker::GetInstance()->SetTexture(stage, m_texture->GetView()); }
bool SwapChain::SelectSurfaceFormat() { u32 format_count; VkResult res = vkGetPhysicalDeviceSurfaceFormatsKHR(g_vulkan_context->GetPhysicalDevice(), m_surface, &format_count, nullptr); if (res != VK_SUCCESS || format_count == 0) { LOG_VULKAN_ERROR(res, "vkGetPhysicalDeviceSurfaceFormatsKHR failed: "); return false; } std::vector<VkSurfaceFormatKHR> surface_formats(format_count); res = vkGetPhysicalDeviceSurfaceFormatsKHR(g_vulkan_context->GetPhysicalDevice(), m_surface, &format_count, surface_formats.data()); _assert_(res == VK_SUCCESS); // If there is a single undefined surface format, the device doesn't care, so we'll just use RGBA if (surface_formats[0].format == VK_FORMAT_UNDEFINED) { m_surface_format.format = VK_FORMAT_R8G8B8A8_UNORM; m_surface_format.colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR; return true; } // Use the first surface format, just use what it prefers. // Some drivers seem to return a SRGB format here (Intel Mesa). // This results in gamma correction when presenting to the screen, which we don't want. // Use a linear format instead, if this is the case. m_surface_format.format = Util::GetLinearFormat(surface_formats[0].format); m_surface_format.colorSpace = surface_formats[0].colorSpace; return true; }
// Blocks until the command buffer owning 'fence' has finished executing,
// then runs its cleanup callbacks. No-op if it was already waited on.
void CommandBufferManager::WaitForFence(VkFence fence)
{
  // Locate which frame's resources own this fence.
  size_t index = 0;
  while (index < m_frame_resources.size() && m_frame_resources[index].fence != fence)
    index++;
  _assert_(index < m_frame_resources.size());

  auto& resources = m_frame_resources[index];

  // Already waited for this command buffer?
  if (!resources.needs_fence_wait)
    return;

  VkResult res =
      vkWaitForFences(g_vulkan_context->GetDevice(), 1, &resources.fence, VK_TRUE, UINT64_MAX);
  if (res != VK_SUCCESS)
    LOG_VULKAN_ERROR(res, "vkWaitForFences failed: ");

  // The commands have completed, so fire callbacks and cleanups right away.
  resources.needs_fence_wait = false;
  OnCommandBufferExecuted(index);
}
// Shared implementation for the debugger's buffer-streaming requests:
// validates the request parameters, invokes 'func' to obtain the debug
// buffer, then streams it back as a data URI or raw base64.
static void GenericStreamBuffer(DebuggerRequest &req, std::function<bool(const GPUDebugBuffer *&)> func) {
	if (!currentDebugMIPS->isAlive()) {
		return req.Fail("CPU not started");
	}
	// Reading GPU buffers is only safe while execution is paused.
	if (coreState != CORE_STEPPING && !GPUStepping::IsStepping()) {
		return req.Fail("Neither CPU or GPU is stepping");
	}

	// Optional parameters; Param* reports its own failure on type mismatch.
	bool includeAlpha = false;
	if (!req.ParamBool("alpha", &includeAlpha, DebuggerParamType::OPTIONAL))
		return;
	u32 stackWidth = 0;
	if (!req.ParamU32("stackWidth", &stackWidth, false, DebuggerParamType::OPTIONAL))
		return;
	std::string type = "uri";
	if (!req.ParamString("type", &type, DebuggerParamType::OPTIONAL))
		return;
	if (type != "uri" && type != "base64")
		return req.Fail("Parameter 'type' must be either 'uri' or 'base64'");

	const GPUDebugBuffer *buf = nullptr;
	if (!func(buf)) {
		return req.Fail("Could not download output");
	}
	// NOTE(review): plain assert here vs _assert_ elsewhere in this file —
	// presumably both are available; confirm which is preferred.
	assert(buf != nullptr);

	if (type == "base64") {
		StreamBufferToBase64(req, *buf);
	} else if (type == "uri") {
		StreamBufferToDataURI(req, *buf, includeAlpha, stackWidth);
	} else {
		// Unreachable: 'type' was validated above.
		_assert_(false);
	}
}
// Stretch-blits a rectangle of 'source' into a rectangle of this texture by
// drawing a shaded quad with linear filtering. This texture must be a
// render target.
void DXTexture::ScaleRectangleFromTexture(const AbstractTexture* source,
                                          const MathUtil::Rectangle<int>& srcrect,
                                          const MathUtil::Rectangle<int>& dstrect)
{
  const DXTexture* srcentry = static_cast<const DXTexture*>(source);
  _assert_(m_config.rendertarget);

  g_renderer->ResetAPIState();  // reset any game specific settings

  const D3D11_VIEWPORT vp = CD3D11_VIEWPORT(float(dstrect.left), float(dstrect.top),
                                            float(dstrect.GetWidth()), float(dstrect.GetHeight()));

  // Unbind our SRV first in case this texture is still bound as an input.
  D3D::stateman->UnsetTexture(m_texture->GetSRV());
  D3D::stateman->Apply();

  D3D::context->OMSetRenderTargets(1, &m_texture->GetRTV(), nullptr);
  D3D::context->RSSetViewports(1, &vp);
  D3D::SetLinearCopySampler();

  D3D11_RECT srcRC;
  srcRC.left = srcrect.left;
  srcRC.right = srcrect.right;
  srcRC.top = srcrect.top;
  srcRC.bottom = srcrect.bottom;

  D3D::drawShadedTexQuad(srcentry->m_texture->GetSRV(), &srcRC, srcentry->m_config.width,
                         srcentry->m_config.height, PixelShaderCache::GetColorCopyProgram(false),
                         VertexShaderCache::GetSimpleVertexShader(),
                         VertexShaderCache::GetSimpleInputLayout(),
                         GeometryShaderCache::GetCopyGeometryShader(), 1.0, 0);

  // Restore the EFB render target and any state we clobbered above.
  FramebufferManager::BindEFBRenderTarget();
  g_renderer->RestoreAPIState();
}
// Uploads push constant data for the compute stage of the current dispatch.
void ComputeShaderDispatcher::SetPushConstants(const void* data, size_t data_size)
{
  // The data must fit within the reserved push constant range.
  _assert_(static_cast<u32>(data_size) < PUSH_CONSTANT_BUFFER_SIZE);

  vkCmdPushConstants(m_command_buffer, m_pipeline_info.pipeline_layout,
                     VK_SHADER_STAGE_COMPUTE_BIT, 0, static_cast<u32>(data_size), data);
}
void CommandBufferManager::AddFencePointCallback( const void* key, const CommandBufferQueuedCallback& queued_callback, const CommandBufferExecutedCallback& executed_callback) { // Shouldn't be adding twice. _assert_(m_fence_point_callbacks.find(key) == m_fence_point_callbacks.end()); m_fence_point_callbacks.emplace(key, std::make_pair(queued_callback, executed_callback)); }
// Sets the texel buffer view sampled by the pixel shader.
void UtilityShaderDraw::SetPSTexelBuffer(VkBufferView view)
{
  // Only the texture-conversion pipeline layout has a texel buffer binding.
  _assert_(m_pipeline_info.pipeline_layout ==
           g_object_cache->GetPipelineLayout(PIPELINE_LAYOUT_TEXTURE_CONVERSION));

  m_ps_texel_buffer = view;
}
// Uploads push constant data visible to both the vertex and fragment stages.
void UtilityShaderDraw::SetPushConstants(const void* data, size_t data_size)
{
  // The data must fit within the reserved push constant range.
  _assert_(static_cast<u32>(data_size) < PUSH_CONSTANT_BUFFER_SIZE);

  vkCmdPushConstants(m_command_buffer, m_pipeline_info.pipeline_layout,
                     VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT, 0,
                     static_cast<u32>(data_size), data);
}
// This is used by several of the FileIO and /dev/fs functions std::string HLE_IPC_BuildFilename(const std::string& wii_path) { std::string nand_path = File::GetUserPath(D_SESSION_WIIROOT_IDX); if (wii_path.compare(0, 1, "/") == 0) return nand_path + Common::EscapePath(wii_path); _assert_(false); return nand_path; }
// Releases the current CPU mapping of the staging buffer.
void StagingTexture2DBuffer::Unmap()
{
  // Unmapping without a prior Map() is a logic error.
  _assert_(m_map_pointer);

  vkUnmapMemory(g_vulkan_context->GetDevice(), m_memory);

  // Clear the mapping bookkeeping so stale values can't be reused.
  m_map_pointer = nullptr;
  m_map_offset = 0;
  m_map_size = 0;
}
// Applies the currently loaded palette to 'base_entry', writing the
// converted result into '_entry'. The destination must be a render target
// since the conversion is performed with a draw.
bool TextureCache::Palettize(TCacheEntryBase* _entry, const TCacheEntryBase* base_entry)
{
  TCacheEntry* dst = static_cast<TCacheEntry*>(_entry);
  const TCacheEntry* src = static_cast<const TCacheEntry*>(base_entry);
  _assert_(dst->config.rendertarget);

  m_texture_converter->ConvertTexture(dst, src, m_render_pass, m_pallette, m_pallette_format,
                                      m_pallette_size);
  return true;
}
// Blocks the CPU thread until the DVD thread has drained its request queue,
// then restarts the DVD thread.
void WaitUntilIdle()
{
  _assert_(Core::IsCPUThread());

  // Sleep until the worker signals that more results have been produced.
  while (!s_request_queue.Empty())
    s_result_queue_expanded.Wait();

  StopDVDThread();
  StartDVDThread();
}
// Creates and initializes the StateTracker singleton.
// Returns false (leaving no instance behind) on initialization failure.
bool StateTracker::CreateInstance()
{
  _assert_(!s_state_tracker);

  s_state_tracker = std::make_unique<StateTracker>();
  if (s_state_tracker->Initialize())
    return true;

  // Don't leave a half-constructed singleton around on failure.
  s_state_tracker.reset();
  return false;
}