void SetBufferSwap(u32 screen_id, const FrameBufferInfo& info) {
    u32 base_address = 0x400000;
    PAddr phys_address_left = Memory::VirtualToPhysicalAddress(info.address_left);
    PAddr phys_address_right = Memory::VirtualToPhysicalAddress(info.address_right);
    if (info.active_fb == 0) {
        WriteHWRegs(base_address + 4 * static_cast<u32>(GPU_REG_INDEX(framebuffer_config[screen_id].address_left1)),
                    4, &phys_address_left);
        WriteHWRegs(base_address + 4 * static_cast<u32>(GPU_REG_INDEX(framebuffer_config[screen_id].address_right1)),
                    4, &phys_address_right);
    } else {
        WriteHWRegs(base_address + 4 * static_cast<u32>(GPU_REG_INDEX(framebuffer_config[screen_id].address_left2)),
                    4, &phys_address_left);
        WriteHWRegs(base_address + 4 * static_cast<u32>(GPU_REG_INDEX(framebuffer_config[screen_id].address_right2)),
                    4, &phys_address_right);
    }
    WriteHWRegs(base_address + 4 * static_cast<u32>(GPU_REG_INDEX(framebuffer_config[screen_id].stride)),
                4, &info.stride);
    WriteHWRegs(base_address + 4 * static_cast<u32>(GPU_REG_INDEX(framebuffer_config[screen_id].color_format)),
                4, &info.format);
    WriteHWRegs(base_address + 4 * static_cast<u32>(GPU_REG_INDEX(framebuffer_config[screen_id].active_fb)),
                4, &info.shown_fb);

    if (Pica::g_debug_context)
        Pica::g_debug_context->OnEvent(Pica::DebugContext::Event::BufferSwapped, nullptr);

    if (screen_id == 0) {
        MicroProfileFlip();
    }
}
static void SetBufferSwap(u32 screen_id, const FrameBufferInfo& info) {
    u32 base_address = 0x400000;
    if (info.active_fb == 0) {
        WriteHWRegs(base_address + 4 * GPU_REG_INDEX(framebuffer_config[screen_id].address_left1), 4, &info.address_left);
        WriteHWRegs(base_address + 4 * GPU_REG_INDEX(framebuffer_config[screen_id].address_right1), 4, &info.address_right);
    } else {
        WriteHWRegs(base_address + 4 * GPU_REG_INDEX(framebuffer_config[screen_id].address_left2), 4, &info.address_left);
        WriteHWRegs(base_address + 4 * GPU_REG_INDEX(framebuffer_config[screen_id].address_right2), 4, &info.address_right);
    }
    WriteHWRegs(base_address + 4 * GPU_REG_INDEX(framebuffer_config[screen_id].stride), 4, &info.stride);
    WriteHWRegs(base_address + 4 * GPU_REG_INDEX(framebuffer_config[screen_id].color_format), 4, &info.format);
    WriteHWRegs(base_address + 4 * GPU_REG_INDEX(framebuffer_config[screen_id].active_fb), 4, &info.shown_fb);
}
ResultCode SetBufferSwap(u32 screen_id, const FrameBufferInfo& info) {
    u32 base_address = 0x400000;
    PAddr phys_address_left = VirtualToPhysicalAddress(info.address_left);
    PAddr phys_address_right = VirtualToPhysicalAddress(info.address_right);
    if (info.active_fb == 0) {
        WriteSingleHWReg(base_address + 4 * static_cast<u32>(GPU_REG_INDEX(framebuffer_config[screen_id].address_left1)),
                         phys_address_left);
        WriteSingleHWReg(base_address + 4 * static_cast<u32>(GPU_REG_INDEX(framebuffer_config[screen_id].address_right1)),
                         phys_address_right);
    } else {
        WriteSingleHWReg(base_address + 4 * static_cast<u32>(GPU_REG_INDEX(framebuffer_config[screen_id].address_left2)),
                         phys_address_left);
        WriteSingleHWReg(base_address + 4 * static_cast<u32>(GPU_REG_INDEX(framebuffer_config[screen_id].address_right2)),
                         phys_address_right);
    }
    WriteSingleHWReg(base_address + 4 * static_cast<u32>(GPU_REG_INDEX(framebuffer_config[screen_id].stride)),
                     info.stride);
    WriteSingleHWReg(base_address + 4 * static_cast<u32>(GPU_REG_INDEX(framebuffer_config[screen_id].color_format)),
                     info.format);
    WriteSingleHWReg(base_address + 4 * static_cast<u32>(GPU_REG_INDEX(framebuffer_config[screen_id].active_fb)),
                     info.shown_fb);

    if (Pica::g_debug_context)
        Pica::g_debug_context->OnEvent(Pica::DebugContext::Event::BufferSwapped, nullptr);

    if (screen_id == 0) {
        MicroProfileFlip();
        Core::System::GetInstance().perf_stats.EndGameFrame();
    }

    return RESULT_SUCCESS;
}
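// --- Illustrative sketch (not part of the original sources) ---
// All three SetBufferSwap variants above compute a GSP-visible register offset the same way:
// each hardware register is one 32-bit word, so the offset is base_address + 4 * register index.
// A hypothetical helper capturing that pattern (the function name is an assumption for
// illustration; base_address is 0x400000 in the functions above):
static inline u32 FramebufferRegOffset(u32 base_address, u32 reg_index) {
    return base_address + 4 * reg_index; // each register occupies one 32-bit word
}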
inline void Write(u32 addr, const T data) {
    addr -= 0x1EF00000;
    int index = addr / 4;

    // Writes other than u32 are untested, so I'd rather have them abort than silently fail
    if (index >= Regs::NumIds() || !std::is_same<T, u32>::value) {
        ERROR_LOG(GPU, "unknown Write%d 0x%08X @ 0x%08X", sizeof(data) * 8, data, addr);
        return;
    }

    g_regs[index] = data;

    switch (index) {

    // Memory fills are triggered once the fill value is written.
    // NOTE: This is not verified.
    case GPU_REG_INDEX_WORKAROUND(memory_fill_config[0].value, 0x00004 + 0x3):
    case GPU_REG_INDEX_WORKAROUND(memory_fill_config[1].value, 0x00008 + 0x3):
    {
        const bool is_second_filler = (index != GPU_REG_INDEX(memory_fill_config[0].value));
        const auto& config = g_regs.memory_fill_config[is_second_filler];

        // TODO: Not sure if this check should be done at GSP level instead
        if (config.address_start) {
            // TODO: Not sure if this algorithm is correct, particularly because it doesn't use the size member at all
            u32* start = (u32*)Memory::GetPointer(Memory::PhysicalToVirtualAddress(config.GetStartAddress()));
            u32* end = (u32*)Memory::GetPointer(Memory::PhysicalToVirtualAddress(config.GetEndAddress()));
            for (u32* ptr = start; ptr < end; ++ptr)
                *ptr = bswap32(config.value); // TODO: This is just a workaround to missing framebuffer format emulation

            DEBUG_LOG(GPU, "MemoryFill from 0x%08x to 0x%08x", config.GetStartAddress(), config.GetEndAddress());
        }
        break;
    }

    case GPU_REG_INDEX(display_transfer_config.trigger):
    {
        const auto& config = g_regs.display_transfer_config;
        if (config.trigger & 1) {
            u8* source_pointer = Memory::GetPointer(Memory::PhysicalToVirtualAddress(config.GetPhysicalInputAddress()));
            u8* dest_pointer = Memory::GetPointer(Memory::PhysicalToVirtualAddress(config.GetPhysicalOutputAddress()));

            for (int y = 0; y < config.output_height; ++y) {
                // TODO: Why does the register seem to hold twice the framebuffer width?
                for (int x = 0; x < config.output_width; ++x) {
                    struct {
                        int r, g, b, a;
                    } source_color = { 0, 0, 0, 0 };

                    switch (config.input_format) {
                    case Regs::FramebufferFormat::RGBA8:
                    {
                        // TODO: Most likely got the component order messed up.
                        u8* srcptr = source_pointer + x * 4 + y * config.input_width * 4;
                        source_color.r = srcptr[0]; // blue
                        source_color.g = srcptr[1]; // green
                        source_color.b = srcptr[2]; // red
                        source_color.a = srcptr[3]; // alpha
                        break;
                    }

                    default:
                        ERROR_LOG(GPU, "Unknown source framebuffer format %x", config.input_format.Value());
                        break;
                    }

                    switch (config.output_format) {
                    /*case Regs::FramebufferFormat::RGBA8:
                    {
                        // TODO: Untested
                        u8* dstptr = (u32*)(dest_pointer + x * 4 + y * config.output_width * 4);
                        dstptr[0] = source_color.r;
                        dstptr[1] = source_color.g;
                        dstptr[2] = source_color.b;
                        dstptr[3] = source_color.a;
                        break;
                    }*/

                    case Regs::FramebufferFormat::RGB8:
                    {
                        // TODO: Most likely got the component order messed up.
                        u8* dstptr = dest_pointer + x * 3 + y * config.output_width * 3;
                        dstptr[0] = source_color.r; // blue
                        dstptr[1] = source_color.g; // green
                        dstptr[2] = source_color.b; // red
                        break;
                    }

                    default:
                        ERROR_LOG(GPU, "Unknown destination framebuffer format %x", config.output_format.Value());
                        break;
                    }
                }
            }

            DEBUG_LOG(GPU, "DisplayTriggerTransfer: 0x%08x bytes from 0x%08x(%dx%d)-> 0x%08x(%dx%d), dst format %x",
                      config.output_height * config.output_width * 4,
                      config.GetPhysicalInputAddress(), (int)config.input_width, (int)config.input_height,
                      config.GetPhysicalOutputAddress(), (int)config.output_width, (int)config.output_height,
                      config.output_format.Value());
        }
        break;
    }

    // Seems like writing to this register triggers processing
    case GPU_REG_INDEX(command_processor_config.trigger):
    {
        const auto& config = g_regs.command_processor_config;
        if (config.trigger & 1) {
            u32* buffer = (u32*)Memory::GetPointer(Memory::PhysicalToVirtualAddress(config.GetPhysicalAddress()));
            u32 size = config.size << 3;
            Pica::CommandProcessor::ProcessCommandList(buffer, size);
        }
        break;
    }

    default:
        break;
    }
}
/// Executes the next GSP command
void ExecuteCommand(const Command& command) {
    // Utility function to convert register ID to address
    auto WriteGPURegister = [](u32 id, u32 data) {
        GPU::Write<u32>(0x1EF00000 + 4 * id, data);
    };

    switch (command.id) {

    // GX request DMA - typically used for copying memory from GSP heap to VRAM
    case CommandId::REQUEST_DMA:
        memcpy(Memory::GetPointer(command.dma_request.dest_address),
               Memory::GetPointer(command.dma_request.source_address),
               command.dma_request.size);
        break;

    // ctrulib homebrew sends all relevant command list data with this command,
    // hence we do all "interesting" stuff here and do nothing in SET_COMMAND_LIST_FIRST.
    // TODO: This will need some rework in the future.
    case CommandId::SET_COMMAND_LIST_LAST:
    {
        auto& params = command.set_command_list_last;
        WriteGPURegister(GPU_REG_INDEX(command_processor_config.address), Memory::VirtualToPhysicalAddress(params.address) >> 3);
        WriteGPURegister(GPU_REG_INDEX(command_processor_config.size), params.size >> 3);

        // TODO: Not sure if we are supposed to always write this .. seems to trigger processing though
        WriteGPURegister(GPU_REG_INDEX(command_processor_config.trigger), 1);

        // TODO: Move this to GPU
        // TODO: Not sure what units the size is measured in
        g_debugger.CommandListCalled(params.address, (u32*)Memory::GetPointer(params.address), params.size);
        SignalInterrupt(InterruptId::P3D);
        break;
    }

    // It's assumed that the two "blocks" behave equivalently.
    // Presumably this is done simply to allow two memory fills to run in parallel.
    case CommandId::SET_MEMORY_FILL:
    {
        auto& params = command.memory_fill;
        WriteGPURegister(GPU_REG_INDEX(memory_fill_config[0].address_start), Memory::VirtualToPhysicalAddress(params.start1) >> 3);
        WriteGPURegister(GPU_REG_INDEX(memory_fill_config[0].address_end), Memory::VirtualToPhysicalAddress(params.end1) >> 3);
        WriteGPURegister(GPU_REG_INDEX(memory_fill_config[0].size), params.end1 - params.start1);
        WriteGPURegister(GPU_REG_INDEX(memory_fill_config[0].value), params.value1);

        WriteGPURegister(GPU_REG_INDEX(memory_fill_config[1].address_start), Memory::VirtualToPhysicalAddress(params.start2) >> 3);
        WriteGPURegister(GPU_REG_INDEX(memory_fill_config[1].address_end), Memory::VirtualToPhysicalAddress(params.end2) >> 3);
        WriteGPURegister(GPU_REG_INDEX(memory_fill_config[1].size), params.end2 - params.start2);
        WriteGPURegister(GPU_REG_INDEX(memory_fill_config[1].value), params.value2);
        break;
    }

    case CommandId::SET_DISPLAY_TRANSFER:
    {
        auto& params = command.image_copy;
        WriteGPURegister(GPU_REG_INDEX(display_transfer_config.input_address), Memory::VirtualToPhysicalAddress(params.in_buffer_address) >> 3);
        WriteGPURegister(GPU_REG_INDEX(display_transfer_config.output_address), Memory::VirtualToPhysicalAddress(params.out_buffer_address) >> 3);
        WriteGPURegister(GPU_REG_INDEX(display_transfer_config.input_size), params.in_buffer_size);
        WriteGPURegister(GPU_REG_INDEX(display_transfer_config.output_size), params.out_buffer_size);
        WriteGPURegister(GPU_REG_INDEX(display_transfer_config.flags), params.flags);
        WriteGPURegister(GPU_REG_INDEX(display_transfer_config.trigger), 1);

        // TODO(bunnei): Signalling all of these interrupts here is totally wrong, but it seems to
        // work well enough for running demos. Need to figure out how these all work and trigger
        // them correctly.
        SignalInterrupt(InterruptId::PSC0);
        SignalInterrupt(InterruptId::PSC1);
        SignalInterrupt(InterruptId::PPF);
        SignalInterrupt(InterruptId::P3D);
        SignalInterrupt(InterruptId::DMA);
        break;
    }

    // TODO: Check if texture copies are implemented correctly..
    case CommandId::SET_TEXTURE_COPY:
    {
        auto& params = command.image_copy;
        WriteGPURegister(GPU_REG_INDEX(display_transfer_config.input_address), Memory::VirtualToPhysicalAddress(params.in_buffer_address) >> 3);
        WriteGPURegister(GPU_REG_INDEX(display_transfer_config.output_address), Memory::VirtualToPhysicalAddress(params.out_buffer_address) >> 3);
        WriteGPURegister(GPU_REG_INDEX(display_transfer_config.input_size), params.in_buffer_size);
        WriteGPURegister(GPU_REG_INDEX(display_transfer_config.output_size), params.out_buffer_size);
        WriteGPURegister(GPU_REG_INDEX(display_transfer_config.flags), params.flags);

        // TODO: Should this register be set to 1 or should instead its value be OR-ed with 1?
        WriteGPURegister(GPU_REG_INDEX(display_transfer_config.trigger), 1);
        break;
    }

    // TODO: Figure out what exactly SET_COMMAND_LIST_FIRST and SET_COMMAND_LIST_LAST
    // are supposed to do.
    case CommandId::SET_COMMAND_LIST_FIRST:
    {
        break;
    }

    default:
        ERROR_LOG(GSP, "unknown command 0x%08X", (int)command.id.Value());
    }
}
inline void Write(u32 addr, const T data) {
    addr -= HW::VADDR_GPU;
    u32 index = addr / 4;

    // Writes other than u32 are untested, so I'd rather have them abort than silently fail
    if (index >= Regs::NumIds() || !std::is_same<T, u32>::value) {
        LOG_ERROR(HW_GPU, "unknown Write%lu 0x%08X @ 0x%08X", sizeof(data) * 8, (u32)data, addr);
        return;
    }

    g_regs[index] = static_cast<u32>(data);

    switch (index) {

    // Memory fills are triggered once the fill value is written.
    case GPU_REG_INDEX_WORKAROUND(memory_fill_config[0].trigger, 0x00004 + 0x3):
    case GPU_REG_INDEX_WORKAROUND(memory_fill_config[1].trigger, 0x00008 + 0x3): {
        const bool is_second_filler = (index != GPU_REG_INDEX(memory_fill_config[0].trigger));
        auto& config = g_regs.memory_fill_config[is_second_filler];

        if (config.trigger) {
            MemoryFill(config);
            LOG_TRACE(HW_GPU, "MemoryFill from 0x%08x to 0x%08x", config.GetStartAddress(),
                      config.GetEndAddress());

            // It seems that it won't signal interrupt if "address_start" is zero.
            // TODO: hwtest this
            if (config.GetStartAddress() != 0) {
                if (!is_second_filler) {
                    GSP_GPU::SignalInterrupt(GSP_GPU::InterruptId::PSC0);
                } else {
                    GSP_GPU::SignalInterrupt(GSP_GPU::InterruptId::PSC1);
                }
            }

            // Reset "trigger" flag and set the "finish" flag
            // NOTE: This was confirmed to happen on hardware even if "address_start" is zero.
            config.trigger.Assign(0);
            config.finished.Assign(1);
        }
        break;
    }

    case GPU_REG_INDEX(display_transfer_config.trigger): {
        MICROPROFILE_SCOPE(GPU_DisplayTransfer);

        const auto& config = g_regs.display_transfer_config;
        if (config.trigger & 1) {
            if (Pica::g_debug_context)
                Pica::g_debug_context->OnEvent(Pica::DebugContext::Event::IncomingDisplayTransfer,
                                               nullptr);

            if (config.is_texture_copy) {
                TextureCopy(config);
                LOG_TRACE(HW_GPU,
                          "TextureCopy: 0x%X bytes from 0x%08X(%u+%u)-> "
                          "0x%08X(%u+%u), flags 0x%08X",
                          config.texture_copy.size, config.GetPhysicalInputAddress(),
                          config.texture_copy.input_width * 16, config.texture_copy.input_gap * 16,
                          config.GetPhysicalOutputAddress(), config.texture_copy.output_width * 16,
                          config.texture_copy.output_gap * 16, config.flags);
            } else {
                DisplayTransfer(config);
                LOG_TRACE(HW_GPU,
                          "DisplayTransfer: 0x%08x(%ux%u)-> "
                          "0x%08x(%ux%u), dst format %x, flags 0x%08X",
                          config.GetPhysicalInputAddress(), config.input_width.Value(),
                          config.input_height.Value(), config.GetPhysicalOutputAddress(),
                          config.output_width.Value(), config.output_height.Value(),
                          config.output_format.Value(), config.flags);
            }

            g_regs.display_transfer_config.trigger = 0;
            GSP_GPU::SignalInterrupt(GSP_GPU::InterruptId::PPF);
        }
        break;
    }

    // Seems like writing to this register triggers processing
    case GPU_REG_INDEX(command_processor_config.trigger): {
        const auto& config = g_regs.command_processor_config;
        if (config.trigger & 1) {
            MICROPROFILE_SCOPE(GPU_CmdlistProcessing);

            u32* buffer = (u32*)Memory::GetPhysicalPointer(config.GetPhysicalAddress());

            if (Pica::g_debug_context && Pica::g_debug_context->recorder) {
                Pica::g_debug_context->recorder->MemoryAccessed(
                    (u8*)buffer, config.size * sizeof(u32), config.GetPhysicalAddress());
            }

            Pica::CommandProcessor::ProcessCommandList(buffer, config.size);

            g_regs.command_processor_config.trigger = 0;
        }
        break;
    }

    default:
        break;
    }

    // Notify tracer about the register write
    // This is happening *after* handling the write to make sure we properly catch all memory reads.
    if (Pica::g_debug_context && Pica::g_debug_context->recorder) {
        // addr + GPU VBase - IO VBase + IO PBase
        Pica::g_debug_context->recorder->RegisterWritten<T>(
            addr + 0x1EF00000 - 0x1EC00000 + 0x10100000, data);
    }
}
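// --- Illustrative sketch (not from the original sources) ---
// The tracer address computed at the end of Write<>() above rebuilds the register's physical IO
// address from its GPU-relative offset. A hypothetical helper making the constants explicit,
// using the same bases that appear in the code above:
static constexpr u32 GpuRegisterPhysicalAddress(u32 gpu_relative_offset) {
    const u32 gpu_vbase = 0x1EF00000; // virtual base of the GPU register block (GPU VBase above)
    const u32 io_vbase = 0x1EC00000;  // virtual base of the IO region (IO VBase above)
    const u32 io_pbase = 0x10100000;  // physical base of the IO region (IO PBase above)
    return gpu_relative_offset + gpu_vbase - io_vbase + io_pbase;
}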
/// Executes the next GSP command
static void ExecuteCommand(const Command& command, u32 thread_id) {
    // Utility function to convert register ID to address
    static auto WriteGPURegister = [](u32 id, u32 data) {
        GPU::Write<u32>(0x1EF00000 + 4 * id, data);
    };

    switch (command.id) {

    // GX request DMA - typically used for copying memory from GSP heap to VRAM
    case CommandId::REQUEST_DMA:
        VideoCore::g_renderer->hw_rasterizer->NotifyPreRead(Memory::VirtualToPhysicalAddress(command.dma_request.source_address),
                                                            command.dma_request.size);

        memcpy(Memory::GetPointer(command.dma_request.dest_address),
               Memory::GetPointer(command.dma_request.source_address),
               command.dma_request.size);
        SignalInterrupt(InterruptId::DMA);

        VideoCore::g_renderer->hw_rasterizer->NotifyFlush(Memory::VirtualToPhysicalAddress(command.dma_request.dest_address),
                                                          command.dma_request.size);
        break;

    // ctrulib homebrew sends all relevant command list data with this command,
    // hence we do all "interesting" stuff here and do nothing in SET_COMMAND_LIST_FIRST.
    // TODO: This will need some rework in the future.
    case CommandId::SET_COMMAND_LIST_LAST:
    {
        auto& params = command.set_command_list_last;

        WriteGPURegister(static_cast<u32>(GPU_REG_INDEX(command_processor_config.address)), Memory::VirtualToPhysicalAddress(params.address) >> 3);
        WriteGPURegister(static_cast<u32>(GPU_REG_INDEX(command_processor_config.size)), params.size);

        // TODO: Not sure if we are supposed to always write this .. seems to trigger processing though
        WriteGPURegister(static_cast<u32>(GPU_REG_INDEX(command_processor_config.trigger)), 1);

        break;
    }

    // It's assumed that the two "blocks" behave equivalently.
    // Presumably this is done simply to allow two memory fills to run in parallel.
    case CommandId::SET_MEMORY_FILL:
    {
        auto& params = command.memory_fill;

        if (params.start1 != 0) {
            WriteGPURegister(static_cast<u32>(GPU_REG_INDEX(memory_fill_config[0].address_start)), Memory::VirtualToPhysicalAddress(params.start1) >> 3);
            WriteGPURegister(static_cast<u32>(GPU_REG_INDEX(memory_fill_config[0].address_end)), Memory::VirtualToPhysicalAddress(params.end1) >> 3);
            WriteGPURegister(static_cast<u32>(GPU_REG_INDEX(memory_fill_config[0].value_32bit)), params.value1);
            WriteGPURegister(static_cast<u32>(GPU_REG_INDEX(memory_fill_config[0].control)), params.control1);
        }

        if (params.start2 != 0) {
            WriteGPURegister(static_cast<u32>(GPU_REG_INDEX(memory_fill_config[1].address_start)), Memory::VirtualToPhysicalAddress(params.start2) >> 3);
            WriteGPURegister(static_cast<u32>(GPU_REG_INDEX(memory_fill_config[1].address_end)), Memory::VirtualToPhysicalAddress(params.end2) >> 3);
            WriteGPURegister(static_cast<u32>(GPU_REG_INDEX(memory_fill_config[1].value_32bit)), params.value2);
            WriteGPURegister(static_cast<u32>(GPU_REG_INDEX(memory_fill_config[1].control)), params.control2);
        }
        break;
    }
inline void Write(u32 addr, const T data) {
    addr -= HW::VADDR_GPU;
    u32 index = addr / 4;

    // Writes other than u32 are untested, so I'd rather have them abort than silently fail
    if (index >= Regs::NumIds() || !std::is_same<T, u32>::value) {
        LOG_ERROR(HW_GPU, "unknown Write%lu 0x%08X @ 0x%08X", sizeof(data) * 8, (u32)data, addr);
        return;
    }

    g_regs[index] = static_cast<u32>(data);

    switch (index) {

    // Memory fills are triggered once the fill value is written.
    case GPU_REG_INDEX_WORKAROUND(memory_fill_config[0].trigger, 0x00004 + 0x3):
    case GPU_REG_INDEX_WORKAROUND(memory_fill_config[1].trigger, 0x00008 + 0x3):
    {
        const bool is_second_filler = (index != GPU_REG_INDEX(memory_fill_config[0].trigger));
        auto& config = g_regs.memory_fill_config[is_second_filler];

        if (config.trigger) {
            if (config.address_start) { // Some games pass invalid values here
                u8* start = Memory::GetPhysicalPointer(config.GetStartAddress());
                u8* end = Memory::GetPhysicalPointer(config.GetEndAddress());

                if (config.fill_24bit) {
                    // fill with 24-bit values
                    for (u8* ptr = start; ptr < end; ptr += 3) {
                        ptr[0] = config.value_24bit_r;
                        ptr[1] = config.value_24bit_g;
                        ptr[2] = config.value_24bit_b;
                    }
                } else if (config.fill_32bit) {
                    // fill with 32-bit values
                    for (u32* ptr = (u32*)start; ptr < (u32*)end; ++ptr)
                        *ptr = config.value_32bit;
                } else {
                    // fill with 16-bit values
                    for (u16* ptr = (u16*)start; ptr < (u16*)end; ++ptr)
                        *ptr = config.value_16bit;
                }

                LOG_TRACE(HW_GPU, "MemoryFill from 0x%08x to 0x%08x", config.GetStartAddress(), config.GetEndAddress());

                if (!is_second_filler) {
                    GSP_GPU::SignalInterrupt(GSP_GPU::InterruptId::PSC0);
                } else {
                    GSP_GPU::SignalInterrupt(GSP_GPU::InterruptId::PSC1);
                }

                VideoCore::g_renderer->rasterizer->InvalidateRegion(config.GetStartAddress(), config.GetEndAddress() - config.GetStartAddress());
            }

            // Reset "trigger" flag and set the "finish" flag
            // NOTE: This was confirmed to happen on hardware even if "address_start" is zero.
            config.trigger = 0;
            config.finished = 1;
        }
        break;
    }

    case GPU_REG_INDEX(display_transfer_config.trigger):
    {
        MICROPROFILE_SCOPE(GPU_DisplayTransfer);

        const auto& config = g_regs.display_transfer_config;
        if (config.trigger & 1) {

            if (Pica::g_debug_context)
                Pica::g_debug_context->OnEvent(Pica::DebugContext::Event::IncomingDisplayTransfer, nullptr);

            u8* src_pointer = Memory::GetPhysicalPointer(config.GetPhysicalInputAddress());
            u8* dst_pointer = Memory::GetPhysicalPointer(config.GetPhysicalOutputAddress());

            if (config.is_texture_copy) {
                u32 input_width = config.texture_copy.input_width * 16;
                u32 input_gap = config.texture_copy.input_gap * 16;
                u32 output_width = config.texture_copy.output_width * 16;
                u32 output_gap = config.texture_copy.output_gap * 16;

                size_t contiguous_input_size = config.texture_copy.size / input_width * (input_width + input_gap);
                VideoCore::g_renderer->rasterizer->FlushRegion(config.GetPhysicalInputAddress(), contiguous_input_size);

                u32 remaining_size = config.texture_copy.size;
                u32 remaining_input = input_width;
                u32 remaining_output = output_width;
                while (remaining_size > 0) {
                    u32 copy_size = std::min({ remaining_input, remaining_output, remaining_size });

                    std::memcpy(dst_pointer, src_pointer, copy_size);
                    src_pointer += copy_size;
                    dst_pointer += copy_size;

                    remaining_input -= copy_size;
                    remaining_output -= copy_size;
                    remaining_size -= copy_size;

                    if (remaining_input == 0) {
                        remaining_input = input_width;
                        src_pointer += input_gap;
                    }
                    if (remaining_output == 0) {
                        remaining_output = output_width;
                        dst_pointer += output_gap;
                    }
                }

                LOG_TRACE(HW_GPU, "TextureCopy: 0x%X bytes from 0x%08X(%u+%u)-> 0x%08X(%u+%u), flags 0x%08X",
                          config.texture_copy.size,
                          config.GetPhysicalInputAddress(), input_width, input_gap,
                          config.GetPhysicalOutputAddress(), output_width, output_gap,
                          config.flags);

                size_t contiguous_output_size = config.texture_copy.size / output_width * (output_width + output_gap);
                VideoCore::g_renderer->rasterizer->InvalidateRegion(config.GetPhysicalOutputAddress(), contiguous_output_size);

                GSP_GPU::SignalInterrupt(GSP_GPU::InterruptId::PPF);
                break;
            }

            if (config.scaling > config.ScaleXY) {
                LOG_CRITICAL(HW_GPU, "Unimplemented display transfer scaling mode %u", config.scaling.Value());
                UNIMPLEMENTED();
                break;
            }

            if (config.input_linear && config.scaling != config.NoScale) {
                LOG_CRITICAL(HW_GPU, "Scaling is only implemented on tiled input");
                UNIMPLEMENTED();
                break;
            }

            bool horizontal_scale = config.scaling != config.NoScale;
            bool vertical_scale = config.scaling == config.ScaleXY;

            u32 output_width = config.output_width >> horizontal_scale;
            u32 output_height = config.output_height >> vertical_scale;

            u32 input_size = config.input_width * config.input_height * GPU::Regs::BytesPerPixel(config.input_format);
            u32 output_size = output_width * output_height * GPU::Regs::BytesPerPixel(config.output_format);

            VideoCore::g_renderer->rasterizer->FlushRegion(config.GetPhysicalInputAddress(), input_size);

            for (u32 y = 0; y < output_height; ++y) {
                for (u32 x = 0; x < output_width; ++x) {
                    Math::Vec4<u8> src_color;

                    // Calculate the [x,y] position of the input image
                    // based on the current output position and the scale
                    u32 input_x = x << horizontal_scale;
                    u32 input_y = y << vertical_scale;

                    if (config.flip_vertically) {
                        // Flip the y value of the output data,
                        // we do this after calculating the [x,y] position of the input image
                        // to account for the scaling options.
                        y = output_height - y - 1;
                    }

                    u32 dst_bytes_per_pixel = GPU::Regs::BytesPerPixel(config.output_format);
                    u32 src_bytes_per_pixel = GPU::Regs::BytesPerPixel(config.input_format);
                    u32 src_offset;
                    u32 dst_offset;

                    if (config.input_linear) {
                        if (!config.dont_swizzle) {
                            // Interpret the input as linear and the output as tiled
                            u32 coarse_y = y & ~7;
                            u32 stride = output_width * dst_bytes_per_pixel;

                            src_offset = (input_x + input_y * config.input_width) * src_bytes_per_pixel;
                            dst_offset = VideoCore::GetMortonOffset(x, y, dst_bytes_per_pixel) + coarse_y * stride;
                        } else {
                            // Both input and output are linear
                            src_offset = (input_x + input_y * config.input_width) * src_bytes_per_pixel;
                            dst_offset = (x + y * output_width) * dst_bytes_per_pixel;
                        }
                    } else {
                        if (!config.dont_swizzle) {
                            // Interpret the input as tiled and the output as linear
                            u32 coarse_y = input_y & ~7;
                            u32 stride = config.input_width * src_bytes_per_pixel;

                            src_offset = VideoCore::GetMortonOffset(input_x, input_y, src_bytes_per_pixel) + coarse_y * stride;
                            dst_offset = (x + y * output_width) * dst_bytes_per_pixel;
                        } else {
                            // Both input and output are tiled
                            u32 out_coarse_y = y & ~7;
                            u32 out_stride = output_width * dst_bytes_per_pixel;

                            u32 in_coarse_y = input_y & ~7;
                            u32 in_stride = config.input_width * src_bytes_per_pixel;

                            src_offset = VideoCore::GetMortonOffset(input_x, input_y, src_bytes_per_pixel) + in_coarse_y * in_stride;
                            dst_offset = VideoCore::GetMortonOffset(x, y, dst_bytes_per_pixel) + out_coarse_y * out_stride;
                        }
                    }

                    const u8* src_pixel = src_pointer + src_offset;
                    src_color = DecodePixel(config.input_format, src_pixel);
                    if (config.scaling == config.ScaleX) {
                        Math::Vec4<u8> pixel = DecodePixel(config.input_format, src_pixel + src_bytes_per_pixel);
                        src_color = ((src_color + pixel) / 2).Cast<u8>();
                    } else if (config.scaling == config.ScaleXY) {
                        Math::Vec4<u8> pixel1 = DecodePixel(config.input_format, src_pixel + 1 * src_bytes_per_pixel);
                        Math::Vec4<u8> pixel2 = DecodePixel(config.input_format, src_pixel + 2 * src_bytes_per_pixel);
                        Math::Vec4<u8> pixel3 = DecodePixel(config.input_format, src_pixel + 3 * src_bytes_per_pixel);
                        src_color = (((src_color + pixel1) + (pixel2 + pixel3)) / 4).Cast<u8>();
                    }

                    u8* dst_pixel = dst_pointer + dst_offset;
                    switch (config.output_format) {
                    case Regs::PixelFormat::RGBA8:
                        Color::EncodeRGBA8(src_color, dst_pixel);
                        break;

                    case Regs::PixelFormat::RGB8:
                        Color::EncodeRGB8(src_color, dst_pixel);
                        break;

                    case Regs::PixelFormat::RGB565:
                        Color::EncodeRGB565(src_color, dst_pixel);
                        break;

                    case Regs::PixelFormat::RGB5A1:
                        Color::EncodeRGB5A1(src_color, dst_pixel);
                        break;

                    case Regs::PixelFormat::RGBA4:
                        Color::EncodeRGBA4(src_color, dst_pixel);
                        break;

                    default:
                        LOG_ERROR(HW_GPU, "Unknown destination framebuffer format %x", config.output_format.Value());
                        break;
                    }
                }
            }

            LOG_TRACE(HW_GPU, "DisplayTriggerTransfer: 0x%08x bytes from 0x%08x(%ux%u)-> 0x%08x(%ux%u), dst format %x, flags 0x%08X",
                      config.output_height * output_width * GPU::Regs::BytesPerPixel(config.output_format),
                      config.GetPhysicalInputAddress(), config.input_width.Value(), config.input_height.Value(),
                      config.GetPhysicalOutputAddress(), output_width, output_height,
                      config.output_format.Value(), config.flags);

            g_regs.display_transfer_config.trigger = 0;
            GSP_GPU::SignalInterrupt(GSP_GPU::InterruptId::PPF);

            VideoCore::g_renderer->rasterizer->InvalidateRegion(config.GetPhysicalOutputAddress(), output_size);
        }
        break;
    }

    // Seems like writing to this register triggers processing
    case GPU_REG_INDEX(command_processor_config.trigger):
    {
        const auto& config = g_regs.command_processor_config;
        if (config.trigger & 1) {
            MICROPROFILE_SCOPE(GPU_CmdlistProcessing);

            u32* buffer = (u32*)Memory::GetPhysicalPointer(config.GetPhysicalAddress());

            if (Pica::g_debug_context && Pica::g_debug_context->recorder) {
                Pica::g_debug_context->recorder->MemoryAccessed((u8*)buffer, config.size * sizeof(u32), config.GetPhysicalAddress());
            }

            Pica::CommandProcessor::ProcessCommandList(buffer, config.size);

            g_regs.command_processor_config.trigger = 0;
        }
        break;
    }

    default:
        break;
    }

    // Notify tracer about the register write
    // This is happening *after* handling the write to make sure we properly catch all memory reads.
    if (Pica::g_debug_context && Pica::g_debug_context->recorder) {
        // addr + GPU VBase - IO VBase + IO PBase
        Pica::g_debug_context->recorder->RegisterWritten<T>(addr + 0x1EF00000 - 0x1EC00000 + 0x10100000, data);
    }
}
inline void Write(u32 addr, const T data) {
    addr -= 0x1EF00000;
    u32 index = addr / 4;

    // Writes other than u32 are untested, so I'd rather have them abort than silently fail
    if (index >= Regs::NumIds() || !std::is_same<T, u32>::value) {
        LOG_ERROR(HW_GPU, "unknown Write%lu 0x%08X @ 0x%08X", sizeof(data) * 8, (u32)data, addr);
        return;
    }

    g_regs[index] = static_cast<u32>(data);

    switch (index) {

    // Memory fills are triggered once the fill value is written.
    case GPU_REG_INDEX_WORKAROUND(memory_fill_config[0].trigger, 0x00004 + 0x3):
    case GPU_REG_INDEX_WORKAROUND(memory_fill_config[1].trigger, 0x00008 + 0x3):
    {
        const bool is_second_filler = (index != GPU_REG_INDEX(memory_fill_config[0].trigger));
        auto& config = g_regs.memory_fill_config[is_second_filler];

        if (config.address_start && config.trigger) {
            u8* start = Memory::GetPointer(Memory::PhysicalToVirtualAddress(config.GetStartAddress()));
            u8* end = Memory::GetPointer(Memory::PhysicalToVirtualAddress(config.GetEndAddress()));

            if (config.fill_24bit) {
                // fill with 24-bit values
                for (u8* ptr = start; ptr < end; ptr += 3) {
                    ptr[0] = config.value_24bit_b;
                    ptr[1] = config.value_24bit_g;
                    ptr[2] = config.value_24bit_r;
                }
            } else if (config.fill_32bit) {
                // fill with 32-bit values
                for (u32* ptr = (u32*)start; ptr < (u32*)end; ++ptr)
                    *ptr = config.value_32bit;
            } else {
                // fill with 16-bit values
                for (u16* ptr = (u16*)start; ptr < (u16*)end; ++ptr)
                    *ptr = config.value_16bit;
            }

            LOG_TRACE(HW_GPU, "MemoryFill from 0x%08x to 0x%08x", config.GetStartAddress(), config.GetEndAddress());

            config.trigger = 0;
            config.finished = 1;

            if (!is_second_filler) {
                GSP_GPU::SignalInterrupt(GSP_GPU::InterruptId::PSC0);
            } else {
                GSP_GPU::SignalInterrupt(GSP_GPU::InterruptId::PSC1);
            }
        }
        break;
    }

    case GPU_REG_INDEX(display_transfer_config.trigger):
    {
        const auto& config = g_regs.display_transfer_config;
        if (config.trigger & 1) {
            u8* src_pointer = Memory::GetPointer(Memory::PhysicalToVirtualAddress(config.GetPhysicalInputAddress()));
            u8* dst_pointer = Memory::GetPointer(Memory::PhysicalToVirtualAddress(config.GetPhysicalOutputAddress()));

            unsigned horizontal_scale = (config.scale_horizontally != 0) ? 2 : 1;
            unsigned vertical_scale = (config.scale_vertically != 0) ? 2 : 1;

            u32 output_width = config.output_width / horizontal_scale;
            u32 output_height = config.output_height / vertical_scale;

            if (config.raw_copy) {
                // Raw copies do not perform color conversion nor tiled->linear / linear->tiled conversions
                // TODO(Subv): Verify if raw copies perform scaling
                memcpy(dst_pointer, src_pointer,
                       config.output_width * config.output_height * GPU::Regs::BytesPerPixel(config.output_format));

                LOG_TRACE(HW_GPU, "DisplayTriggerTransfer: 0x%08x bytes from 0x%08x(%ux%u)-> 0x%08x(%ux%u), flags 0x%08X, Raw copy",
                          config.output_height * output_width * GPU::Regs::BytesPerPixel(config.output_format),
                          config.GetPhysicalInputAddress(), config.input_width.Value(), config.input_height.Value(),
                          config.GetPhysicalOutputAddress(), config.output_width.Value(), config.output_height.Value(),
                          config.output_format.Value(), config.flags);

                GSP_GPU::SignalInterrupt(GSP_GPU::InterruptId::PPF);
                break;
            }

            // TODO(Subv): Blend the pixels when horizontal / vertical scaling is enabled,
            // right now we're just skipping the extra pixels.
            for (u32 y = 0; y < output_height; ++y) {
                for (u32 x = 0; x < output_width; ++x) {
                    Math::Vec4<u8> src_color = { 0, 0, 0, 0 };

                    u32 scaled_x = x * horizontal_scale;
                    u32 scaled_y = y * vertical_scale;

                    u32 dst_bytes_per_pixel = GPU::Regs::BytesPerPixel(config.output_format);
                    u32 src_bytes_per_pixel = GPU::Regs::BytesPerPixel(config.input_format);
                    u32 src_offset;
                    u32 dst_offset;

                    if (config.output_tiled) {
                        // Interpret the input as linear and the output as tiled
                        u32 coarse_y = y & ~7;
                        u32 stride = output_width * dst_bytes_per_pixel;

                        src_offset = (scaled_x + scaled_y * config.input_width) * src_bytes_per_pixel;
                        dst_offset = VideoCore::GetMortonOffset(x, y, dst_bytes_per_pixel) + coarse_y * stride;
                    } else {
                        // Interpret the input as tiled and the output as linear
                        u32 coarse_y = scaled_y & ~7;
                        u32 stride = config.input_width * src_bytes_per_pixel;

                        src_offset = VideoCore::GetMortonOffset(scaled_x, scaled_y, src_bytes_per_pixel) + coarse_y * stride;
                        dst_offset = (x + y * output_width) * dst_bytes_per_pixel;
                    }

                    const u8* src_pixel = src_pointer + src_offset;
                    switch (config.input_format) {
                    case Regs::PixelFormat::RGBA8:
                        src_color = Color::DecodeRGBA8(src_pixel);
                        break;

                    case Regs::PixelFormat::RGB8:
                        src_color = Color::DecodeRGB8(src_pixel);
                        break;

                    case Regs::PixelFormat::RGB565:
                        src_color = Color::DecodeRGB565(src_pixel);
                        break;

                    case Regs::PixelFormat::RGB5A1:
                        src_color = Color::DecodeRGB5A1(src_pixel);
                        break;

                    case Regs::PixelFormat::RGBA4:
                        src_color = Color::DecodeRGBA4(src_pixel);
                        break;

                    default:
                        LOG_ERROR(HW_GPU, "Unknown source framebuffer format %x", config.input_format.Value());
                        break;
                    }

                    u8* dst_pixel = dst_pointer + dst_offset;
                    switch (config.output_format) {
                    case Regs::PixelFormat::RGBA8:
                        Color::EncodeRGBA8(src_color, dst_pixel);
                        break;

                    case Regs::PixelFormat::RGB8:
                        Color::EncodeRGB8(src_color, dst_pixel);
                        break;

                    case Regs::PixelFormat::RGB565:
                        Color::EncodeRGB565(src_color, dst_pixel);
                        break;

                    case Regs::PixelFormat::RGB5A1:
                        Color::EncodeRGB5A1(src_color, dst_pixel);
                        break;

                    case Regs::PixelFormat::RGBA4:
                        Color::EncodeRGBA4(src_color, dst_pixel);
                        break;

                    default:
                        LOG_ERROR(HW_GPU, "Unknown destination framebuffer format %x", config.output_format.Value());
                        break;
                    }
                }
            }

            LOG_TRACE(HW_GPU, "DisplayTriggerTransfer: 0x%08x bytes from 0x%08x(%ux%u)-> 0x%08x(%ux%u), dst format %x, flags 0x%08X",
                      config.output_height * output_width * GPU::Regs::BytesPerPixel(config.output_format),
                      config.GetPhysicalInputAddress(), config.input_width.Value(), config.input_height.Value(),
                      config.GetPhysicalOutputAddress(), output_width, output_height,
                      config.output_format.Value(), config.flags);

            GSP_GPU::SignalInterrupt(GSP_GPU::InterruptId::PPF);
        }
        break;
    }

    // Seems like writing to this register triggers processing
    case GPU_REG_INDEX(command_processor_config.trigger):
    {
        const auto& config = g_regs.command_processor_config;
        if (config.trigger & 1) {
            u32* buffer = (u32*)Memory::GetPointer(Memory::PhysicalToVirtualAddress(config.GetPhysicalAddress()));
            Pica::CommandProcessor::ProcessCommandList(buffer, config.size);
        }
        break;
    }

    default:
        break;
    }
}
/// Executes the next GSP command
static void ExecuteCommand(const Command& command, u32 thread_id) {
    // Utility function to convert register ID to address
    static auto WriteGPURegister = [](u32 id, u32 data) {
        GPU::Write<u32>(0x1EF00000 + 4 * id, data);
    };

    switch (command.id) {

    // GX request DMA - typically used for copying memory from GSP heap to VRAM
    case CommandId::REQUEST_DMA: {
        MICROPROFILE_SCOPE(GPU_GSP_DMA);
        Memory::MemorySystem& memory = Core::System::GetInstance().Memory();

        // TODO: Consider attempting rasterizer-accelerated surface blit if that usage is ever
        // possible/likely
        Memory::RasterizerFlushVirtualRegion(command.dma_request.source_address,
                                             command.dma_request.size, Memory::FlushMode::Flush);
        Memory::RasterizerFlushVirtualRegion(command.dma_request.dest_address,
                                             command.dma_request.size,
                                             Memory::FlushMode::Invalidate);

        // TODO(Subv): These memory accesses should not go through the application's memory mapping.
        // They should go through the GSP module's memory mapping.
        memory.CopyBlock(*Core::System::GetInstance().Kernel().GetCurrentProcess(),
                         command.dma_request.dest_address, command.dma_request.source_address,
                         command.dma_request.size);
        SignalInterrupt(InterruptId::DMA);
        break;
    }

    // TODO: This will need some rework in the future. (why?)
    case CommandId::SUBMIT_GPU_CMDLIST: {
        auto& params = command.submit_gpu_cmdlist;

        if (params.do_flush) {
            // This flag flushes the command list (params.address, params.size) from the cache.
            // Command lists are not processed by the hardware renderer, so we don't need to
            // actually flush them in Citra.
        }

        WriteGPURegister(static_cast<u32>(GPU_REG_INDEX(command_processor_config.address)),
                         VirtualToPhysicalAddress(params.address) >> 3);
        WriteGPURegister(static_cast<u32>(GPU_REG_INDEX(command_processor_config.size)),
                         params.size);

        // TODO: Not sure if we are supposed to always write this .. seems to trigger processing
        // though
        WriteGPURegister(static_cast<u32>(GPU_REG_INDEX(command_processor_config.trigger)), 1);

        // TODO(yuriks): Figure out the meaning of the `flags` field.

        break;
    }

    // It's assumed that the two "blocks" behave equivalently.
    // Presumably this is done simply to allow two memory fills to run in parallel.
    case CommandId::SET_MEMORY_FILL: {
        auto& params = command.memory_fill;

        if (params.start1 != 0) {
            WriteGPURegister(static_cast<u32>(GPU_REG_INDEX(memory_fill_config[0].address_start)),
                             VirtualToPhysicalAddress(params.start1) >> 3);
            WriteGPURegister(static_cast<u32>(GPU_REG_INDEX(memory_fill_config[0].address_end)),
                             VirtualToPhysicalAddress(params.end1) >> 3);
            WriteGPURegister(static_cast<u32>(GPU_REG_INDEX(memory_fill_config[0].value_32bit)),
                             params.value1);
            WriteGPURegister(static_cast<u32>(GPU_REG_INDEX(memory_fill_config[0].control)),
                             params.control1);
        }

        if (params.start2 != 0) {
            WriteGPURegister(static_cast<u32>(GPU_REG_INDEX(memory_fill_config[1].address_start)),
                             VirtualToPhysicalAddress(params.start2) >> 3);
            WriteGPURegister(static_cast<u32>(GPU_REG_INDEX(memory_fill_config[1].address_end)),
                             VirtualToPhysicalAddress(params.end2) >> 3);
            WriteGPURegister(static_cast<u32>(GPU_REG_INDEX(memory_fill_config[1].value_32bit)),
                             params.value2);
            WriteGPURegister(static_cast<u32>(GPU_REG_INDEX(memory_fill_config[1].control)),
                             params.control2);
        }
        break;
    }
/// Executes the next GSP command
static void ExecuteCommand(const Command& command, u32 thread_id) {
    // Utility function to convert register ID to address
    static auto WriteGPURegister = [](u32 id, u32 data) {
        GPU::Write<u32>(0x1EF00000 + 4 * id, data);
    };

    switch (command.id) {

    // GX request DMA - typically used for copying memory from GSP heap to VRAM
    case CommandId::REQUEST_DMA:
        VideoCore::g_renderer->hw_rasterizer->NotifyPreRead(Memory::VirtualToPhysicalAddress(command.dma_request.source_address),
                                                            command.dma_request.size);

        memcpy(Memory::GetPointer(command.dma_request.dest_address),
               Memory::GetPointer(command.dma_request.source_address),
               command.dma_request.size);
        SignalInterrupt(InterruptId::DMA);

        VideoCore::g_renderer->hw_rasterizer->NotifyFlush(Memory::VirtualToPhysicalAddress(command.dma_request.dest_address),
                                                          command.dma_request.size);
        break;

    // TODO: This will need some rework in the future. (why?)
    case CommandId::SUBMIT_GPU_CMDLIST:
    {
        auto& params = command.submit_gpu_cmdlist;

        if (params.do_flush) {
            // This flag flushes the command list (params.address, params.size) from the cache.
            // Command lists are not processed by the hardware renderer, so we don't need to
            // actually flush them in Citra.
        }

        WriteGPURegister(static_cast<u32>(GPU_REG_INDEX(command_processor_config.address)), Memory::VirtualToPhysicalAddress(params.address) >> 3);
        WriteGPURegister(static_cast<u32>(GPU_REG_INDEX(command_processor_config.size)), params.size);

        // TODO: Not sure if we are supposed to always write this .. seems to trigger processing though
        WriteGPURegister(static_cast<u32>(GPU_REG_INDEX(command_processor_config.trigger)), 1);

        // TODO(yuriks): Figure out the meaning of the `flags` field.

        break;
    }

    // It's assumed that the two "blocks" behave equivalently.
    // Presumably this is done simply to allow two memory fills to run in parallel.
    case CommandId::SET_MEMORY_FILL:
    {
        auto& params = command.memory_fill;

        if (params.start1 != 0) {
            WriteGPURegister(static_cast<u32>(GPU_REG_INDEX(memory_fill_config[0].address_start)), Memory::VirtualToPhysicalAddress(params.start1) >> 3);
            WriteGPURegister(static_cast<u32>(GPU_REG_INDEX(memory_fill_config[0].address_end)), Memory::VirtualToPhysicalAddress(params.end1) >> 3);
            WriteGPURegister(static_cast<u32>(GPU_REG_INDEX(memory_fill_config[0].value_32bit)), params.value1);
            WriteGPURegister(static_cast<u32>(GPU_REG_INDEX(memory_fill_config[0].control)), params.control1);
        }

        if (params.start2 != 0) {
            WriteGPURegister(static_cast<u32>(GPU_REG_INDEX(memory_fill_config[1].address_start)), Memory::VirtualToPhysicalAddress(params.start2) >> 3);
            WriteGPURegister(static_cast<u32>(GPU_REG_INDEX(memory_fill_config[1].address_end)), Memory::VirtualToPhysicalAddress(params.end2) >> 3);
            WriteGPURegister(static_cast<u32>(GPU_REG_INDEX(memory_fill_config[1].value_32bit)), params.value2);
            WriteGPURegister(static_cast<u32>(GPU_REG_INDEX(memory_fill_config[1].control)), params.control2);
        }
        break;
    }
inline void Write(u32 addr, const T data) {
    addr -= HW::VADDR_GPU;
    u32 index = addr / 4;

    // Writes other than u32 are untested, so I'd rather have them abort than silently fail
    if (index >= Regs::NumIds() || !std::is_same<T, u32>::value) {
        LOG_ERROR(HW_GPU, "unknown Write%lu 0x%08X @ 0x%08X", sizeof(data) * 8, (u32)data, addr);
        return;
    }

    g_regs[index] = static_cast<u32>(data);

    switch (index) {

    // Memory fills are triggered once the fill value is written.
    case GPU_REG_INDEX_WORKAROUND(memory_fill_config[0].trigger, 0x00004 + 0x3):
    case GPU_REG_INDEX_WORKAROUND(memory_fill_config[1].trigger, 0x00008 + 0x3):
    {
        const bool is_second_filler = (index != GPU_REG_INDEX(memory_fill_config[0].trigger));
        auto& config = g_regs.memory_fill_config[is_second_filler];

        if (config.trigger) {
            if (config.address_start) { // Some games pass invalid values here
                u8* start = Memory::GetPhysicalPointer(config.GetStartAddress());
                u8* end = Memory::GetPhysicalPointer(config.GetEndAddress());

                if (config.fill_24bit) {
                    // fill with 24-bit values
                    for (u8* ptr = start; ptr < end; ptr += 3) {
                        ptr[0] = config.value_24bit_r;
                        ptr[1] = config.value_24bit_g;
                        ptr[2] = config.value_24bit_b;
                    }
                } else if (config.fill_32bit) {
                    // fill with 32-bit values
                    for (u32* ptr = (u32*)start; ptr < (u32*)end; ++ptr)
                        *ptr = config.value_32bit;
                } else {
                    // fill with 16-bit values
                    for (u16* ptr = (u16*)start; ptr < (u16*)end; ++ptr)
                        *ptr = config.value_16bit;
                }

                LOG_TRACE(HW_GPU, "MemoryFill from 0x%08x to 0x%08x", config.GetStartAddress(), config.GetEndAddress());

                if (!is_second_filler) {
                    GSP_GPU::SignalInterrupt(GSP_GPU::InterruptId::PSC0);
                } else {
                    GSP_GPU::SignalInterrupt(GSP_GPU::InterruptId::PSC1);
                }

                VideoCore::g_renderer->hw_rasterizer->NotifyFlush(config.GetStartAddress(), config.GetEndAddress() - config.GetStartAddress());
            }

            // Reset "trigger" flag and set the "finish" flag
            // NOTE: This was confirmed to happen on hardware even if "address_start" is zero.
            config.trigger = 0;
            config.finished = 1;
        }
        break;
    }

    case GPU_REG_INDEX(display_transfer_config.trigger):
    {
        const auto& config = g_regs.display_transfer_config;
        if (config.trigger & 1) {
            u8* src_pointer = Memory::GetPhysicalPointer(config.GetPhysicalInputAddress());
            u8* dst_pointer = Memory::GetPhysicalPointer(config.GetPhysicalOutputAddress());

            if (config.scaling > config.ScaleXY) {
                LOG_CRITICAL(HW_GPU, "Unimplemented display transfer scaling mode %u", config.scaling.Value());
                UNIMPLEMENTED();
                break;
            }

            if (config.output_tiled && (config.scaling == config.ScaleXY || config.scaling == config.ScaleX)) {
                LOG_CRITICAL(HW_GPU, "Scaling is only implemented on tiled input");
                UNIMPLEMENTED();
                break;
            }

            bool horizontal_scale = config.scaling != config.NoScale;
            bool vertical_scale = config.scaling == config.ScaleXY;

            u32 output_width = config.output_width >> horizontal_scale;
            u32 output_height = config.output_height >> vertical_scale;

            u32 input_size = config.input_width * config.input_height * GPU::Regs::BytesPerPixel(config.input_format);
            u32 output_size = output_width * output_height * GPU::Regs::BytesPerPixel(config.output_format);

            VideoCore::g_renderer->hw_rasterizer->NotifyPreRead(config.GetPhysicalInputAddress(), input_size);

            if (config.raw_copy) {
                // Raw copies do not perform color conversion nor tiled->linear / linear->tiled conversions
                // TODO(Subv): Verify if raw copies perform scaling
                memcpy(dst_pointer, src_pointer, output_size);

                LOG_TRACE(HW_GPU, "DisplayTriggerTransfer: 0x%08x bytes from 0x%08x(%ux%u)-> 0x%08x(%ux%u), output format: %x, flags 0x%08X, Raw copy",
                          output_size,
                          config.GetPhysicalInputAddress(), config.input_width.Value(), config.input_height.Value(),
                          config.GetPhysicalOutputAddress(), config.output_width.Value(), config.output_height.Value(),
                          config.output_format.Value(), config.flags);

                GSP_GPU::SignalInterrupt(GSP_GPU::InterruptId::PPF);
                VideoCore::g_renderer->hw_rasterizer->NotifyFlush(config.GetPhysicalOutputAddress(), output_size);
                break;
            }

            for (u32 y = 0; y < output_height; ++y) {
                for (u32 x = 0; x < output_width; ++x) {
                    Math::Vec4<u8> src_color;

                    // Calculate the [x,y] position of the input image
                    // based on the current output position and the scale
                    u32 input_x = x << horizontal_scale;
                    u32 input_y = y << vertical_scale;

                    if (config.flip_vertically) {
                        // Flip the y value of the output data,
                        // we do this after calculating the [x,y] position of the input image
                        // to account for the scaling options.
                        y = output_height - y - 1;
                    }

                    u32 dst_bytes_per_pixel = GPU::Regs::BytesPerPixel(config.output_format);
                    u32 src_bytes_per_pixel = GPU::Regs::BytesPerPixel(config.input_format);
                    u32 src_offset;
                    u32 dst_offset;

                    if (config.output_tiled) {
                        // Interpret the input as linear and the output as tiled
                        u32 coarse_y = y & ~7;
                        u32 stride = output_width * dst_bytes_per_pixel;

                        src_offset = (input_x + input_y * config.input_width) * src_bytes_per_pixel;
                        dst_offset = VideoCore::GetMortonOffset(x, y, dst_bytes_per_pixel) + coarse_y * stride;
                    } else {
                        // Interpret the input as tiled and the output as linear
                        u32 coarse_y = input_y & ~7;
                        u32 stride = config.input_width * src_bytes_per_pixel;

                        src_offset = VideoCore::GetMortonOffset(input_x, input_y, src_bytes_per_pixel) + coarse_y * stride;
                        dst_offset = (x + y * output_width) * dst_bytes_per_pixel;
                    }

                    const u8* src_pixel = src_pointer + src_offset;
                    src_color = DecodePixel(config.input_format, src_pixel);
                    if (config.scaling == config.ScaleX) {
                        Math::Vec4<u8> pixel = DecodePixel(config.input_format, src_pixel + src_bytes_per_pixel);
                        src_color = ((src_color + pixel) / 2).Cast<u8>();
                    } else if (config.scaling == config.ScaleXY) {
                        Math::Vec4<u8> pixel1 = DecodePixel(config.input_format, src_pixel + 1 * src_bytes_per_pixel);
                        Math::Vec4<u8> pixel2 = DecodePixel(config.input_format, src_pixel + 2 * src_bytes_per_pixel);
                        Math::Vec4<u8> pixel3 = DecodePixel(config.input_format, src_pixel + 3 * src_bytes_per_pixel);
                        src_color = (((src_color + pixel1) + (pixel2 + pixel3)) / 4).Cast<u8>();
                    }

                    u8* dst_pixel = dst_pointer + dst_offset;
                    switch (config.output_format) {
                    case Regs::PixelFormat::RGBA8:
                        Color::EncodeRGBA8(src_color, dst_pixel);
                        break;

                    case Regs::PixelFormat::RGB8:
                        Color::EncodeRGB8(src_color, dst_pixel);
                        break;

                    case Regs::PixelFormat::RGB565:
                        Color::EncodeRGB565(src_color, dst_pixel);
                        break;

                    case Regs::PixelFormat::RGB5A1:
                        Color::EncodeRGB5A1(src_color, dst_pixel);
                        break;

                    case Regs::PixelFormat::RGBA4:
                        Color::EncodeRGBA4(src_color, dst_pixel);
                        break;

                    default:
                        LOG_ERROR(HW_GPU, "Unknown destination framebuffer format %x", config.output_format.Value());
                        break;
                    }
                }
            }

            LOG_TRACE(HW_GPU, "DisplayTriggerTransfer: 0x%08x bytes from 0x%08x(%ux%u)-> 0x%08x(%ux%u), dst format %x, flags 0x%08X",
                      config.output_height * output_width * GPU::Regs::BytesPerPixel(config.output_format),
                      config.GetPhysicalInputAddress(), config.input_width.Value(), config.input_height.Value(),
                      config.GetPhysicalOutputAddress(), output_width, output_height,
                      config.output_format.Value(), config.flags);

            g_regs.display_transfer_config.trigger = 0;
            GSP_GPU::SignalInterrupt(GSP_GPU::InterruptId::PPF);

            VideoCore::g_renderer->hw_rasterizer->NotifyFlush(config.GetPhysicalOutputAddress(), output_size);
        }
        break;
    }

    // Seems like writing to this register triggers processing
    case GPU_REG_INDEX(command_processor_config.trigger):
    {
        const auto& config = g_regs.command_processor_config;
        if (config.trigger & 1) {
            u32* buffer = (u32*)Memory::GetPhysicalPointer(config.GetPhysicalAddress());

            if (Pica::g_debug_context && Pica::g_debug_context->recorder) {
                Pica::g_debug_context->recorder->MemoryAccessed((u8*)buffer, config.size * sizeof(u32), config.GetPhysicalAddress());
            }

            Pica::CommandProcessor::ProcessCommandList(buffer, config.size);

            g_regs.command_processor_config.trigger = 0;
        }
        break;
    }

    default:
        break;
    }