Example #1
void VKTexture::CopyRectangleFromTexture(const AbstractTexture* src,
                                         const MathUtil::Rectangle<int>& src_rect, u32 src_layer,
                                         u32 src_level, const MathUtil::Rectangle<int>& dst_rect,
                                         u32 dst_layer, u32 dst_level)
{
  Texture2D* src_texture = static_cast<const VKTexture*>(src)->GetRawTexIdentifier();

  _assert_msg_(VIDEO, static_cast<u32>(src_rect.GetWidth()) <= src_texture->GetWidth() &&
                          static_cast<u32>(src_rect.GetHeight()) <= src_texture->GetHeight(),
               "Source rect is too large for CopyRectangleFromTexture");

  _assert_msg_(VIDEO, static_cast<u32>(dst_rect.GetWidth()) <= m_config.width &&
                          static_cast<u32>(dst_rect.GetHeight()) <= m_config.height,
               "Dest rect is too large for CopyRectangleFromTexture");

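  // VkImageCopy fields, in order: srcSubresource, srcOffset, dstSubresource, dstOffset, extent.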
  VkImageCopy image_copy = {
      {VK_IMAGE_ASPECT_COLOR_BIT, src_level, src_layer, src_texture->GetLayers()},
      {src_rect.left, src_rect.top, 0},
      {VK_IMAGE_ASPECT_COLOR_BIT, dst_level, dst_layer, m_config.layers},
      {dst_rect.left, dst_rect.top, 0},
      {static_cast<uint32_t>(src_rect.GetWidth()), static_cast<uint32_t>(src_rect.GetHeight()), 1}};

  // Must be called outside of a render pass.
  StateTracker::GetInstance()->EndRenderPass();

  src_texture->TransitionToLayout(g_command_buffer_mgr->GetCurrentCommandBuffer(),
                                  VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
  m_texture->TransitionToLayout(g_command_buffer_mgr->GetCurrentCommandBuffer(),
                                VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);

  vkCmdCopyImage(g_command_buffer_mgr->GetCurrentCommandBuffer(), src_texture->GetImage(),
                 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, m_texture->GetImage(),
                 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &image_copy);

  // Ensure both textures remain in the SHADER_READ_ONLY layout so they can be bound.
  src_texture->TransitionToLayout(g_command_buffer_mgr->GetCurrentCommandBuffer(),
                                  VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
  m_texture->TransitionToLayout(g_command_buffer_mgr->GetCurrentCommandBuffer(),
                                VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
}
Example #2
void VulkanRenderManager::Submit(int frame, bool triggerFence) {
	FrameData &frameData = frameData_[frame];
	if (frameData.hasInitCommands) {
		VkResult res = vkEndCommandBuffer(frameData.initCmd);
		_assert_msg_(G3D, res == VK_SUCCESS, "vkEndCommandBuffer failed (init)! result=%s", VulkanResultToString(res));
	}

	VkResult res = vkEndCommandBuffer(frameData.mainCmd);
	_assert_msg_(G3D, res == VK_SUCCESS, "vkEndCommandBuffer failed (main)! result=%s", VulkanResultToString(res));

	VkCommandBuffer cmdBufs[2];
	int numCmdBufs = 0;
	if (frameData.hasInitCommands) {
		cmdBufs[numCmdBufs++] = frameData.initCmd;
		frameData.hasInitCommands = false;
		if (splitSubmit_) {
			// Send the init commands off separately. Used this once to confirm that the cause of a device loss was in the init cmdbuf.
			VkSubmitInfo submit_info{ VK_STRUCTURE_TYPE_SUBMIT_INFO };
			submit_info.commandBufferCount = (uint32_t)numCmdBufs;
			submit_info.pCommandBuffers = cmdBufs;
			res = vkQueueSubmit(vulkan_->GetGraphicsQueue(), 1, &submit_info, VK_NULL_HANDLE);
			if (res == VK_ERROR_DEVICE_LOST) {
				_assert_msg_(G3D, false, "Lost the Vulkan device!");
			} else {
				_assert_msg_(G3D, res == VK_SUCCESS, "vkQueueSubmit failed (init)! result=%s", VulkanResultToString(res));
			}
			numCmdBufs = 0;
		}
	}
	cmdBufs[numCmdBufs++] = frameData.mainCmd;

	VkSubmitInfo submit_info{ VK_STRUCTURE_TYPE_SUBMIT_INFO };
	// waitStage must stay alive until the vkQueueSubmit call below, so declare it outside the if block.
	VkPipelineStageFlags waitStage[1]{ VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT };
	if (triggerFence && !frameData.skipSwap) {
		submit_info.waitSemaphoreCount = 1;
		submit_info.pWaitSemaphores = &acquireSemaphore_;
		submit_info.pWaitDstStageMask = waitStage;
	}
	submit_info.commandBufferCount = (uint32_t)numCmdBufs;
	submit_info.pCommandBuffers = cmdBufs;
	if (triggerFence && !frameData.skipSwap) {
		submit_info.signalSemaphoreCount = 1;
		submit_info.pSignalSemaphores = &renderingCompleteSemaphore_;
	}
	res = vkQueueSubmit(vulkan_->GetGraphicsQueue(), 1, &submit_info, triggerFence ? frameData.fence : VK_NULL_HANDLE);
	if (res == VK_ERROR_DEVICE_LOST) {
		_assert_msg_(G3D, false, "Lost the Vulkan device!");
	} else {
		_assert_msg_(G3D, res == VK_SUCCESS, "vkQueueSubmit failed (main, split=%d)! result=%s", (int)splitSubmit_, VulkanResultToString(res));
	}

	// When !triggerFence, we notify after syncing with Vulkan.
	if (useThread_ && triggerFence) {
		VLOG("PULL: Frame %d.readyForFence = true", frame);
		std::unique_lock<std::mutex> lock(frameData.push_mutex);
		frameData.readyForFence = true;
		frameData.push_condVar.notify_all();
	}
}
Example #3
	void PPCXEmitter::SetJumpTarget(FixupBranch const &branch)
	{
		s32 distance = s32(code) - (s32)branch.ptr;
		_assert_msg_(DYNA_REC, distance > -32767 && distance <= 32767,
			"SetJumpTarget out of range (%p calls %p)", code, branch.ptr);

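		// Patch the placeholder at branch.ptr now that the target (the current code pointer) is known.
		// 0x48000000 / 0x48000001 are the PowerPC I-form b / bl opcodes; the masked distance fills their
		// 24-bit LI field, while the conditional cases below are B-form bc encodings with a 14-bit BD field.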
		switch(branch.type) {
		case _B:
			*(u32*)branch.ptr =  (0x48000000 | ((s32)((distance) & 0x3fffffc)));
			break;
		case _BL:
			*(u32*)branch.ptr =  (0x48000001 | ((s32)((distance) & 0x3fffffc)));
			break;
		case _BEQ:			
			*(u32*)branch.ptr =  (0x41820000 | ((s16)(((distance)+1)) & 0xfffc));
			break;
		case _BNE:
			*(u32*)branch.ptr =  (0x40820000 | ((s16)(((distance)+1)) & 0xfffc));
			break;
		case _BLT:			
			*(u32*)branch.ptr =  (0x41800000 | ((s16)(((distance)+1)) & 0xfffc));
			break;
		case _BLE:
			*(u32*)branch.ptr =  (0x40810000 | ((s16)(((distance)+1)) & 0xfffc));
			break;
		case _BGT:
			*(u32*)branch.ptr =  (0x41810000 | ((s16)(((distance)+1)) & 0xfffc));
			break;
		case _BGE:
			*(u32*)branch.ptr =  (0x40800000 | ((s16)(((distance)+1)) & 0xfffc));
			break;
		default:
			// Error !!!
			_assert_msg_(DYNA_REC, 0, "SetJumpTarget unknown branch type: %d", branch.type);
			break;
		}
	}
Example #4
/**
 * GSP_GPU::RegisterInterruptRelayQueue service function
 *  Inputs:
 *      1 : "Flags" field, purpose is unknown
 *      3 : Handle to GSP synchronization event
 *  Outputs:
 *      0 : Result of function, 0 on success, otherwise error code
 *      2 : Thread index into GSP command buffer
 *      4 : Handle to GSP shared memory
 */
void RegisterInterruptRelayQueue(Service::Interface* self) {
    u32* cmd_buff = Service::GetCommandBuffer();
    u32 flags = cmd_buff[1];
    g_interrupt_event = cmd_buff[3];
    g_shared_memory = Kernel::CreateSharedMemory("GSPSharedMem");

    _assert_msg_(GSP, (g_interrupt_event != 0), "handle is not valid!");

    cmd_buff[2] = g_thread_id++; // ThreadID
    cmd_buff[4] = g_shared_memory; // GSP shared memory

    Kernel::SignalEvent(g_interrupt_event); // TODO(bunnei): Is this correct?
}
Example #5
CEXIMemoryCard::CEXIMemoryCard(const int index, bool gciFolder) : card_index(index)
{
  _assert_msg_(EXPANSIONINTERFACE, static_cast<std::size_t>(index) < s_et_cmd_done.size(),
               "Trying to create invalid memory card index %d.", index);

  // NOTE: When loading a save state, DMA completion callbacks (s_et_transfer_complete) and such
  //   may have been restored; we need to anticipate those arriving.

  interruptSwitch = 0;
  m_bInterruptSet = 0;
  command = 0;
  status = MC_STATUS_BUSY | MC_STATUS_UNLOCKED | MC_STATUS_READY;
  m_uPosition = 0;
  memset(programming_buffer, 0, sizeof(programming_buffer));
  // Nintendo Memory Card EXI IDs
  // 0x00000004 Memory Card 59     4Mbit
  // 0x00000008 Memory Card 123    8Mb
  // 0x00000010 Memory Card 251    16Mb
  // 0x00000020 Memory Card 507    32Mb
  // 0x00000040 Memory Card 1019   64Mb
  // 0x00000080 Memory Card 2043   128Mb

  // 0x00000510 16Mb "bigben" card
  // card_id = 0xc243;
  card_id = 0xc221;  // It's a Nintendo brand memcard

  // The following games have issues with memory cards bigger than 16Mb
  // Darkened Skye GDQE6S GDQP6S
  // WTA Tour Tennis GWTEA4 GWTJA4 GWTPA4
  // Disney Sports : Skate Boarding GDXEA4 GDXPA4 GDXJA4
  // Disney Sports : Soccer GDKEA4
  // Use a 16Mb (251 block) memory card for these games
  bool useMC251;
  IniFile gameIni = SConfig::GetInstance().LoadGameIni();
  gameIni.GetOrCreateSection("Core")->Get("MemoryCard251", &useMC251, false);
  u16 sizeMb = useMC251 ? MemCard251Mb : MemCard2043Mb;

  if (gciFolder)
  {
    SetupGciFolder(sizeMb);
  }
  else
  {
    SetupRawMemcard(sizeMb);
  }

  memory_card_size = memorycard->GetCardId() * SIZE_TO_Mb;
  u8 header[20] = {0};
  memorycard->Read(0, static_cast<s32>(ArraySize(header)), header);
  SetCardFlashID(header, card_index);
}
Example #6
void Interpreter::unknown_instruction(UGeckoInstruction _inst)
{
	std::string disasm = GekkoDisassembler::Disassemble(PowerPC::HostRead_U32(last_pc), last_pc);
	NOTICE_LOG(POWERPC, "Last PC = %08x : %s", last_pc, disasm.c_str());
	Dolphin_Debugger::PrintCallstack();
	NOTICE_LOG(POWERPC, "\nIntCPU: Unknown instruction %08x at PC = %08x  last_PC = %08x  LR = %08x\n", _inst.hex, PC, last_pc, LR);
	for (int i = 0; i < 32; i += 4)
		NOTICE_LOG(POWERPC, "r%d: 0x%08x r%d: 0x%08x r%d:0x%08x r%d: 0x%08x",
			i, rGPR[i],
			i + 1, rGPR[i + 1],
			i + 2, rGPR[i + 2],
			i + 3, rGPR[i + 3]);
	_assert_msg_(POWERPC, 0, "\nIntCPU: Unknown instruction %08x at PC = %08x  last_PC = %08x  LR = %08x\n", _inst.hex, PC, last_pc, LR);
}
Example #7
u32 sceCtrlReadBufferPositive(u32 ctrlDataPtr, u32 nBufs)
{
	DEBUG_LOG(HLE,"sceCtrlReadBufferPositive(%08x, %i)", ctrlDataPtr, nBufs);
	_assert_msg_(HLE, nBufs > 0, "sceCtrlReadBufferPositive: trying to read nothing?");

	std::lock_guard<std::recursive_mutex> guard(ctrlMutex);
	// Let's just ignore if ctrl is inited or not; some games don't init it (Super Fruit Fall)
	//if (ctrlInited)
	//{
		SampleControls();
		memcpy(Memory::GetPointer(ctrlDataPtr), &ctrl, sizeof(_ctrl_data));
	//}
	return 1;
}
Example #8
u32 sceCtrlSetSamplingMode(u32 mode)
{
	u32 retVal = 0;

	DEBUG_LOG(HLE,"sceCtrlSetSamplingMode(%i)", mode);
	// mode is unsigned, so only the upper bound needs checking.
	_assert_msg_(HLE, mode <= 1, "sceCtrlSetSamplingMode: mode outside expected range.");

	if (ctrlInited)
	{
		retVal = analogEnabled ? CTRL_MODE_ANALOG : CTRL_MODE_DIGITAL;
		analogEnabled = (mode == CTRL_MODE_ANALOG);
	}
	return retVal;
}
Example #9
/// Synchronize to an OS service
static Result SendSyncRequest(Handle handle) {
    Kernel::Object* object = Kernel::g_object_pool.GetFast<Kernel::Object>(handle);

    _assert_msg_(KERNEL, (object != nullptr), "called, but kernel object is nullptr!");
    DEBUG_LOG(SVC, "called handle=0x%08X(%s)", handle, object->GetTypeName().c_str());

    bool wait = false;
    Result res = object->SyncRequest(&wait);
    if (wait) {
        Kernel::WaitCurrentThread(WAITTYPE_SYNCH); // TODO(bunnei): Is this correct?
    }

    return res;
}
Example #10
void ArmRegCache::Unlock(ARMReg R0, ARMReg R1, ARMReg R2, ARMReg R3)
{
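    // Only R0 is assert-checked against a double unlock here; R1-R3 are simply marked free.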
    for (u8 RegNum = 0; RegNum < NUMARMREG; ++RegNum)
    {
        if (ArmRegs[RegNum].Reg == R0)
        {
            _assert_msg_(_DYNA_REC, !ArmRegs[RegNum].free, "This register is already unlocked");
            ArmRegs[RegNum].free = true;
        }
        if ( R1 != INVALID_REG && ArmRegs[RegNum].Reg == R1) ArmRegs[RegNum].free = true;
        if ( R2 != INVALID_REG && ArmRegs[RegNum].Reg == R2) ArmRegs[RegNum].free = true;
        if ( R3 != INVALID_REG && ArmRegs[RegNum].Reg == R3) ArmRegs[RegNum].free = true;
    }
}
Example #11
void FPURegCache::BindToRegister(const int i, bool doLoad, bool makeDirty) {
	_assert_msg_(DYNA_REC, !regs[i].location.IsImm(), "WTF - load - imm");
	if (!regs[i].away) {
		// Reg is at home in the memory register file. Let's pull it out.
		X64Reg xr = GetFreeXReg();
		_assert_msg_(DYNA_REC, xr < NUM_X_FPREGS, "WTF - load - invalid reg");
		xregs[xr].mipsReg = i;
		xregs[xr].dirty = makeDirty;
		OpArg newloc = ::Gen::R(xr);
		if (doLoad)	{
			if (!regs[i].location.IsImm() && (regs[i].location.offset & 0x3)) {
				PanicAlert("WARNING - misaligned fp register location %i", i);
			}
			emit->MOVSS(xr, regs[i].location);
		}
		regs[i].location = newloc;
		regs[i].away = true;
	} else {
		// There are no immediates in the FPR reg file, so we already had this in a register. Make dirty as necessary.
		xregs[RX(i)].dirty |= makeDirty;
		_assert_msg_(DYNA_REC, regs[i].location.IsSimpleReg(), "not loaded and not simple.");
	}
}
Example #12
u16 DSPCore_ReadRegister(int reg)
{
	switch (reg)
	{
	case DSP_REG_AR0:
	case DSP_REG_AR1:
	case DSP_REG_AR2:
	case DSP_REG_AR3:
		return g_dsp.r.ar[reg - DSP_REG_AR0];
	case DSP_REG_IX0:
	case DSP_REG_IX1:
	case DSP_REG_IX2:
	case DSP_REG_IX3:
		return g_dsp.r.ix[reg - DSP_REG_IX0];
	case DSP_REG_WR0:
	case DSP_REG_WR1:
	case DSP_REG_WR2:
	case DSP_REG_WR3:
		return g_dsp.r.wr[reg - DSP_REG_WR0];
	case DSP_REG_ST0:
	case DSP_REG_ST1:
	case DSP_REG_ST2:
	case DSP_REG_ST3:
		return g_dsp.r.st[reg - DSP_REG_ST0];
	case DSP_REG_ACH0:
	case DSP_REG_ACH1:
		return g_dsp.r.ac[reg - DSP_REG_ACH0].h;
	case DSP_REG_CR:     return g_dsp.r.cr;
	case DSP_REG_SR:     return g_dsp.r.sr;
	case DSP_REG_PRODL:  return g_dsp.r.prod.l;
	case DSP_REG_PRODM:  return g_dsp.r.prod.m;
	case DSP_REG_PRODH:  return g_dsp.r.prod.h;
	case DSP_REG_PRODM2: return g_dsp.r.prod.m2;
	case DSP_REG_AXL0:
	case DSP_REG_AXL1:
		return g_dsp.r.ax[reg - DSP_REG_AXL0].l;
	case DSP_REG_AXH0:
	case DSP_REG_AXH1:
		return g_dsp.r.ax[reg - DSP_REG_AXH0].h;
	case DSP_REG_ACL0:
	case DSP_REG_ACL1:
		return g_dsp.r.ac[reg - DSP_REG_ACL0].l;
	case DSP_REG_ACM0:
	case DSP_REG_ACM1:
		return g_dsp.r.ac[reg - DSP_REG_ACM0].m;
	default:
		_assert_msg_(DSP_CORE, 0, "cannot happen");
		return 0;
	}
}
Example #13
void FPURegCache::DiscardVS(int vreg) {
	_assert_msg_(JIT, !vregs[vreg].location.IsImm(), "FPU can't handle imm yet.");

	if (vregs[vreg].away) {
		_assert_msg_(JIT, vregs[vreg].lane != 0, "VS expects a SIMD reg.");
		X64Reg xr = vregs[vreg].location.GetSimpleReg();
		_assert_msg_(JIT, xr >= 0 && xr < NUM_X_FPREGS, "DiscardR: MipsReg had bad X64Reg");
		// Note that we DO NOT write it back here. That's the whole point of Discard.
		for (int i = 0; i < 4; ++i) {
			int mr = xregs[xr].mipsRegs[i];
			if (mr != -1) {
				regs[mr].location = GetDefaultLocation(mr);
				regs[mr].away = false;
				regs[mr].tempLocked = false;
			}
			xregs[xr].mipsRegs[i] = -1;
		}
		xregs[xr].dirty = false;
	} else {
		vregs[vreg].tempLocked = false;
	}
	Invariant();
}
Example #14
EventType* RegisterEvent(const std::string& name, TimedCallback callback)
{
    // check for existing type with same name.
    // we want event type names to remain unique so that we can use them for serialization.
    _assert_msg_(POWERPC, s_event_types.find(name) == s_event_types.end(),
                 "CoreTiming Event \"%s\" is already registered. Events should only be registered "
                 "during Init to avoid breaking save states.",
                 name.c_str());

    auto info = s_event_types.emplace(name, EventType{callback, nullptr});
    EventType* event_type = &info.first->second;
    event_type->name = &info.first->first;
    return event_type;
}
Example #15
ARMReg ArmRegCache::GetReg(bool AutoLock)
{
    for (u8 a = 0; a < NUMARMREG; ++a)
        if (ArmRegs[a].free)
        {
            // Alright, this one is free
            if (AutoLock)
                ArmRegs[a].free = false;
            return ArmRegs[a].Reg;
        }
    // Uh Oh, we have all them locked....
    _assert_msg_(_DYNA_REC_, false, "All available registers are locked dumb dumb");
    return R0;
}
Example #16
/**
 * GSP_GPU::RegisterInterruptRelayQueue service function
 *  Inputs:
 *      1 : "Flags" field, purpose is unknown
 *      3 : Handle to GSP synchronization event
 *  Outputs:
 *      0 : Result of function, 0 on success, otherwise error code
 *      2 : Thread index into GSP command buffer
 *      4 : Handle to GSP shared memory
 */
static void RegisterInterruptRelayQueue(Service::Interface* self) {
    u32* cmd_buff = Service::GetCommandBuffer();
    u32 flags = cmd_buff[1];
    g_interrupt_event = cmd_buff[3];
    g_shared_memory = Kernel::CreateSharedMemory("GSPSharedMem");

    _assert_msg_(GSP, (g_interrupt_event != 0), "handle is not valid!");

    cmd_buff[1] = 0x2A07; // Value verified by 3dmoo team, purpose unknown, but needed for GSP init
    cmd_buff[2] = g_thread_id++; // Thread ID
    cmd_buff[4] = g_shared_memory; // GSP shared memory

    Kernel::SignalEvent(g_interrupt_event); // TODO(bunnei): Is this correct?
}
Example #17
void TextureCache::CopyTextureRectangle(TCacheEntry* dst_texture,
	const MathUtil::Rectangle<int>& dst_rect,
	Texture2D* src_texture,
	const MathUtil::Rectangle<int>& src_rect)
{
	_assert_msg_(VIDEO, static_cast<u32>(src_rect.GetWidth()) <= src_texture->GetWidth() &&
		static_cast<u32>(src_rect.GetHeight()) <= src_texture->GetHeight(),
		"Source rect is too large for CopyRectangleFromTexture");

	_assert_msg_(VIDEO, static_cast<u32>(dst_rect.GetWidth()) <= dst_texture->config.width &&
		static_cast<u32>(dst_rect.GetHeight()) <= dst_texture->config.height,
		"Dest rect is too large for CopyRectangleFromTexture");

	VkImageCopy image_copy = {
		{ VK_IMAGE_ASPECT_COLOR_BIT, 0, 0,
		src_texture->GetLayers() },        // VkImageSubresourceLayers    srcSubresource
		{ src_rect.left, src_rect.top, 0 },  // VkOffset3D                  srcOffset
		{ VK_IMAGE_ASPECT_COLOR_BIT, 0, 0,  // VkImageSubresourceLayers    dstSubresource
		dst_texture->config.layers },
		{ dst_rect.left, dst_rect.top, 0 },  // VkOffset3D                  dstOffset
		{ static_cast<uint32_t>(src_rect.GetWidth()), static_cast<uint32_t>(src_rect.GetHeight()),
		1 }  // VkExtent3D                  extent
	};

	// Must be called outside of a render pass.
	StateTracker::GetInstance()->EndRenderPass();

	src_texture->TransitionToLayout(g_command_buffer_mgr->GetCurrentCommandBuffer(),
		VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
	dst_texture->GetTexture()->TransitionToLayout(g_command_buffer_mgr->GetCurrentCommandBuffer(),
		VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);

	vkCmdCopyImage(g_command_buffer_mgr->GetCurrentCommandBuffer(), src_texture->GetImage(),
		VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, dst_texture->GetTexture()->GetImage(),
		VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &image_copy);
}
Example #18
/// Duplicates a kernel handle
static Result DuplicateHandle(Handle* out, Handle handle) {
    DEBUG_LOG(SVC, "called handle=0x%08X", handle);

    // Translate kernel handles -> real handles
    if (handle == Kernel::CurrentThread) {
        handle = Kernel::GetCurrentThreadHandle();
    }
    _assert_msg_(KERNEL, (handle != Kernel::CurrentProcess),
        "(UNIMPLEMENTED) process handle duplication!");

    // TODO(bunnei): FixMe - This is a hack to return the handle that we were asked to duplicate.
    *out = handle;

    return 0;
}
Example #19
// Can be called multiple times with no bad side effects. This is so that we can either begin a frame the normal way,
// or stop it in the middle for a synchronous readback, then start over again mostly normally but without repeating
// the backbuffer image acquisition.
void VulkanRenderManager::BeginSubmitFrame(int frame) {
	FrameData &frameData = frameData_[frame];
	if (!frameData.hasBegun) {
		// Get the index of the next available swapchain image, and a semaphore to block command buffer execution on.
		// Now, I wonder if we should do this early in the frame or late? Right now we do it early, which should be fine.
		VkResult res = vkAcquireNextImageKHR(vulkan_->GetDevice(), vulkan_->GetSwapchain(), UINT64_MAX, acquireSemaphore_, (VkFence)VK_NULL_HANDLE, &frameData.curSwapchainImage);
		if (res == VK_SUBOPTIMAL_KHR) {
			// Hopefully the resize will happen shortly. Ignore - one frame might look bad or something.
		} else if (res == VK_ERROR_OUT_OF_DATE_KHR) {
			frameData.skipSwap = true;
		} else {
			_assert_msg_(G3D, res == VK_SUCCESS, "vkAcquireNextImageKHR failed! result=%s", VulkanResultToString(res));
		}

		VkCommandBufferBeginInfo begin{ VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO };
		begin.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
		res = vkBeginCommandBuffer(frameData.mainCmd, &begin);
		_assert_msg_(G3D, res == VK_SUCCESS, "vkBeginCommandBuffer failed! result=%s", VulkanResultToString(res));

		queueRunner_.SetBackbuffer(framebuffers_[frameData.curSwapchainImage], swapchainImages_[frameData.curSwapchainImage].image);

		frameData.hasBegun = true;
	}
}
Example #20
void Jit64::ps_arith(UGeckoInstruction inst)
{
	INSTRUCTION_START
	JITDISABLE(bJITPairedOff);
	FALLBACK_IF(inst.Rc);

	switch (inst.SUBOP5)
	{
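	// SUBOP5 selects the paired-single arithmetic op; note ps_mul (25) uses FC, not FB, as its second operand.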
	case 18: tri_op(inst.FD, inst.FA, inst.FB, false, &XEmitter::DIVPD); break; //div
	case 20: tri_op(inst.FD, inst.FA, inst.FB, false, &XEmitter::SUBPD); break; //sub
	case 21: tri_op(inst.FD, inst.FA, inst.FB, true,  &XEmitter::ADDPD); break; //add
	case 25: tri_op(inst.FD, inst.FA, inst.FC, true, &XEmitter::MULPD); break; //mul
	default:
		_assert_msg_(DYNA_REC, 0, "ps_arith WTF!!!");
	}
}
Example #21
// This is to be called when outside threads, such as the graphics thread, want to
// schedule things to be executed on the main thread.
void ScheduleEvent_Threadsafe(s64 cyclesIntoFuture, int event_type, u64 userdata)
{
	_assert_msg_(POWERPC, !Core::IsCPUThread(), "ScheduleEvent_Threadsafe from wrong thread");
	if (Core::g_want_determinism)
	{
		ERROR_LOG(POWERPC, "Someone scheduled an off-thread \"%s\" event while netplay or movie play/record "
		                   "was active.  This is likely to cause a desync.",
		                   event_types[event_type].name.c_str());
	}
	std::lock_guard<std::mutex> lk(tsWriteLock);
	Event ne;
	ne.time = g_globalTimer + cyclesIntoFuture;
	ne.type = event_type;
	ne.userdata = userdata;
	tsQueue.Push(ne);
}
Example #22
void Init()
{
  s_devnode_name_map.clear();

  // During initialization we use udev to iterate over all /dev/input/event* devices.
  // Note: the Linux kernel is currently limited to just 32 event devices. If this ever
  // changes, hopefully udev will take care of this.

  udev* udev = udev_new();
  _assert_msg_(PAD, udev != nullptr, "Couldn't initialize libudev.");

  // List all input devices
  udev_enumerate* enumerate = udev_enumerate_new(udev);
  udev_enumerate_add_match_subsystem(enumerate, "input");
  udev_enumerate_scan_devices(enumerate);
  udev_list_entry* devices = udev_enumerate_get_list_entry(enumerate);

  // Iterate over all input devices
  udev_list_entry* dev_list_entry;
  udev_list_entry_foreach(dev_list_entry, devices)
  {
    const char* path = udev_list_entry_get_name(dev_list_entry);

    udev_device* dev = udev_device_new_from_syspath(udev, path);

    const char* devnode = udev_device_get_devnode(dev);
    // We only care about devices which we have read/write access to.
    if (devnode && access(devnode, W_OK) == 0)
    {
      // Unfortunately udev gives us no way to filter out the non event device interfaces.
      // So we open it and see if it works with evdev ioctls or not.
      std::string name = GetName(devnode);
      auto input = std::make_shared<evdevDevice>(devnode);

      if (input->IsInteresting())
      {
        g_controller_interface.AddDevice(std::move(input));
        s_devnode_name_map.insert(std::pair<std::string, std::string>(devnode, name));
      }
    }
    udev_device_unref(dev);
  }
  udev_enumerate_unref(enumerate);
  udev_unref(udev);

  StartHotplugThread();
}
Example #23
// This must be run ONLY from within the CPU thread
// cyclesIntoFuture may be VERY inaccurate if called from anything
// other than Advance
void ScheduleEvent(s64 cyclesIntoFuture, int event_type, u64 userdata)
{
	_assert_msg_(POWERPC, Core::IsCPUThread() || Core::GetState() == Core::CORE_PAUSE,
				 "ScheduleEvent from wrong thread");

	Event *ne = GetNewEvent();
	ne->userdata = userdata;
	ne->type = event_type;
	ne->time = GetTicks() + cyclesIntoFuture;

	// If this event needs to be scheduled before the next advance(), force one early
	if (!globalTimerIsSane)
		ForceExceptionCheck(cyclesIntoFuture);


	AddEventToQueue(ne);
}
Example #24
void VKTexture::ScaleRectangleFromTexture(const AbstractTexture* source,
                                          const MathUtil::Rectangle<int>& src_rect,
                                          const MathUtil::Rectangle<int>& dst_rect)
{
  Texture2D* src_texture = static_cast<const VKTexture*>(source)->GetRawTexIdentifier();

  // Can't do this within a game render pass.
  StateTracker::GetInstance()->EndRenderPass();
  StateTracker::GetInstance()->SetPendingRebind();

  // Can't render to a non-rendertarget (no framebuffer).
  _assert_msg_(VIDEO, m_config.rendertarget,
               "Destination texture for partial copy is not a rendertarget");

  // Render pass expects dst_texture to be in COLOR_ATTACHMENT_OPTIMAL state.
  // src_texture should already be in SHADER_READ_ONLY state, but transition in case (XFB).
  src_texture->TransitionToLayout(g_command_buffer_mgr->GetCurrentCommandBuffer(),
                                  VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
  m_texture->TransitionToLayout(g_command_buffer_mgr->GetCurrentCommandBuffer(),
                                VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);

  VkRenderPass render_pass = g_object_cache->GetRenderPass(
      m_texture->GetFormat(), VK_FORMAT_UNDEFINED, 1, VK_ATTACHMENT_LOAD_OP_DONT_CARE);
  UtilityShaderDraw draw(g_command_buffer_mgr->GetCurrentCommandBuffer(),
                         g_object_cache->GetPipelineLayout(PIPELINE_LAYOUT_STANDARD), render_pass,
                         g_shader_cache->GetPassthroughVertexShader(),
                         g_shader_cache->GetPassthroughGeometryShader(),
                         TextureCache::GetInstance()->GetCopyShader());

  VkRect2D region = {
      {dst_rect.left, dst_rect.top},
      {static_cast<u32>(dst_rect.GetWidth()), static_cast<u32>(dst_rect.GetHeight())}};
  draw.BeginRenderPass(m_framebuffer, region);
  draw.SetPSSampler(0, src_texture->GetView(), g_object_cache->GetLinearSampler());
  draw.DrawQuad(dst_rect.left, dst_rect.top, dst_rect.GetWidth(), dst_rect.GetHeight(),
                src_rect.left, src_rect.top, 0, src_rect.GetWidth(), src_rect.GetHeight(),
                static_cast<int>(src_texture->GetWidth()),
                static_cast<int>(src_texture->GetHeight()));
  draw.EndRenderPass();

  // Ensure both textures remain in the SHADER_READ_ONLY layout so they can be bound.
  src_texture->TransitionToLayout(g_command_buffer_mgr->GetCurrentCommandBuffer(),
                                  VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
  m_texture->TransitionToLayout(g_command_buffer_mgr->GetCurrentCommandBuffer(),
                                VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
}
Example #25
/**
 * Maps a shared memory block to an address in system memory
 * @param handle Shared memory block handle
 * @param address Address in system memory to map shared memory block to
 * @param permissions Memory block map permissions (specified by SVC field)
 * @param other_permissions Memory block map other permissions (specified by SVC field)
 * @return Result of operation, 0 on success, otherwise error code
 */
Result MapSharedMemory(u32 handle, u32 address, MemoryPermission permissions, 
    MemoryPermission other_permissions) {

    if (address < Memory::SHARED_MEMORY_VADDR || address >= Memory::SHARED_MEMORY_VADDR_END) {
        ERROR_LOG(KERNEL, "cannot map handle=0x%08X, address=0x%08X outside of shared mem bounds!",
            handle, address);
        return -1;
    }
    SharedMemory* shared_memory = Kernel::g_object_pool.GetFast<SharedMemory>(handle);
    _assert_msg_(KERNEL, (shared_memory != nullptr), "handle 0x%08X is not valid!", handle);

    shared_memory->base_address = address;
    shared_memory->permissions = permissions;
    shared_memory->other_permissions = other_permissions;

    return 0;
}
Example #26
void GenerateDepalShader(char *buffer, GEBufferFormat pixelFormat, ShaderLanguage language) {
	switch (language) {
	case GLSL_140:
		GenerateDepalShaderFloat(buffer, pixelFormat, language);
		break;
	case GLSL_300:
	case GLSL_VULKAN:
	case HLSL_D3D11:
		GenerateDepalShader300(buffer, pixelFormat, language);
		break;
	case HLSL_DX9:
		GenerateDepalShaderFloat(buffer, pixelFormat, language);
		break;
	case HLSL_D3D11_LEVEL9:
	default:
		_assert_msg_(G3D, false, "Depal shader language not supported: %d", (int)language);
	}
}
Example #27
/// Synchronize to an OS service
static Result SendSyncRequest(Handle handle) {
    // TODO(yuriks): ObjectPool::Get tries to check the Object type, which fails since this is a generic base Object,
    // so we are forced to use GetFast and manually verify the handle.
    if (!Kernel::g_object_pool.IsValid(handle)) {
        return InvalidHandle(ErrorModule::Kernel).raw;
    }
    Kernel::Object* object = Kernel::g_object_pool.GetFast<Kernel::Object>(handle);

    _assert_msg_(KERNEL, (object != nullptr), "called, but kernel object is nullptr!");
    DEBUG_LOG(SVC, "called handle=0x%08X(%s)", handle, object->GetTypeName().c_str());

    ResultVal<bool> wait = object->SyncRequest();
    if (wait.Succeeded() && *wait) {
        Kernel::WaitCurrentThread(WAITTYPE_SYNCH); // TODO(bunnei): Is this correct?
    }

    return wait.Code().raw;
}
Example #28
GekkoOPInfo *GetOpInfo(UGeckoInstruction _inst)
{
	GekkoOPInfo *info = m_infoTable[_inst.OPCD];
	if ((info->type & 0xFFFFFF) == OPTYPE_SUBTABLE)
	{
		int table = info->type>>24;
		switch(table)
		{
		case 4:  return m_infoTable4[_inst.SUBOP10];
		case 19: return m_infoTable19[_inst.SUBOP10];
		case 31: return m_infoTable31[_inst.SUBOP10];
		case 59: return m_infoTable59[_inst.SUBOP5];
		case 63: return m_infoTable63[_inst.SUBOP10];
		default:
			_assert_msg_(POWERPC,0,"GetOpInfo - invalid subtable op %08x @ %08x", _inst.hex, PC);
			return 0;
		}
	}
	// Not a subtable op; return the primary-table entry directly.
	return info;
}
Example #29
/// Stops the current thread
void StopThread(Handle handle, const char* reason) {
    Thread* thread = g_object_pool.GetFast<Thread>(handle);
    _assert_msg_(KERNEL, (thread != nullptr), "called, but thread is nullptr!");
    
    ChangeReadyState(thread, false);
    thread->status = THREADSTATUS_DORMANT;
    for (size_t i = 0; i < thread->waiting_threads.size(); ++i) {
        const Handle waiting_thread = thread->waiting_threads[i];
        if (VerifyWait(waiting_thread, WAITTYPE_THREADEND, handle)) {
            ResumeThreadFromWait(waiting_thread);
        }
    }
    thread->waiting_threads.clear();

    // Stopped threads are never waiting.
    thread->wait_type = WAITTYPE_NONE;
    thread->wait_handle = 0;
}
Example #30
void FPURegCache::Flush() {
	for (int i = 0; i < NUM_MIPS_FPRS; i++) {
		if (regs[i].locked) {
			PanicAlert("Somebody forgot to unlock MIPS reg %i.", i);
		}
		if (regs[i].away) {
			if (regs[i].location.IsSimpleReg()) {
				X64Reg xr = RX(i);
				StoreFromRegister(i);
				xregs[xr].dirty = false;
			} else if (regs[i].location.IsImm()) {
				StoreFromRegister(i);
			} else {
				_assert_msg_(DYNA_REC,0,"Jit64 - Flush unhandled case, reg %i PC: %08x", i, mips->pc);
			}
		}
	}
}