Example 1
0
FixupBranch EmuCodeBlock::CheckIfSafeAddress(const OpArg& reg_value, X64Reg reg_addr,
                                             BitSet32 registers_in_use)
{
  registers_in_use[reg_addr] = true;
  if (reg_value.IsSimpleReg())
    registers_in_use[reg_value.GetSimpleReg()] = true;

  // Get ourselves two free registers
  if (registers_in_use[RSCRATCH])
    PUSH(RSCRATCH);
  if (registers_in_use[RSCRATCH_EXTRA])
    PUSH(RSCRATCH_EXTRA);

  if (reg_addr != RSCRATCH_EXTRA)
    MOV(32, R(RSCRATCH_EXTRA), R(reg_addr));

  // Perform lookup to see if we can use fast path.
  MOV(64, R(RSCRATCH), ImmPtr(&PowerPC::dbat_table[0]));
  SHR(32, R(RSCRATCH_EXTRA), Imm8(PowerPC::BAT_INDEX_SHIFT));
  TEST(32, MComplex(RSCRATCH, RSCRATCH_EXTRA, SCALE_4, 0), Imm32(PowerPC::BAT_PHYSICAL_BIT));

  if (registers_in_use[RSCRATCH_EXTRA])
    POP(RSCRATCH_EXTRA);
  if (registers_in_use[RSCRATCH])
    POP(RSCRATCH);

  return J_CC(CC_Z, m_far_code.Enabled());
}
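
The emitted sequence is a straight table lookup; a minimal sketch of the same predicate in plain C++ (assuming, as the SCALE_4 addressing implies, one u32 of flags per BAT_INDEX_SHIFT-sized region):

// Hedged sketch: a set BAT_PHYSICAL_BIT means the address maps to physical RAM
// and the fast path is safe; the emitted J_CC(CC_Z, ...) is taken when it is not.
static bool IsSafeAddress(const u32* dbat_table, u32 address)
{
  return (dbat_table[address >> PowerPC::BAT_INDEX_SHIFT] & PowerPC::BAT_PHYSICAL_BIT) != 0;
}
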
Example 2
0
void GPRRegCache::Start(MIPSState *mips, MIPSAnalyst::AnalysisResults &stats) {
	this->mips = mips;
	for (int i = 0; i < NUM_X_REGS; i++) {
		xregs[i].free = true;
		xregs[i].dirty = false;
		xregs[i].allocLocked = false;
	}
	memset(regs, 0, sizeof(regs));
	OpArg base = GetDefaultLocation(MIPS_REG_ZERO);
	for (int i = 0; i < NUM_MIPS_GPRS; i++) {
		regs[i].location = base;
		base.IncreaseOffset(sizeof(u32));
	}

	// todo: sort to find the most popular regs
	/*
	int maxPreload = 2;
	for (int i = 0; i < NUM_MIPS_GPRS; i++)
	{
		if (stats.numReads[i] > 2 || stats.numWrites[i] >= 2)
		{
			LoadToX64(i, true, false); //stats.firstRead[i] <= stats.firstWrite[i], false);
			maxPreload--;
			if (!maxPreload)
				break;
		}
	}*/
	// Find the top regs and preload them (load bursts aren't bad).
	// But only preload if the reg is written, or read >= 3 times.
}
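
The loop relies on the GPR slots behind GetDefaultLocation() being contiguous 32-bit words; a minimal sketch of the resulting mapping (plain C++, with that contiguity as the stated assumption):

// Hedged sketch: after Start(), MIPS GPR i defaults to the i-th u32 slot after
// the zero register's slot, which is what IncreaseOffset(sizeof(u32)) builds up.
static u32* DefaultSlot(u32* zero_slot, int i)
{
	return zero_slot + i;
}
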
Example 3
0
void EmuCodeBlock::UnsafeWriteRegToReg(OpArg reg_value, X64Reg reg_addr, int accessSize, s32 offset,
                                       bool swap, MovInfo* info)
{
  if (info)
  {
    info->address = GetWritableCodePtr();
    info->nonAtomicSwapStore = false;
  }

  OpArg dest = MComplex(RMEM, reg_addr, SCALE_1, offset);
  if (reg_value.IsImm())
  {
    if (swap)
      reg_value = SwapImmediate(accessSize, reg_value);
    MOV(accessSize, dest, reg_value);
  }
  else if (swap)
  {
    SwapAndStore(accessSize, dest, reg_value.GetSimpleReg(), info);
  }
  else
  {
    MOV(accessSize, dest, reg_value);
  }
}
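
When swap is requested the store must produce a big-endian image in little-endian host memory; a scalar sketch of the byte order involved (the emitter helpers above do this either at assembly time for immediates or via a byte-swapping store such as MOVBE/BSWAP for registers):

// Hedged sketch: the value is byte-reversed so that a plain 32-bit store writes
// the guest's big-endian representation.
static u32 ByteSwap32(u32 value)
{
  return ((value & 0x000000FFu) << 24) | ((value & 0x0000FF00u) << 8) |
         ((value & 0x00FF0000u) >> 8) | ((value & 0xFF000000u) >> 24);
}
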
Example 4
0
void JitSafeMem::DoSlowWrite(const void *safeFunc, const OpArg& src, int suboffset)
{
	if (iaddr_ != (u32) -1)
		jit_->MOV(32, R(EAX), Imm32((iaddr_ + suboffset) & alignMask_));
	else
	{
		jit_->LEA(32, EAX, MDisp(xaddr_, offset_ + suboffset));
		if (alignMask_ != 0xFFFFFFFF)
			jit_->AND(32, R(EAX), Imm32(alignMask_));
	}

#ifdef _M_IX86
	jit_->PUSH(EDX);
#endif
	if (!src.IsSimpleReg(EDX)) {
		jit_->MOV(32, R(EDX), src);
	}
	if (!g_Config.bIgnoreBadMemAccess) {
		jit_->MOV(32, M(&jit_->mips_->pc), Imm32(jit_->GetCompilerPC()));
	}
	// This is a special jit-ABI'd function.
	jit_->CALL(safeFunc);
#ifdef _M_IX86
	jit_->POP(EDX);
#endif
	needsCheck_ = true;
}
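
The address selection at the top can be restated in plain C++ (a sketch; xaddr_value stands for the value held in xaddr_, and alignMask_ clamps the address where the emulated access requires alignment):

// Hedged sketch of the value DoSlowWrite leaves in EAX: either the compile-time
// address plus sub-offset, or the cached address register plus offsets, both
// masked (a mask of 0xFFFFFFFF is the identity and is skipped when emitting).
static u32 SlowWriteAddress(u32 iaddr, u32 xaddr_value, s32 offset, int suboffset, u32 alignMask)
{
	if (iaddr != (u32)-1)
		return (iaddr + suboffset) & alignMask;
	return (u32)(xaddr_value + offset + suboffset) & alignMask;
}
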
Example 5
0
void GPRRegCache::Start(MIPSState *mips, MIPSComp::JitState *js, MIPSComp::JitOptions *jo, MIPSAnalyst::AnalysisResults &stats) {
#ifdef _M_X64
	if (allocationOrderR15[0] == INVALID_REG) {
		memcpy(allocationOrderR15, allocationOrder, sizeof(allocationOrder));
		allocationOrderR15[ARRAY_SIZE(allocationOrderR15) - 1] = R15;
	}
#endif

	this->mips = mips;
	for (int i = 0; i < NUM_X_REGS; i++) {
		xregs[i].free = true;
		xregs[i].dirty = false;
		xregs[i].allocLocked = false;
	}
	memset(regs, 0, sizeof(regs));
	OpArg base = GetDefaultLocation(MIPS_REG_ZERO);
	for (int i = 0; i < 32; i++) {
		regs[i].location = base;
		base.IncreaseOffset(sizeof(u32));
	}
	for (int i = 32; i < NUM_MIPS_GPRS; i++) {
		regs[i].location = GetDefaultLocation(MIPSGPReg(i));
	}
	SetImm(MIPS_REG_ZERO, 0);

	// todo: sort to find the most popular regs
	/*
	int maxPreload = 2;
	for (int i = 0; i < NUM_MIPS_GPRS; i++)
	{
		if (stats.numReads[i] > 2 || stats.numWrites[i] >= 2)
		{
			LoadToX64(i, true, false); //stats.firstRead[i] <= stats.firstWrite[i], false);
			maxPreload--;
			if (!maxPreload)
				break;
		}
	}*/
	// Find the top regs and preload them (load bursts aren't bad).
	// But only preload if the reg is written, or read >= 3 times.

	js_ = js;
	jo_ = jo;
}
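
Compared with the simpler Start() above, this version lays out only GPRs 0-31 contiguously, asks for every higher bookkeeping register individually, and pins $zero as an immediate. The last point rests on an architectural fact, sketched below (illustrative, not PPSSPP's API):

// Hedged sketch: MIPS $zero always reads as 0, so SetImm(MIPS_REG_ZERO, 0) lets
// reads fold to a compile-time constant and writes to it be discarded outright.
static bool IsArchitecturalZero(MIPSGPReg r)
{
	return r == MIPS_REG_ZERO;
}
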
Example 6
0
void FPURegCache::SetupInitialRegs() {
	for (int i = 0; i < NUM_X_FPREGS; i++) {
		memset(xregsInitial[i].mipsRegs, -1, sizeof(xregsInitial[i].mipsRegs));
		xregsInitial[i].dirty = false;
	}
	memset(regsInitial, 0, sizeof(regsInitial));
	OpArg base = GetDefaultLocation(0);
	for (int i = 0; i < 32; i++) {
		regsInitial[i].location = base;
		base.IncreaseOffset(sizeof(float));
	}
	for (int i = 32; i < 32 + 128; i++) {
		regsInitial[i].location = GetDefaultLocation(i);
	}
	base = GetDefaultLocation(32 + 128);
	for (int i = 32 + 128; i < NUM_MIPS_FPRS; i++) {
		regsInitial[i].location = base;
		base.IncreaseOffset(sizeof(float));
	}
}
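
The three loops carve the FPR index space into three ranges; a small sketch of the split implied by the bounds above (the meaning of the 160+ range is an assumption here):

// Hedged sketch: indices 0-31 are FPU registers in contiguous float slots,
// 32-159 are the 128 VFPU registers placed individually via GetDefaultLocation(),
// and 160 upward are contiguous again (presumably temporaries).
static bool UsesContiguousDefaultSlot(int i)
{
	return i < 32 || i >= 32 + 128;
}
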
Example 7
0
void DSPJitRegCache::writeReg(int dreg, OpArg arg)
{
    OpArg reg;
    getReg(dreg, reg, false);
    if (arg.IsImm())
    {
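        // Note: Dolphin's OpArg keeps immediate values in its offset field, hence the casts below.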
        switch(regs[dreg].size)
        {
        case 2:
            emitter.MOV(16, reg, Imm16((u16) arg.offset));
            break;
        case 4:
            emitter.MOV(32, reg, Imm32((u32) arg.offset));
            break;
#if _M_X86_64
        case 8:
            if ((u32) arg.offset == arg.offset)
            {
                emitter.MOV(64, reg, Imm32((u32) arg.offset));
            }
            else
            {
                emitter.MOV(64, reg, Imm64(arg.offset));
            }
            break;
#endif
        default:
            _assert_msg_(DSPLLE, 0, "unsupported memory size");
            break;
        }
    }
    else
    {
        switch(regs[dreg].size)
        {
        case 2:
            emitter.MOV(16, reg, arg);
            break;
        case 4:
            emitter.MOV(32, reg, arg);
            break;
#if _M_X86_64
        case 8:
            emitter.MOV(64, reg, arg);
            break;
#endif
        default:
            _assert_msg_(DSPLLE, 0, "unsupported memory size");
            break;
        }
    }
    putReg(dreg, true);
}
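
The 64-bit immediate branch picks the shorter encoding only when nothing is lost by truncating the value; the guard spelled out in plain C++:

// Hedged sketch of the guard `(u32) arg.offset == arg.offset`: true exactly when
// the upper 32 bits of the value are zero.
static bool FitsInU32(u64 value)
{
    return (u64)(u32)value == value;
}
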
Example 8
0
bool EmuCodeBlock::UnsafeLoadToReg(X64Reg reg_value, OpArg opAddress, int accessSize, s32 offset,
                                   bool signExtend, MovInfo* info)
{
  bool offsetAddedToAddress = false;
  OpArg memOperand;
  if (opAddress.IsSimpleReg())
  {
    // Deal with potential wraparound.  (This is just a heuristic, and it would
    // be more correct to actually mirror the first page at the end, but the
    // only case where it probably actually matters is JitIL turning adds into
    // offsets with the wrong sign, so whatever.  Since the original code
    // *could* try to wrap an address around, however, this is the correct
    // place to address the issue.)
    if ((u32)offset >= 0x1000)
    {
      // This method can potentially clobber the address if it shares a register
      // with the load target. In this case we can just subtract offset from the
      // register (see Jit64Base for this implementation).
      offsetAddedToAddress = (reg_value == opAddress.GetSimpleReg());

      LEA(32, reg_value, MDisp(opAddress.GetSimpleReg(), offset));
      opAddress = R(reg_value);
      offset = 0;
    }
    memOperand = MComplex(RMEM, opAddress.GetSimpleReg(), SCALE_1, offset);
  }
  else if (opAddress.IsImm())
  {
    MOV(32, R(reg_value), Imm32((u32)(opAddress.Imm32() + offset)));
    memOperand = MRegSum(RMEM, reg_value);
  }
  else
  {
    MOV(32, R(reg_value), opAddress);
    memOperand = MComplex(RMEM, reg_value, SCALE_1, offset);
  }

  LoadAndSwap(accessSize, reg_value, memOperand, signExtend, info);
  return offsetAddedToAddress;
}
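
The comment about wraparound is really about 32-bit guest arithmetic versus 64-bit host addressing; a minimal sketch of the distinction the >= 0x1000 special case preserves:

// Hedged sketch: guest addresses are 32 bits wide, so guest_addr + offset must
// wrap modulo 2^32 before the 64-bit RMEM base is added.  Folding the offset in
// with a 32-bit LEA (as above) keeps that wrap; passing a large displacement to
// MComplex would add it in 64-bit address arithmetic instead.
static u8* HostAddress(u8* mem_base, u32 guest_addr, u32 offset)
{
  u32 wrapped = guest_addr + offset;  // 32-bit wraparound happens here
  return mem_base + wrapped;
}
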
Example 9
0
int VertexLoaderX64::ReadVertex(OpArg data, u64 attribute, int format, int count_in, int count_out, bool dequantize, u8 scaling_exponent, AttributeFormat* native_format)
{
	static const __m128i shuffle_lut[5][3] = {
		{_mm_set_epi32(0xFFFFFFFFL, 0xFFFFFFFFL, 0xFFFFFFFFL, 0xFFFFFF00L),  // 1x u8
		 _mm_set_epi32(0xFFFFFFFFL, 0xFFFFFFFFL, 0xFFFFFF01L, 0xFFFFFF00L),  // 2x u8
		 _mm_set_epi32(0xFFFFFFFFL, 0xFFFFFF02L, 0xFFFFFF01L, 0xFFFFFF00L)}, // 3x u8
		{_mm_set_epi32(0xFFFFFFFFL, 0xFFFFFFFFL, 0xFFFFFFFFL, 0x00FFFFFFL),  // 1x s8
		 _mm_set_epi32(0xFFFFFFFFL, 0xFFFFFFFFL, 0x01FFFFFFL, 0x00FFFFFFL),  // 2x s8
		 _mm_set_epi32(0xFFFFFFFFL, 0x02FFFFFFL, 0x01FFFFFFL, 0x00FFFFFFL)}, // 3x s8
		{_mm_set_epi32(0xFFFFFFFFL, 0xFFFFFFFFL, 0xFFFFFFFFL, 0xFFFF0001L),  // 1x u16
		 _mm_set_epi32(0xFFFFFFFFL, 0xFFFFFFFFL, 0xFFFF0203L, 0xFFFF0001L),  // 2x u16
		 _mm_set_epi32(0xFFFFFFFFL, 0xFFFF0405L, 0xFFFF0203L, 0xFFFF0001L)}, // 3x u16
		{_mm_set_epi32(0xFFFFFFFFL, 0xFFFFFFFFL, 0xFFFFFFFFL, 0x0001FFFFL),  // 1x s16
		 _mm_set_epi32(0xFFFFFFFFL, 0xFFFFFFFFL, 0x0203FFFFL, 0x0001FFFFL),  // 2x s16
		 _mm_set_epi32(0xFFFFFFFFL, 0x0405FFFFL, 0x0203FFFFL, 0x0001FFFFL)}, // 3x s16
		{_mm_set_epi32(0xFFFFFFFFL, 0xFFFFFFFFL, 0xFFFFFFFFL, 0x00010203L),  // 1x float
		 _mm_set_epi32(0xFFFFFFFFL, 0xFFFFFFFFL, 0x04050607L, 0x00010203L),  // 2x float
		 _mm_set_epi32(0xFFFFFFFFL, 0x08090A0BL, 0x04050607L, 0x00010203L)}, // 3x float
	};
	static const __m128 scale_factors[32] = {
		_mm_set_ps1(1./(1u<< 0)), _mm_set_ps1(1./(1u<< 1)), _mm_set_ps1(1./(1u<< 2)), _mm_set_ps1(1./(1u<< 3)),
		_mm_set_ps1(1./(1u<< 4)), _mm_set_ps1(1./(1u<< 5)), _mm_set_ps1(1./(1u<< 6)), _mm_set_ps1(1./(1u<< 7)),
		_mm_set_ps1(1./(1u<< 8)), _mm_set_ps1(1./(1u<< 9)), _mm_set_ps1(1./(1u<<10)), _mm_set_ps1(1./(1u<<11)),
		_mm_set_ps1(1./(1u<<12)), _mm_set_ps1(1./(1u<<13)), _mm_set_ps1(1./(1u<<14)), _mm_set_ps1(1./(1u<<15)),
		_mm_set_ps1(1./(1u<<16)), _mm_set_ps1(1./(1u<<17)), _mm_set_ps1(1./(1u<<18)), _mm_set_ps1(1./(1u<<19)),
		_mm_set_ps1(1./(1u<<20)), _mm_set_ps1(1./(1u<<21)), _mm_set_ps1(1./(1u<<22)), _mm_set_ps1(1./(1u<<23)),
		_mm_set_ps1(1./(1u<<24)), _mm_set_ps1(1./(1u<<25)), _mm_set_ps1(1./(1u<<26)), _mm_set_ps1(1./(1u<<27)),
		_mm_set_ps1(1./(1u<<28)), _mm_set_ps1(1./(1u<<29)), _mm_set_ps1(1./(1u<<30)), _mm_set_ps1(1./(1u<<31)),
	};

	X64Reg coords = XMM0;

	int elem_size = 1 << (format / 2);
	int load_bytes = elem_size * count_in;
	OpArg dest = MDisp(dst_reg, m_dst_ofs);

	native_format->components = count_out;
	native_format->enable = true;
	native_format->offset = m_dst_ofs;
	native_format->type = VAR_FLOAT;
	native_format->integer = false;

	m_dst_ofs += sizeof(float) * count_out;

	if (attribute == DIRECT)
		m_src_ofs += load_bytes;

	if (cpu_info.bSSSE3)
	{
		if (load_bytes > 8)
			MOVDQU(coords, data);
		else if (load_bytes > 4)
			MOVQ_xmm(coords, data);
		else
			MOVD_xmm(coords, data);

		PSHUFB(coords, MPIC(&shuffle_lut[format][count_in - 1]));

		// Sign-extend.
		if (format == FORMAT_BYTE)
			PSRAD(coords, 24);
		if (format == FORMAT_SHORT)
			PSRAD(coords, 16);
	}
	else
	{
		// SSE2
		X64Reg temp = XMM1;
		switch (format)
		{
		case FORMAT_UBYTE:
			MOVD_xmm(coords, data);
			PXOR(temp, R(temp));
			PUNPCKLBW(coords, R(temp));
			PUNPCKLWD(coords, R(temp));
			break;
		case FORMAT_BYTE:
			MOVD_xmm(coords, data);
			PUNPCKLBW(coords, R(coords));
			PUNPCKLWD(coords, R(coords));
			PSRAD(coords, 24);
			break;
		case FORMAT_USHORT:
		case FORMAT_SHORT:
			switch (count_in)
			{
			case 1:
				LoadAndSwap(32, scratch3, data);
				MOVD_xmm(coords, R(scratch3));    // ......X.
				break;
			case 2:
				LoadAndSwap(32, scratch3, data);
				MOVD_xmm(coords, R(scratch3));    // ......XY
				PSHUFLW(coords, R(coords), 0x24); // ....Y.X.
				break;
			case 3:
				LoadAndSwap(64, scratch3, data);
				MOVQ_xmm(coords, R(scratch3));    // ....XYZ.
				PUNPCKLQDQ(coords, R(coords));    // ..Z.XYZ.
				PSHUFLW(coords, R(coords), 0xAC); // ..Z.Y.X.
				break;
			}
			if (format == FORMAT_SHORT)
				PSRAD(coords, 16);
			else
				PSRLD(coords, 16);
			break;
		case FORMAT_FLOAT:
			// Floats don't need to be scaled or converted,
			// so we can just load/swap/store them directly
			// and return early.
			// (In SSSE3 we still need to store them.)
			for (int i = 0; i < count_in; i++)
			{
				LoadAndSwap(32, scratch3, data);
				MOV(32, dest, R(scratch3));
				data.AddMemOffset(sizeof(float));
				dest.AddMemOffset(sizeof(float));

				// zfreeze
				if (native_format == &m_native_vtx_decl.position)
				{
					if (cpu_info.bSSE4_1)
					{
						PINSRD(coords, R(scratch3), i);
					}
					else
					{
						PINSRW(coords, R(scratch3), 2 * i + 0);
						SHR(32, R(scratch3), Imm8(16));
						PINSRW(coords, R(scratch3), 2 * i + 1);
					}
				}
			}

			// zfreeze
			if (native_format == &m_native_vtx_decl.position)
			{
				CMP(32, R(count_reg), Imm8(3));
				FixupBranch dont_store = J_CC(CC_A);
				LEA(32, scratch3, MScaled(count_reg, SCALE_4, -4));
				MOVUPS(MPIC(VertexLoaderManager::position_cache, scratch3, SCALE_4), coords);
				SetJumpTarget(dont_store);
			}
			return load_bytes;
		}
	}

	if (format != FORMAT_FLOAT)
	{
		CVTDQ2PS(coords, R(coords));

		if (dequantize && scaling_exponent)
			MULPS(coords, MPIC(&scale_factors[scaling_exponent]));
	}

	switch (count_out)
	{
	case 1: MOVSS(dest, coords); break;
	case 2: MOVLPS(dest, coords); break;
	case 3: MOVUPS(dest, coords); break;
	}

	// zfreeze
	if (native_format == &m_native_vtx_decl.position)
	{
		CMP(32, R(count_reg), Imm8(3));
		FixupBranch dont_store = J_CC(CC_A);
		LEA(32, scratch3, MScaled(count_reg, SCALE_4, -4));
		MOVUPS(MPIC(VertexLoaderManager::position_cache, scratch3, SCALE_4), coords);
		SetJumpTarget(dont_store);
	}

	return load_bytes;
}
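
The scale_factors table plus MULPS implement the dequantization step; per component the arithmetic is simply this (a scalar sketch of what the SIMD path computes):

// Hedged sketch: an integer component with `frac` fractional bits becomes a float
// by multiplying with 1 / 2^frac, exactly the constant MULPS pulls from
// scale_factors[scaling_exponent].
static float Dequantize(s32 value, u8 frac)
{
	return (float)value * (1.0f / (float)(1u << frac));
}
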
Example 10
0
void VertexLoaderX64::GenerateVertexLoader()
{
	BitSet32 regs = {src_reg, dst_reg, scratch1, scratch2, scratch3, count_reg, skipped_reg, base_reg};
	regs &= ABI_ALL_CALLEE_SAVED;
	ABI_PushRegistersAndAdjustStack(regs, 0);

	// Backup count since we're going to count it down.
	PUSH(32, R(ABI_PARAM3));

	// ABI_PARAM3 is one of the lower registers, so free it for scratch2.
	MOV(32, R(count_reg), R(ABI_PARAM3));

	MOV(64, R(base_reg), R(ABI_PARAM4));

	if (m_VtxDesc.Position & MASK_INDEXED)
		XOR(32, R(skipped_reg), R(skipped_reg));

	// TODO: load constants into registers outside the main loop

	const u8* loop_start = GetCodePtr();

	if (m_VtxDesc.PosMatIdx)
	{
		MOVZX(32, 8, scratch1, MDisp(src_reg, m_src_ofs));
		AND(32, R(scratch1), Imm8(0x3F));
		MOV(32, MDisp(dst_reg, m_dst_ofs), R(scratch1));

		// zfreeze
		CMP(32, R(count_reg), Imm8(3));
		FixupBranch dont_store = J_CC(CC_A);
		MOV(32, MPIC(VertexLoaderManager::position_matrix_index - 1, count_reg, SCALE_4), R(scratch1));
		SetJumpTarget(dont_store);

		m_native_components |= VB_HAS_POSMTXIDX;
		m_native_vtx_decl.posmtx.components = 4;
		m_native_vtx_decl.posmtx.enable = true;
		m_native_vtx_decl.posmtx.offset = m_dst_ofs;
		m_native_vtx_decl.posmtx.type = VAR_UNSIGNED_BYTE;
		m_native_vtx_decl.posmtx.integer = true;
		m_src_ofs += sizeof(u8);
		m_dst_ofs += sizeof(u32);
	}

	u32 texmatidx_ofs[8];
	const u64 tm[8] = {
		m_VtxDesc.Tex0MatIdx, m_VtxDesc.Tex1MatIdx, m_VtxDesc.Tex2MatIdx, m_VtxDesc.Tex3MatIdx,
		m_VtxDesc.Tex4MatIdx, m_VtxDesc.Tex5MatIdx, m_VtxDesc.Tex6MatIdx, m_VtxDesc.Tex7MatIdx,
	};
	for (int i = 0; i < 8; i++)
	{
		if (tm[i])
			texmatidx_ofs[i] = m_src_ofs++;
	}

	OpArg data = GetVertexAddr(ARRAY_POSITION, m_VtxDesc.Position);
	int pos_elements = 2 + m_VtxAttr.PosElements;
	ReadVertex(data, m_VtxDesc.Position, m_VtxAttr.PosFormat, pos_elements, pos_elements,
	           m_VtxAttr.ByteDequant, m_VtxAttr.PosFrac, &m_native_vtx_decl.position);

	if (m_VtxDesc.Normal)
	{
		static const u8 map[8] = { 7, 6, 15, 14 };
		u8 scaling_exponent = map[m_VtxAttr.NormalFormat];

		for (int i = 0; i < (m_VtxAttr.NormalElements ? 3 : 1); i++)
		{
			if (!i || m_VtxAttr.NormalIndex3)
			{
				data = GetVertexAddr(ARRAY_NORMAL, m_VtxDesc.Normal);
				int elem_size = 1 << (m_VtxAttr.NormalFormat / 2);
				data.AddMemOffset(i * elem_size * 3);
			}
			data.AddMemOffset(ReadVertex(data, m_VtxDesc.Normal, m_VtxAttr.NormalFormat, 3, 3,
			                             true, scaling_exponent, &m_native_vtx_decl.normals[i]));
		}

		m_native_components |= VB_HAS_NRM0;
		if (m_VtxAttr.NormalElements)
			m_native_components |= VB_HAS_NRM1 | VB_HAS_NRM2;
	}

	const u64 col[2] = { m_VtxDesc.Color0, m_VtxDesc.Color1 };
	for (int i = 0; i < 2; i++)
	{
		if (col[i])
		{
			data = GetVertexAddr(ARRAY_COLOR + i, col[i]);
			ReadColor(data, col[i], m_VtxAttr.color[i].Comp);
			m_native_components |= VB_HAS_COL0 << i;
			m_native_vtx_decl.colors[i].components = 4;
			m_native_vtx_decl.colors[i].enable = true;
			m_native_vtx_decl.colors[i].offset = m_dst_ofs;
			m_native_vtx_decl.colors[i].type = VAR_UNSIGNED_BYTE;
			m_native_vtx_decl.colors[i].integer = false;
			m_dst_ofs += 4;
		}
	}

	const u64 tc[8] = {
		m_VtxDesc.Tex0Coord, m_VtxDesc.Tex1Coord, m_VtxDesc.Tex2Coord, m_VtxDesc.Tex3Coord,
		m_VtxDesc.Tex4Coord, m_VtxDesc.Tex5Coord, m_VtxDesc.Tex6Coord, m_VtxDesc.Tex7Coord,
	};
	for (int i = 0; i < 8; i++)
	{
		int elements = m_VtxAttr.texCoord[i].Elements + 1;
		if (tc[i])
		{
			data = GetVertexAddr(ARRAY_TEXCOORD0 + i, tc[i]);
			u8 scaling_exponent = m_VtxAttr.texCoord[i].Frac;
			ReadVertex(data, tc[i], m_VtxAttr.texCoord[i].Format, elements, tm[i] ? 2 : elements,
			           m_VtxAttr.ByteDequant, scaling_exponent, &m_native_vtx_decl.texcoords[i]);
			m_native_components |= VB_HAS_UV0 << i;
		}
		if (tm[i])
		{
			m_native_components |= VB_HAS_TEXMTXIDX0 << i;
			m_native_vtx_decl.texcoords[i].components = 3;
			m_native_vtx_decl.texcoords[i].enable = true;
			m_native_vtx_decl.texcoords[i].type = VAR_FLOAT;
			m_native_vtx_decl.texcoords[i].integer = false;
			MOVZX(64, 8, scratch1, MDisp(src_reg, texmatidx_ofs[i]));
			if (tc[i])
			{
				CVTSI2SS(XMM0, R(scratch1));
				MOVSS(MDisp(dst_reg, m_dst_ofs), XMM0);
				m_dst_ofs += sizeof(float);
			}
			else
			{
				m_native_vtx_decl.texcoords[i].offset = m_dst_ofs;
				PXOR(XMM0, R(XMM0));
				CVTSI2SS(XMM0, R(scratch1));
				SHUFPS(XMM0, R(XMM0), 0x45); // 000X -> 0X00
				MOVUPS(MDisp(dst_reg, m_dst_ofs), XMM0);
				m_dst_ofs += sizeof(float) * 3;
			}
		}
	}

	// Prepare for the next vertex.
	ADD(64, R(dst_reg), Imm32(m_dst_ofs));
	const u8* cont = GetCodePtr();
	ADD(64, R(src_reg), Imm32(m_src_ofs));

	SUB(32, R(count_reg), Imm8(1));
	J_CC(CC_NZ, loop_start);

	// Get the original count.
	POP(32, R(ABI_RETURN));

	ABI_PopRegistersAndAdjustStack(regs, 0);

	if (m_VtxDesc.Position & MASK_INDEXED)
	{
		SUB(32, R(ABI_RETURN), R(skipped_reg));
		RET();

		SetJumpTarget(m_skip_vertex);
		ADD(32, R(skipped_reg), Imm8(1));
		JMP(cont);
	}
	else
	{
		RET();
	}

	m_VertexSize = m_src_ofs;
	m_native_vtx_decl.stride = m_dst_ofs;
}
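
The zfreeze guard that recurs here and in ReadVertex has a simple plain-C++ reading (a sketch; the cache slot size is inferred from the "- 1" base pointer and SCALE_4 indexing above):

// Hedged sketch of the zfreeze store: only while the remaining vertex count is
// 3, 2 or 1 is the value stashed, into slot (remaining - 1) of the cache.
static void ZfreezeStore(u32* cache, u32 remaining, u32 value)
{
	if (remaining <= 3)
		cache[remaining - 1] = value;
}
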
Example 11
0
void VertexLoaderX64::ReadColor(OpArg data, u64 attribute, int format)
{
	int load_bytes = 0;
	switch (format)
	{
		case FORMAT_24B_888:
		case FORMAT_32B_888x:
		case FORMAT_32B_8888:
			MOV(32, R(scratch1), data);
			if (format != FORMAT_32B_8888)
				OR(32, R(scratch1), Imm32(0xFF000000));
			MOV(32, MDisp(dst_reg, m_dst_ofs), R(scratch1));
			load_bytes = 3 + (format != FORMAT_24B_888);
			break;

		case FORMAT_16B_565:
			//                   RRRRRGGG GGGBBBBB
			// AAAAAAAA BBBBBBBB GGGGGGGG RRRRRRRR
			LoadAndSwap(16, scratch1, data);
			if (cpu_info.bBMI1 && cpu_info.bBMI2)
			{
				MOV(32, R(scratch2), Imm32(0x07C3F7C0));
				PDEP(32, scratch3, scratch1, R(scratch2));

				MOV(32, R(scratch2), Imm32(0xF8FCF800));
				PDEP(32, scratch1, scratch1, R(scratch2));
				ANDN(32, scratch2, scratch2, R(scratch3));

				OR(32, R(scratch1), R(scratch2));
			}
			else
			{
				MOV(32, R(scratch3), R(scratch1));
				SHL(32, R(scratch1), Imm8(16));
				AND(32, R(scratch1), Imm32(0xF8000000));

				MOV(32, R(scratch2), R(scratch3));
				SHL(32, R(scratch2), Imm8(13));
				AND(32, R(scratch2), Imm32(0x00FC0000));
				OR(32, R(scratch1), R(scratch2));

				SHL(32, R(scratch3), Imm8(11));
				AND(32, R(scratch3), Imm32(0x0000F800));
				OR(32, R(scratch1), R(scratch3));

				MOV(32, R(scratch2), R(scratch1));
				SHR(32, R(scratch1), Imm8(5));
				AND(32, R(scratch1), Imm32(0x07000700));
				OR(32, R(scratch1), R(scratch2));

				SHR(32, R(scratch2), Imm8(6));
				AND(32, R(scratch2), Imm32(0x00030000));
				OR(32, R(scratch1), R(scratch2));
			}

			OR(32, R(scratch1), Imm32(0x000000FF));
			SwapAndStore(32, MDisp(dst_reg, m_dst_ofs), scratch1);
			load_bytes = 2;
			break;

		case FORMAT_16B_4444:
			//                   RRRRGGGG BBBBAAAA
			// AAAAAAAA BBBBBBBB GGGGGGGG RRRRRRRR
			LoadAndSwap(16, scratch1, data);
			if (cpu_info.bBMI2)
			{
				MOV(32, R(scratch2), Imm32(0x0F0F0F0F));
				PDEP(32, scratch1, scratch1, R(scratch2));
			}
			else
			{
				MOV(32, R(scratch2), R(scratch1));
				SHL(32, R(scratch1), Imm8(8));
				OR(32, R(scratch1), R(scratch2));
				AND(32, R(scratch1), Imm32(0x00FF00FF));

				MOV(32, R(scratch2), R(scratch1));
				SHL(32, R(scratch1), Imm8(4));
				OR(32, R(scratch1), R(scratch2));
				AND(32, R(scratch1), Imm32(0x0F0F0F0F));

			}
			MOV(32, R(scratch2), R(scratch1));
			SHL(32, R(scratch1), Imm8(4));
			OR(32, R(scratch1), R(scratch2));
			SwapAndStore(32, MDisp(dst_reg, m_dst_ofs), scratch1);
			load_bytes = 2;
			break;

		case FORMAT_24B_6666:
			//          RRRRRRGG GGGGBBBB BBAAAAAA
			// AAAAAAAA BBBBBBBB GGGGGGGG RRRRRRRR
			data.AddMemOffset(-1); // subtract one from address so we can use a 32bit load and bswap
			LoadAndSwap(32, scratch1, data);
			if (cpu_info.bBMI2)
			{
				MOV(32, R(scratch2), Imm32(0xFCFCFCFC));
				PDEP(32, scratch1, scratch1, R(scratch2));
				MOV(32, R(scratch2), R(scratch1));
			}
			else
			{
				MOV(32, R(scratch3), R(scratch1));
				SHL(32, R(scratch1), Imm8(8));
				AND(32, R(scratch1), Imm32(0xFC000000));
				MOV(32, R(scratch2), R(scratch1));

				MOV(32, R(scratch1), R(scratch3));
				SHL(32, R(scratch1), Imm8(6));
				AND(32, R(scratch1), Imm32(0x00FC0000));
				OR(32, R(scratch2), R(scratch1));

				MOV(32, R(scratch1), R(scratch3));
				SHL(32, R(scratch1), Imm8(4));
				AND(32, R(scratch1), Imm32(0x0000FC00));
				OR(32, R(scratch2), R(scratch1));

				SHL(32, R(scratch3), Imm8(2));
				AND(32, R(scratch3), Imm32(0x000000FC));
				OR(32, R(scratch2), R(scratch3));

				MOV(32, R(scratch1), R(scratch2));
			}

			SHR(32, R(scratch1), Imm8(6));
			AND(32, R(scratch1), Imm32(0x03030303));
			OR(32, R(scratch1), R(scratch2));

			SwapAndStore(32, MDisp(dst_reg, m_dst_ofs), scratch1);
			load_bytes = 3;
			break;
	}
	if (attribute == DIRECT)
		m_src_ofs += load_bytes;
}
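
The FORMAT_16B_565 branch (both the PDEP path and the shift/mask fallback) computes the usual 565-to-8888 expansion, replicating each channel's high bits into its newly created low bits; per pixel it is equivalent to this scalar sketch:

// Hedged scalar sketch of the RGB565 -> RGBA8888 expansion done above.  The
// return value mirrors the register before SwapAndStore; the byte-swapping
// store then writes the bytes R, G, B, A in that order to the destination.
static u32 ExpandRGB565(u16 rgb565)
{
	u32 r = (rgb565 >> 11) & 0x1F;
	u32 g = (rgb565 >> 5) & 0x3F;
	u32 b = rgb565 & 0x1F;
	u32 r8 = (r << 3) | (r >> 2);
	u32 g8 = (g << 2) | (g >> 4);
	u32 b8 = (b << 3) | (b >> 2);
	return (r8 << 24) | (g8 << 16) | (b8 << 8) | 0xFF;
}
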
Example 12
0
void Jit64::cmpXX(UGeckoInstruction inst)
{
	// USES_CR
	INSTRUCTION_START
	JITDISABLE(bJITIntegerOff);
	int a = inst.RA;
	int b = inst.RB;
	int crf = inst.CRFD;

	bool merge_branch = false;
	int test_crf = js.next_inst.BI >> 2;
	// Check if the next instruction is a branch - if it is, merge the two.
	if (((js.next_inst.OPCD == 16 /* bcx */) ||
	    ((js.next_inst.OPCD == 19) && (js.next_inst.SUBOP10 == 528) /* bcctrx */) ||
	    ((js.next_inst.OPCD == 19) && (js.next_inst.SUBOP10 == 16) /* bclrx */)) &&
	    (js.next_inst.BO & BO_DONT_DECREMENT_FLAG) &&
	    !(js.next_inst.BO & BO_DONT_CHECK_CONDITION))
	{
			// Looks like a decent conditional branch that we can merge with.
			// It only tests CR, not CTR.
			if (test_crf == crf)
			{
				merge_branch = true;
			}
	}

	OpArg comparand;
	bool signedCompare;
	if (inst.OPCD == 31)
	{
		// cmp / cmpl
		gpr.Lock(a, b);
		comparand = gpr.R(b);
		signedCompare = (inst.SUBOP10 == 0);
	}
	else
	{
		gpr.Lock(a);
		if (inst.OPCD == 10)
		{
			//cmpli
			comparand = Imm32((u32)inst.UIMM);
			signedCompare = false;
		}
		else if (inst.OPCD == 11)
		{
			//cmpi
			comparand = Imm32((u32)(s32)(s16)inst.UIMM);
			signedCompare = true;
		}
		else
		{
			signedCompare = false; // silence compiler warning
			PanicAlert("cmpXX");
		}
	}

	if (gpr.R(a).IsImm() && comparand.IsImm())
	{
		// Both registers contain immediate values, so we can pre-compile the compare result
		u8 compareResult;
		if (signedCompare)
		{
			if ((s32)gpr.R(a).offset == (s32)comparand.offset)
				compareResult = CR_EQ;
			else if ((s32)gpr.R(a).offset > (s32)comparand.offset)
				compareResult = CR_GT;
			else
				compareResult = CR_LT;
		}
		else
		{
			if ((u32)gpr.R(a).offset == (u32)comparand.offset)
				compareResult = CR_EQ;
			else if ((u32)gpr.R(a).offset > (u32)comparand.offset)
				compareResult = CR_GT;
			else
				compareResult = CR_LT;
		}
		MOV(64, R(RAX), Imm64(PPCCRToInternal(compareResult)));
		MOV(64, M(&PowerPC::ppcState.cr_val[crf]), R(RAX));
		gpr.UnlockAll();

		if (merge_branch)
		{
			js.downcountAmount++;
			js.skipnext = true;

			int test_bit = 8 >> (js.next_inst.BI & 3);
			u8 conditionResult = (js.next_inst.BO & BO_BRANCH_IF_TRUE) ? test_bit : 0;
			if ((compareResult & test_bit) == conditionResult)
			{
				gpr.Flush();
				fpr.Flush();

				if (js.next_inst.OPCD == 16) // bcx
				{
					if (js.next_inst.LK)
						MOV(32, M(&LR), Imm32(js.compilerPC + 4));

					u32 destination;
					if (js.next_inst.AA)
						destination = SignExt16(js.next_inst.BD << 2);
					else
						destination = js.next_compilerPC + SignExt16(js.next_inst.BD << 2);
					WriteExit(destination);
				}
				else if ((js.next_inst.OPCD == 19) && (js.next_inst.SUBOP10 == 528)) // bcctrx
				{
					if (js.next_inst.LK)
						MOV(32, M(&LR), Imm32(js.compilerPC + 4));
					MOV(32, R(EAX), M(&CTR));
					AND(32, R(EAX), Imm32(0xFFFFFFFC));
					WriteExitDestInEAX();
				}
				else if ((js.next_inst.OPCD == 19) && (js.next_inst.SUBOP10 == 16)) // bclrx
				{
					MOV(32, R(EAX), M(&LR));
					if (js.next_inst.LK)
						MOV(32, M(&LR), Imm32(js.compilerPC + 4));
					WriteExitDestInEAX();
				}
				else
				{
					PanicAlert("WTF invalid branch");
				}
			}
			else
			{
				if (!analyzer.HasOption(PPCAnalyst::PPCAnalyzer::OPTION_CONDITIONAL_CONTINUE))
				{
					WriteExit(js.next_compilerPC + 4);
				}
			}
		}
	}
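
The two immediate forms differ only in how the 16-bit field is widened, which the casts above encode; spelled out (a minimal sketch):

// Hedged sketch of the operand widening used above: cmpi compares against the
// sign-extended immediate, cmpli against the zero-extended one.
static u32 CmpiOperand(u16 uimm)  { return (u32)(s32)(s16)uimm; }  // signed compare
static u32 CmpliOperand(u16 uimm) { return (u32)uimm; }            // unsigned compare
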
Example 13
0
void DSPJitRegCache::putReg(int reg, bool dirty)
{
    int real_reg = reg;
    if (regs[reg].parentReg != DSP_REG_NONE)
        real_reg = regs[reg].parentReg;

    OpArg oparg = regs[real_reg].loc;

    switch (reg)
    {
    case DSP_REG_ACH0:
    case DSP_REG_ACH1:
        if (dirty)
        {
            // no need to extend to full 64bit here until interpreter
            // uses that
            if (oparg.IsSimpleReg())
            {
                // register is already shifted correctly
                // (if at all)

                // sign extend from the bottom 8 bits.
                emitter.MOVSX(16, 8, oparg.GetSimpleReg(), oparg);
            }
            else if (oparg.IsImm())
            {
                // TODO: Immediates?
            }
            else
            {
                // this works on the memory, so use reg instead
                // of real_reg, since it has the right loc
                X64Reg tmp;
                getFreeXReg(tmp);
                // sign extend from the bottom 8 bits.
                emitter.MOVSX(16, 8, tmp, regs[reg].loc);
                emitter.MOV(16, regs[reg].loc, R(tmp));
                putXReg(tmp);
            }
        }
        break;
    case DSP_REG_ACC0_64:
    case DSP_REG_ACC1_64:
        if (dirty)
        {
            emitter.SHL(64, oparg, Imm8(64-40)); // sign extend
            emitter.SAR(64, oparg, Imm8(64-40));
        }
        break;
    default:
        break;
    }

    regs[real_reg].used = false;

    if (regs[real_reg].loc.IsSimpleReg())
    {
        regs[real_reg].dirty |= dirty;
        regs[real_reg].last_use_ctr = use_ctr;
        use_ctr++;
    }
}
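
The SHL/SAR pair in the accumulator case is the standard trick for sign-extending a 40-bit value held in a 64-bit register; in plain C++ it is:

// Hedged sketch: shifting left by 24 and arithmetic-shifting right by 24
// (64 - 40 = 24) replicates bit 39 into the top 24 bits, i.e. sign-extends
// the 40-bit accumulator to 64 bits.
static s64 SignExtend40(u64 acc)
{
    return (s64)(acc << 24) >> 24;
}
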