void BX_CPP_AttrRegparmN(1) BX_CPU_C::CMOVL_GwEwM(bxInstruction_c *i)
{
#if BX_CPU_LEVEL >= 6
  // CMOVL Gw,Ew (memory source): move if SF != OF.
  // The memory operand is always read, so an invalid address faults
  // even when the condition is false and no move takes place.
  BX_CPU_CALL_METHODR(i->ResolveModrm, (i));

  Bit16u src = read_virtual_word(i->seg(), RMAddr(i));

  // Condition false (SF == OF): destination register is left untouched.
  if (getB_SF() == getB_OF()) return;

  BX_WRITE_16BIT_REG(i->nnn(), src);
#else
  BX_INFO(("CMOVL_GwEw: --enable-cpu-level=6 required"));
  UndefinedOpcode(i);
#endif
}
void BX_CPU_C::CMOV_GdEd(bxInstruction_c *i)
{
#if (BX_CPU_LEVEL >= 6) || (BX_CPU_LEVEL_HACKED >= 6)
  // CMOVcc Gd,Ed: conditionally copy a 32-bit source into a general register.
  // Note: CMOV accesses a memory source operand (read), regardless
  // of whether condition is true or not. Thus, exceptions may
  // occur even if the MOV does not take place.
  bx_bool condition = 0;
  Bit32u op2_32;

  // Decode the condition from the opcode byte (0F 40 .. 0F 4F).
  switch (i->b1()) {
    // CMOV opcodes:
    case 0x140: condition = get_OF(); break;
    case 0x141: condition = !get_OF(); break;
    case 0x142: condition = get_CF(); break;
    case 0x143: condition = !get_CF(); break;
    case 0x144: condition = get_ZF(); break;
    case 0x145: condition = !get_ZF(); break;
    case 0x146: condition = get_CF() || get_ZF(); break;
    case 0x147: condition = !get_CF() && !get_ZF(); break;
    case 0x148: condition = get_SF(); break;
    case 0x149: condition = !get_SF(); break;
    case 0x14A: condition = get_PF(); break;
    case 0x14B: condition = !get_PF(); break;
    case 0x14C: condition = getB_SF() != getB_OF(); break;
    case 0x14D: condition = getB_SF() == getB_OF(); break;
    case 0x14E: condition = get_ZF() || (getB_SF() != getB_OF()); break;
    case 0x14F: condition = !get_ZF() && (getB_SF() == getB_OF()); break;
    default:
      BX_PANIC(("CMOV_GdEd: default case"));
  }

  if (i->modC0()) {
    op2_32 = BX_READ_32BIT_REG(i->rm());
  }
  else {
    /* pointer, segment address pair */
    read_virtual_dword(i->seg(), RMAddr(i), &op2_32);
  }

  if (condition) {
    BX_WRITE_32BIT_REGZ(i->nnn(), op2_32);
  }
  BX_CLEAR_64BIT_HIGH(i->nnn()); // always clear upper part of the register
#else
  // Fixed message: was "-enable-cpu-level=6" (single dash), inconsistent
  // with the "--enable-cpu-level" spelling used by the sibling handlers.
  BX_INFO(("CMOV_GdEd: --enable-cpu-level=6 required"));
  UndefinedOpcode(i);
#endif
}
void BX_CPU_C::LDS_GwMp(bxInstruction_c *i)
{
  // LDS Gw,Mp: load a far pointer from memory into DS:r16.
  if (i->modC0()) {
    // A register operand is illegal for LDS; only a memory far pointer works.
    BX_DEBUG(("LDS_GwMp: invalid use of LDS, must be memory reference!"));
    UndefinedOpcode(i);
  }

  Bit16u reg_16, ds;

  // Read the 16-bit offset, then the selector word that follows it.
  // Fixed: the address-of arguments were mojibake ("®_16" is the HTML
  // entity corruption of "&reg_16").
  read_virtual_word(i->seg(), RMAddr(i), &reg_16);
  read_virtual_word(i->seg(), RMAddr(i) + 2, &ds);

  // Load DS first: if the selector load faults, the destination
  // register remains unmodified.
  load_seg_reg(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS], ds);

  BX_WRITE_16BIT_REG(i->nnn(), reg_16);
}
void BX_CPP_AttrRegparmN(1) BX_CPU_C::MOV_EwSwR(bxInstruction_c *i)
{
  /* Illegal to use nonexisting segments */
  // Only six segment registers exist; encodings 6 and 7 are undefined.
  if (i->nnn() >= 6) {
    BX_INFO(("MOV_EwSw: using of nonexisting segment register %d", i->nnn()));
    UndefinedOpcode(i);
  }

  Bit16u selector = BX_CPU_THIS_PTR sregs[i->nnn()].selector.value;

  // 16-bit operand size writes only the low word of the destination;
  // 32-bit operand size zero-extends the selector into the full register.
  if (!i->os32L()) {
    BX_WRITE_16BIT_REG(i->rm(), selector);
  }
  else {
    BX_WRITE_32BIT_REGZ(i->rm(), selector);
  }
}
void BX_CPU_C::LES_GdMp(bxInstruction_c *i)
{
  // LES Gd,Mp: load a far pointer from memory into ES:r32.
  if (i->modC0()) {
    // A register operand is illegal for LES; only a memory far pointer works.
    BX_DEBUG(("LES_GdMp: invalid use of LES, must be memory reference!"));
    UndefinedOpcode(i);
  }

  Bit16u es;
  Bit32u reg_32;

  // Read the 32-bit offset, then the selector word that follows it.
  // Fixed: the address-of argument was mojibake ("®_32" is the HTML
  // entity corruption of "&reg_32").
  read_virtual_dword(i->seg(), RMAddr(i), &reg_32);
  read_virtual_word(i->seg(), RMAddr(i) + 4, &es);

  // Load ES first: if the selector load faults, the destination
  // register remains unmodified.
  load_seg_reg(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES], es);

  BX_WRITE_32BIT_REGZ(i->nnn(), reg_32);
}
void BX_CPU_C::LSS_GqMp(bxInstruction_c *i)
{
  // LSS Gq,Mp: load a far pointer from memory into SS:r64 (64-bit form).
  if (i->modC0()) {
    // A register operand is illegal for LSS; only a memory far pointer works.
    BX_DEBUG(("LSS_GqMp: invalid use of LSS, must be memory reference!"));
    UndefinedOpcode(i);
  }

  Bit64u reg_64;
  Bit16u ss;

  // Read the 64-bit offset, then the selector word that follows it.
  // Fixed: the address-of argument was mojibake ("®_64" is the HTML
  // entity corruption of "&reg_64").
  read_virtual_qword(i->seg(), RMAddr(i), &reg_64);
  read_virtual_word(i->seg(), RMAddr(i) + 8, &ss);

  // Load SS first: if the selector load faults, the destination
  // register remains unmodified.
  load_seg_reg(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS], ss);

  BX_WRITE_64BIT_REG(i->nnn(), reg_64);
}
void BX_CPP_AttrRegparmN(1) BX_CPU_C::CMPXCHG_EwGwR(bxInstruction_c *i)
{
#if BX_CPU_LEVEL >= 4
  // CMPXCHG Ew,Gw (register destination): compare AX with the destination.
  // Equal   -> destination <- source register.
  // Unequal -> AX <- destination.
  Bit16u dst = BX_READ_16BIT_REG(i->rm());
  Bit16u cmp = AX - dst;

  // Flags are updated exactly as by "CMP AX, dst".
  SET_FLAGS_OSZAPC_SUB_16(AX, dst, cmp);

  if (cmp != 0) {
    // Mismatch: the accumulator receives the destination value.
    AX = dst;
  }
  else {
    // Match: the destination receives the source register.
    BX_WRITE_16BIT_REG(i->rm(), BX_READ_16BIT_REG(i->nnn()));
  }
#else
  BX_INFO(("CMPXCHG_EwGw: not supported for cpu-level <= 3"));
  UndefinedOpcode(i);
#endif
}
void BX_CPU_C::BSWAP_ERX(bxInstruction_c *i)
{
#if (BX_CPU_LEVEL >= 4) || (BX_CPU_LEVEL_HACKED >= 4)
  // BSWAP r32: reverse the byte order of a 32-bit register.
  if (i->os32L() == 0) {
    BX_ERROR(("BSWAP with 16-bit opsize: undefined behavior !"));
  }

  Bit32u val32 = BX_READ_32BIT_REG(i->opcodeReg());

  // Reverse the four bytes with shift/mask instead of extracting
  // each byte into a separate variable.
  val32 = (val32 << 24)
        | ((val32 << 8) & 0x00ff0000)
        | ((val32 >> 8) & 0x0000ff00)
        |  (val32 >> 24);

  // zero extended write of the 32-bit result
  BX_WRITE_32BIT_REGZ(i->opcodeReg(), val32);
#else
  BX_INFO(("BSWAP_ERX: required CPU >= 4, use --enable-cpu-level=4 option"));
  UndefinedOpcode(i);
#endif
}
// CPUID: return processor identification/feature information selected by
// EAX (and ECX as a sub-leaf index when XSAVE support is compiled in).
// Results come from the precomputed cpuid_std_function / cpuid_ext_function
// tables; unknown leaves return all zeros.
void BX_CPP_AttrRegparmN(1) BX_CPU_C::CPUID(bxInstruction_c *i)
{
#if BX_CPU_LEVEL >= 4
  Bit32u function = EAX;
#if BX_SUPPORT_XSAVE
  Bit32u subfunction = ECX; // sub-leaf selector, used only for leaf 0xD below
#endif

  // Standard leaves: 0 .. 0x7FFFFFFF.
  if(function < 0x80000000) {
    if(function < MAX_STD_CPUID_FUNCTION) {
      RAX = BX_CPU_THIS_PTR cpuid_std_function[function].eax;
      RBX = BX_CPU_THIS_PTR cpuid_std_function[function].ebx;
      RCX = BX_CPU_THIS_PTR cpuid_std_function[function].ecx;
      RDX = BX_CPU_THIS_PTR cpuid_std_function[function].edx;
#if BX_SUPPORT_APIC
      if (function == 1) {
        // if MSR_APICBASE APIC Global Enable bit has been cleared,
        // the CPUID feature flag for the APIC is set to 0.
        if ((BX_CPU_THIS_PTR msr.apicbase & 0x800) == 0)
          RDX &= ~(1<<9); // APIC on chip
      }
#endif
#if BX_SUPPORT_XSAVE
      // Only sub-leaf 0 of leaf 0xD is populated; higher sub-leaves
      // report zeros.
      if (function == 0xD && subfunction > 0) {
        RAX = 0;
        RBX = 0;
        RCX = 0;
        RDX = 0;
      }
#endif
      return;
    }
  }
  else {
    // Extended leaves: rebase 0x80000000.. onto the extended table index.
    function -= 0x80000000;
    if(function < MAX_EXT_CPUID_FUNCTION) {
      RAX = BX_CPU_THIS_PTR cpuid_ext_function[function].eax;
      RBX = BX_CPU_THIS_PTR cpuid_ext_function[function].ebx;
      RCX = BX_CPU_THIS_PTR cpuid_ext_function[function].ecx;
      RDX = BX_CPU_THIS_PTR cpuid_ext_function[function].edx;
#if BX_SUPPORT_APIC
      // NOTE(review): here "function == 1" means extended leaf 0x80000001
      // (function was rebased above) — the APIC flag is masked in that
      // leaf's EDX as well.
      if (function == 1) {
        // if MSR_APICBASE APIC Global Enable bit has been cleared,
        // the CPUID feature flag for the APIC is set to 0.
        if ((BX_CPU_THIS_PTR msr.apicbase & 0x800) == 0)
          RDX &= ~(1<<9); // APIC on chip
      }
#endif
      return;
    }
  }

  // unknown CPUID function
  RAX = 0;
  RBX = 0;
  RCX = 0;
  RDX = 0;
#else
  BX_INFO(("CPUID: not available on < 486"));
  UndefinedOpcode(i);
#endif
}
/* 66 0F 3A 61 */
// PCMPESTRI (SSE4.2): packed compare explicit-length strings, return index.
// Compares the strings in op1 (xmm) and op2 (xmm/m128) with explicit
// lengths taken from RAX/EAX and RDX/EDX, aggregates per imm8, and
// returns the selected bit index of the result mask in ECX. Also sets
// OSZAPC from the aggregation result.
void BX_CPP_AttrRegparmN(1) BX_CPU_C::PCMPESTRI_VdqWdqIb(bxInstruction_c *i)
{
#if (BX_SUPPORT_SSE >= 5) || (BX_SUPPORT_SSE >= 4 && BX_SUPPORT_SSE_EXTENSION > 0)
  BX_CPU_THIS_PTR prepareSSE();

  BxPackedXmmRegister op1 = BX_READ_XMM_REG(i->nnn()), op2;
  Bit8u imm8 = i->Ib();

  /* op2 is a register or memory reference */
  if (i->modC0()) {
    op2 = BX_READ_XMM_REG(i->rm());
  }
  else {
    BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
    /* pointer, segment address pair */
    readVirtualDQwordAligned(i->seg(), RMAddr(i), (Bit8u *) &op2);
  }

  // compare all pairs of Ai, Bj
  bx_bool BoolRes[16][16];
  compare_strings(BoolRes, op1, op2, imm8);

  // imm8[0] selects element size: 1 -> 8 word elements, 0 -> 16 byte elements.
  unsigned len1, len2, num_elements = (imm8 & 0x1) ? 8 : 16;
  int index;

#if BX_SUPPORT_X86_64
  if (i->os64L()) {
    // 64-bit operand size: explicit lengths come from RAX/RDX.
    len1 = find_eos64(RAX, imm8);
    len2 = find_eos64(RDX, imm8);
  }
  else
#endif
  {
    len1 = find_eos32(EAX, imm8);
    len2 = find_eos32(EDX, imm8);
  }

  Bit16u result2 = aggregate(BoolRes, len1, len2, imm8);

  // The index of the first (or last, according to imm8[6]) set bit of result2
  // is returned to ECX. If no bits are set in IntRes2, ECX is set to 16 (8)
  if (imm8 & 0x40) {
    // The index returned to ECX is of the MSB in result2
    for (index=num_elements-1; index>=0; index--)
      if (result2 & (1<<index)) break;
    if (index < 0) index = num_elements; // no bit set
  }
  else {
    // The index returned to ECX is of the LSB in result2
    for (index=0; index<(int)num_elements; index++)
      if (result2 & (1<<index)) break;
  }
  RCX = index;

  // CF: any match; SF/ZF: string 1/2 shorter than full register;
  // OF: bit 0 of the result mask. AF/PF are cleared.
  Bit32u flags = 0;
  if (result2 != 0) flags |= EFlagsCFMask;
  if (len1 < num_elements) flags |= EFlagsSFMask;
  if (len2 < num_elements) flags |= EFlagsZFMask;
  if (result2 & 0x1) flags |= EFlagsOFMask;
  setEFlagsOSZAPC(flags);
#else
  BX_INFO(("PCMPESTRI_VdqWdqIb: required SSE4.2, use --enable-sse and --enable-sse-extension options"));
  UndefinedOpcode(i);
#endif
}
/* 66 0F 3A 60 */
// PCMPESTRM (SSE4.2): packed compare explicit-length strings, return mask.
// Compares the strings in op1 (xmm) and op2 (xmm/m128) with explicit
// lengths taken from RAX/EAX and RDX/EDX, aggregates per imm8, and stores
// the result mask in XMM0 — either zero-extended (imm8[6]=0) or expanded
// to a byte/word mask (imm8[6]=1). Also sets OSZAPC from the result.
void BX_CPP_AttrRegparmN(1) BX_CPU_C::PCMPESTRM_VdqWdqIb(bxInstruction_c *i)
{
#if (BX_SUPPORT_SSE >= 5) || (BX_SUPPORT_SSE >= 4 && BX_SUPPORT_SSE_EXTENSION > 0)
  BX_CPU_THIS_PTR prepareSSE();

  BxPackedXmmRegister op1 = BX_READ_XMM_REG(i->nnn()), op2, result;
  Bit8u imm8 = i->Ib();

  /* op2 is a register or memory reference */
  if (i->modC0()) {
    op2 = BX_READ_XMM_REG(i->rm());
  }
  else {
    BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
    /* pointer, segment address pair */
    readVirtualDQwordAligned(i->seg(), RMAddr(i), (Bit8u *) &op2);
  }

  // compare all pairs of Ai, Bj
  bx_bool BoolRes[16][16];
  compare_strings(BoolRes, op1, op2, imm8);

  // imm8[0] selects element size: 1 -> 8 word elements, 0 -> 16 byte elements.
  unsigned len1, len2, num_elements = (imm8 & 0x1) ? 8 : 16;

#if BX_SUPPORT_X86_64
  if (i->os64L()) {
    // 64-bit operand size: explicit lengths come from RAX/RDX.
    len1 = find_eos64(RAX, imm8);
    len2 = find_eos64(RDX, imm8);
  }
  else
#endif
  {
    len1 = find_eos32(EAX, imm8);
    len2 = find_eos32(EDX, imm8);
  }

  Bit16u result2 = aggregate(BoolRes, len1, len2, imm8);

  // As defined by imm8[6], result2 is then either stored to the least
  // significant bits of XMM0 (zero extended to 128 bits) or expanded
  // into a byte/word-mask and then stored to XMM0
  if (imm8 & 0x40) {
    if (num_elements == 8) {
      // Word elements: each result bit expands to 0xFFFF or 0.
      for (int index = 0; index < 8; index++)
        result.xmm16u(index) = (result2 & (1<<index)) ? 0xffff : 0;
    }
    else {  // num_elements = 16
      // Byte elements: each result bit expands to 0xFF or 0.
      for (int index = 0; index < 16; index++)
        result.xmmubyte(index) = (result2 & (1<<index)) ? 0xff : 0;
    }
  }
  else {
    result.xmm64u(1) = 0;
    result.xmm64u(0) = (Bit64u) result2;
  }

  // CF: any match; SF/ZF: string 1/2 shorter than full register;
  // OF: bit 0 of the result mask. AF/PF are cleared.
  Bit32u flags = 0;
  if (result2 != 0) flags |= EFlagsCFMask;
  if (len1 < num_elements) flags |= EFlagsSFMask;
  if (len2 < num_elements) flags |= EFlagsZFMask;
  if (result2 & 0x1) flags |= EFlagsOFMask;
  setEFlagsOSZAPC(flags);

  BX_WRITE_XMM_REG(0, result); /* store result XMM0 */
#else
  BX_INFO(("PCMPESTRM_VdqWdqIb: required SSE4.2, use --enable-sse and --enable-sse-extension options"));
  UndefinedOpcode(i);
#endif
}