void X86X64FuncDecl::reset() { uint32_t i; _convention = kFuncConvNone; _calleePopsStack = false; _direction = kFuncDirRtl; _reserved0 = 0; _argCount = 0; _retCount = 0; _argStackSize = 0; _redZoneSize = 0; _spillZoneSize = 0; for (i = 0; i < ASMJIT_ARRAY_SIZE(_argList); i++) { _argList[i].reset(); } _retList[0].reset(); _retList[1].reset(); _used.reset(); _passed.reset(); _preserved.reset(); ::memset(_passedOrderGp, kInvalidReg, ASMJIT_ARRAY_SIZE(_passedOrderGp)); ::memset(_passedOrderXmm, kInvalidReg, ASMJIT_ARRAY_SIZE(_passedOrderXmm)); }
//! \internal
//!
//! Creates a new virtual variable of `vType` in `var`, optionally formatting
//! its name from `name` + `ap`. On failure `var` is reset and an error code
//! is returned.
Error X86Compiler::_newVar(Var* var, uint32_t vType, const char* name, va_list ap) {
  ASMJIT_ASSERT(vType < kX86VarTypeCount);

  // Map the portable variable type to the target-specific one.
  vType = _targetVarMapping[vType];
  ASMJIT_ASSERT(vType != kInvalidVar);

  // The assertion won't be compiled in release build, however, we want to
  // check this anyway.
  if (vType == kInvalidVar) {
    static_cast<X86Var*>(var)->reset();
    return kErrorInvalidArgument;
  }

  const X86VarInfo& vInfo = _x86VarInfo[vType];
  char buf[64];

  // Format the name if `ap` is given.
  // NOTE(review): testing a `va_list` with `if (ap)` is non-portable --
  // `va_list` may be an array type (then always truthy) or a non-boolean
  // struct; confirm callers always pass a valid list or use an explicit flag.
  if (ap) {
    vsnprintf(buf, ASMJIT_ARRAY_SIZE(buf), name, ap);
    // Ensure NUL termination even if the formatted name was truncated.
    buf[ASMJIT_ARRAY_SIZE(buf) - 1] = '\0';
    name = buf;
  }

  VarData* vd = _newVd(vType, vInfo.getSize(), vInfo.getClass(), name);
  if (vd == NULL) {
    static_cast<X86Var*>(var)->reset();
    return getLastError();
  }

  // Initialize the operand as a packed variable operand bound to `vd`.
  var->_init_packed_op_sz_w0_id(kOperandTypeVar, vInfo.getSize(), vInfo.getReg() << 8, vd->getId());
  var->_vreg.vType = vType;
  return kErrorOk;
}
void Logger::logBinary(uint32_t style, const void* data, size_t size) {
  // Emits binary `data` as ".data " lines, up to 16 bytes per line, each
  // byte rendered as two uppercase hex digits.
  static const char prefix[] = ".data ";
  static const char hex[16] = {
    '0', '1', '2', '3', '4', '5', '6', '7',
    '8', '9', 'A', 'B', 'C', 'D', 'E', 'F'
  };

  const uint8_t* src = static_cast<const uint8_t*>(data);
  size_t remaining = size;

  char line[128];
  ::memcpy(line, prefix, ASMJIT_ARRAY_SIZE(prefix) - 1);

  while (remaining) {
    // Take at most 16 bytes for this line.
    uint32_t count = static_cast<uint32_t>(IntUtil::iMin<size_t>(remaining, 16));
    remaining -= count;

    char* out = line + (ASMJIT_ARRAY_SIZE(prefix) - 1);
    do {
      uint32_t byte = *src++;
      *out++ = hex[byte >> 4];
      *out++ = hex[byte & 15];
    } while (--count);

    *out++ = '\n';
    logString(style, line, static_cast<size_t>(out - line));
  }
}
Error X86Compiler::_newVar(Var* var, uint32_t vType, const char* fmt, va_list ap) noexcept { char name[64]; vsnprintf(name, ASMJIT_ARRAY_SIZE(name), fmt, ap); name[ASMJIT_ARRAY_SIZE(name) - 1] = '\0'; return _newVar(var, vType, name); }
//! \internal //! //! Detect ARM CPU features on Linux. //! //! The detection is based on `getauxval()`. static void armDetectCpuInfoOnLinux(CpuInfo* cpuInfo) noexcept { #if ASMJIT_ARCH_ARM32 cpuInfo->setArch(kArchArm32); // `AT_HWCAP` provides ARMv7 (and less) related flags. static const LinuxHWCapMapping hwCapMapping[] = { { /* HWCAP_VFPv3 */ (1 << 13), CpuInfo::kArmFeatureVFP3 }, { /* HWCAP_VFPv4 */ (1 << 16), CpuInfo::kArmFeatureVFP4 }, { /* HWCAP_IDIVA */ (3 << 17), CpuInfo::kArmFeatureIDIV }, { /* HWCAP_VFPD32 */ (1 << 19), CpuInfo::kArmFeatureVFP_D32 }, { /* HWCAP_NEON */ (1 << 12), CpuInfo::kArmFeatureNEON }, { /* HWCAP_EDSP */ (1 << 7), CpuInfo::kArmFeatureDSP } }; armDetectHWCaps(cpuInfo, AT_HWCAP, hwCapMapping, ASMJIT_ARRAY_SIZE(hwCapMapping)); // VFP3 implies VFP2. if (cpuInfo->hasFeature(CpuInfo::kArmFeatureVFP3)) cpuInfo->addFeature(CpuInfo::kArmFeatureVFP2); // VFP2 implies ARMv6. if (cpuInfo->hasFeature(CpuInfo::kArmFeatureVFP2)) cpuInfo->addFeature(CpuInfo::kArmFeatureV6); // VFP3 or NEON implies ARMv7. if (cpuInfo->hasFeature(CpuInfo::kArmFeatureVFP3) || cpuInfo->hasFeature(CpuInfo::kArmFeatureNEON)) cpuInfo->addFeature(CpuInfo::kArmFeatureV7); // `AT_HWCAP2` provides ARMv8 related flags. 
static const LinuxHWCapMapping hwCap2Mapping[] = { { /* HWCAP2_AES */ (1 << 0), CpuInfo::kArmFeatureAES }, { /* HWCAP2_CRC32 */ (1 << 4), CpuInfo::kArmFeatureCRC32 }, { /* HWCAP2_PMULL */ (1 << 1), CpuInfo::kArmFeaturePMULL }, { /* HWCAP2_SHA1 */ (1 << 2), CpuInfo::kArmFeatureSHA1 }, { /* HWCAP2_SHA2 */ (1 << 3), CpuInfo::kArmFeatureSHA256 } }; armDetectHWCaps(cpuInfo, AT_HWCAP2, hwCap2Mapping, ASMJIT_ARRAY_SIZE(hwCapMapping2)); if (cpuInfo->hasFeature(CpuInfo::kArmFeatureAES ) || cpuInfo->hasFeature(CpuInfo::kArmFeatureCRC32 ) || cpuInfo->hasFeature(CpuInfo::kArmFeaturePMULL ) || cpuInfo->hasFeature(CpuInfo::kArmFeatureSHA1 ) || cpuInfo->hasFeature(CpuInfo::kArmFeatureSHA256)) { cpuInfo->addFeature(CpuInfo::kArmFeatureV8); } #else cpuInfo->setArch(kArchArm64); armPopulateBaseline64Features(cpuInfo); // `AT_HWCAP` provides ARMv8 related flags. static const LinuxHWCapMapping hwCapMapping[] = { { /* HWCAP_ASIMD */ (1 << 1), CpuInfo::kArmFeatureNEON }, { /* HWCAP_AES */ (1 << 3), CpuInfo::kArmFeatureAES }, { /* HWCAP_CRC32 */ (1 << 7), CpuInfo::kArmFeatureCRC32 }, { /* HWCAP_PMULL */ (1 << 4), CpuInfo::kArmFeaturePMULL }, { /* HWCAP_SHA1 */ (1 << 5), CpuInfo::kArmFeatureSHA1 }, { /* HWCAP_SHA2 */ (1 << 6), CpuInfo::kArmFeatureSHA256 } { /* HWCAP_ATOMICS */ (1 << 8), CpuInfo::kArmFeatureAtomics64 } };
void Logger::setIndentation(const char* indentation) {
  // Reset the stored indentation first; because at most size-1 characters
  // are copied over the zeroed buffer, it always stays NUL-terminated.
  ::memset(_indentation, 0, ASMJIT_ARRAY_SIZE(_indentation));

  if (indentation == NULL)
    return;

  size_t n = StringUtil::nlen(indentation, ASMJIT_ARRAY_SIZE(_indentation) - 1);
  ::memcpy(_indentation, indentation, n);
}
Error StringBuilder::_opVFormat(uint32_t op, const char* fmt, va_list ap) noexcept { char buf[1024]; vsnprintf(buf, ASMJIT_ARRAY_SIZE(buf), fmt, ap); buf[ASMJIT_ARRAY_SIZE(buf) - 1] = '\0'; return _opString(op, buf); }
void Logger::setInstructionPrefix(const char* prefix) {
  // Clear the old prefix; copying at most size-1 characters over the zeroed
  // buffer keeps `_instructionPrefix` NUL-terminated.
  memset(_instructionPrefix, 0, ASMJIT_ARRAY_SIZE(_instructionPrefix));

  if (prefix == NULL)
    return;

  size_t n = strnlen(prefix, ASMJIT_ARRAY_SIZE(_instructionPrefix) - 1);
  memcpy(_instructionPrefix, prefix, n);
}
void X86FuncDecl::reset() { uint32_t i; // -------------------------------------------------------------------------- // [Core] // -------------------------------------------------------------------------- _returnType = kVarTypeInvalid; _argumentsCount = 0; _reserved0[0] = 0; _reserved0[1] = 0; for (i = 0; i < ASMJIT_ARRAY_SIZE(_arguments); i++) _arguments[i].reset(); _argumentsStackSize = 0; _gpArgumentsMask = 0x0; _mmArgumentsMask = 0x0; _xmmArgumentsMask = 0x0; // -------------------------------------------------------------------------- // [Convention] // -------------------------------------------------------------------------- _convention = kFuncConvNone; _calleePopsStack = false; _argumentsDirection = kFuncArgsRTL; _reserved1 = 0; for (i = 0; i < ASMJIT_ARRAY_SIZE(_gpList); i++) _gpList[i] = kRegIndexInvalid; for (i = 0; i < ASMJIT_ARRAY_SIZE(_xmmList); i++) _xmmList[i] = kRegIndexInvalid; _gpListMask = 0x0; _mmListMask = 0x0; _xmmListMask = 0x0; _gpPreservedMask = 0x0; _mmPreservedMask = 0x0; _xmmPreservedMask = 0x0; }
void ConstPool::fill(void* dst) const { // Clears possible gaps, asmjit should never emit garbage to the output. ::memset(dst, 0, _size); ConstPoolFill filler(static_cast<uint8_t*>(dst), 1); for (size_t i = 0; i < ASMJIT_ARRAY_SIZE(_tree); i++) { _tree[i].iterate(filler); filler._dataSize <<= 1; } }
void ConstPool::reset() {
  // Clear each size bucket and drop its gap list.
  for (size_t bucket = 0; bucket < ASMJIT_ARRAY_SIZE(_tree); bucket++) {
    _tree[bucket].reset();
    _gaps[bucket] = nullptr;
  }

  _gapPool = nullptr;
  _size = 0;
  _alignment = 0;
}
void Compiler::rename(Var& var, const char* fmt, ...) noexcept {
  // Renames `var` using a printf-like format; a null/empty format resets
  // the name to `noName`. Unregistered variables are ignored.
  if (var.getId() == kInvalidValue)
    return;

  VarData* vd = getVdById(var.getId());
  vd->_name = noName;

  if (fmt == nullptr || fmt[0] == '\0')
    return;

  char nameBuf[64];
  va_list ap;

  va_start(ap, fmt);
  vsnprintf(nameBuf, ASMJIT_ARRAY_SIZE(nameBuf), fmt, ap);
  nameBuf[ASMJIT_ARRAY_SIZE(nameBuf) - 1] = '\0';
  // The formatted name is copied into compiler-owned storage.
  vd->_name = _stringAllocator.sdup(nameBuf);
  va_end(ap);
}
ASMJIT_FAVOR_SIZE void ArchInfo::init(uint32_t type, uint32_t subType) noexcept {
  // Out-of-range architecture types fall back to table index 0.
  uint32_t index = (type < ASMJIT_ARRAY_SIZE(archInfoTable)) ? type : uint32_t(0);

  // Make sure the `archInfoTable` array is correctly indexed.
  _signature = archInfoTable[index];
  ASMJIT_ASSERT(_type == index);

  // Even if the architecture is not known we setup its type and sub-type,
  // however, such architecture is not really useful.
  _type = type;
  _subType = subType;
}
ConstPool::ConstPool(Zone* zone) {
  _zone = zone;

  // Each successive tree level handles constants twice as large, starting
  // at one byte.
  size_t blockSize = 1;
  for (size_t bucket = 0; bucket < ASMJIT_ARRAY_SIZE(_tree); bucket++) {
    _tree[bucket].setDataSize(blockSize);
    _gaps[bucket] = nullptr;
    blockSize <<= 1;
  }

  _gapPool = nullptr;
  _size = 0;
  _alignment = 0;
}
void X86CpuUtil::detect(X86CpuInfo* cpuInfo) {
  X86CpuId regs;

  uint32_t i;
  uint32_t maxId;

  // Clear everything except the '_size' member.
  // NOTE(review): only `sizeof(CpuInfo)` bytes of an `X86CpuInfo` are
  // cleared -- correct only if all X86-specific members are initialized
  // below; confirm against the struct layout.
  ::memset(reinterpret_cast<uint8_t*>(cpuInfo) + sizeof(uint32_t),
           0, sizeof(CpuInfo) - sizeof(uint32_t));

  // Fill safe defaults.
  cpuInfo->_hwThreadsCount = CpuInfo::detectHwThreadsCount();

  // --------------------------------------------------------------------------
  // [CPUID EAX=0x00000000]
  // --------------------------------------------------------------------------

  // Get vendor string/id.
  // FIX: the `&regs` arguments in this function were mis-encoded (mojibake
  // from a bad charset conversion); restored to `&regs`.
  callCpuId(0, 0, &regs);

  maxId = regs.eax;
  ::memcpy(cpuInfo->_vendorString, &regs.ebx, 4);
  ::memcpy(cpuInfo->_vendorString + 4, &regs.edx, 4);
  ::memcpy(cpuInfo->_vendorString + 8, &regs.ecx, 4);

  // Match the 12-byte vendor string against the known-vendor table.
  for (i = 0; i < ASMJIT_ARRAY_SIZE(x86CpuVendorList); i++) {
    if (x86CpuVendorEq(x86CpuVendorList[i], cpuInfo->_vendorString)) {
      cpuInfo->_vendorId = x86CpuVendorList[i].id;
      break;
    }
  }

  // --------------------------------------------------------------------------
  // [CPUID EAX=0x00000001]
  // --------------------------------------------------------------------------

  // Get feature flags in ecx/edx and family/model in eax.
  callCpuId(1, 0, &regs);

  // Fill family and model fields.
  cpuInfo->_family   = (regs.eax >> 8) & 0x0F;
  cpuInfo->_model    = (regs.eax >> 4) & 0x0F;
  cpuInfo->_stepping = (regs.eax     ) & 0x0F;

  // Use extended family and model fields.
  if (cpuInfo->_family == 0x0F) {
    cpuInfo->_family += ((regs.eax >> 20) & 0xFF);
    cpuInfo->_model  += ((regs.eax >> 16) & 0x0F) << 4;
  }
char* Zone::sformat(const char* fmt, ...) noexcept {
  // Formats into a stack buffer and returns a zone-allocated copy that
  // includes the terminating NUL. Returns nullptr when `fmt` is null or
  // formatting fails; output longer than the buffer is truncated.
  if (fmt == nullptr)
    return nullptr;

  char buf[512];
  va_list ap;

  va_start(ap, fmt);
  int ret = vsnprintf(buf, ASMJIT_ARRAY_SIZE(buf) - 1, fmt, ap);
  va_end(ap);

  // FIX: `vsnprintf` returns a negative value on error and the would-be
  // (untruncated) length on truncation. The original code stored the result
  // in a size_t and did `buf[len++] = 0` unconditionally, which can write
  // far out of bounds. Fail on error and clamp on truncation instead.
  if (ret < 0)
    return nullptr;

  size_t len = static_cast<size_t>(ret);
  if (len >= ASMJIT_ARRAY_SIZE(buf) - 1)
    len = ASMJIT_ARRAY_SIZE(buf) - 2;
  buf[len++] = 0;

  return static_cast<char*>(dup(buf, len));
}
void CpuUtil::detect(CpuInfo* cpuInfo) {
  CpuId regs;

  uint32_t i;
  uint32_t maxId;

  // Clear everything except the '_size' member.
  ::memset(reinterpret_cast<uint8_t*>(cpuInfo) + sizeof(uint32_t),
           0, sizeof(BaseCpuInfo) - sizeof(uint32_t));

  // Fill safe defaults.
  ::memcpy(cpuInfo->_vendorString, "Unknown", 8);
  cpuInfo->_coresCount = BaseCpuInfo::detectNumberOfCores();

  // Get vendor string/id.
  // FIX: the `&regs` arguments in this function were mis-encoded (mojibake
  // from a bad charset conversion); restored to `&regs`.
  callCpuId(0, 0, &regs);

  maxId = regs.eax;
  ::memcpy(cpuInfo->_vendorString, &regs.ebx, 4);
  ::memcpy(cpuInfo->_vendorString + 4, &regs.edx, 4);
  ::memcpy(cpuInfo->_vendorString + 8, &regs.ecx, 4);

  // Match the 12-byte vendor string against the known-vendor table.
  for (i = 0; i < ASMJIT_ARRAY_SIZE(cpuVendorTable); i++) {
    if (cpuVendorEq(cpuVendorTable[i], cpuInfo->_vendorString)) {
      cpuInfo->_vendorId = cpuVendorTable[i].id;
      break;
    }
  }

  // Get feature flags in ecx/edx and family/model in eax.
  callCpuId(1, 0, &regs);

  // Fill family and model fields.
  cpuInfo->_family   = (regs.eax >> 8) & 0x0F;
  cpuInfo->_model    = (regs.eax >> 4) & 0x0F;
  cpuInfo->_stepping = (regs.eax     ) & 0x0F;

  // Use extended family and model fields.
  if (cpuInfo->_family == 0x0F) {
    cpuInfo->_family += ((regs.eax >> 20) & 0xFF);
    cpuInfo->_model  += ((regs.eax >> 16) & 0x0F) << 4;
  }
//! \internal //! //! Detect ARM CPU features on Windows. //! //! The detection is based on `IsProcessorFeaturePresent()` API call. static void armDetectCpuInfoOnWindows(CpuInfo* cpuInfo) noexcept { #if ASMJIT_ARCH_ARM32 cpuInfo->setArch(kArchArm32); // Windows for ARM requires at least ARMv7 with DSP extensions. cpuInfo->addFeature(CpuInfo::kArmFeatureV6); cpuInfo->addFeature(CpuInfo::kArmFeatureV7); cpuInfo->addFeature(CpuInfo::kArmFeatureDSP); // Windows for ARM requires VFP3. cpuInfo->addFeature(CpuInfo::kArmFeatureVFP2); cpuInfo->addFeature(CpuInfo::kArmFeatureVFP3); // Windows for ARM requires and uses THUMB2. cpuInfo->addFeature(CpuInfo::kArmFeatureTHUMB); cpuInfo->addFeature(CpuInfo::kArmFeatureTHUMB2); #else cpuInfo->setArch(kArchArm64); armPopulateBaseline64Features(cpuInfo); #endif // Windows for ARM requires NEON. cpuInfo->addFeature(CpuInfo::kArmFeatureNEON); // Detect additional CPU features by calling `IsProcessorFeaturePresent()`. struct WinPFPMapping { uint32_t pfpId, featureId; }; static const WinPFPMapping mapping[] = { { PF_ARM_FMAC_INSTRUCTIONS_AVAILABLE , CpuInfo::kArmFeatureVFP4 }, { PF_ARM_VFP_32_REGISTERS_AVAILABLE , CpuInfo::kArmFeatureVFP_D32 }, { PF_ARM_DIVIDE_INSTRUCTION_AVAILABLE, CpuInfo::kArmFeatureIDIV }, { PF_ARM_64BIT_LOADSTORE_ATOMIC , CpuInfo::kArmFeatureAtomics64 } }; for (uint32_t i = 0; i < ASMJIT_ARRAY_SIZE(mapping); i++) if (::IsProcessorFeaturePresent(mapping[i].pfpId)) cpuInfo->addFeature(mapping[i].featureId); }
//! \internal
//!
//! Initializes the argument/return information of `self` for a function
//! returning `ret` and taking `argList[0..argCount)` on architecture `arch`.
//! Registers are assigned from the passing order previously configured by
//! the calling-convention setup; the remaining arguments receive negative
//! stack offsets that are rebased to positive values at the end.
static Error X86X64FuncDecl_initFunc(X86X64FuncDecl* self, uint32_t arch,
  uint32_t ret, const uint32_t* argList, uint32_t argCount) {

  ASMJIT_ASSERT(argCount <= kFuncArgCount);

  uint32_t conv = self->_convention;
  uint32_t regSize = (arch == kArchX86) ? 4 : 8;

  int32_t i = 0;
  int32_t gpPos = 0;
  int32_t xmmPos = 0;
  int32_t stackOffset = 0;

  // Table mapping portable var types to arch-specific ones.
  // NOTE(review): if `arch` matches neither build target, `varMapping`
  // stays uninitialized -- callers must pass a supported architecture.
  const uint8_t* varMapping;

#if defined(ASMJIT_BUILD_X86)
  if (arch == kArchX86)
    varMapping = x86::_varMapping;
#endif // ASMJIT_BUILD_X86

#if defined(ASMJIT_BUILD_X64)
  if (arch == kArchX64)
    varMapping = x64::_varMapping;
#endif // ASMJIT_BUILD_X64

  self->_argCount = static_cast<uint8_t>(argCount);
  self->_retCount = 0;

  // Store argument types; register/stack assignment happens below.
  for (i = 0; i < static_cast<int32_t>(argCount); i++) {
    FuncInOut& arg = self->getArg(i);
    arg._varType = static_cast<uint8_t>(argList[i]);
    arg._regIndex = kInvalidReg;
    arg._stackOffset = kFuncStackInvalid;
  }

  // Clear unused argument slots.
  for (; i < kFuncArgCount; i++) {
    self->_argList[i].reset();
  }

  self->_retList[0].reset();
  self->_retList[1].reset();

  self->_argStackSize = 0;
  self->_used.reset();

  // Decide how the return value is delivered.
  if (ret != kVarTypeInvalid) {
    ret = varMapping[ret];
    switch (ret) {
      case kVarTypeInt64:
      case kVarTypeUInt64:
        // 64-bit value is returned in EDX:EAX on x86.
#if defined(ASMJIT_BUILD_X86)
        if (arch == kArchX86) {
          self->_retCount = 2;
          self->_retList[0]._varType = kVarTypeUInt32;
          self->_retList[0]._regIndex = kRegIndexAx;
          // `ret - 2` presumably maps the 64-bit type to its 32-bit
          // counterpart for the high half -- confirm enum ordering.
          self->_retList[1]._varType = static_cast<uint8_t>(ret - 2);
          self->_retList[1]._regIndex = kRegIndexDx;
        }
#endif // ASMJIT_BUILD_X86
        // ... Fall through ...

      // On x64 (and for 32-bit and smaller types everywhere) the integer
      // return value lives in the accumulator register.
      case kVarTypeInt8:
      case kVarTypeUInt8:
      case kVarTypeInt16:
      case kVarTypeUInt16:
      case kVarTypeInt32:
      case kVarTypeUInt32:
        self->_retCount = 1;
        self->_retList[0]._varType = static_cast<uint8_t>(ret);
        self->_retList[0]._regIndex = kRegIndexAx;
        break;

      case kVarTypeMm:
        self->_retCount = 1;
        self->_retList[0]._varType = static_cast<uint8_t>(ret);
        self->_retList[0]._regIndex = 0;
        break;

      case kVarTypeFp32:
        self->_retCount = 1;
        if (arch == kArchX86) {
          // x86 returns floats on the FPU stack (st0).
          self->_retList[0]._varType = kVarTypeFp32;
          self->_retList[0]._regIndex = 0;
        }
        else {
          // x64 returns floats in xmm0 as scalar-single.
          self->_retList[0]._varType = kVarTypeXmmSs;
          self->_retList[0]._regIndex = 0;
        }
        break;

      case kVarTypeFp64:
        self->_retCount = 1;
        if (arch == kArchX86) {
          self->_retList[0]._varType = kVarTypeFp64;
          self->_retList[0]._regIndex = 0;
        }
        else {
          self->_retList[0]._varType = kVarTypeXmmSd;
          self->_retList[0]._regIndex = 0;
          // NOTE(review): this inner `break` makes the one after the
          // else-block unreachable; harmless, but redundant.
          break;
        }
        break;

      case kVarTypeXmm:
      case kVarTypeXmmSs:
      case kVarTypeXmmSd:
      case kVarTypeXmmPs:
      case kVarTypeXmmPd:
        self->_retCount = 1;
        self->_retList[0]._varType = static_cast<uint8_t>(ret);
        self->_retList[0]._regIndex = 0;
        break;
    }
  }

  if (self->_argCount == 0)
    return kErrorOk;

#if defined(ASMJIT_BUILD_X86)
  if (arch == kArchX86) {
    // Register arguments (Integer), always left-to-right.
    for (i = 0; i != static_cast<int32_t>(argCount); i++) {
      FuncInOut& arg = self->getArg(i);
      uint32_t varType = varMapping[arg.getVarType()];
      // Skip non-integer args and stop consuming once the passing order
      // is exhausted (signed/unsigned compare here -- sizes are small).
      if (!x86ArgIsInt(varType) || gpPos >= ASMJIT_ARRAY_SIZE(self->_passedOrderGp))
        continue;
      if (self->_passedOrderGp[gpPos] == kInvalidReg)
        continue;
      arg._regIndex = self->_passedOrderGp[gpPos++];
      self->_used.add(kRegClassGp, IntUtil::mask(arg.getRegIndex()));
    }

    // Stack arguments.
    // Iterate in the convention's direction (default right-to-left) and
    // hand out negative offsets that grow downward.
    int32_t iStart = static_cast<int32_t>(argCount - 1);
    int32_t iEnd = -1;
    int32_t iStep = -1;

    if (self->_direction == kFuncDirLtr) {
      iStart = 0;
      iEnd = static_cast<int32_t>(argCount);
      iStep = 1;
    }

    for (i = iStart; i != iEnd; i += iStep) {
      FuncInOut& arg = self->getArg(i);
      uint32_t varType = varMapping[arg.getVarType()];
      if (arg.hasRegIndex())
        continue;
      if (x86ArgIsInt(varType)) {
        stackOffset -= 4;
        arg._stackOffset = static_cast<int16_t>(stackOffset);
      }
      else if (x86ArgIsFp(varType)) {
        int32_t size = static_cast<int32_t>(_varInfo[varType].getSize());
        stackOffset -= size;
        arg._stackOffset = static_cast<int16_t>(stackOffset);
      }
    }
  }
#endif // ASMJIT_BUILD_X86

#if defined(ASMJIT_BUILD_X64)
  if (arch == kArchX64) {
    if (conv == kFuncConvX64W) {
      // Win64: only the first four arguments can go in registers, and each
      // argument slot is bound to a fixed register position.
      int32_t argMax = IntUtil::iMin<int32_t>(argCount, 4);

      // Register arguments (Gp/Xmm), always left-to-right.
      for (i = 0; i != argMax; i++) {
        FuncInOut& arg = self->getArg(i);
        uint32_t varType = varMapping[arg.getVarType()];
        if (x86ArgIsInt(varType) && i < ASMJIT_ARRAY_SIZE(self->_passedOrderGp)) {
          arg._regIndex = self->_passedOrderGp[i];
          self->_used.add(kRegClassGp, IntUtil::mask(arg.getRegIndex()));
          continue;
        }
        if (x86ArgIsFp(varType) && i < ASMJIT_ARRAY_SIZE(self->_passedOrderXmm)) {
          arg._varType = static_cast<uint8_t>(x86ArgTypeToXmmType(varType));
          arg._regIndex = self->_passedOrderXmm[i];
          self->_used.add(kRegClassXyz, IntUtil::mask(arg.getRegIndex()));
        }
      }

      // Stack arguments (always right-to-left).
      for (i = argCount - 1; i != -1; i--) {
        FuncInOut& arg = self->getArg(i);
        uint32_t varType = varMapping[arg.getVarType()];
        if (arg.hasRegIndex())
          continue;
        if (x86ArgIsInt(varType)) {
          stackOffset -= 8; // Always 8 bytes.
          arg._stackOffset = stackOffset;
        }
        else if (x86ArgIsFp(varType)) {
          stackOffset -= 8; // Always 8 bytes (float/double).
          arg._stackOffset = stackOffset;
        }
      }

      // 32 bytes shadow space (X64W calling convention specific).
      stackOffset -= 4 * 8;
    }
    else {
      // SysV AMD64: Gp and Xmm passing orders advance independently.
      // Register arguments (Gp), always left-to-right.
      for (i = 0; i != static_cast<int32_t>(argCount); i++) {
        FuncInOut& arg = self->getArg(i);
        uint32_t varType = varMapping[arg.getVarType()];
        if (!x86ArgIsInt(varType) || gpPos >= ASMJIT_ARRAY_SIZE(self->_passedOrderGp))
          continue;
        if (self->_passedOrderGp[gpPos] == kInvalidReg)
          continue;
        arg._regIndex = self->_passedOrderGp[gpPos++];
        self->_used.add(kRegClassGp, IntUtil::mask(arg.getRegIndex()));
      }

      // Register arguments (Xmm), always left-to-right.
      for (i = 0; i != static_cast<int32_t>(argCount); i++) {
        FuncInOut& arg = self->getArg(i);
        uint32_t varType = varMapping[arg.getVarType()];
        if (x86ArgIsFp(varType)) {
          arg._varType = static_cast<uint8_t>(x86ArgTypeToXmmType(varType));
          arg._regIndex = self->_passedOrderXmm[xmmPos++];
          self->_used.add(kRegClassXyz, IntUtil::mask(arg.getRegIndex()));
        }
      }

      // Stack arguments.
      for (i = argCount - 1; i != -1; i--) {
        FuncInOut& arg = self->getArg(i);
        uint32_t varType = varMapping[arg.getVarType()];
        if (arg.hasRegIndex())
          continue;
        if (x86ArgIsInt(varType)) {
          stackOffset -= 8;
          arg._stackOffset = static_cast<int16_t>(stackOffset);
        }
        else if (x86ArgIsFp(varType)) {
          int32_t size = static_cast<int32_t>(_varInfo[varType].getSize());
          stackOffset -= size;
          arg._stackOffset = static_cast<int16_t>(stackOffset);
        }
      }
    }
  }
#endif // ASMJIT_BUILD_X64

  // Modify the stack offset, thus in result all parameters would have positive
  // non-zero stack offset.
  for (i = 0; i < static_cast<int32_t>(argCount); i++) {
    FuncInOut& arg = self->getArg(i);
    if (!arg.hasRegIndex()) {
      arg._stackOffset += static_cast<uint16_t>(static_cast<int32_t>(regSize) - stackOffset);
    }
  }

  self->_argStackSize = static_cast<uint32_t>(-stackOffset);
  return kErrorOk;
}
static uint32_t X86X64FuncDecl_initConv(X86X64FuncDecl* self, uint32_t arch, uint32_t conv) { // Setup defaults. self->_argStackSize = 0; self->_redZoneSize = 0; self->_spillZoneSize = 0; self->_convention = static_cast<uint8_t>(conv); self->_calleePopsStack = false; self->_direction = kFuncDirRtl; self->_passed.reset(); self->_preserved.reset(); ::memset(self->_passedOrderGp, kInvalidReg, ASMJIT_ARRAY_SIZE(self->_passedOrderGp)); ::memset(self->_passedOrderXmm, kInvalidReg, ASMJIT_ARRAY_SIZE(self->_passedOrderXmm)); // -------------------------------------------------------------------------- // [X86 Support] // -------------------------------------------------------------------------- #if defined(ASMJIT_BUILD_X86) if (arch == kArchX86) { self->_preserved.set(kRegClassGp, IntUtil::mask(R(Bx), R(Sp), R(Bp), R(Si), R(Di))); switch (conv) { case kFuncConvCDecl: break; case kFuncConvStdCall: self->_calleePopsStack = true; break; case kFuncConvMsThisCall: self->_calleePopsStack = true; self->_passed.set(kRegClassGp, IntUtil::mask(R(Cx))); self->_passedOrderGp[0] = R(Cx); break; case kFuncConvMsFastCall: self->_calleePopsStack = true; self->_passed.set(kRegClassGp, IntUtil::mask(R(Cx), R(Cx))); self->_passedOrderGp[0] = R(Cx); self->_passedOrderGp[1] = R(Dx); break; case kFuncConvBorlandFastCall: self->_calleePopsStack = true; self->_direction = kFuncDirLtr; self->_passed.set(kRegClassGp, IntUtil::mask(R(Ax), R(Dx), R(Cx))); self->_passedOrderGp[0] = R(Ax); self->_passedOrderGp[1] = R(Dx); self->_passedOrderGp[2] = R(Cx); break; case kFuncConvGccFastCall: self->_calleePopsStack = true; self->_passed.set(kRegClassGp, IntUtil::mask(R(Cx), R(Dx))); self->_passedOrderGp[0] = R(Cx); self->_passedOrderGp[1] = R(Dx); break; case kFuncConvGccRegParm1: self->_passed.set(kRegClassGp, IntUtil::mask(R(Ax))); self->_passedOrderGp[0] = R(Ax); break; case kFuncConvGccRegParm2: self->_passed.set(kRegClassGp, IntUtil::mask(R(Ax), R(Dx))); self->_passedOrderGp[0] = R(Ax); 
self->_passedOrderGp[1] = R(Dx); break; case kFuncConvGccRegParm3: self->_passed.set(kRegClassGp, IntUtil::mask(R(Ax), R(Dx), R(Cx))); self->_passedOrderGp[0] = R(Ax); self->_passedOrderGp[1] = R(Dx); self->_passedOrderGp[2] = R(Cx); break; default: ASMJIT_ASSERT(!"Reached"); } return kErrorOk; } #endif // ASMJIT_BUILD_X86 // -------------------------------------------------------------------------- // [X64 Support] // -------------------------------------------------------------------------- #if defined(ASMJIT_BUILD_X64) switch (conv) { case kFuncConvX64W: self->_spillZoneSize = 32; self->_passed.set(kRegClassGp, IntUtil::mask(R(Cx), R(Dx), 8, 9)); self->_passedOrderGp[0] = R(Cx); self->_passedOrderGp[1] = R(Dx); self->_passedOrderGp[2] = 8; self->_passedOrderGp[3] = 9; self->_passed.set(kRegClassXyz, IntUtil::mask(0, 1, 2, 3)); self->_passedOrderXmm[0] = 0; self->_passedOrderXmm[1] = 1; self->_passedOrderXmm[2] = 2; self->_passedOrderXmm[3] = 3; self->_preserved.set(kRegClassGp , IntUtil::mask(R(Bx), R(Sp), R(Bp), R(Si), R(Di), 12, 13, 14, 15)); self->_preserved.set(kRegClassXyz, IntUtil::mask(6, 7, 8, 9, 10, 11, 12, 13, 14, 15)); break; case kFuncConvX64U: self->_redZoneSize = 128; self->_passed.set(kRegClassGp, IntUtil::mask(R(Di), R(Si), R(Dx), R(Cx), 8, 9)); self->_passedOrderGp[0] = R(Di); self->_passedOrderGp[1] = R(Si); self->_passedOrderGp[2] = R(Dx); self->_passedOrderGp[3] = R(Cx); self->_passedOrderGp[4] = 8; self->_passedOrderGp[5] = 9; self->_passed.set(kRegClassXyz, IntUtil::mask(0, 1, 2, 3, 4, 5, 6, 7)); self->_passedOrderXmm[0] = 0; self->_passedOrderXmm[1] = 1; self->_passedOrderXmm[2] = 2; self->_passedOrderXmm[3] = 3; self->_passedOrderXmm[4] = 4; self->_passedOrderXmm[5] = 5; self->_passedOrderXmm[6] = 6; self->_passedOrderXmm[7] = 7; self->_preserved.set(kRegClassGp, IntUtil::mask(R(Bx), R(Sp), R(Bp), 12, 13, 14, 15)); break; default: ASMJIT_ASSERT(!"Reached"); } #endif // ASMJIT_BUILD_X64 return kErrorOk; }
// Prints host-CPU identification and, per architecture, the list of detected
// features via the INFO logging macro.
static void dumpCpu(void) {
  const asmjit::CpuInfo& cpu = asmjit::CpuInfo::getHost();

  INFO("Host CPU:");
  INFO(" Vendor string : %s", cpu.getVendorString());
  INFO(" Brand string : %s", cpu.getBrandString());
  INFO(" Family : %u", cpu.getFamily());
  INFO(" Model : %u", cpu.getModel());
  INFO(" Stepping : %u", cpu.getStepping());
  INFO(" HW-Threads Count : %u", cpu.getHwThreadsCount());
  INFO("");

  // --------------------------------------------------------------------------
  // [ARM / ARM64]
  // --------------------------------------------------------------------------

#if ASMJIT_ARCH_ARM32 || ASMJIT_ARCH_ARM64
  // Feature-bit to display-name mapping for ARM targets.
  static const DumpCpuFeature armFeaturesList[] = {
    { asmjit::CpuInfo::kArmFeatureV6        , "ARMv6"          },
    { asmjit::CpuInfo::kArmFeatureV7        , "ARMv7"          },
    { asmjit::CpuInfo::kArmFeatureV8        , "ARMv8"          },
    { asmjit::CpuInfo::kArmFeatureTHUMB     , "THUMB"          },
    { asmjit::CpuInfo::kArmFeatureTHUMB2    , "THUMBv2"        },
    { asmjit::CpuInfo::kArmFeatureVFP2      , "VFPv2"          },
    { asmjit::CpuInfo::kArmFeatureVFP3      , "VFPv3"          },
    { asmjit::CpuInfo::kArmFeatureVFP4      , "VFPv4"          },
    { asmjit::CpuInfo::kArmFeatureVFP_D32   , "VFP D32"        },
    { asmjit::CpuInfo::kArmFeatureNEON      , "NEON"           },
    { asmjit::CpuInfo::kArmFeatureDSP       , "DSP"            },
    { asmjit::CpuInfo::kArmFeatureIDIV      , "IDIV"           },
    { asmjit::CpuInfo::kArmFeatureAES       , "AES"            },
    { asmjit::CpuInfo::kArmFeatureCRC32     , "CRC32"          },
    { asmjit::CpuInfo::kArmFeatureSHA1      , "SHA1"           },
    { asmjit::CpuInfo::kArmFeatureSHA256    , "SHA256"         },
    { asmjit::CpuInfo::kArmFeatureAtomics64 , "64-bit atomics" }
  };

  INFO("ARM Features:");
  dumpCpuFeatures(cpu, armFeaturesList, ASMJIT_ARRAY_SIZE(armFeaturesList));
  INFO("");
#endif

  // --------------------------------------------------------------------------
  // [X86 / X64]
  // --------------------------------------------------------------------------

#if ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64
  // Feature-bit to display-name mapping for x86/x64 targets.
  static const DumpCpuFeature x86FeaturesList[] = {
    { asmjit::CpuInfo::kX86FeatureNX          , "NX (Non-Execute Bit)" },
    { asmjit::CpuInfo::kX86FeatureMT          , "MT (Multi-Threading)" },
    { asmjit::CpuInfo::kX86FeatureRDTSC       , "RDTSC"                },
    { asmjit::CpuInfo::kX86FeatureRDTSCP      , "RDTSCP"               },
    { asmjit::CpuInfo::kX86FeatureCMOV        , "CMOV"                 },
    { asmjit::CpuInfo::kX86FeatureCMPXCHG8B   , "CMPXCHG8B"            },
    { asmjit::CpuInfo::kX86FeatureCMPXCHG16B  , "CMPXCHG16B"           },
    { asmjit::CpuInfo::kX86FeatureCLFLUSH     , "CLFLUSH"              },
    { asmjit::CpuInfo::kX86FeatureCLFLUSH_OPT , "CLFLUSH (Opt)"        },
    { asmjit::CpuInfo::kX86FeatureCLWB        , "CLWB"                 },
    { asmjit::CpuInfo::kX86FeaturePCOMMIT     , "PCOMMIT"              },
    { asmjit::CpuInfo::kX86FeaturePREFETCH    , "PREFETCH"             },
    { asmjit::CpuInfo::kX86FeaturePREFETCHWT1 , "PREFETCHWT1"          },
    { asmjit::CpuInfo::kX86FeatureLAHF_SAHF   , "LAHF/SAHF"            },
    { asmjit::CpuInfo::kX86FeatureFXSR        , "FXSR"                 },
    { asmjit::CpuInfo::kX86FeatureFXSR_OPT    , "FXSR (Opt)"           },
    { asmjit::CpuInfo::kX86FeatureMMX         , "MMX"                  },
    { asmjit::CpuInfo::kX86FeatureMMX2        , "MMX2"                 },
    { asmjit::CpuInfo::kX86Feature3DNOW       , "3DNOW"                },
    { asmjit::CpuInfo::kX86Feature3DNOW2      , "3DNOW2"               },
    { asmjit::CpuInfo::kX86FeatureSSE         , "SSE"                  },
    { asmjit::CpuInfo::kX86FeatureSSE2        , "SSE2"                 },
    { asmjit::CpuInfo::kX86FeatureSSE3        , "SSE3"                 },
    { asmjit::CpuInfo::kX86FeatureSSSE3       , "SSSE3"                },
    { asmjit::CpuInfo::kX86FeatureSSE4A       , "SSE4A"                },
    { asmjit::CpuInfo::kX86FeatureSSE4_1      , "SSE4.1"               },
    { asmjit::CpuInfo::kX86FeatureSSE4_2      , "SSE4.2"               },
    { asmjit::CpuInfo::kX86FeatureMSSE        , "Misaligned SSE"       },
    { asmjit::CpuInfo::kX86FeatureMONITOR     , "MONITOR/MWAIT"        },
    { asmjit::CpuInfo::kX86FeatureMOVBE       , "MOVBE"                },
    { asmjit::CpuInfo::kX86FeaturePOPCNT      , "POPCNT"               },
    { asmjit::CpuInfo::kX86FeatureLZCNT       , "LZCNT"                },
    { asmjit::CpuInfo::kX86FeatureAESNI       , "AESNI"                },
    { asmjit::CpuInfo::kX86FeaturePCLMULQDQ   , "PCLMULQDQ"            },
    { asmjit::CpuInfo::kX86FeatureRDRAND      , "RDRAND"               },
    { asmjit::CpuInfo::kX86FeatureRDSEED      , "RDSEED"               },
    { asmjit::CpuInfo::kX86FeatureSMAP        , "SMAP"                 },
    { asmjit::CpuInfo::kX86FeatureSMEP        , "SMEP"                 },
    { asmjit::CpuInfo::kX86FeatureSHA         , "SHA"                  },
    { asmjit::CpuInfo::kX86FeatureXSAVE       , "XSAVE"                },
    { asmjit::CpuInfo::kX86FeatureXSAVE_OS    , "XSAVE (OS)"           },
    { asmjit::CpuInfo::kX86FeatureAVX         , "AVX"                  },
    { asmjit::CpuInfo::kX86FeatureAVX2        , "AVX2"                 },
    { asmjit::CpuInfo::kX86FeatureF16C        , "F16C"                 },
    { asmjit::CpuInfo::kX86FeatureFMA3        , "FMA3"                 },
    { asmjit::CpuInfo::kX86FeatureFMA4        , "FMA4"                 },
    { asmjit::CpuInfo::kX86FeatureXOP         , "XOP"                  },
    { asmjit::CpuInfo::kX86FeatureBMI         , "BMI"                  },
    { asmjit::CpuInfo::kX86FeatureBMI2        , "BMI2"                 },
    { asmjit::CpuInfo::kX86FeatureADX         , "ADX"                  },
    { asmjit::CpuInfo::kX86FeatureTBM         , "TBM"                  },
    { asmjit::CpuInfo::kX86FeatureMPX         , "MPX"                  },
    { asmjit::CpuInfo::kX86FeatureHLE         , "HLE"                  },
    { asmjit::CpuInfo::kX86FeatureRTM         , "RTM"                  },
    { asmjit::CpuInfo::kX86FeatureERMS        , "ERMS"                 },
    { asmjit::CpuInfo::kX86FeatureFSGSBASE    , "FS/GS Base"           },
    { asmjit::CpuInfo::kX86FeatureAVX512F     , "AVX512F"              },
    { asmjit::CpuInfo::kX86FeatureAVX512CD    , "AVX512CD"             },
    { asmjit::CpuInfo::kX86FeatureAVX512PF    , "AVX512PF"             },
    { asmjit::CpuInfo::kX86FeatureAVX512ER    , "AVX512ER"             },
    { asmjit::CpuInfo::kX86FeatureAVX512DQ    , "AVX512DQ"             },
    { asmjit::CpuInfo::kX86FeatureAVX512BW    , "AVX512BW"             },
    { asmjit::CpuInfo::kX86FeatureAVX512VL    , "AVX512VL"             },
    { asmjit::CpuInfo::kX86FeatureAVX512IFMA  , "AVX512IFMA"           },
    { asmjit::CpuInfo::kX86FeatureAVX512VBMI  , "AVX512VBMI"           }
  };

  INFO("X86 Specific:");
  INFO(" Processor Type : %u", cpu.getX86ProcessorType());
  INFO(" Brand Index : %u", cpu.getX86BrandIndex());
  INFO(" CL Flush Cache Line : %u", cpu.getX86FlushCacheLineSize());
  INFO(" Max logical Processors : %u", cpu.getX86MaxLogicalProcessors());
  INFO("");

  INFO("X86 Features:");
  dumpCpuFeatures(cpu, x86FeaturesList, ASMJIT_ARRAY_SIZE(x86FeaturesList));
  INFO("");
#endif
}
Logger::Logger() {
  // Construct with no options and an empty indentation string.
  ::memset(_indentation, 0, ASMJIT_ARRAY_SIZE(_indentation));
  _options = 0;
}
Logger::Logger()
  : _flags(kLoggerIsEnabled | kLoggerIsUsed) {
  // Start with an empty (all-zero) instruction prefix.
  memset(_instructionPrefix, 0, ASMJIT_ARRAY_SIZE(_instructionPrefix));
}
// Initializes `self` for the given calling `convention`: resets all argument
// register lists/masks, then fills in the convention-specific argument
// registers, callee-pops behavior, argument direction, and preserved-register
// masks. The X86 and X64 tables are selected at compile time via
// ASMJIT_X86/ASMJIT_X64.
static void X86FuncDecl_initCallingConvention(X86FuncDecl* self, uint32_t convention) {
  uint32_t i;

  // --------------------------------------------------------------------------
  // [Init]
  // --------------------------------------------------------------------------

  // Defaults shared by all conventions; individual cases below override them.
  self->_convention = convention;
  self->_calleePopsStack = false;
  self->_argumentsDirection = kFuncArgsRTL;

  // Mark every argument-register slot as unused.
  for (i = 0; i < ASMJIT_ARRAY_SIZE(self->_gpList); i++)
    self->_gpList[i] = kRegIndexInvalid;
  for (i = 0; i < ASMJIT_ARRAY_SIZE(self->_xmmList); i++)
    self->_xmmList[i] = kRegIndexInvalid;

  self->_gpListMask = 0x0;
  self->_mmListMask = 0x0;
  self->_xmmListMask = 0x0;

  self->_gpPreservedMask = 0x0;
  self->_mmPreservedMask = 0x0;
  self->_xmmPreservedMask = 0x0;

  // --------------------------------------------------------------------------
  // [X86 Calling Conventions]
  // --------------------------------------------------------------------------

#if defined(ASMJIT_X86)
  // All 32-bit conventions preserve EBX/ESP/EBP/ESI/EDI and no XMM registers.
  self->_gpPreservedMask = static_cast<uint16_t>(
    IntUtil::maskFromIndex(kX86RegIndexEbx) |
    IntUtil::maskFromIndex(kX86RegIndexEsp) |
    IntUtil::maskFromIndex(kX86RegIndexEbp) |
    IntUtil::maskFromIndex(kX86RegIndexEsi) |
    IntUtil::maskFromIndex(kX86RegIndexEdi));
  self->_xmmPreservedMask = 0;

  switch (convention) {
    // ------------------------------------------------------------------------
    // [CDecl] - All arguments on the stack, caller cleans up.
    // ------------------------------------------------------------------------

    case kX86FuncConvCDecl:
      break;

    // ------------------------------------------------------------------------
    // [StdCall] - Like CDecl, but the callee pops the arguments.
    // ------------------------------------------------------------------------

    case kX86FuncConvStdCall:
      self->_calleePopsStack = true;
      break;

    // ------------------------------------------------------------------------
    // [MS-ThisCall] - `this` in ECX, callee pops.
    // ------------------------------------------------------------------------

    case kX86FuncConvMsThisCall:
      self->_calleePopsStack = true;
      self->_gpList[0] = kX86RegIndexEcx;
      self->_gpListMask = static_cast<uint16_t>(
        IntUtil::maskFromIndex(kX86RegIndexEcx));
      break;

    // ------------------------------------------------------------------------
    // [MS-FastCall] - First two GP arguments in ECX/EDX, callee pops.
    // ------------------------------------------------------------------------

    case kX86FuncConvMsFastCall:
      self->_calleePopsStack = true;
      self->_gpList[0] = kX86RegIndexEcx;
      self->_gpList[1] = kX86RegIndexEdx;
      self->_gpListMask = static_cast<uint16_t>(
        IntUtil::maskFromIndex(kX86RegIndexEcx) |
        IntUtil::maskFromIndex(kX86RegIndexEdx));
      break;

    // ------------------------------------------------------------------------
    // [Borland-FastCall] - EAX/EDX/ECX, left-to-right arguments, callee pops.
    // ------------------------------------------------------------------------

    case kX86FuncConvBorlandFastCall:
      self->_calleePopsStack = true;
      self->_argumentsDirection = kFuncArgsLTR;
      self->_gpList[0] = kX86RegIndexEax;
      self->_gpList[1] = kX86RegIndexEdx;
      self->_gpList[2] = kX86RegIndexEcx;
      self->_gpListMask = static_cast<uint16_t>(
        IntUtil::maskFromIndex(kX86RegIndexEax) |
        IntUtil::maskFromIndex(kX86RegIndexEdx) |
        IntUtil::maskFromIndex(kX86RegIndexEcx));
      break;

    // ------------------------------------------------------------------------
    // [Gcc-FastCall] - ECX/EDX, callee pops.
    // ------------------------------------------------------------------------

    case kX86FuncConvGccFastCall:
      self->_calleePopsStack = true;
      self->_gpList[0] = kX86RegIndexEcx;
      self->_gpList[1] = kX86RegIndexEdx;
      self->_gpListMask = static_cast<uint16_t>(
        IntUtil::maskFromIndex(kX86RegIndexEcx) |
        IntUtil::maskFromIndex(kX86RegIndexEdx));
      break;

    // ------------------------------------------------------------------------
    // [Gcc-Regparm(1)] - First GP argument in EAX, caller cleans up.
    // ------------------------------------------------------------------------

    case kX86FuncConvGccRegParm1:
      self->_calleePopsStack = false;
      self->_gpList[0] = kX86RegIndexEax;
      self->_gpListMask = static_cast<uint16_t>(
        IntUtil::maskFromIndex(kX86RegIndexEax));
      break;

    // ------------------------------------------------------------------------
    // [Gcc-Regparm(2)] - EAX/EDX, caller cleans up.
    // ------------------------------------------------------------------------

    case kX86FuncConvGccRegParm2:
      self->_calleePopsStack = false;
      self->_gpList[0] = kX86RegIndexEax;
      self->_gpList[1] = kX86RegIndexEdx;
      self->_gpListMask = static_cast<uint16_t>(
        IntUtil::maskFromIndex(kX86RegIndexEax) |
        IntUtil::maskFromIndex(kX86RegIndexEdx));
      break;

    // ------------------------------------------------------------------------
    // [Gcc-Regparm(3)] - EAX/EDX/ECX, caller cleans up.
    // ------------------------------------------------------------------------

    case kX86FuncConvGccRegParm3:
      self->_calleePopsStack = false;
      self->_gpList[0] = kX86RegIndexEax;
      self->_gpList[1] = kX86RegIndexEdx;
      self->_gpList[2] = kX86RegIndexEcx;
      self->_gpListMask = static_cast<uint16_t>(
        IntUtil::maskFromIndex(kX86RegIndexEax) |
        IntUtil::maskFromIndex(kX86RegIndexEdx) |
        IntUtil::maskFromIndex(kX86RegIndexEcx));
      break;

    // ------------------------------------------------------------------------
    // [Illegal]
    // ------------------------------------------------------------------------

    default:
      // Illegal calling convention.
      ASMJIT_ASSERT(0);
  }
#endif // ASMJIT_X86

  // --------------------------------------------------------------------------
  // [X64 Calling Conventions]
  // --------------------------------------------------------------------------

#if defined(ASMJIT_X64)
  switch (convention) {
    // ------------------------------------------------------------------------
    // [X64-Windows] - RCX/RDX/R8/R9 + XMM0-XMM3; XMM6-XMM15 are preserved.
    // ------------------------------------------------------------------------

    case kX86FuncConvX64W:
      self->_gpList[0] = kX86RegIndexRcx;
      self->_gpList[1] = kX86RegIndexRdx;
      self->_gpList[2] = kX86RegIndexR8;
      self->_gpList[3] = kX86RegIndexR9;

      self->_xmmList[0] = kX86RegIndexXmm0;
      self->_xmmList[1] = kX86RegIndexXmm1;
      self->_xmmList[2] = kX86RegIndexXmm2;
      self->_xmmList[3] = kX86RegIndexXmm3;

      self->_gpListMask = static_cast<uint16_t>(
        IntUtil::maskFromIndex(kX86RegIndexRcx  ) |
        IntUtil::maskFromIndex(kX86RegIndexRdx  ) |
        IntUtil::maskFromIndex(kX86RegIndexR8   ) |
        IntUtil::maskFromIndex(kX86RegIndexR9   ));

      self->_xmmListMask = static_cast<uint16_t>(
        IntUtil::maskFromIndex(kX86RegIndexXmm0 ) |
        IntUtil::maskFromIndex(kX86RegIndexXmm1 ) |
        IntUtil::maskFromIndex(kX86RegIndexXmm2 ) |
        IntUtil::maskFromIndex(kX86RegIndexXmm3 ));

      self->_gpPreservedMask = static_cast<uint16_t>(
        IntUtil::maskFromIndex(kX86RegIndexRbx  ) |
        IntUtil::maskFromIndex(kX86RegIndexRsp  ) |
        IntUtil::maskFromIndex(kX86RegIndexRbp  ) |
        IntUtil::maskFromIndex(kX86RegIndexRsi  ) |
        IntUtil::maskFromIndex(kX86RegIndexRdi  ) |
        IntUtil::maskFromIndex(kX86RegIndexR12  ) |
        IntUtil::maskFromIndex(kX86RegIndexR13  ) |
        IntUtil::maskFromIndex(kX86RegIndexR14  ) |
        IntUtil::maskFromIndex(kX86RegIndexR15  ));

      self->_xmmPreservedMask = static_cast<uint16_t>(
        IntUtil::maskFromIndex(kX86RegIndexXmm6 ) |
        IntUtil::maskFromIndex(kX86RegIndexXmm7 ) |
        IntUtil::maskFromIndex(kX86RegIndexXmm8 ) |
        IntUtil::maskFromIndex(kX86RegIndexXmm9 ) |
        IntUtil::maskFromIndex(kX86RegIndexXmm10) |
        IntUtil::maskFromIndex(kX86RegIndexXmm11) |
        IntUtil::maskFromIndex(kX86RegIndexXmm12) |
        IntUtil::maskFromIndex(kX86RegIndexXmm13) |
        IntUtil::maskFromIndex(kX86RegIndexXmm14) |
        IntUtil::maskFromIndex(kX86RegIndexXmm15));
      break;

    // ------------------------------------------------------------------------
    // [X64-Unix] - RDI/RSI/RDX/RCX/R8/R9 + XMM0-XMM7; no XMM preserved.
    // ------------------------------------------------------------------------

    case kX86FuncConvX64U:
      self->_gpList[0] = kX86RegIndexRdi;
      self->_gpList[1] = kX86RegIndexRsi;
      self->_gpList[2] = kX86RegIndexRdx;
      self->_gpList[3] = kX86RegIndexRcx;
      self->_gpList[4] = kX86RegIndexR8;
      self->_gpList[5] = kX86RegIndexR9;

      self->_xmmList[0] = kX86RegIndexXmm0;
      self->_xmmList[1] = kX86RegIndexXmm1;
      self->_xmmList[2] = kX86RegIndexXmm2;
      self->_xmmList[3] = kX86RegIndexXmm3;
      self->_xmmList[4] = kX86RegIndexXmm4;
      self->_xmmList[5] = kX86RegIndexXmm5;
      self->_xmmList[6] = kX86RegIndexXmm6;
      self->_xmmList[7] = kX86RegIndexXmm7;

      self->_gpListMask = static_cast<uint16_t>(
        IntUtil::maskFromIndex(kX86RegIndexRdi  ) |
        IntUtil::maskFromIndex(kX86RegIndexRsi  ) |
        IntUtil::maskFromIndex(kX86RegIndexRdx  ) |
        IntUtil::maskFromIndex(kX86RegIndexRcx  ) |
        IntUtil::maskFromIndex(kX86RegIndexR8   ) |
        IntUtil::maskFromIndex(kX86RegIndexR9   ));

      self->_xmmListMask = static_cast<uint16_t>(
        IntUtil::maskFromIndex(kX86RegIndexXmm0 ) |
        IntUtil::maskFromIndex(kX86RegIndexXmm1 ) |
        IntUtil::maskFromIndex(kX86RegIndexXmm2 ) |
        IntUtil::maskFromIndex(kX86RegIndexXmm3 ) |
        IntUtil::maskFromIndex(kX86RegIndexXmm4 ) |
        IntUtil::maskFromIndex(kX86RegIndexXmm5 ) |
        IntUtil::maskFromIndex(kX86RegIndexXmm6 ) |
        IntUtil::maskFromIndex(kX86RegIndexXmm7 ));

      self->_gpPreservedMask = static_cast<uint16_t>(
        IntUtil::maskFromIndex(kX86RegIndexRbx  ) |
        IntUtil::maskFromIndex(kX86RegIndexRsp  ) |
        IntUtil::maskFromIndex(kX86RegIndexRbp  ) |
        IntUtil::maskFromIndex(kX86RegIndexR12  ) |
        IntUtil::maskFromIndex(kX86RegIndexR13  ) |
        IntUtil::maskFromIndex(kX86RegIndexR14  ) |
        IntUtil::maskFromIndex(kX86RegIndexR15  ));
      break;

    // ------------------------------------------------------------------------
    // [Illegal]
    // ------------------------------------------------------------------------

    default:
      // Illegal calling convention.
      ASMJIT_ASSERT(0);
  }
#endif // ASMJIT_X64
}
Error StringBuilder::_opNumber(uint32_t op, uint64_t i, uint32_t base, size_t width, uint32_t flags) noexcept { if (base < 2 || base > 36) base = 10; char buf[128]; char* p = buf + ASMJIT_ARRAY_SIZE(buf); uint64_t orig = i; char sign = '\0'; // -------------------------------------------------------------------------- // [Sign] // -------------------------------------------------------------------------- if ((flags & kStringFormatSigned) != 0 && static_cast<int64_t>(i) < 0) { i = static_cast<uint64_t>(-static_cast<int64_t>(i)); sign = '-'; } else if ((flags & kStringFormatShowSign) != 0) { sign = '+'; } else if ((flags & kStringFormatShowSpace) != 0) { sign = ' '; } // -------------------------------------------------------------------------- // [Number] // -------------------------------------------------------------------------- do { uint64_t d = i / base; uint64_t r = i % base; *--p = StringBuilder_numbers[r]; i = d; } while (i); size_t numberLength = (size_t)(buf + ASMJIT_ARRAY_SIZE(buf) - p); // -------------------------------------------------------------------------- // [Alternate Form] // -------------------------------------------------------------------------- if ((flags & kStringFormatAlternate) != 0) { if (base == 8) { if (orig != 0) *--p = '0'; } if (base == 16) { *--p = 'x'; *--p = '0'; } } // -------------------------------------------------------------------------- // [Width] // -------------------------------------------------------------------------- if (sign != 0) *--p = sign; if (width > 256) width = 256; if (width <= numberLength) width = 0; else width -= numberLength; // -------------------------------------------------------------------------- // Write] // -------------------------------------------------------------------------- size_t prefixLength = (size_t)(buf + ASMJIT_ARRAY_SIZE(buf) - p) - numberLength; char* data = prepare(op, prefixLength + width + numberLength); if (!data) return DebugUtils::errored(kErrorNoHeapMemory); 
::memcpy(data, p, prefixLength); data += prefixLength; ::memset(data, '0', width); data += width; ::memcpy(data, p + prefixLength, numberLength); return kErrorOk; }