//! Append `node` at the current cursor position and make it the new cursor.
//!
//! With no cursor the node is prepended to the list head; otherwise it is
//! spliced in right after the cursor. Returns `node`.
HLNode* Compiler::addNode(HLNode* node) noexcept {
  ASMJIT_ASSERT(node != nullptr);
  ASMJIT_ASSERT(node->_prev == nullptr);
  ASMJIT_ASSERT(node->_next == nullptr);

  if (_cursor != nullptr) {
    // Splice `node` between the cursor and its successor.
    HLNode* before = _cursor;
    HLNode* after = _cursor->_next;

    node->_prev = before;
    node->_next = after;

    before->_next = node;
    if (after != nullptr)
      after->_prev = node;
    else
      _lastNode = node;    // Cursor was the tail.
  }
  else if (_firstNode != nullptr) {
    // No cursor - prepend to a non-empty list.
    node->_next = _firstNode;
    _firstNode->_prev = node;
    _firstNode = node;
  }
  else {
    // No cursor and the list is empty.
    _firstNode = node;
    _lastNode = node;
  }

  // The freshly added node becomes the cursor.
  _cursor = node;
  return node;
}
//! Add `node` after the cursor (or at the list head when there is no cursor)
//! and advance the cursor to it. Returns `node`.
CBNode* CodeBuilder::addNode(CBNode* node) noexcept {
  ASMJIT_ASSERT(node);
  ASMJIT_ASSERT(node->_prev == nullptr);
  ASMJIT_ASSERT(node->_next == nullptr);

  if (_cursor) {
    // Insert right after the cursor.
    CBNode* left = _cursor;
    CBNode* right = _cursor->_next;

    node->_prev = left;
    node->_next = right;
    left->_next = node;

    if (right)
      right->_prev = node;
    else
      _lastNode = node;    // Cursor was the last node.
  }
  else {
    // No cursor - the node goes to the front of the list.
    if (!_firstNode) {
      _firstNode = node;
      _lastNode = node;
    }
    else {
      node->_next = _firstNode;
      _firstNode->_prev = node;
      _firstNode = node;
    }
  }

  _cursor = node;
  return node;
}
//! Create a new virtual variable of `vType`, optionally formatting `name`
//! with the `ap` argument list. On failure `var` is reset and an error code
//! is returned; on success `var` is initialized as a variable operand.
Error X86Compiler::_newVar(Var* var, uint32_t vType, const char* name, va_list ap) {
  ASMJIT_ASSERT(vType < kX86VarTypeCount);

  // Map the portable variable type to a target-specific one.
  vType = _targetVarMapping[vType];
  ASMJIT_ASSERT(vType != kInvalidVar);

  // The assertion won't be compiled in release build, however, we want to check
  // this anyway.
  if (vType == kInvalidVar) {
    static_cast<X86Var*>(var)->reset();
    return kErrorInvalidArgument;
  }

  const X86VarInfo& vInfo = _x86VarInfo[vType];
  char buf[64];

  // Format the name if `ap` is given.
  // NOTE(review): testing `if (ap)` is not portable - on ABIs where `va_list`
  // is an array type this condition is always true. Presumably all callers
  // pass a usable `ap` whenever `name` is a format string - TODO confirm.
  if (ap) {
    vsnprintf(buf, ASMJIT_ARRAY_SIZE(buf), name, ap);
    // Defensive termination (some pre-C99 vsnprintf implementations do not
    // NUL-terminate on truncation).
    buf[ASMJIT_ARRAY_SIZE(buf) - 1] = '\0';
    name = buf;
  }

  VarData* vd = _newVd(vType, vInfo.getSize(), vInfo.getClass(), name);
  if (vd == NULL) {
    static_cast<X86Var*>(var)->reset();
    return getLastError();
  }

  // Pack type/size/register-type/id into the operand in one shot.
  var->_init_packed_op_sz_w0_id(kOperandTypeVar, vInfo.getSize(), vInfo.getReg() << 8, vd->getId());
  var->_vreg.vType = vType;
  return kErrorOk;
}
//! Add `node` at the cursor position (or at the head when no cursor is set)
//! and make it the new cursor. Returns `node`.
Node* BaseCompiler::addNode(Node* node) {
  ASMJIT_ASSERT(node != NULL);
  ASMJIT_ASSERT(node->_prev == NULL);
  ASMJIT_ASSERT(node->_next == NULL);

  if (_cursor == NULL) {
    // No cursor: the node is prepended to the list.
    if (_firstNode == NULL) {
      // Empty list.
      _firstNode = node;
      _lastNode = node;
    }
    else {
      node->_next = _firstNode;
      _firstNode->_prev = node;
      _firstNode = node;
    }
  }
  else {
    // Insert after the cursor node.
    Node* left = _cursor;
    Node* right = _cursor->_next;

    node->_prev = left;
    node->_next = right;

    left->_next = node;
    if (right)
      right->_prev = node;
    else
      _lastNode = node;    // Cursor was the tail.
  }

  _cursor = node;
  return node;
}
void X86Compiler::bind(const Label& label) { uint32_t id = label.getId() & kOperandIdValueMask; ASMJIT_ASSERT(id != kInvalidValue); ASMJIT_ASSERT(id < _targets.getLength()); addItem(_targets[id]); }
//! Called when `node_` is removed from the code stream. If it is a jump it is
//! unlinked from its target label's `_from` list and the label's reference
//! count is decremented.
static ASMJIT_INLINE void CodeBuilder_nodeRemoved(CodeBuilder* self, CBNode* node_) noexcept {
  if (node_->isJmpOrJcc()) {
    CBJump* node = static_cast<CBJump*>(node_);
    CBLabel* label = node->getTarget();

    if (label) {
      // Disconnect `node` from the label's singly-linked `_from` list.
      CBJump** pPrev = &label->_from;
      for (;;) {
        // The jump must be present in the list (checked in debug builds only).
        ASMJIT_ASSERT(*pPrev != nullptr);
        CBJump* current = *pPrev;
        if (!current) break;

        if (current == node) {
          *pPrev = node->_jumpNext;
          break;
        }

        // FIX: was the mojibake `¤t->_jumpNext` - a mis-encoded
        // `&current->_jumpNext` (`&curren` swallowed by the HTML entity).
        pPrev = &current->_jumpNext;
      }

      label->subNumRefs();
    }
  }
}
//! Called when `node_` is removed from the stream. Jumps are unlinked from
//! their target's `_from` list and the target's reference count is dropped.
static ASMJIT_INLINE void BaseCompiler_nodeRemoved(BaseCompiler* self, Node* node_) {
  if (node_->isJmpOrJcc()) {
    JumpNode* node = static_cast<JumpNode*>(node_);
    TargetNode* target = node->getTarget();

    // Disconnect `node` from the target's singly-linked `_from` list.
    JumpNode** pPrev = &target->_from;
    for (;;) {
      // The jump must be present in the list (debug-build check only).
      ASMJIT_ASSERT(*pPrev != NULL);
      JumpNode* current = *pPrev;
      if (current == NULL) break;

      if (current == node) {
        *pPrev = node->_jumpNext;
        break;
      }

      // FIX: was the mojibake `¤t->_jumpNext` - a mis-encoded
      // `&current->_jumpNext` (`&curren` swallowed by the HTML entity).
      pPrev = &current->_jumpNext;
    }

    target->subNumRefs();
  }
}
Error Compiler::bind(const Label& label) { uint32_t index = label.getId(); ASMJIT_ASSERT(index < _targetList.getLength()); addNode(_targetList[index]); return kErrorOk; }
//! Create a new 16-byte XMM variable of `varType`, optionally named `name`.
//! `varType` must belong to the XMM register class.
XmmVar X86Compiler::newXmmVar(uint32_t varType, const char* name) {
  ASMJIT_ASSERT((varType < kX86VarTypeCount) && (x86VarInfo[varType].getClass() & kX86VarClassXmm) != 0);

  // NOTE(review): if `_newVar` can return NULL on allocation failure then
  // `var->asXmmVar()` below would dereference NULL - TODO confirm `_newVar`'s
  // contract before relying on this in low-memory situations.
  X86CompilerVar* var = _newVar(name, varType, 16);
  return var->asXmmVar();
}
//! Allocate the memory cell that backs variable `vd` and cache it in
//! `vd->_memCell`. Stack variables get a stack cell; register-class variables
//! get a zone-allocated cell tracked in `_memVarCells` with size statistics
//! used later for frame layout. Returns NULL on allocation failure (and sets
//! the compiler error for the non-stack path).
MemCell* Context::_newVarCell(VarData* vd) {
  // Each variable gets at most one cell.
  ASMJIT_ASSERT(vd->_memCell == NULL);

  MemCell* cell;
  uint32_t size = vd->getSize();

  if (vd->isStack()) {
    // Explicit stack variables carry their own alignment.
    cell = _newStackCell(size, vd->getAlignment());
    if (cell == NULL)
      return NULL;
  }
  else {
    cell = static_cast<MemCell*>(_baseZone.alloc(sizeof(MemCell)));
    if (cell == NULL)
      goto _NoMemory;

    // Prepend to the var-cell list.
    cell->_next = _memVarCells;
    _memVarCells = cell;

    // Offsets are assigned later; alignment is the cell size itself.
    cell->_offset = 0;
    cell->_size = size;
    cell->_alignment = size;

    _memMaxAlign = IntUtil::iMax<uint32_t>(_memMaxAlign, size);
    _memVarTotal += size;

    // Per-size usage counters consumed by the frame-layout pass.
    switch (size) {
      case 1: _mem1ByteVarsUsed++ ; break;
      case 2: _mem2ByteVarsUsed++ ; break;
      case 4: _mem4ByteVarsUsed++ ; break;
      case 8: _mem8ByteVarsUsed++ ; break;
      case 16: _mem16ByteVarsUsed++; break;
      case 32: _mem32ByteVarsUsed++; break;
      case 64: _mem64ByteVarsUsed++; break;
      default: ASMJIT_ASSERT(!"Reached");   // Unsupported cell size.
    }
  }

  vd->_memCell = cell;
  return cell;

_NoMemory:
  _compiler->setError(kErrorNoHeapMemory);
  return NULL;
}
//! Assign a fresh label id (from the attached CodeHolder) to `node` and
//! register the node in `_cbLabels` so it can be looked up by id later.
Error CodeBuilder::registerLabelNode(CBLabel* node) noexcept {
  // Once the builder is in an error state no new labels are registered.
  if (_lastError) return _lastError;
  ASMJIT_ASSERT(_code != nullptr);

  // Don't call setLastError() from here, we are noexcept and we are called
  // by `newLabelNode()` and `newFuncNode()`, which are noexcept as well.
  uint32_t id;
  ASMJIT_PROPAGATE(_code->newLabelId(id));
  size_t index = Operand::unpackId(id);

  // We just added one label so it must be true.
  ASMJIT_ASSERT(_cbLabels.getLength() < index + 1);
  ASMJIT_PROPAGATE(_cbLabels.resize(index + 1));

  _cbLabels[index] = node;
  node->_id = id;
  return kErrorOk;
}
//! Rename the variable `var` to `name` (the string is duplicated into the
//! compiler's zone memory). No-op for unregistered variables.
void X86Compiler::rename(Var& var, const char* name) {
  if (var.getId() == kInvalidValue)
    return;

  X86CompilerVar* cv = _getVar(var.getId());
  ASMJIT_ASSERT(cv != NULL);

  cv->_name = _zoneMemory.sdup(name);
}
//! Set the save-on-unuse flag of `var`. No-op for unregistered variables.
void X86Compiler::setSaveOnUnuse(Var& var, bool value) {
  if (var.getId() == kInvalidValue)
    return;

  X86CompilerVar* cv = _getVar(var.getId());
  ASMJIT_ASSERT(cv != NULL);

  cv->saveOnUnuse = value;
}
//! Get the save-on-unuse flag of `var`; returns false for unregistered
//! variables.
bool X86Compiler::getSaveOnUnuse(Var& var) const {
  if (var.getId() == kInvalidValue)
    return false;

  X86CompilerVar* vdata = _getVar(var.getId());
  ASMJIT_ASSERT(vdata != NULL);

  // FIX: replaced the C-style `(bool)` cast with an explicit comparison -
  // same result, but intent-revealing and greppable.
  return vdata->saveOnUnuse != 0;
}
//! Get the register-allocation priority of `var`; returns `kInvalidValue`
//! for unregistered variables.
uint32_t X86Compiler::getPriority(Var& var) const {
  if (var.getId() == kInvalidValue)
    return kInvalidValue;

  X86CompilerVar* cv = _getVar(var.getId());
  ASMJIT_ASSERT(cv != NULL);

  return cv->getPriority();
}
//! Create a new `HLLabel` node backed by a fresh assembler label id, linking
//! the assembler's `LabelData` back to the node. Returns `nullptr` when no
//! assembler is attached or the node allocation fails.
HLLabel* Compiler::newLabelNode() noexcept {
  Assembler* a = getAssembler();
  if (!a)
    return nullptr;

  uint32_t labelId = a->_newLabelId();
  LabelData* data = a->getLabelData(labelId);

  HLLabel* node = newNode<HLLabel>(labelId);
  if (!node)
    return nullptr;

  // A freshly created label must not be linked to any external data yet.
  ASMJIT_ASSERT(data->exId == 0);
  ASMJIT_ASSERT(data->exData == nullptr);

  data->exId = _exId;
  data->exData = node;
  return node;
}
//! Insert `node` immediately after `ref`. Does not move the cursor.
HLNode* Compiler::addNodeAfter(HLNode* node, HLNode* ref) noexcept {
  ASMJIT_ASSERT(node != nullptr);
  ASMJIT_ASSERT(node->_prev == nullptr);
  ASMJIT_ASSERT(node->_next == nullptr);
  ASMJIT_ASSERT(ref != nullptr);

  HLNode* successor = ref->_next;

  // Link `node` between `ref` and its old successor.
  node->_prev = ref;
  node->_next = successor;
  ref->_next = node;

  if (successor != nullptr)
    successor->_prev = node;
  else
    _lastNode = node;    // `ref` was the tail.

  return node;
}
//! Insert `node` right after `ref` without touching the cursor.
Node* BaseCompiler::addNodeAfter(Node* node, Node* ref) {
  ASMJIT_ASSERT(node != NULL);
  ASMJIT_ASSERT(node->_prev == NULL);
  ASMJIT_ASSERT(node->_next == NULL);
  ASMJIT_ASSERT(ref != NULL);

  Node* successor = ref->_next;

  node->_prev = ref;
  node->_next = successor;
  ref->_next = node;

  if (successor == NULL)
    _lastNode = node;          // `ref` was the tail.
  else
    successor->_prev = node;

  return node;
}
//! Set the register-allocation priority of `var`, clamped to [0, 100].
//! No-op for unregistered variables.
void X86Compiler::setPriority(Var& var, uint32_t priority) {
  if (var.getId() == kInvalidValue)
    return;

  X86CompilerVar* cv = _getVar(var.getId());
  ASMJIT_ASSERT(cv != NULL);

  // Clamp to the inclusive [0, 100] range before narrowing to uint8_t.
  if (priority > 100)
    priority = 100;
  cv->_priority = static_cast<uint8_t>(priority);
}
//! Emit a register-allocator hint (`hintId` / `hintValue`) for `var`.
//! No-op for unregistered variables.
void X86Compiler::_vhint(Var& var, uint32_t hintId, uint32_t hintValue) {
  if (var.getId() == kInvalidValue)
    return;

  X86CompilerVar* vd = _getVar(var.getId());
  ASMJIT_ASSERT(vd != NULL);

  // Hints are regular items appended to the instruction stream.
  X86CompilerHint* hint = Compiler_newItem<X86CompilerHint>(this, vd, hintId, hintValue);
  addItem(hint);
}
//! Insert `node` immediately before `ref`. Does not move the cursor.
HLNode* Compiler::addNodeBefore(HLNode* node, HLNode* ref) noexcept {
  ASMJIT_ASSERT(node != nullptr);
  ASMJIT_ASSERT(node->_prev == nullptr);
  ASMJIT_ASSERT(node->_next == nullptr);
  ASMJIT_ASSERT(ref != nullptr);

  HLNode* predecessor = ref->_prev;

  // Link `node` between `ref`'s predecessor and `ref`.
  node->_prev = predecessor;
  node->_next = ref;
  ref->_prev = node;

  if (predecessor != nullptr)
    predecessor->_next = node;
  else
    _firstNode = node;   // `ref` was the head.

  return node;
}
//! Insert `node` right before `ref` without touching the cursor.
Node* BaseCompiler::addNodeBefore(Node* node, Node* ref) {
  ASMJIT_ASSERT(node != NULL);
  ASMJIT_ASSERT(node->_prev == NULL);
  ASMJIT_ASSERT(node->_next == NULL);
  ASMJIT_ASSERT(ref != NULL);

  Node* predecessor = ref->_prev;

  node->_prev = predecessor;
  node->_next = ref;
  ref->_prev = node;

  if (predecessor == NULL)
    _firstNode = node;           // `ref` was the head.
  else
    predecessor->_next = node;

  return node;
}
//! Insert `node` immediately before `ref`. The cursor is unchanged.
CBNode* CodeBuilder::addBefore(CBNode* node, CBNode* ref) noexcept {
  ASMJIT_ASSERT(node != nullptr);
  ASMJIT_ASSERT(node->_prev == nullptr);
  ASMJIT_ASSERT(node->_next == nullptr);
  ASMJIT_ASSERT(ref != nullptr);

  CBNode* predecessor = ref->_prev;

  // Link `node` between `ref`'s predecessor and `ref`.
  node->_prev = predecessor;
  node->_next = ref;
  ref->_prev = node;

  if (predecessor != nullptr)
    predecessor->_next = node;
  else
    _firstNode = node;   // `ref` was the head.

  return node;
}
//! Insert `node` immediately after `ref`. The cursor is unchanged.
CBNode* CodeBuilder::addAfter(CBNode* node, CBNode* ref) noexcept {
  ASMJIT_ASSERT(node);
  ASMJIT_ASSERT(ref);
  ASMJIT_ASSERT(node->_prev == nullptr);
  ASMJIT_ASSERT(node->_next == nullptr);

  CBNode* successor = ref->_next;

  // Link `node` between `ref` and its old successor.
  node->_prev = ref;
  node->_next = successor;
  ref->_next = node;

  if (successor)
    successor->_prev = node;
  else
    _lastNode = node;    // `ref` was the tail.

  return node;
}
//! Initialize the architecture info from `type` / `subType`. Known types pull
//! their full signature from `archInfoTable`; unknown types fall back to
//! slot 0 but still record the requested type/sub-type.
ASMJIT_FAVOR_SIZE void ArchInfo::init(uint32_t type, uint32_t subType) noexcept {
  // Out-of-range architecture types fall back to table slot 0.
  uint32_t index = type < ASMJIT_ARRAY_SIZE(archInfoTable) ? type : uint32_t(0);

  // Make sure the `archInfoTable` array is correctly indexed.
  // (The assignment fills `_type` through the signature; the assert then
  // verifies the table entry's type matches its own index.)
  _signature = archInfoTable[index];
  ASMJIT_ASSERT(_type == index);

  // Even if the architecture is not known we setup its type and sub-type,
  // however, such architecture is not really useful.
  _type = type;
  _subType = subType;
}
//! End the currently open function: bind its exit label, emit its end item,
//! mark it finished, and clear the current-function pointer. Returns the
//! function that was ended.
X86CompilerFuncDecl* X86Compiler::endFunc() {
  X86CompilerFuncDecl* current = getFunc();
  ASMJIT_ASSERT(current != NULL);

  bind(current->_exitLabel);
  addItem(current->_end);
  current->setFuncFlag(kFuncFlagIsFinished);

  _func = NULL;
  return current;
}
//! Add a function to the stream: its node, entry, exit marker and end
//! sentinel. The cursor is left pointing after the entry node so subsequent
//! code is emitted inside the function body.
HLNode* Compiler::addFunc(HLFunc* func) noexcept {
  // Functions cannot nest.
  ASMJIT_ASSERT(_func == nullptr);
  _func = func;

  addNode(func);                   // Function node.
  addNode(func->getEntryNode());   // Function entry.

  HLNode* restorePoint = getCursor();
  addNode(func->getExitNode());    // Function exit / epilog marker.
  addNode(func->getEnd());         // Function end sentinel.
  setCursor(restorePoint);         // New code goes before the exit marker.

  return func;
}
//! Create a new virtual variable of `vType` named `name`. On failure `var`
//! is reset and an error code is returned; on success `var` is initialized
//! as a variable operand referring to the new `VarData`.
Error X86Compiler::_newVar(Var* var, uint32_t vType, const char* name) noexcept {
  ASMJIT_ASSERT(vType < kX86VarTypeCount);

  // Map the portable variable type to a target-specific one.
  vType = _targetVarMapping[vType];
  ASMJIT_ASSERT(vType != kInvalidVar);

  // The assertion won't be compiled in release build, however, we want to check
  // this anyway.
  if (vType == kInvalidVar) {
    static_cast<X86Var*>(var)->reset();
    return kErrorInvalidArgument;
  }

  const VarInfo& vInfo = _x86VarInfo[vType];

  VarData* vd = _newVd(vInfo, name);
  if (vd == nullptr) {
    static_cast<X86Var*>(var)->reset();
    return getLastError();
  }

  // Pack type/size/register-type/id into the operand in one shot.
  var->_init_packed_op_sz_w0_id(Operand::kTypeVar, vInfo.getSize(), vInfo.getRegType() << 8, vd->getId());
  var->_vreg.vType = vType;
  return kErrorOk;
}
//! Allocate `length` bytes (rounded up to the page size) of virtual memory in
//! the process `hProcess`. If `allocated` is non-NULL it receives the actual
//! size committed. Returns the base address or NULL on failure.
void* VMem::allocProcessMemory(HANDLE hProcess, size_t length, size_t* allocated, bool canExecute) {
  // VirtualAlloc rounds allocated size to page size automatically.
  size_t msize = IntUtil::roundUp(length, vm().pageSize);

  // Windows XP SP2 / Vista allow Data Execution Prevention (DEP).
  // FIX: `flProtect` is a DWORD (32-bit); the previous WORD (16-bit) would
  // silently truncate any protection flag above 0xFFFF (e.g. PAGE_GUARD,
  // PAGE_NOCACHE) and did not match the VirtualAllocEx signature.
  DWORD protect = canExecute ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
  LPVOID mbase = VirtualAllocEx(hProcess, NULL, msize, MEM_COMMIT | MEM_RESERVE, protect);

  if (mbase == NULL)
    return NULL;

  // The OS guarantees page alignment of the returned base address.
  ASMJIT_ASSERT(IntUtil::isAligned<size_t>(reinterpret_cast<size_t>(mbase), vm().alignment));

  if (allocated != NULL)
    *allocated = msize;
  return mbase;
}
//! Slow-path allocator: called by `alloc()` when the current block cannot
//! satisfy `size` bytes. Either reuses the next (fully unused) block in the
//! chain or allocates a new block and links it in. Returns NULL on failure.
void* Zone::_alloc(size_t size) noexcept {
  Block* curBlock = _block;
  // A new block is at least `_blockSize` bytes, but large requests get a
  // block of their own size.
  size_t blockSize = Utils::iMax<size_t>(_blockSize, size);

  // The `_alloc()` method can only be called if there is not enough space
  // in the current block, see `alloc()` implementation for more details.
  ASMJIT_ASSERT(curBlock == &Zone_zeroBlock || curBlock->getRemainingSize() < size);

  // If the `Zone` has been reset the current block doesn't have to be the
  // last one. Check if there is a block that can be used instead of allocating
  // a new one. If there is a `next` block it's completely unused, we don't have
  // to check for remaining bytes.
  Block* next = curBlock->next;
  if (next != nullptr && next->getBlockSize() >= size) {
    next->pos = next->data + size;
    _block = next;
    return static_cast<void*>(next->data);
  }

  // Prevent arithmetic overflow.
  if (blockSize > ~static_cast<size_t>(0) - sizeof(Block))
    return nullptr;

  // The header's flexible `data` pointer overlaps the payload, hence the
  // `- sizeof(void*)` adjustment.
  Block* newBlock = static_cast<Block*>(ASMJIT_ALLOC(sizeof(Block) - sizeof(void*) + blockSize));
  if (newBlock == nullptr)
    return nullptr;

  newBlock->pos = newBlock->data + size;
  newBlock->end = newBlock->data + blockSize;
  newBlock->prev = nullptr;
  newBlock->next = nullptr;

  if (curBlock != &Zone_zeroBlock) {
    newBlock->prev = curBlock;
    curBlock->next = newBlock;

    // Does only happen if there is a next block, but the requested memory
    // can't fit into it. In this case a new buffer is allocated and inserted
    // between the current block and the next one.
    if (next != nullptr) {
      newBlock->next = next;
      next->prev = newBlock;
    }
  }

  _block = newBlock;
  return static_cast<void*>(newBlock->data);
}