bool CapstoneTokenizer::tokenizeImmOperand(const cs_x86_op & op) { duint value = duint(op.imm); auto valueType = TokenType::Value; if(_cp.InGroup(CS_GRP_JUMP) || _cp.InGroup(CS_GRP_CALL) || _cp.IsLoop()) { valueType = TokenType::Address; } auto tokenValue = TokenValue(op.size, value); addToken(valueType, printValue(tokenValue, true, _maxModuleLength), tokenValue); return true; }
bool LinearPass::Analyse() { // Divide the work up between each thread // THREAD_WORK = (TOTAL / # THREADS) duint workAmount = m_DataSize / IdealThreadCount(); // Initialize thread vector auto threadBlocks = new std::vector<BasicBlock>[IdealThreadCount()]; concurrency::parallel_for(duint(0), IdealThreadCount(), [&](duint i) { duint threadWorkStart = m_VirtualStart + (workAmount * i); duint threadWorkStop = min((threadWorkStart + workAmount), m_VirtualEnd); // Allow a 256-byte variance of scanning because of // integer rounding errors and instruction overlap if(threadWorkStart > m_VirtualStart) { threadWorkStart = max((threadWorkStart - 256), m_VirtualStart); threadWorkStop = min((threadWorkStop + 256), m_VirtualEnd); } // Memory allocation optimization // TODO: Option to conserve memory threadBlocks[i].reserve(100000); // Execute AnalysisWorker(threadWorkStart, threadWorkStop, &threadBlocks[i]); }); // Clear old data and combine vectors m_MainBlocks.clear(); for(duint i = 0; i < IdealThreadCount(); i++) { std::move(threadBlocks[i].begin(), threadBlocks[i].end(), std::back_inserter(m_MainBlocks)); // Free old elements to conserve memory further BBlockArray().swap(threadBlocks[i]); } // Free memory ASAP delete[] threadBlocks; // Sort and remove duplicates std::sort(m_MainBlocks.begin(), m_MainBlocks.end()); m_MainBlocks.erase(std::unique(m_MainBlocks.begin(), m_MainBlocks.end()), m_MainBlocks.end()); // Run overlap analysis sub-pass AnalyseOverlaps(); return true; }
// Assign a numeric value to this variant, converting it to the variant's
// current numeric type. Returns true when the stored value actually changed,
// false when the new value equals the old one (or the type is non-numeric).
bool set(Type numValue)
{
    // Only the numeric variants accept assignment from a number.
    DENG2_ASSERT(type == Int || type == UInt || type == Float);

    bool changed = false;
    if(type == Int)
    {
        dint const converted = dint(numValue);
        if(value.int32 != converted)
        {
            value.int32 = converted;
            changed = true;
        }
    }
    else if(type == UInt)
    {
        duint const converted = duint(numValue);
        if(value.uint32 != converted)
        {
            value.uint32 = converted;
            changed = true;
        }
    }
    else if(type == Float)
    {
        dfloat const converted = dfloat(numValue);
        // Tolerance-based comparison; exact float equality is unreliable.
        if(!fequal(value.float32, converted))
        {
            value.float32 = converted;
            changed = true;
        }
    }
    return changed;
}
bool FunctionPass::Analyse() { // THREAD_WORK = ceil(TOTAL / # THREADS) duint workAmount = (m_MainBlocks.size() + (IdealThreadCount() - 1)) / IdealThreadCount(); // Initialize thread vector auto threadFunctions = new std::vector<FunctionDef>[IdealThreadCount()]; concurrency::parallel_for(duint(0), IdealThreadCount(), [&](duint i) { // Memory allocation optimization // TODO: Option to conserve memory threadFunctions[i].reserve(30000); // Execute duint threadWorkStart = (workAmount * i); duint threadWorkStop = min((threadWorkStart + workAmount), m_MainBlocks.size()); AnalysisWorker(threadWorkStart, threadWorkStop, &threadFunctions[i]); }); // Merge thread vectors into single local std::vector<FunctionDef> funcs; for(duint i = 0; i < IdealThreadCount(); i++) std::move(threadFunctions[i].begin(), threadFunctions[i].end(), std::back_inserter(funcs)); // Sort and remove duplicates std::sort(funcs.begin(), funcs.end()); funcs.erase(std::unique(funcs.begin(), funcs.end()), funcs.end()); dprintf(QT_TRANSLATE_NOOP("DBG", "%u functions\n"), DWORD(funcs.size())); FunctionDelRange(m_VirtualStart, m_VirtualEnd - 1, false); for(auto & func : funcs) { FunctionAdd(func.VirtualStart, func.VirtualEnd, false, func.InstrCount); } GuiUpdateAllViews(); delete[] threadFunctions; return true; }
// Emit the token sequence for a memory operand:
//   <size> ptr <segment>:[base + index*scale +/- displacement]
// Returns false only when the operand size has no known size name.
bool CapstoneTokenizer::tokenizeMemOperand(const cs_x86_op & op)
{
    //memory size
    const char* sizeText = _cp.MemSizeName(op.size);
    if(!sizeText)
        return false;
    addToken(TokenType::MemorySize, QString(sizeText) + " ptr");
    addToken(TokenType::Space, " ");

    //memory segment
    const auto & mem = op.mem;
    const char* segmentText = _cp.RegName(x86_reg(mem.segment));
    if(mem.segment == X86_REG_INVALID) //segment not set
    {
        // No explicit segment override: x86 defaults stack-pointer/frame-pointer
        // based accesses to SS, everything else to DS.
        switch(x86_reg(mem.base))
        {
        case X86_REG_ESP:
        case X86_REG_RSP:
        case X86_REG_EBP:
        case X86_REG_RBP:
            segmentText = "ss";
            break;
        default:
            segmentText = "ds";
            break;
        }
    }
    addToken(TokenType::MemorySegment, segmentText);
    addToken(TokenType::Uncategorized, ":");

    //memory opening bracket
    // Stack-relative accesses get a distinct bracket token type so the GUI can
    // highlight them differently. Note: the case labels deliberately fall
    // through to the assignment, then into the (empty) default.
    auto bracketsType = TokenType::MemoryBrackets;
    switch(x86_reg(mem.base))
    {
    case X86_REG_ESP:
    case X86_REG_RSP:
    case X86_REG_EBP:
    case X86_REG_RBP:
        bracketsType = TokenType::MemoryStackBrackets;
    default:
        break;
    }
    addToken(bracketsType, "[");

    //stuff inside the brackets
    if(mem.base == X86_REG_RIP) //rip-relative (#replacement)
    {
        // RIP-relative: the effective address is relative to the *next*
        // instruction, hence the + _cp.Size() term.
        duint addr = _cp.Address() + duint(mem.disp) + _cp.Size();
        TokenValue value = TokenValue(op.size, addr);
        // TODO
        auto displacementType = DbgMemIsValidReadPtr(addr) ? TokenType::Address : TokenType::Value;
        addToken(displacementType, printValue(value, false, _maxModuleLength), value);
    }
    else //#base + #index * #scale + #displacement
    {
        // prependPlus tracks whether a previous term was emitted, so '+'/'-'
        // operators are only inserted between terms.
        bool prependPlus = false;
        if(mem.base != X86_REG_INVALID) //base register
        {
            addToken(TokenType::MemoryBaseRegister, _cp.RegName(x86_reg(mem.base)));
            prependPlus = true;
        }
        if(mem.index != X86_REG_INVALID) //index register
        {
            if(prependPlus)
                addMemoryOperator('+');
            addToken(TokenType::MemoryIndexRegister, _cp.RegName(x86_reg(mem.index)));
            if(mem.scale > 1)
            {
                // Scale of 1 is implicit and not printed.
                addMemoryOperator('*');
                addToken(TokenType::MemoryScale, QString().sprintf("%d", mem.scale));
            }
            prependPlus = true;
        }
        if(mem.disp)
        {
            char operatorText = '+';
            TokenValue value(op.size, duint(mem.disp));
            // Displacements that point at readable memory are tagged as
            // addresses (clickable), others as plain values.
            auto displacementType = DbgMemIsValidReadPtr(duint(mem.disp)) ? TokenType::Address : TokenType::Value;
            QString valueText;
            if(mem.disp < 0)
            {
                // Negative displacement is printed as "- <abs(disp)>".
                operatorText = '-';
                valueText = printValue(TokenValue(op.size, duint(mem.disp * -1)), false, _maxModuleLength);
            }
            else
                valueText = printValue(value, false, _maxModuleLength);
            if(prependPlus)
                addMemoryOperator(operatorText);
            addToken(displacementType, valueText, value);
        }
    }

    //closing bracket
    addToken(bracketsType, "]");
    return true;
}
// Populate a BASIC_INSTRUCTION_INFO summary (text, size, branch/call flags,
// and immediate/memory operand data) from a decoded Capstone instruction.
void fillbasicinfo(Capstone* cp, BASIC_INSTRUCTION_INFO* basicinfo)
{
    // Start from a zeroed structure.
    memset(basicinfo, 0, sizeof(BASIC_INSTRUCTION_INFO));

    // Disassembled text and encoded length.
    strcpy_s(basicinfo->instruction, cp->InstructionText().c_str());
    basicinfo->size = cp->Size();

    // Branch/call classification: calls are both branch and call,
    // jumps and loop instructions are branch only.
    if(cp->InGroup(CS_GRP_CALL))
    {
        basicinfo->branch = true;
        basicinfo->call = true;
    }
    else if(cp->InGroup(CS_GRP_JUMP) || cp->IsLoop())
    {
        basicinfo->branch = true;
    }

    // Walk the operands, recording immediates and memory references.
    for(int opIndex = 0; opIndex < cp->x86().op_count; opIndex++)
    {
        const cs_x86_op & op = cp->x86().operands[opIndex];
        if(op.type == X86_OP_IMM)
        {
            if(basicinfo->branch)
            {
                // Branch immediate = destination address.
                basicinfo->type |= TYPE_ADDR;
                basicinfo->addr = duint(op.imm);
                basicinfo->value.value = duint(op.imm);
            }
            else
            {
                // Plain immediate value.
                basicinfo->type |= TYPE_VALUE;
                basicinfo->value.size = VALUE_SIZE(op.size);
                basicinfo->value.value = duint(op.imm);
            }
        }
        else if(op.type == X86_OP_MEM)
        {
            const x86_op_mem & mem = op.mem;
            strcpy_s(basicinfo->memory.mnemonic, cp->OperandText(opIndex).c_str());
            basicinfo->memory.size = MEMORY_SIZE(op.size);
            if(mem.base == X86_REG_RIP) //rip-relative
            {
                // Effective address is relative to the next instruction.
                basicinfo->memory.value = ULONG_PTR(cp->GetInstr()->address + mem.disp + basicinfo->size);
                basicinfo->type |= TYPE_MEMORY;
            }
            else if(mem.disp)
            {
                basicinfo->type |= TYPE_MEMORY;
                basicinfo->memory.value = ULONG_PTR(mem.disp);
            }
        }
    }
}
// Record the blend equation in the state's property set; returns *this
// so setters can be chained.
GLState &GLState::setBlendOp(gl::BlendOp op)
{
    auto const propValue = duint(op);
    d->props.set(BlendOp, propValue);
    return *this;
}
// Record a source/destination blend factor pair (given as one BlendFunc
// pair) in the state's property set; returns *this for chaining.
GLState &GLState::setBlendFunc(gl::BlendFunc func)
{
    auto const srcFactor = duint(func.first);
    auto const destFactor = duint(func.second);
    d->props.set(BlendFuncSrc, srcFactor);
    d->props.set(BlendFuncDest, destFactor);
    return *this;
}
// Record source and destination blend factors (given separately) in the
// state's property set; returns *this for chaining.
GLState &GLState::setBlendFunc(gl::Blend src, gl::Blend dest)
{
    auto const srcFactor = duint(src);
    auto const destFactor = duint(dest);
    d->props.set(BlendFuncSrc, srcFactor);
    d->props.set(BlendFuncDest, destFactor);
    return *this;
}
// Record the depth-test comparison function in the state's property set;
// returns *this for chaining.
GLState &GLState::setDepthFunc(gl::Comparison func)
{
    auto const propValue = duint(func);
    d->props.set(DepthFunc, propValue);
    return *this;
}
void LinearPass::AnalyseOverlaps() { // Goal of this function: // // Remove all overlapping basic blocks because of threads // not ending or starting at absolutely defined points. // (Example: one thread starts in the middle of an instruction) // // This also checks for basic block targets jumping into // the middle of other basic blocks. // // THREAD_WORK = ceil(TOTAL / # THREADS) duint workTotal = m_MainBlocks.size(); duint workAmount = (workTotal + (IdealThreadCount() - 1)) / IdealThreadCount(); // Initialize thread vectors auto threadInserts = new std::vector<BasicBlock>[IdealThreadCount()]; concurrency::parallel_for(duint(0), IdealThreadCount(), [&](duint i) { duint threadWorkStart = (workAmount * i); duint threadWorkStop = min((threadWorkStart + workAmount), workTotal); // Again, allow an overlap of +/- 1 entry if(threadWorkStart > 0) { threadWorkStart = max((threadWorkStart - 1), 0); threadWorkStop = min((threadWorkStop + 1), workTotal); } // Execute AnalysisOverlapWorker(threadWorkStart, threadWorkStop, &threadInserts[i]); }); // THREAD VECTOR std::vector<BasicBlock> overlapInserts; { for(duint i = 0; i < IdealThreadCount(); i++) std::move(threadInserts[i].begin(), threadInserts[i].end(), std::back_inserter(overlapInserts)); // Sort and remove duplicates std::sort(overlapInserts.begin(), overlapInserts.end()); overlapInserts.erase(std::unique(overlapInserts.begin(), overlapInserts.end()), overlapInserts.end()); delete[] threadInserts; } // GLOBAL VECTOR { // Erase blocks marked for deletion m_MainBlocks.erase(std::remove_if(m_MainBlocks.begin(), m_MainBlocks.end(), [](BasicBlock & Elem) { return Elem.GetFlag(BASIC_BLOCK_FLAG_DELETE); })); // Insert std::move(overlapInserts.begin(), overlapInserts.end(), std::back_inserter(m_MainBlocks)); // Final sort std::sort(m_MainBlocks.begin(), m_MainBlocks.end()); } }