/* Returns true if the target's address term is known to hold a return address. */
bool isReturnAddress(const JumpTarget &target, const Dataflow &dataflow) {
    return target.address() && isReturnAddress(target.address(), dataflow);
}

/* Returns true if the jump transfers control to a return address, i.e. implements a function return. */
bool isReturn(const Jump *jump, const Dataflow &dataflow) {
    assert(jump != nullptr);

    return isReturnAddress(jump->thenTarget(), dataflow) || isReturnAddress(jump->elseTarget(), dataflow);
}
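/*
 * The Term-level isReturnAddress() overload that both helpers above bottom
 * out in is not shown in this excerpt. A minimal sketch of what it
 * presumably looks like, assuming JumpTarget::address() yields the term
 * computing the target address (or nullptr) and Value tracks a
 * return-address flag:
 */
#if 0
bool isReturnAddress(const Term *term, const Dataflow &dataflow) {
    return dataflow.getValue(term)->isReturnAddress();
}
#endif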
Value *DataflowAnalyzer::computeValue(const Term *term, const MemoryLocation &memoryLocation, const ReachingDefinitions &definitions) {
    assert(term != nullptr);
    assert(term->isRead());
    assert(memoryLocation || definitions.empty());

    auto value = dataflow().getValue(term);

    if (definitions.empty()) {
        return value;
    }

    auto byteOrder = architecture()->getByteOrder(memoryLocation.domain());

    /*
     * Merge abstract values.
     */
    auto abstractValue = value->abstractValue();

    foreach (const auto &chunk, definitions.chunks()) {
        assert(memoryLocation.covers(chunk.location()));

        /*
         * Mask of bits inside abstractValue which are covered by chunk's location.
         */
        auto mask = bitMask<ConstantValue>(chunk.location().size());
        if (byteOrder == ByteOrder::LittleEndian) {
            mask = bitShift(mask, chunk.location().addr() - memoryLocation.addr());
        } else {
            mask = bitShift(mask, memoryLocation.endAddr() - chunk.location().endAddr());
        }

        foreach (auto definition, chunk.definitions()) {
            auto definitionLocation = dataflow().getMemoryLocation(definition);
            assert(definitionLocation.covers(chunk.location()));

            auto definitionValue = dataflow().getValue(definition);
            auto definitionAbstractValue = definitionValue->abstractValue();

            /*
             * Shift definition's abstract value to match term's location.
             */
            if (byteOrder == ByteOrder::LittleEndian) {
                definitionAbstractValue.shift(definitionLocation.addr() - memoryLocation.addr());
            } else {
                definitionAbstractValue.shift(memoryLocation.endAddr() - definitionLocation.endAddr());
            }

            /* Project the value onto the chunk's location. */
            definitionAbstractValue.project(mask);

            /* Merge into the term's value. */
            abstractValue.merge(definitionAbstractValue);
        }
    }

    value->setAbstractValue(abstractValue.resize(term->size()));

    /*
     * Merge stack offset and product flags.
     *
     * Heuristic: merge information only from terms that define the lower bits of the term's value.
     */
    const std::vector<const Term *> *lowerBitsDefinitions = nullptr;

    if (byteOrder == ByteOrder::LittleEndian) {
        if (definitions.chunks().front().location().addr() == memoryLocation.addr()) {
            lowerBitsDefinitions = &definitions.chunks().front().definitions();
        }
    } else {
        if (definitions.chunks().back().location().endAddr() == memoryLocation.endAddr()) {
            lowerBitsDefinitions = &definitions.chunks().back().definitions();
        }
    }

    if (lowerBitsDefinitions) {
        foreach (auto definition, *lowerBitsDefinitions) {
            auto definitionValue = dataflow().getValue(definition);

            if (definitionValue->isNotStackOffset()) {
                value->makeNotStackOffset();
            } else if (definitionValue->isStackOffset()) {
                value->makeStackOffset(definitionValue->stackOffset());
            }

            if (definitionValue->isNotProduct()) {
                value->makeNotProduct();
            } else if (definitionValue->isProduct()) {
                value->makeProduct();
            }
        }
    }

    /*
     * Merge return address flag, but only when the first chunk covers the
     * term's whole location.
     */
    if (definitions.chunks().front().location() == memoryLocation) {
        foreach (auto definition, definitions.chunks().front().definitions()) {
            auto definitionValue = dataflow().getValue(definition);

            if (definitionValue->isNotReturnAddress()) {
                value->makeNotReturnAddress();
            } else if (definitionValue->isReturnAddress()) {
                value->makeReturnAddress();
            }
        }
    }

    return value;
}
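/*
 * A worked illustration of the mask/shift merge above: a standalone sketch
 * with made-up values that models each definition as a fully known constant
 * (the real AbstractValue merge also tracks which bits are known). Offsets
 * are taken to be in bits, as the direct use of address differences as
 * shift amounts suggests. For a 16-bit little-endian term covered by two
 * one-byte chunks, the low chunk gets mask 0x00ff and the high chunk mask
 * 0xff00; in big-endian mode the offset would instead be
 * memoryLocation.endAddr() - chunk.location().endAddr(), so the first
 * chunk would land in the high bits.
 */
#if 0
#include <cassert>
#include <cstdint>

int main() {
    struct Chunk {
        std::uint64_t offset;   // chunk.location().addr() - memoryLocation.addr(), in bits
        std::uint64_t size;     // chunk.location().size(), in bits
        std::uint64_t value;    // the definition's (fully known) abstract value
    };
    const Chunk chunks[] = {
        {0, 8, 0x34},           // defines the low byte of the term
        {8, 8, 0x12},           // defines the high byte of the term
    };

    std::uint64_t merged = 0;
    for (const Chunk &chunk : chunks) {
        // mask = bitMask(size), shifted to the chunk's position inside the term.
        std::uint64_t mask = ((std::uint64_t(1) << chunk.size) - 1) << chunk.offset;
        // Shift the definition's value into the term's coordinate system,
        // project it onto the mask, and merge.
        merged |= (chunk.value << chunk.offset) & mask;
    }
    assert(merged == 0x1234);   // low byte 0x34, high byte 0x12
}
#endif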