Example #1
void taint_pointer(
        FastShad *shad_dest, uint64_t dest,
        FastShad *shad_ptr, uint64_t ptr, uint64_t ptr_size,
        FastShad *shad_src, uint64_t src, uint64_t size) {
    taint_log("ptr: %lx[%lx+%lx] <- %lx[%lx] @ %lx[%lx+%lx]\n",
            (uint64_t)shad_dest, dest, size,
            (uint64_t)shad_src, src, (uint64_t)shad_ptr, ptr, ptr_size);

    if (unlikely(dest + size > shad_dest->get_size())) {
        taint_log("  Ignoring IO RW\n");
        return;
    } else if (unlikely(src + size > shad_src->get_size())) {
        taint_log("  Source IO.\n");
        src = ones; // ignore source.
    }

    TaintData td = mixed_labels(shad_ptr, ptr, ptr_size);
    #ifndef CONFIG_INT_LABEL
    if (td.ls) td.tcn++;
    #endif
    if (src == ones) {
        bulk_set(shad_dest, dest, size, td);
    } else {
        unsigned i;
        for (i = 0; i < size; i++) {
            shad_dest->set_full(dest + i,
                    TaintData::copy_union(td, shad_src->query_full(src + i)));
        }
    }
}
Example #2
void taint_host_memcpy(
        uint64_t env_ptr, uint64_t dest, uint64_t src,
        FastShad *greg, FastShad *gspec,
        uint64_t size, uint64_t labels_per_reg) {
    int64_t dest_offset = dest - env_ptr, src_offset = src - env_ptr;
    if (dest_offset < 0 || (size_t)dest_offset >= sizeof(CPUState) || 
            src_offset < 0 || (size_t)src_offset >= sizeof(CPUState)) {
        taint_log("hostmemcpy: irrelevant\n");
        return;
    }

    FastShad *shad_dest = NULL, *shad_src = NULL;
    uint64_t addr_dest = 0, addr_src = 0;

    find_offset(greg, gspec, (uint64_t)dest_offset, labels_per_reg,
            &shad_dest, &addr_dest);
    find_offset(greg, gspec, (uint64_t)src_offset, labels_per_reg,
            &shad_src, &addr_src);

#ifdef TAINTDEBUG
    taint_log("hostmemcpy: %lx[%lx+%lx] <- %lx[%lx] (offsets %lx <- %lx) (",
            (uint64_t)shad_dest, dest, size, (uint64_t)shad_src, src,
            dest_offset, src_offset);
    unsigned i;
    for (i = 0; i < size; i++) {
        taint_log("%lx, ", (uint64_t)shad_src->query(src + i));
    }
    taint_log(")\n");
#endif
    FastShad::copy(shad_dest, addr_dest, shad_src, addr_src, size);
}
Example #3
void taint_host_memcpy(
        uint64_t env_ptr, uint64_t dest, uint64_t src,
        FastShad *greg, FastShad *gspec,
        uint64_t size, uint64_t labels_per_reg) {
    int64_t dest_offset = dest - env_ptr, src_offset = src - env_ptr;
    if (dest_offset < 0 || (size_t)dest_offset >= sizeof(CPUArchState) || 
            src_offset < 0 || (size_t)src_offset >= sizeof(CPUArchState)) {
        taint_log("hostmemcpy: irrelevant\n");
        return;
    }

    FastShad *shad_dest = NULL, *shad_src = NULL;
    uint64_t addr_dest = 0, addr_src = 0;

    find_offset(greg, gspec, (uint64_t)dest_offset, labels_per_reg,
            &shad_dest, &addr_dest);
    find_offset(greg, gspec, (uint64_t)src_offset, labels_per_reg,
            &shad_src, &addr_src);

    taint_log("hostmemcpy: %s[%lx+%lx] <- %s[%lx] (offsets %lx <- %lx) ",
            shad_dest->name(), dest, size, shad_src->name(), src,
            dest_offset, src_offset);
    taint_log_labels(shad_src, src, size);
    FastShad::copy(shad_dest, addr_dest, shad_src, addr_src, size);
}
Example #4
// Model for tainted pointer is to mix all the labels from the pointer and then
// union that mix with each byte of the actual copied data. So if the pointer
// is labeled [1], [2], [3], [4], and the bytes are labeled [5], [6], [7], [8],
// we get [12345], [12346], [12347], [12348] as output taint of the load/store.
void taint_pointer(
        FastShad *shad_dest, uint64_t dest,
        FastShad *shad_ptr, uint64_t ptr, uint64_t ptr_size,
        FastShad *shad_src, uint64_t src, uint64_t size) {
    taint_log("ptr: %s[%lx+%lx] <- %s[%lx] @ %s[%lx+%lx]\n",
            shad_dest->name(), dest, size,
            shad_src->name(), src, shad_ptr->name(), ptr, ptr_size);

    if (unlikely(dest + size > shad_dest->get_size())) {
        taint_log("  Ignoring IO RW\n");
        return;
    } else if (unlikely(src + size > shad_src->get_size())) {
        taint_log("  Source IO.\n");
        src = ones; // ignore source.
    }

    // this is [1234] in our example
    TaintData ptr_td = mixed_labels(shad_ptr, ptr, ptr_size, false);
    if (src == ones) {
        bulk_set(shad_dest, dest, size, ptr_td);
    } else {
        for (unsigned i = 0; i < size; i++) {
            TaintData byte_td = shad_src->query_full(src + i);
            TaintData dest_td = TaintData::make_union(ptr_td, byte_td, false);

            // Unions usually destroy controlled bits. Tainted pointer is
            // a special case.
            dest_td.cb_mask = byte_td.cb_mask;
            shad_dest->set_full(dest + i, dest_td);
        }
    }
}
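
A sketch of why the cb_mask override above matters, with invented mask values: a plain union conservatively drops controlled-bits information, but for a tainted-pointer dereference the copied byte is still exactly the attacker's data.

// Sketch (invented mask values): the pointer mix carries no controlled
// bits, while the loaded byte is fully attacker-controlled.
uint64_t byte_cb_mask = 0xFF;  // from shad_src->query_full(src + i)
uint64_t dest_cb_mask = 0x00;  // what a conservative union would leave
dest_cb_mask = byte_cb_mask;   // the override keeps the byte's 0xFF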
Example #5
// This should only be called on loads/stores from CPUArchState.
void taint_host_copy(
        uint64_t env_ptr, uint64_t addr,
        FastShad *llv, uint64_t llv_offset,
        FastShad *greg, FastShad *gspec,
        uint64_t size, uint64_t labels_per_reg, bool is_store) {
    int64_t offset = addr - env_ptr;
    if (is_irrelevant(offset)) {
        // Irrelevant
        taint_log("hostcopy: irrelevant\n");
        return;
    }

    FastShad *state_shad = NULL;
    uint64_t state_addr = 0;

    find_offset(greg, gspec, (uint64_t)offset, labels_per_reg,
            &state_shad, &state_addr);

    FastShad *shad_src = is_store ? llv : state_shad;
    uint64_t src = is_store ? llv_offset : state_addr;
    FastShad *shad_dest = is_store ? state_shad : llv;
    uint64_t dest = is_store ? state_addr : llv_offset;
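    // Net effect: is_store => state_shad[state_addr] <- llv[llv_offset]
    // (a guest-state write); otherwise llv[llv_offset] <- state_shad[state_addr].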

    //taint_log("taint_host_copy\n");
    //taint_log("\tenv: %lx, addr: %lx, llv: %lx, offset: %lx\n", env_ptr, addr, llv_ptr, llv_offset);
    //taint_log("\tgreg: %lx, gspec: %lx, size: %lx, is_store: %u\n", greg_ptr, gspec_ptr, size, is_store);
    taint_log("hostcopy: %s[%lx+%lx] <- %s[%lx] (offset %lx) ",
            shad_dest->name(), dest, size, shad_src->name(), src, offset);
    taint_log_labels(shad_src, src, size);
    FastShad::copy(shad_dest, dest, shad_src, src, size);
}
Example #6
void taint_delete(FastShad *shad, uint64_t dest, uint64_t size) {
    taint_log("remove: %lx[%lx+%lx]\n", (uint64_t)shad, dest, size);
    if (unlikely(dest >= shad->get_size())) {
        taint_log("Ignoring IO RW\n");
        return;
    }
    shad->remove(dest, size);
}
Example #7
void taint_parallel_compute(
        FastShad *shad,
        uint64_t dest, uint64_t ignored,
        uint64_t src1, uint64_t src2, uint64_t src_size,
        llvm::Instruction *I) {
    uint64_t shad_size = shad->get_size();
    if (unlikely(dest >= shad_size || src1 >= shad_size || src2 >= shad_size)) {
        taint_log("  Ignoring IO RW\n");
        return;
    }

    taint_log("pcompute: %s[%lx+%lx] <- %lx + %lx\n",
            shad->name(), dest, src_size, src1, src2);
    uint64_t i;
    for (i = 0; i < src_size; ++i) {
        TaintData td = TaintData::make_union(
                shad->query_full(src1 + i),
                shad->query_full(src2 + i), true);
        shad->set_full(dest + i, td);
    }

    // Unlike mixed computes, parallel computes are guaranteed to be bitwise.
    // This means we can honestly compute CB masks; in fact we have to because
    // of the way e.g. the deposit TCG op is lifted to LLVM.
    CBMasks cb_mask_1 = compile_cb_masks(shad, src1, src_size);
    CBMasks cb_mask_2 = compile_cb_masks(shad, src2, src_size);
    CBMasks cb_mask_out = {0};
    if (I && I->getOpcode() == llvm::Instruction::Or) {
        cb_mask_out.one_mask = cb_mask_1.one_mask | cb_mask_2.one_mask;
        cb_mask_out.zero_mask = cb_mask_1.zero_mask & cb_mask_2.zero_mask;
        // Anything that's a literal zero in one operand will not affect
        // the other operand, so those bits are still controllable.
        cb_mask_out.cb_mask =
            (cb_mask_1.zero_mask & cb_mask_2.cb_mask) |
            (cb_mask_2.zero_mask & cb_mask_1.cb_mask);
    } else if (I && I->getOpcode() == llvm::Instruction::And) {
        cb_mask_out.one_mask = cb_mask_1.one_mask & cb_mask_2.one_mask;
        cb_mask_out.zero_mask = cb_mask_1.zero_mask | cb_mask_2.zero_mask;
        // Anything that's a literal one in one operand will not affect
        // the other operand, so those bits are still controllable.
        cb_mask_out.cb_mask =
            (cb_mask_1.one_mask & cb_mask_2.cb_mask) |
            (cb_mask_2.one_mask & cb_mask_1.cb_mask);
    }
    taint_log("pcompute_cb: %#lx + %#lx = %lx ",
            cb_mask_1.cb_mask, cb_mask_2.cb_mask, cb_mask_out.cb_mask);
    taint_log_labels(shad, dest, src_size);
    write_cb_masks(shad, dest, src_size, cb_mask_out);
}
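
As a sanity check of the Or rule above, a standalone sketch with invented mask values:

#include <assert.h>
#include <stdint.h>

int main(void) {
    // Operand 1: low nibble controlled, high nibble known zero.
    uint64_t cb1 = 0x0F, zero1 = 0xF0, one1 = 0x00;
    // Operand 2: bits 4-5 controlled, bits 0-1 known one.
    uint64_t cb2 = 0x30, zero2 = 0x00, one2 = 0x03;

    // The Or case from taint_parallel_compute above:
    uint64_t one_out  = one1 | one2;                   // 0x03
    uint64_t zero_out = zero1 & zero2;                 // 0x00
    uint64_t cb_out   = (zero1 & cb2) | (zero2 & cb1); // 0x30
    // Only bits OR'd against the other operand's literal zeros survive.
    assert(one_out == 0x03 && zero_out == 0x00 && cb_out == 0x30);
    return 0;
}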
Example #8
uint64_t taint_memlog_pop(taint2_memlog *taint_memlog) {
    uint64_t result = taint_memlog->ring[taint_memlog->idx];
    taint_memlog->idx = (taint_memlog->idx + TAINT2_MEMLOG_SIZE - 1) % TAINT2_MEMLOG_SIZE;

    taint_log("memlog_pop: %lx\n", result);
    return result;
}
Example #9
// Takes a (~0UL, ~0UL)-terminated list of (value, selector) pairs.
void taint_select(
        FastShad *shad,
        uint64_t dest, uint64_t size, uint64_t selector,
        ...) {
    va_list argp;
    uint64_t src, srcsel;

    va_start(argp, selector);
    src = va_arg(argp, uint64_t);
    srcsel = va_arg(argp, uint64_t);
    while (!(src == ones && srcsel == ones)) {
        if (srcsel == selector) { // bingo!
            if (src != ones) { // otherwise it's a constant.
                taint_log("slct\n");
                FastShad::copy(shad, dest, shad, src, size);
            }
            return;
        }

        src = va_arg(argp, uint64_t);
        srcsel = va_arg(argp, uint64_t);
    } 

    tassert(false && "Couldn't find selected argument!!");
}
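
For reference, a hypothetical call site (shad, dest, src_a, src_b invented) following the convention documented above; dest takes src_b's shadow because selector 1 matches, and the trailing (~0UL, ~0UL) pair terminates the list:

taint_select(shad, dest, /*size=*/4, /*selector=*/1UL,
        src_a, 0UL,   // (value, selector) pair; not selected
        src_b, 1UL,   // selected: shadow copied from src_b
        ~0UL, ~0UL);  // terminator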
Example #10
// Taint operations
void taint_copy(
        FastShad *shad_dest, uint64_t dest,
        FastShad *shad_src, uint64_t src,
        uint64_t size, llvm::Instruction *I) {
    if (unlikely(src >= shad_src->get_size() || dest >= shad_dest->get_size())) {
        taint_log("  Ignoring IO RW\n");
        return;
    }

    taint_log("copy: %s[%lx+%lx] <- %s[%lx] ",
            shad_dest->name(), dest, size, shad_src->name(), src);
    taint_log_labels(shad_src, src, size);

    FastShad::copy(shad_dest, dest, shad_src, src, size);

    if (I) update_cb(shad_dest, dest, shad_src, src, size, I);
}
Example #11
void taint_host_delete(
        uint64_t env_ptr, uint64_t dest_addr,
        FastShad *greg, FastShad *gspec,
        uint64_t size, uint64_t labels_per_reg) {
    int64_t offset = dest_addr - env_ptr;

    if (offset < 0 || (size_t)offset >= sizeof(CPUState)) {
        taint_log("hostdel: irrelevant\n");
        return;
    }
    FastShad *shad = NULL;
    uint64_t dest = 0;

    find_offset(greg, gspec, offset, labels_per_reg, &shad, &dest);

    taint_log("hostdel: %lx[%lx+%lx]", (uint64_t)shad, dest, size);

    shad->remove(dest, size);
}
Example #12
void taint_mix_compute(
        FastShad *shad,
        uint64_t dest, uint64_t dest_size,
        uint64_t src1, uint64_t src2, uint64_t src_size) {
    taint_log("mcompute: %lx[%lx+%lx] <- %lx + %lx\n",
            (uint64_t)shad, dest, dest_size, src1, src2);
    TaintData td = TaintData::comp_union(
            mixed_labels(shad, src1, src_size),
            mixed_labels(shad, src2, src_size));
    bulk_set(shad, dest, dest_size, td);
}
Example #13
void taint_mix(
        FastShad *shad,
        uint64_t dest, uint64_t dest_size,
        uint64_t src, uint64_t src_size) {
    taint_log("mix: %lx[%lx+%lx] <- %lx+%lx\n",
            (uint64_t)shad, dest, dest_size, src, src_size);
    TaintData td = mixed_labels(shad, src, src_size);
    #ifndef CONFIG_INT_LABEL
    if (td.ls) td.tcn++;
    #endif
    bulk_set(shad, dest, dest_size, td);
}
Example #14
void taint_mix(
        FastShad *shad,
        uint64_t dest, uint64_t dest_size,
        uint64_t src, uint64_t src_size,
        llvm::Instruction *I) {
    taint_log("mix: %s[%lx+%lx] <- %lx+%lx\n",
            shad->name(), dest, dest_size, src, src_size);
    TaintData td = mixed_labels(shad, src, src_size, true);
    bulk_set(shad, dest, dest_size, td);

    if (I) update_cb(shad, dest, shad, src, dest_size, I);
}
Example #15
// Taint operations
void taint_copy(
        FastShad *shad_dest, uint64_t dest,
        FastShad *shad_src, uint64_t src,
        uint64_t size) {
    taint_log("copy: %lx[%lx+%lx] <- %lx[%lx] (",
            (uint64_t)shad_dest, dest, size, (uint64_t)shad_src, src);
#ifdef TAINTDEBUG
    unsigned i;
    for (i = 0; i < size; i++) {
        taint_log("%lx, ", (uint64_t)shad_src->query(src + i));
    }
    taint_log(")\n");
#endif

    if (dest + size >= shad_dest->get_size() || src + size >= shad_src->get_size()) {
        taint_log("Ignoring IO\n");
        return;
    }

    FastShad::copy(shad_dest, dest, shad_src, src, size);
}
Example #16
void taint_mix_compute(
        FastShad *shad,
        uint64_t dest, uint64_t dest_size,
        uint64_t src1, uint64_t src2, uint64_t src_size,
        llvm::Instruction *ignored) {
    taint_log("mcompute: %s[%lx+%lx] <- %lx + %lx\n",
            shad->name(), dest, dest_size, src1, src2);
    TaintData td = TaintData::make_union(
            mixed_labels(shad, src1, src_size, false),
            mixed_labels(shad, src2, src_size, false),
            true);
    bulk_set(shad, dest, dest_size, td);
}
Example #17
// Taint operations
void taint_copy(
        FastShad *shad_dest, uint64_t dest,
        FastShad *shad_src, uint64_t src,
        uint64_t size, llvm::Instruction *I) {
    taint_log("copy: %s[%lx+%lx] <- %s[%lx] (",
            shad_dest->name(), dest, size, shad_src->name(), src);
#ifdef TAINTDEBUG
    unsigned i;
    for (i = 0; i < size; i++) {
        taint_log("%lx, ", (uint64_t)shad_src->query(src + i));
    }
    taint_log(")\n");
#endif

    if (dest + size >= shad_dest->get_size() || src + size >= shad_src->get_size()) {
        taint_log("Ignoring IO\n");
        return;
    }

    FastShad::copy(shad_dest, dest, shad_src, src, size);

    if (I) update_cb(shad_dest, dest, shad_src, src, size, I);
}
Example #18
void taint_parallel_compute(
        FastShad *shad,
        uint64_t dest, uint64_t ignored,
        uint64_t src1, uint64_t src2, uint64_t src_size) {
    taint_log("pcompute: %lx[%lx+%lx] <- %lx + %lx\n",
            (uint64_t)shad, dest, src_size, src1, src2);
    uint64_t i;
    for (i = 0; i < src_size; ++i) {
        TaintData td = TaintData::comp_union(
                shad->query_full(src1 + i),
                shad->query_full(src2 + i));
        shad->set_full(dest + i, td);
    }
}
Example #19
// This should only be called on loads/stores from CPUState.
void taint_host_copy(
        uint64_t env_ptr, uint64_t addr,
        FastShad *llv, uint64_t llv_offset,
        FastShad *greg, FastShad *gspec,
        uint64_t size, uint64_t labels_per_reg, bool is_store) {
    int64_t offset = addr - env_ptr;
    if (offset < 0 || (size_t)offset >= sizeof(CPUState)) {
        // Irrelevant
        taint_log("hostcopy: irrelevant\n");
        return;
    }

    FastShad *state_shad = NULL;
    uint64_t state_addr = 0;

    find_offset(greg, gspec, (uint64_t)offset, labels_per_reg,
            &state_shad, &state_addr);

    FastShad *shad_src = is_store ? llv : state_shad;
    uint64_t src = is_store ? llv_offset : state_addr;
    FastShad *shad_dest = is_store ? state_shad : llv;
    uint64_t dest = is_store ? state_addr : llv_offset;

    //taint_log("taint_host_copy\n");
    //taint_log("\tenv: %lx, addr: %lx, llv: %lx, offset: %lx\n", env_ptr, addr, llv_ptr, llv_offset);
    //taint_log("\tgreg: %lx, gspec: %lx, size: %lx, is_store: %u\n", greg_ptr, gspec_ptr, size, is_store);
#ifdef TAINTDEBUG
    taint_log("hostcopy: %lx[%lx+%lx] <- %lx[%lx] (offset %lx) (",
            (uint64_t)shad_dest, dest, size, (uint64_t)shad_src, src, offset);
    unsigned i;
    for (i = 0; i < size; i++) {
        taint_log("%lx, ", (uint64_t)shad_src->query(src + i));
    }
    taint_log(")\n");
#endif
    FastShad::copy(shad_dest, dest, shad_src, src, size);
}
Example #20
static void update_cb(
        FastShad *shad_dest, uint64_t dest,
        FastShad *shad_src, uint64_t src, uint64_t size,
        llvm::Instruction *I) {
    if (!I) return;

    CBMasks cb_masks = compile_cb_masks(shad_src, src, size);
    uint64_t &cb_mask = cb_masks.cb_mask;
    uint64_t &one_mask = cb_masks.one_mask;
    uint64_t &zero_mask = cb_masks.zero_mask;

    uint64_t orig_one_mask = one_mask, orig_zero_mask = zero_mask;
    __attribute__((unused)) uint64_t orig_cb_mask = cb_mask; // logged below
    llvm::Value *rhs = I->getNumOperands() >= 2 ? I->getOperand(1) : nullptr;
    llvm::ConstantInt *CI = rhs ? llvm::dyn_cast<llvm::ConstantInt>(rhs) : nullptr;
    uint64_t literal = CI ? CI->getZExtValue() : ~0UL;
    int log2 = 0;

    switch (I->getOpcode()) {
        // Totally reversible cases.
        case llvm::Instruction::Add:
        case llvm::Instruction::Sub:
            tassert(literal != ~0UL);
            log2 = 64 - __builtin_clzl(literal);
            one_mask &= ~((1UL << log2) - 1);
            zero_mask &= ~((1UL << log2) - 1);
            break;

        case llvm::Instruction::Xor:
            one_mask &= ~literal;
            one_mask |= literal & orig_zero_mask;
            zero_mask &= ~literal;
            zero_mask |= literal & orig_one_mask;
            break;

        case llvm::Instruction::ZExt:
        case llvm::Instruction::IntToPtr:
        case llvm::Instruction::PtrToInt:
        case llvm::Instruction::BitCast:
        // This one copies the existing bits and adds non-controllable bits.
        // One and zero masks too complicated to compute. Bah.
        case llvm::Instruction::SExt:
        // Copies. These we ignore (the copy will copy the CB data for us)
        case llvm::Instruction::Store:
        case llvm::Instruction::Load:
        case llvm::Instruction::ExtractValue:
        case llvm::Instruction::InsertValue:
            break;

        case llvm::Instruction::Trunc:
            cb_mask &= (1UL << (size * 8)) - 1;
            one_mask &= (1UL << (size * 8)) - 1;
            zero_mask &= (1UL << (size * 8)) - 1;
            break;

        case llvm::Instruction::Mul:
        {
            tassert(literal != ~0UL);
            // Powers of two in literal destroy reversibility.
            uint64_t trailing_zeroes = __builtin_ctzl(literal);
            cb_mask <<= trailing_zeroes;
            zero_mask = (1UL << trailing_zeroes) - 1;
            one_mask = 0;
            break;
        }

        case llvm::Instruction::URem:
        case llvm::Instruction::SRem:
            tassert(literal != ~0UL);
            log2 = 64 - __builtin_clzl(literal);
            cb_mask &= (1UL << log2) - 1;
            one_mask = 0;
            zero_mask = 0;
            break;

        case llvm::Instruction::UDiv:
        case llvm::Instruction::SDiv:
            tassert(literal != ~0UL);
            log2 = 64 - __builtin_clzl(literal);
            cb_mask >>= log2;
            one_mask = 0;
            zero_mask = 0;
            break;

        case llvm::Instruction::And:
            tassert(literal != ~0UL);
            // Bits not in the bit mask are no longer controllable
            cb_mask &= literal;
            zero_mask |= ~literal;
            one_mask &= literal;
            break;

        case llvm::Instruction::Or:
            tassert(literal != ~0UL);
            // Bits in the bit mask are no longer controllable
            cb_mask &= ~literal;
            one_mask |= literal;
            zero_mask &= ~literal;
            break;

        case llvm::Instruction::Shl:
            tassert(literal != ~0UL);
            cb_mask <<= literal;
            one_mask <<= literal;
            zero_mask <<= literal;
            zero_mask |= (1UL << literal) - 1;
            break;

        case llvm::Instruction::LShr:
            tassert(literal != ~0UL);
            cb_mask >>= literal;
            one_mask >>= literal;
            zero_mask >>= literal;
            zero_mask |= ~((1UL << (64 - literal)) - 1);
            break;

        case llvm::Instruction::AShr: // High bits not really controllable.
            tassert(literal != ~0UL);
            cb_mask >>= literal;
            one_mask >>= literal;
            zero_mask >>= literal;

            // See if high bit is a literal
            if (orig_one_mask & (1UL << (size * 8 - 1))) {
                one_mask |= ~((1UL << (64 - literal)) - 1);
            } else if (orig_zero_mask & (1UL << (size * 8 - 1))) {
                zero_mask |= ~((1UL << (64 - literal)) - 1);
            }
            break;

        // Totally irreversible cases. Erase and bail.
        case llvm::Instruction::FAdd:
        case llvm::Instruction::FSub:
        case llvm::Instruction::FMul:
        case llvm::Instruction::FDiv:
        case llvm::Instruction::FRem:
        case llvm::Instruction::Call:
        case llvm::Instruction::ICmp:
            cb_mask = 0;
            one_mask = 0;
            zero_mask = 0;
            break;

        case llvm::Instruction::GetElementPtr:
        {
            llvm::GetElementPtrInst *GEPI =
                llvm::dyn_cast<llvm::GetElementPtrInst>(I);
            tassert(GEPI);
            one_mask = 0;
            zero_mask = 0;
            // Constant indices => fully reversible
            if (GEPI->hasAllConstantIndices()) break;
            // Otherwise we know nothing.
            cb_mask = 0;
            break;
        }

        default:
            printf("Unknown instruction in update_cb: ");
            I->dump();
            fflush(stdout);
            return;
    }

    taint_log("update_cb: %s[%lx+%lx] CB %#lx -> 0x%#lx, 0 %#lx -> %#lx, 1 %#lx -> %#lx\n",
            shad_dest->name(), dest, size, orig_cb_mask, cb_mask,
            orig_zero_mask, zero_mask, orig_one_mask, one_mask);

    write_cb_masks(shad_dest, dest, size, cb_masks);
}
Example #21
void taint_memlog_push(taint2_memlog *taint_memlog, uint64_t val) {
    taint_log("memlog_push: %lx\n", val);
    taint_memlog->idx = (taint_memlog->idx + 1) % TAINT2_MEMLOG_SIZE;
    taint_memlog->ring[taint_memlog->idx] = val;
}
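
Together with taint_memlog_pop (Example #8), this implements a small ring buffer used LIFO-style: push advances idx and writes, pop reads and steps idx back, both wrapping modulo TAINT2_MEMLOG_SIZE. A minimal usage sketch, assuming a zero-initialized taint2_memlog and a ring of more than two entries:

taint2_memlog memlog = {};              // assumed: idx == 0, ring zeroed
taint_memlog_push(&memlog, 0x1000);     // idx -> 1, ring[1] = 0x1000
taint_memlog_push(&memlog, 0x2000);     // idx -> 2, ring[2] = 0x2000
uint64_t a = taint_memlog_pop(&memlog); // 0x2000, idx back to 1
uint64_t b = taint_memlog_pop(&memlog); // 0x1000, idx back to 0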
Example #22
static void update_cb(
        FastShad *shad_dest, uint64_t dest,
        FastShad *shad_src, uint64_t src, uint64_t size,
        llvm::Instruction *I) {
    if (!I) return;

    CBMasks cb_masks = compile_cb_masks(shad_src, src, size);
    uint64_t &cb_mask = cb_masks.cb_mask;
    uint64_t &one_mask = cb_masks.one_mask;
    uint64_t &zero_mask = cb_masks.zero_mask;

    uint64_t orig_one_mask = one_mask, orig_zero_mask = zero_mask;
    __attribute__((unused)) uint64_t orig_cb_mask = cb_mask;
    std::vector<uint64_t> literals;
    uint64_t last_literal = ~0UL; // last valid literal.
    literals.reserve(I->getNumOperands());

    for (auto it = I->value_op_begin(); it != I->value_op_end(); it++) {
        const llvm::Value *arg = *it;
        const llvm::ConstantInt *CI = llvm::dyn_cast<llvm::ConstantInt>(arg);
        uint64_t literal = CI ? CI->getZExtValue() : ~0UL;
        literals.push_back(literal);
        if (literal != ~0UL) last_literal = literal;
    }
    int log2 = 0;

    switch (I->getOpcode()) {
        // Totally reversible cases.
        case llvm::Instruction::Sub:
            if (literals[1] == ~0UL) {
                tassert(last_literal != ~0UL);
                // first operand is a variable. so negate.
                // throw out ones/zeroes info.
                // FIXME: handle better.
                one_mask = zero_mask = 0;
                break;
            } // otherwise fall through.
        case llvm::Instruction::Add:
            tassert(last_literal != ~0UL);
            log2 = 64 - __builtin_clzl(last_literal);
            // FIXME: this isn't quite right. for example, if all bits ones,
            // adding one makes all bits zero.
            one_mask &= ~((1UL << log2) - 1);
            zero_mask &= ~((1UL << log2) - 1);
            break;

        case llvm::Instruction::Xor:
            one_mask &= ~last_literal;
            one_mask |= last_literal & orig_zero_mask;
            zero_mask &= ~last_literal;
            zero_mask |= last_literal & orig_one_mask;
            break;

        case llvm::Instruction::ZExt:
        case llvm::Instruction::IntToPtr:
        case llvm::Instruction::PtrToInt:
        case llvm::Instruction::BitCast:
        // This one copies the existing bits and adds non-controllable bits.
        // One and zero masks too complicated to compute. Bah.
        case llvm::Instruction::SExt:
        // Copies. These we ignore (the copy will copy the CB data for us)
        case llvm::Instruction::Store:
        case llvm::Instruction::Load:
        case llvm::Instruction::ExtractValue:
        case llvm::Instruction::InsertValue:
            break;

        case llvm::Instruction::Trunc:
            cb_mask &= (1UL << (size * 8)) - 1;
            one_mask &= (1UL << (size * 8)) - 1;
            zero_mask &= (1UL << (size * 8)) - 1;
            break;

        case llvm::Instruction::Mul:
        {
            tassert(last_literal != ~0UL);
            // Powers of two in last_literal destroy reversibility.
            uint64_t trailing_zeroes = __builtin_ctzl(last_literal);
            cb_mask <<= trailing_zeroes;
            zero_mask = (1UL << trailing_zeroes) - 1;
            one_mask = 0;
            break;
        }

        case llvm::Instruction::URem:
        case llvm::Instruction::SRem:
            tassert(last_literal != ~0UL);
            log2 = 64 - __builtin_clzl(last_literal);
            cb_mask &= (1UL << log2) - 1;
            one_mask = 0;
            zero_mask = 0;
            break;

        case llvm::Instruction::UDiv:
        case llvm::Instruction::SDiv:
            tassert(last_literal != ~0UL);
            log2 = 64 - __builtin_clzl(last_literal);
            cb_mask >>= log2;
            one_mask = 0;
            zero_mask = 0;
            break;

        case llvm::Instruction::And:
            tassert(last_literal != ~0UL);
            // Bits not in the bit mask are no longer controllable
            cb_mask &= last_literal;
            zero_mask |= ~last_literal;
            one_mask &= last_literal;
            break;

        case llvm::Instruction::Or:
            tassert(last_literal != ~0UL);
            // Bits in the bit mask are no longer controllable
            cb_mask &= ~last_literal;
            one_mask |= last_literal;
            zero_mask &= ~last_literal;
            break;

        case llvm::Instruction::Shl:
            tassert(last_literal != ~0UL);
            cb_mask <<= last_literal;
            one_mask <<= last_literal;
            zero_mask <<= last_literal;
            zero_mask |= (1UL << last_literal) - 1;
            break;

        case llvm::Instruction::LShr:
            tassert(last_literal != ~0UL);
            cb_mask >>= last_literal;
            one_mask >>= last_literal;
            zero_mask >>= last_literal;
            zero_mask |= ~((1UL << (64 - last_literal)) - 1);
            break;

        case llvm::Instruction::AShr: // High bits not really controllable.
            tassert(last_literal != ~0UL);
            cb_mask >>= last_literal;
            one_mask >>= last_literal;
            zero_mask >>= last_literal;

            // See if the high bit is a literal
            if (orig_one_mask & (1UL << (size * 8 - 1))) {
                one_mask |= ~((1UL << (64 - last_literal)) - 1);
            } else if (orig_zero_mask & (1UL << (size * 8 - 1))) {
                zero_mask |= ~((1UL << (64 - last_literal)) - 1);
            }
            break;

        // Totally irreversible cases. Erase and bail.
        case llvm::Instruction::FAdd:
        case llvm::Instruction::FSub:
        case llvm::Instruction::FMul:
        case llvm::Instruction::FDiv:
        case llvm::Instruction::FRem:
        case llvm::Instruction::Call:
        case llvm::Instruction::ICmp:
        case llvm::Instruction::FCmp:
            cb_mask = 0;
            one_mask = 0;
            zero_mask = 0;
            break;

        case llvm::Instruction::GetElementPtr:
        {
            llvm::GetElementPtrInst *GEPI =
                llvm::dyn_cast<llvm::GetElementPtrInst>(I);
            tassert(GEPI);
            one_mask = 0;
            zero_mask = 0;
            // Constant indices => fully reversible
            if (GEPI->hasAllConstantIndices()) break;
            // Otherwise we know nothing.
            cb_mask = 0;
            break;
        }

        default:
            printf("Unknown instruction in update_cb: ");
            I->dump();
            fflush(stdout);
            return;
    }

    taint_log("update_cb: %s[%lx+%lx] CB %#lx -> 0x%#lx, 0 %#lx -> %#lx, 1 %#lx -> %#lx\n",
            shad_dest->name(), dest, size, orig_cb_mask, cb_mask,
            orig_zero_mask, zero_mask, orig_one_mask, one_mask);

    write_cb_masks(shad_dest, dest, size, cb_masks);
}
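
A concrete check of the Shl case above, as a standalone sketch with invented mask values:

#include <assert.h>
#include <stdint.h>

int main(void) {
    // Before x << 4: bits 0-3 controlled, bit 0 known one, bit 1 known zero.
    uint64_t cb_mask = 0x0F, one_mask = 0x01, zero_mask = 0x02;
    uint64_t last_literal = 4;

    // The Shl case from update_cb above:
    cb_mask  <<= last_literal;              // controlled bits move up: 0xF0
    one_mask <<= last_literal;              // 0x10
    zero_mask <<= last_literal;             // 0x20
    zero_mask |= (1UL << last_literal) - 1; // shifted-in low bits are zero
    assert(cb_mask == 0xF0 && one_mask == 0x10 && zero_mask == 0x2F);
    return 0;
}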
Example #23
void taint_sext(FastShad *shad, uint64_t dest, uint64_t dest_size, uint64_t src, uint64_t src_size) {
    taint_log("taint_sext\n");
    FastShad::copy(shad, dest, shad, src, src_size);
    bulk_set(shad, dest + src_size, dest_size - src_size,
            shad->query_full(dest + src_size - 1));
}
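
taint_sext copies the src_size low bytes and then stamps the taint of the last copied byte, i.e. the sign byte, onto the remaining dest_size - src_size bytes, mirroring how SExt replicates the sign bit. A hypothetical call (llv_shad, dest, src invented):

// Sign-extend a 4-byte value at src into an 8-byte slot at dest:
// bytes dest+0..3 copy src+0..3; bytes dest+4..7 all take the taint
// of the sign byte (dest+3).
taint_sext(llv_shad, dest, /*dest_size=*/8, src, /*src_size=*/4);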