Example #1
/** \brief Dump a sparse iterator's keys to stdout. */
void mmbit_sparse_iter_dump(const struct mmbit_sparse_iter *it,
                            u32 total_bits) {
    // Expediency and future-proofing: create a temporary multibit of the right
    // size with all the bits on, then walk it with this sparse iterator.
    size_t bytes = mmbit_size(total_bits);
    u8 *bits = malloc(bytes);
    if (!bits) {
        printf("Failed to alloc %zu bytes for temp multibit", bytes);
        return;
    }
    for (u32 i = 0; i < total_bits; i++) {
        mmbit_set_i(bits, total_bits, i);
    }

    struct mmbit_sparse_state s[MAX_SPARSE_ITER_STATES];
    u32 idx = 0;
    for (u32 i = mmbit_sparse_iter_begin(bits, total_bits, &idx, it, s);
             i != MMB_INVALID;
             i = mmbit_sparse_iter_next(bits, total_bits, i, &idx, it, s)) {
        printf("%u ", i);
    }

    printf("(%u keys)", idx + 1);

    free(bits);
}
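
All four examples on this page share the same iteration idiom: stack-allocate MAX_SPARSE_ITER_STATES entries of mmbit_sparse_state, then loop from mmbit_sparse_iter_begin() until MMB_INVALID via mmbit_sparse_iter_next(). A minimal sketch that distills the idiom into a reusable walker, using only the mmbit_* calls shown above; walk_keys and key_fn are hypothetical names, not Hyperscan API:

typedef void (*key_fn)(u32 key, u32 rank, void *ctx); /* hypothetical */

static void walk_keys(u8 *bits, u32 total_bits,
                      const struct mmbit_sparse_iter *it, key_fn f,
                      void *ctx) {
    struct mmbit_sparse_state s[MAX_SPARSE_ITER_STATES]; /* on-stack state */
    u32 idx = 0; /* rank of the current key in the iterator's key list */
    for (u32 i = mmbit_sparse_iter_begin(bits, total_bits, &idx, it, s);
         i != MMB_INVALID;
         i = mmbit_sparse_iter_next(bits, total_bits, i, &idx, it, s)) {
        f(i, idx, ctx); /* i is the bit index that is on in the multibit */
    }
}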
Example #2
File: stream.c Project: 01org/hyperscan
static rose_inline
void roseCatchUpLeftfixes(const struct RoseEngine *t, char *state,
                          struct hs_scratch *scratch) {
    if (!t->activeLeftIterOffset) {
        // No sparse iter, no non-transient roses.
        return;
    }

    // As per UE-1629, we catch up leftfix engines to:
    //  * current position (last location in the queue, or last location we
    //    executed to if the queue is empty) if that position (and the byte
    //    before so we can decompress the stream state) will be in the history
    //    buffer on the next stream write; OR
    //  * (stream_boundary - max_delay) otherwise.

    u8 *ara = getActiveLeftArray(t, state); /* indexed by offsets into
                                             * left_table */
    const u32 arCount = t->activeLeftCount;
    const struct LeftNfaInfo *left_table = getLeftTable(t);
    const struct mmbit_sparse_iter *it = getActiveLeftIter(t);

    struct mmbit_sparse_state si_state[MAX_SPARSE_ITER_STATES];

    u32 idx = 0;
    u32 ri = mmbit_sparse_iter_begin(ara, arCount, &idx, it, si_state);
    for (; ri != MMB_INVALID;
           ri = mmbit_sparse_iter_next(ara, arCount, ri, &idx, it, si_state)) {
        const struct LeftNfaInfo *left = left_table + ri;
        u32 qi = ri + t->leftfixBeginQueue;
        DEBUG_PRINTF("leftfix %u of %u, maxLag=%u, infix=%d\n", ri, arCount,
                     left->maxLag, (int)left->infix);
        if (!roseCatchUpLeftfix(t, state, scratch, qi, left)) {
            DEBUG_PRINTF("removing rose %u from active list\n", ri);
            DEBUG_PRINTF("groups old=%016llx mask=%016llx\n",
                         scratch->tctxt.groups, left->squash_mask);
            scratch->tctxt.groups &= left->squash_mask;
            mmbit_unset(ara, arCount, ri);
        }
    }
}
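
The UE-1629 comment above chooses between two catch-up targets. A minimal sketch of that rule in isolation; every name here (catch_up_loc, history_will_cover, current_pos, stream_boundary, max_delay as a parameter) is illustrative, not Hyperscan API:

static s64a catch_up_loc(s64a current_pos, s64a stream_boundary, u32 max_delay,
                         char history_will_cover) {
    if (history_will_cover) {
        /* the position (and the byte before it, needed to decompress the
         * stream state) will be in the history buffer on the next write */
        return current_pos;
    }
    return stream_boundary - (s64a)max_delay;
}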
Example #3
File: stream.c Project: 01org/hyperscan
static rose_inline
void runEagerPrefixesStream(const struct RoseEngine *t,
                            struct hs_scratch *scratch) {
    if (!t->eagerIterOffset
        || scratch->core_info.buf_offset >= EAGER_STOP_OFFSET) {
        return;
    }

    char *state = scratch->core_info.state;
    u8 *ara = getActiveLeftArray(t, state); /* indexed by offsets into
                                             * left_table */
    const u32 arCount = t->activeLeftCount;
    const u32 qCount = t->queueCount;
    const struct LeftNfaInfo *left_table = getLeftTable(t);
    const struct mmbit_sparse_iter *it = getByOffset(t, t->eagerIterOffset);

    struct mmbit_sparse_state si_state[MAX_SPARSE_ITER_STATES];

    u32 idx = 0;
    u32 ri = mmbit_sparse_iter_begin(ara, arCount, &idx, it, si_state);
    for (; ri != MMB_INVALID;
           ri = mmbit_sparse_iter_next(ara, arCount, ri, &idx, it, si_state)) {
        const struct LeftNfaInfo *left = left_table + ri;
        u32 qi = ri + t->leftfixBeginQueue;
        DEBUG_PRINTF("leftfix %u of %u, maxLag=%u\n", ri, arCount, left->maxLag);

        assert(!fatbit_isset(scratch->aqa, qCount, qi));
        assert(left->eager);
        assert(!left->infix);

        struct mq *q = scratch->queues + qi;
        const struct NFA *nfa = getNfaByQueue(t, qi);
        s64a loc = MIN(scratch->core_info.len,
                       EAGER_STOP_OFFSET - scratch->core_info.buf_offset);

        fatbit_set(scratch->aqa, qCount, qi);
        initRoseQueue(t, qi, left, scratch);

        if (scratch->core_info.buf_offset) {
            s64a sp = left->transient ? -(s64a)scratch->core_info.hlen
                                      : -(s64a)loadRoseDelay(t, state, left);
            pushQueueAt(q, 0, MQE_START, sp);
            if (scratch->core_info.buf_offset + sp > 0) {
                loadStreamState(nfa, q, sp);
                /* if the leftfix is currently in a match state, we cannot
                 * advance it. */
                if (nfaInAnyAcceptState(nfa, q)) {
                    continue;
                }
                pushQueueAt(q, 1, MQE_END, loc);
            } else {
                pushQueueAt(q, 1, MQE_TOP, sp);
                pushQueueAt(q, 2, MQE_END, loc);
                nfaQueueInitState(q->nfa, q);
            }
        } else {
            pushQueueAt(q, 0, MQE_START, 0);
            pushQueueAt(q, 1, MQE_TOP, 0);
            pushQueueAt(q, 2, MQE_END, loc);
            nfaQueueInitState(nfa, q);
        }

        char alive = nfaQueueExecToMatch(q->nfa, q, loc);

        if (!alive) {
            DEBUG_PRINTF("queue %u dead, squashing\n", qi);
            mmbit_unset(ara, arCount, ri);
            fatbit_unset(scratch->aqa, qCount, qi);
            scratch->tctxt.groups &= left->squash_mask;
        } else if (q->cur == q->end) {
            assert(alive != MO_MATCHES_PENDING);
            /* unlike in block mode we cannot squash groups if there is no match
             * in this block as we need the groups on for later stream writes */
            /* TODO: investigate possibility of a method to suppress groups for
             * a single stream block. */
            DEBUG_PRINTF("queue %u finished, nfa lives\n", qi);
            q->cur = q->end = 0;
            pushQueueAt(q, 0, MQE_START, loc);
        } else {
            assert(alive == MO_MATCHES_PENDING);
            DEBUG_PRINTF("queue %u unfinished, nfa lives\n", qi);
            q->end--; /* remove end item */
        }
    }
}
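
Eager prefixes only run near the start of the stream: the early return rejects streams already past EAGER_STOP_OFFSET, and the MIN() above clamps the scan so it never crosses that offset. A standalone sketch of the clamp, assuming the same MIN macro and EAGER_STOP_OFFSET constant from the code above; eager_scan_limit is a hypothetical name:

static s64a eager_scan_limit(u64a buf_offset, size_t len) {
    assert(buf_offset < EAGER_STOP_OFFSET); /* ensured by the early return */
    s64a room = (s64a)(EAGER_STOP_OFFSET - buf_offset);
    return MIN((s64a)len, room);
}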
Example #4
File: eod.c Project: 0x4e38/hyperscan
static rose_inline
int roseEodRunIterator(const struct RoseEngine *t, u8 *state, u64a offset,
                       struct hs_scratch *scratch) {
    if (!t->eodIterOffset) {
        return MO_CONTINUE_MATCHING;
    }

    const struct RoseRole *roleTable = getRoleTable(t);
    const struct RosePred *predTable = getPredTable(t);
    const struct RoseIterMapping *iterMapBase
        = getByOffset(t, t->eodIterMapOffset);
    const struct mmbit_sparse_iter *it = getByOffset(t, t->eodIterOffset);
    assert(ISALIGNED(iterMapBase));
    assert(ISALIGNED(it));

    // Sparse iterator state was allocated earlier
    struct mmbit_sparse_state *s = scratch->sparse_iter_state;
    struct fatbit *handled_roles = scratch->handled_roles;

    const u32 numStates = t->rolesWithStateCount;

    void *role_state = getRoleState(state);
    u32 idx = 0;
    u32 i = mmbit_sparse_iter_begin(role_state, numStates, &idx, it, s);

    fatbit_clear(handled_roles);

    for (; i != MMB_INVALID;
           i = mmbit_sparse_iter_next(role_state, numStates, i, &idx, it, s)) {
        DEBUG_PRINTF("pred state %u (iter idx=%u) is on\n", i, idx);
        const struct RoseIterMapping *iterMap = iterMapBase + idx;
        const struct RoseIterRole *roles = getByOffset(t, iterMap->offset);
        assert(ISALIGNED(roles));

        DEBUG_PRINTF("%u roles to consider\n", iterMap->count);
        for (u32 j = 0; j != iterMap->count; j++) {
            u32 role = roles[j].role;
            assert(role < t->roleCount);
            DEBUG_PRINTF("checking role %u, pred %u:\n", role, roles[j].pred);
            const struct RoseRole *tr = roleTable + role;

            if (fatbit_isset(handled_roles, t->roleCount, role)) {
                DEBUG_PRINTF("role %u already handled by the walk, skip\n",
                             role);
                continue;
            }

            // Special case: if this role is a trivial case (pred type simple)
            // we don't need to check any history and we already know the pred
            // role is on.
            if (tr->flags & ROSE_ROLE_PRED_SIMPLE) {
                DEBUG_PRINTF("pred type is simple, no need for checks\n");
            } else {
                assert(roles[j].pred < t->predCount);
                const struct RosePred *tp = predTable + roles[j].pred;
                if (!roseCheckPredHistory(tp, offset)) {
                    continue;
                }
            }

            /* mark role as handled so we don't touch it again in this walk */
            fatbit_set(handled_roles, t->roleCount, role);

            DEBUG_PRINTF("fire report for role %u, report=%u\n", role,
                         tr->reportId);
            int rv = scratch->tctxt.cb(offset, tr->reportId,
                                       scratch->tctxt.userCtx);
            if (rv == MO_HALT_MATCHING) {
                return MO_HALT_MATCHING;
            }
        }
    }

    return MO_CONTINUE_MATCHING;
}
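
This example uses both outputs of the sparse iterator: i is the key (the pred state), while idx is that key's rank in the iterator's key list and indexes the parallel iterMapBase table. It also shows a visit-once guard: handled_roles is cleared before the walk and consulted before each role, so a role reachable from several pred states fires at most once. A minimal sketch of that guard alone, reusing the fatbit_* calls from above; handle_role_once is a hypothetical name:

static void handle_role_once(struct fatbit *handled, u32 role_count,
                             u32 role) {
    if (fatbit_isset(handled, role_count, role)) {
        return; /* already processed earlier in this walk */
    }
    fatbit_set(handled, role_count, role);
    /* ... process the role: history checks, report callback, etc. ... */
}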