Example #1
static rose_inline
void roseCheckNfaEod(const struct RoseEngine *t, u8 *state,
                     struct hs_scratch *scratch, u64a offset,
                     const char is_streaming) {
    /* the data/len used for state decompression should cover the full available data */
    const u8 *aa = getActiveLeafArray(t, state);
    const u32 aaCount = t->activeArrayCount;

    u8 key = 0;

    if (is_streaming) {
        const u8 *eod_data = scratch->core_info.hbuf;
        size_t eod_len = scratch->core_info.hlen;
        key = eod_len ? eod_data[eod_len - 1] : 0;
    }

    for (u32 qi = mmbit_iterate(aa, aaCount, MMB_INVALID); qi != MMB_INVALID;
         qi = mmbit_iterate(aa, aaCount, qi)) {
        const struct NfaInfo *info = getNfaInfoByQueue(t, qi);
        const struct NFA *nfa = getNfaByInfo(t, info);

        if (!nfaAcceptsEod(nfa)) {
            DEBUG_PRINTF("nfa %u does not accept eod\n", qi);
            continue;
        }

        DEBUG_PRINTF("checking nfa %u\n", qi);

        char *fstate = scratch->fullState + info->fullStateOffset;
        const char *sstate = (const char *)state + info->stateOffset;

        if (is_streaming) {
            // Decompress stream state.
            nfaExpandState(nfa, fstate, sstate, offset, key);
        }

        nfaCheckFinalState(nfa, fstate, sstate, offset, scratch->tctxt.cb,
                           scratch->tctxt.cb_som, scratch->tctxt.userCtx);
    }
}
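
The EOD walk above follows a general shape that is easier to see in isolation. The
sketch below is a minimal, self-contained illustration of that shape using
hypothetical simplified types (Engine, eod_check_all), not the real RoseEngine/NFA
API: iterate the active engines, skip any that cannot accept at end of data,
decompress stream state when streaming, then test the final state at the given
offset.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct Engine {
    int accepts_eod;                  /* can this engine match at end of data? */
    void (*expand)(struct Engine *);  /* decompress compressed stream state */
    int (*check_final)(struct Engine *, uint64_t); /* nonzero if it matches at EOD */
};

static void eod_check_all(struct Engine **active, size_t n, uint64_t offset,
                          int is_streaming) {
    for (size_t i = 0; i < n; i++) {
        struct Engine *e = active[i];
        if (!e->accepts_eod) {
            continue; /* this engine has nothing to report at EOD */
        }
        if (is_streaming) {
            e->expand(e); /* bring stream state into its full working form */
        }
        if (e->check_final(e, offset)) {
            printf("engine %zu matched at EOD (offset %llu)\n", i,
                   (unsigned long long)offset);
        }
    }
}
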
Example #2
static really_inline
void initQueue(struct mq *q, u32 qi, const struct RoseEngine *t,
               struct hs_scratch *scratch) {
    const struct NfaInfo *info = getNfaInfoByQueue(t, qi);
    q->nfa = getNfaByInfo(t, info);
    q->end = 0;
    q->cur = 0;
    q->state = scratch->fullState + info->fullStateOffset;
    q->streamState = (char *)scratch->core_info.state + info->stateOffset;
    q->offset = scratch->core_info.buf_offset;
    q->buffer = scratch->core_info.buf;
    q->length = scratch->core_info.len;
    q->history = scratch->core_info.hbuf;
    q->hlength = scratch->core_info.hlen;
    q->cb = selectAdaptor(t);
    q->som_cb = selectSomAdaptor(t);
    q->context = scratch;
    q->report_current = 0;

    DEBUG_PRINTF("qi=%u, offset=%llu, fullState=%u, streamState=%u, "
                 "state=%u\n", qi, q->offset, info->fullStateOffset,
                 info->stateOffset, *(u32 *)q->state);
}
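
For context, initQueue is typically paired with marking the engine active, as
Example #5 below does when the MPV engine first becomes live. A condensed excerpt
of that call pattern (same identifiers as the examples, not self-contained):

if (!mmbit_set(aa, aaCount, qi)) {        /* engine was not active until now */
    initQueue(q, qi, t, scratch);         /* wire queue to scratch and stream state */
    nfaQueueInitState(q->nfa, q);         /* start from fresh full state */
    pushQueueAt(q, 0, MQE_START, loc);    /* seed the queue with an MQE_START event */
    fatbit_set(activeQueues, qCount, qi);
}
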
Example #3
static rose_inline
void roseCheckEodSuffixes(const struct RoseEngine *t, u8 *state, u64a offset,
                          struct hs_scratch *scratch) {
    const u8 *aa = getActiveLeafArray(t, state);
    const u32 aaCount = t->activeArrayCount;
    UNUSED u32 qCount = t->queueCount;

    for (u32 qi = mmbit_iterate(aa, aaCount, MMB_INVALID); qi != MMB_INVALID;
         qi = mmbit_iterate(aa, aaCount, qi)) {
        const struct NfaInfo *info = getNfaInfoByQueue(t, qi);
        const struct NFA *nfa = getNfaByInfo(t, info);

        assert(nfaAcceptsEod(nfa));

        DEBUG_PRINTF("checking nfa %u\n", qi);

        assert(fatbit_isset(scratch->aqa, qCount, qi)); /* we have just been
                                                           triggered */

        char *fstate = scratch->fullState + info->fullStateOffset;
        const char *sstate = (const char *)state + info->stateOffset;

        struct mq *q = scratch->queues + qi;

        pushQueueNoMerge(q, MQE_END, scratch->core_info.len);

        q->context = NULL;
        /* the Rose variant of queue exec is used because matches must not (and
         * cannot) be raised in the history buffer. */
        char rv = nfaQueueExecRose(q->nfa, q, MO_INVALID_IDX);
        if (rv) { /* nfa is still alive */
            nfaCheckFinalState(nfa, fstate, sstate, offset, scratch->tctxt.cb,
                               scratch->tctxt.cb_som, scratch->tctxt.userCtx);
        }
    }
}
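
The per-queue control flow above reduces to: push a terminating END event, run the
engine over the remaining data, and only if it survives ask whether it accepts at
end of data. A self-contained sketch of that flow with hypothetical types
(sketch_engine, run_to_end, accepts_at_eod), not the real mq/NFA API:

#include <stdint.h>
#include <stdio.h>

struct sketch_engine {
    int (*run_to_end)(struct sketch_engine *);               /* 0 if the engine dies */
    int (*accepts_at_eod)(struct sketch_engine *, uint64_t); /* nonzero on an EOD match */
};

static void check_eod_suffix(struct sketch_engine *e, uint64_t offset) {
    if (!e->run_to_end(e)) {
        return; /* engine died before the end of the data: no EOD match possible */
    }
    if (e->accepts_at_eod(e, offset)) {
        printf("EOD suffix match at offset %llu\n", (unsigned long long)offset);
    }
}
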
Example #4
static really_inline
enum MiracleAction roseScanForMiracles(const struct RoseEngine *t, char *state,
                                       struct hs_scratch *scratch, u32 qi,
                                       const struct LeftNfaInfo *left,
                                       const struct NFA *nfa) {
    struct core_info *ci = &scratch->core_info;
    const u32 qCount = t->queueCount;
    struct mq *q = scratch->queues + qi;

    const char q_active = fatbit_isset(scratch->aqa, qCount, qi);
    DEBUG_PRINTF("q_active=%d\n", q_active);

    const s64a begin_loc = q_active ? q_cur_loc(q) : 0;
    const s64a end_loc = ci->len;

    s64a miracle_loc;
    if (roseMiracleOccurs(t, left, ci, begin_loc, end_loc, &miracle_loc)) {
        goto found_miracle;
    }

    if (roseCountingMiracleOccurs(t, left, ci, begin_loc, end_loc,
                                  &miracle_loc)) {
        goto found_miracle;
    }

    DEBUG_PRINTF("no miracle\n");
    return MIRACLE_CONTINUE;

found_miracle:
    DEBUG_PRINTF("miracle at %lld\n", miracle_loc);

    if (left->infix) {
        if (!q_active) {
            DEBUG_PRINTF("killing infix\n");
            return MIRACLE_DEAD;
        }

        DEBUG_PRINTF("skip q forward, %lld to %lld\n", begin_loc, miracle_loc);
        q_skip_forward_to(q, miracle_loc);
        if (q_last_type(q) == MQE_START) {
            DEBUG_PRINTF("miracle caused infix to die\n");
            return MIRACLE_DEAD;
        }

        DEBUG_PRINTF("re-init infix state\n");
        assert(q->items[q->cur].type == MQE_START);
        q->items[q->cur].location = miracle_loc;
        nfaQueueInitState(q->nfa, q);
    } else {
        if (miracle_loc > end_loc - t->historyRequired) {
            char *streamState = state + getNfaInfoByQueue(t, qi)->stateOffset;
            u64a offset = ci->buf_offset + miracle_loc;
            u8 key = offset ? getByteBefore(ci, miracle_loc) : 0;
            DEBUG_PRINTF("init state, key=0x%02x, offset=%llu\n", key, offset);
            if (!nfaInitCompressedState(nfa, offset, streamState, key)) {
                return MIRACLE_DEAD;
            }
            storeRoseDelay(t, state, left, (s64a)ci->len - miracle_loc);
            return MIRACLE_SAVED;
        }

        DEBUG_PRINTF("re-init prefix (skip %lld->%lld)\n", begin_loc,
                     miracle_loc);
        if (!q_active) {
            fatbit_set(scratch->aqa, qCount, qi);
            initRoseQueue(t, qi, left, scratch);
        }
        q->cur = q->end = 0;
        pushQueueAt(q, 0, MQE_START, miracle_loc);
        pushQueueAt(q, 1, MQE_TOP, miracle_loc);
        nfaQueueInitState(q->nfa, q);
    }

    return MIRACLE_CONTINUE;
}
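
A "miracle" here is a location that lets the engine's earlier history be discarded,
so its state can be skipped forward or re-seeded from that point. The sketch below
is a rough, self-contained illustration of the underlying scan, assuming the
simplest possible notion of a miracle (a single byte the engine cannot survive);
the predicate and types are hypothetical and only stand in for the real
roseMiracleOccurs/roseCountingMiracleOccurs machinery:

#include <stdint.h>

/* Scan [begin, end) backwards for the last byte that kills the engine.
 * Returns 1 and writes its location to *miracle_loc, or 0 if none exists. */
static int find_miracle(const uint8_t *buf, int64_t begin, int64_t end,
                        int (*kills_engine)(uint8_t), int64_t *miracle_loc) {
    for (int64_t i = end - 1; i >= begin; i--) {
        if (kills_engine(buf[i])) {
            *miracle_loc = i; /* everything at or before this byte is irrelevant */
            return 1;
        }
    }
    return 0;
}
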
Example #5
hwlmcb_rv_t roseHandleChainMatch(const struct RoseEngine *t,
                                 struct hs_scratch *scratch, u32 event,
                                 u64a top_squash_distance, u64a end,
                                 char in_catchup) {
    assert(event == MQE_TOP || event >= MQE_TOP_FIRST);
    struct core_info *ci = &scratch->core_info;

    u8 *aa = getActiveLeafArray(t, scratch->core_info.state);
    u32 aaCount = t->activeArrayCount;
    struct fatbit *activeQueues = scratch->aqa;
    u32 qCount = t->queueCount;

    const u32 qi = 0; /* MPV is always queue 0 if it exists */
    struct mq *q = &scratch->queues[qi];
    const struct NfaInfo *info = getNfaInfoByQueue(t, qi);

    s64a loc = (s64a)end - ci->buf_offset;
    assert(loc <= (s64a)ci->len && loc >= -(s64a)ci->hlen);

    if (!mmbit_set(aa, aaCount, qi)) {
        initQueue(q, qi, t, scratch);
        nfaQueueInitState(q->nfa, q);
        pushQueueAt(q, 0, MQE_START, loc);
        fatbit_set(activeQueues, qCount, qi);
    } else if (info->no_retrigger) {
        DEBUG_PRINTF("yawn\n");
        /* nfa only needs one top; we can go home now */
        return HWLM_CONTINUE_MATCHING;
    } else if (!fatbit_set(activeQueues, qCount, qi)) {
        initQueue(q, qi, t, scratch);
        loadStreamState(q->nfa, q, 0);
        pushQueueAt(q, 0, MQE_START, 0);
    } else if (isQueueFull(q)) {
        DEBUG_PRINTF("queue %u full -> catching up nfas\n", qi);
        /* this is a chained nfa, so the suffixes/outfixes are already known to
         * be consistent */
        if (ensureMpvQueueFlushed(t, scratch, qi, loc, in_catchup)
            == HWLM_TERMINATE_MATCHING) {
            DEBUG_PRINTF("terminating...\n");
            return HWLM_TERMINATE_MATCHING;
        }
    }

    if (top_squash_distance) {
        assert(q->cur != q->end);
        struct mq_item *last = &q->items[q->end - 1];
        if (last->type == event
            && last->location >= loc - (s64a)top_squash_distance) {
            last->location = loc;
            goto event_enqueued;
        }
    }

    pushQueue(q, event, loc);

event_enqueued:
    if (q_cur_loc(q) == (s64a)ci->len) {
        /* the nfa may not be run again later; ensure its state is consistent now */
        DEBUG_PRINTF("empty run\n");
        pushQueueNoMerge(q, MQE_END, loc);
        char alive = nfaQueueExec(q->nfa, q, loc);
        if (alive) {
            scratch->tctxt.mpv_inactive = 0;
            q->cur = q->end = 0;
            pushQueueAt(q, 0, MQE_START, loc);
        } else {
            mmbit_unset(aa, aaCount, qi);
            fatbit_unset(scratch->aqa, qCount, qi);
        }
    }

    DEBUG_PRINTF("added mpv event at %lld\n", loc);
    scratch->tctxt.next_mpv_offset = 0; /* the top event may result in matches
                                         * earlier than expected */
    return HWLM_CONTINUE_MATCHING;
}
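
The top_squash_distance branch above merges a new top with the previous one when
they are close enough, rather than queueing a duplicate. A small self-contained
sketch of that idea with hypothetical event/queue types (the real code additionally
handles a full queue by flushing it, which this sketch omits):

#include <stddef.h>
#include <stdint.h>

struct sketch_event { uint32_t type; int64_t location; };

struct sketch_eventq {
    struct sketch_event items[64];
    size_t end; /* number of queued events */
};

static void push_top(struct sketch_eventq *q, uint32_t top, int64_t loc,
                     int64_t squash_distance) {
    if (squash_distance && q->end) {
        struct sketch_event *last = &q->items[q->end - 1];
        if (last->type == top && last->location >= loc - squash_distance) {
            last->location = loc; /* squash: slide the previous identical top forward */
            return;
        }
    }
    /* sketch only: assumes the queue never fills; the real code flushes instead */
    q->items[q->end].type = top;
    q->items[q->end].location = loc;
    q->end++;
}
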