Example #1
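copyQueueItems() from the Tamarama container engine copies top events belonging to the sub-engine at activeIdx from the outer queue q1 into the sub-queue q2, rebasing each top onto the sub-engine's own event range. As soon as it reaches an event owned by a different sub-engine, it closes q2 with an MQE_END and stops.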
static
void copyQueueItems(const struct Tamarama *t, const struct NFA *sub,
                    struct mq *q1, struct mq *q2, const u32 activeIdx) {
    const u32 *baseTop = (const u32 *)((const char *)t +
                                       sizeof(struct Tamarama));

    // Tops in [lower, upper) belong to the sub-engine at activeIdx; baseTop
    // holds each sub-engine's first top value.
    u32 lower = baseTop[activeIdx];
    u32 upper = activeIdx == t->numSubEngines - 1 ?
                    ~0U : baseTop[activeIdx + 1];
    u32 event_base = isMultiTopType(sub->type) ? MQE_TOP_FIRST : MQE_TOP;
    while (q1->cur < q1->end) {
        u32 type = q1->items[q1->cur].type;
        s64a loc = q1->items[q1->cur].location;
        DEBUG_PRINTF("type:%u lower:%u upper:%u\n", type, lower, upper);
        if (type >= lower && type < upper) {
            u32 event = event_base;
            if (event == MQE_TOP_FIRST) {
                event += type - lower;
            }
            pushQueue(q2, event, loc);
        } else {
            pushQueueNoMerge(q2, MQE_END, loc);
            break;
        }
        q1->cur++;
    }
}
Example #2
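updateQueues() prepares the Tamarama sub-queue q2 from the outer queue q1: it copies the queue properties, pushes an MQE_START event, and determines which sub-engine the next top event belongs to. If the previously active sub-engine differs from the new one and is still alive, q2 is closed with an MQE_END and pointed at the old engine so it can be run to completion first; otherwise the sub-queue is filled via initSubQueue().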
static
void updateQueues(const struct Tamarama *t, struct mq *q1, struct mq *q2) {
    q2->cur = q2->end = 0;
    copyQueueProperties(q1, q2, t->activeIdxSize);

    const u32 numSubEngines = t->numSubEngines;
    u32 lastActiveIdx = loadActiveIdx(q1->streamState,
                                      t->activeIdxSize);
#ifdef DEBUG
    DEBUG_PRINTF("external queue\n");
    debugQueue(q1);
#endif

    // Push MQE_START event to the subqueue
    s64a loc = q1->items[q1->cur].location;
    pushQueueAt(q2, 0, MQE_START, loc);
    char hasStart = 0;
    if (q1->items[q1->cur].type == MQE_START) {
        hasStart = 1;
        q1->cur++;
    }

    u32 activeIdx = lastActiveIdx;
    // If we have top events in the main queue, update current active id
    if (q1->cur < q1->end - 1) {
        const u32 *baseTop = (const u32 *)((const char *)t +
                                           sizeof(struct Tamarama));
        u32 curTop = q1->items[q1->cur].type;
        activeIdx = findEngineForTop(baseTop, curTop, numSubEngines);
    }

    assert(activeIdx < numSubEngines);
    DEBUG_PRINTF("last id:%u, current id:%u, num of subengines:%u\n",
                 lastActiveIdx, activeIdx, numSubEngines);
    // Handle unfinished last alive subengine
    if (lastActiveIdx != activeIdx &&
        lastActiveIdx != numSubEngines && hasStart) {
        loc = q1->items[q1->cur].location;
        pushQueueNoMerge(q2, MQE_END, loc);
        q2->nfa = getSubEngine(t, lastActiveIdx);
        return;
    }

    initSubQueue(t, q1, q2, lastActiveIdx, activeIdx);
    DEBUG_PRINTF("finish queues\n");
}
Example #3
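roseCheckEodSuffixes() iterates over the active suffix engines at end of data. Each queue is terminated with an MQE_END at the end of the scanned buffer and executed with nfaQueueExecRose() so that no matches are raised from the history buffer; engines still alive afterwards are checked for accept-at-EOD states via nfaCheckFinalState().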
static rose_inline
void roseCheckEodSuffixes(const struct RoseEngine *t, u8 *state, u64a offset,
                          struct hs_scratch *scratch) {
    const u8 *aa = getActiveLeafArray(t, state);
    const u32 aaCount = t->activeArrayCount;
    UNUSED u32 qCount = t->queueCount;

    for (u32 qi = mmbit_iterate(aa, aaCount, MMB_INVALID); qi != MMB_INVALID;
         qi = mmbit_iterate(aa, aaCount, qi)) {
        const struct NfaInfo *info = getNfaInfoByQueue(t, qi);
        const struct NFA *nfa = getNfaByInfo(t, info);

        assert(nfaAcceptsEod(nfa));

        DEBUG_PRINTF("checking nfa %u\n", qi);

        assert(fatbit_isset(scratch->aqa, qCount, qi)); /* we have just been
                                                           triggered */

        char *fstate = scratch->fullState + info->fullStateOffset;
        const char *sstate = (const char *)state + info->stateOffset;

        struct mq *q = scratch->queues + qi;

        pushQueueNoMerge(q, MQE_END, scratch->core_info.len);

        q->context = NULL;
        /* rose exec is used as we don't want to / can't raise matches in the
         * history buffer. */
        char rv = nfaQueueExecRose(q->nfa, q, MO_INVALID_IDX);
        if (rv) { /* nfa is still alive */
            nfaCheckFinalState(nfa, fstate, sstate, offset, scratch->tctxt.cb,
                               scratch->tctxt.cb_som, scratch->tctxt.userCtx);
        }
    }
}
Example #4
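roseCatchUpLeftfix() brings a non-transient leftfix engine up to the stream boundary. It short-circuits zombies and miracles, lazily initialises the queue, skips work that would land safely inside the history region, and reduces an infix queue where applicable; it then terminates the queue with an MQE_END, executes the engine, and either saves its stream state or reports it dead.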
static really_inline
char roseCatchUpLeftfix(const struct RoseEngine *t, char *state,
                        struct hs_scratch *scratch, u32 qi,
                        const struct LeftNfaInfo *left) {
    assert(!left->transient); // active roses only

    struct core_info *ci = &scratch->core_info;
    const u32 qCount = t->queueCount;
    struct mq *q = scratch->queues + qi;
    const struct NFA *nfa = getNfaByQueue(t, qi);

    if (nfaSupportsZombie(nfa)
        && ci->buf_offset /* prefix can be alive with no q */
        && !fatbit_isset(scratch->aqa, qCount, qi)
        && isZombie(t, state, left)) {
        DEBUG_PRINTF("yawn - zombie\n");
        return 1;
    }

    if (left->stopTable) {
        enum MiracleAction mrv =
            roseScanForMiracles(t, state, scratch, qi, left, nfa);
        switch (mrv) {
        case MIRACLE_DEAD:
            return 0;
        case MIRACLE_SAVED:
            return 1;
        default:
            assert(mrv == MIRACLE_CONTINUE);
            break;
        }
    }

    if (!fatbit_set(scratch->aqa, qCount, qi)) {
        initRoseQueue(t, qi, left, scratch);

        s32 sp;
        if (ci->buf_offset) {
            sp = -(s32)loadRoseDelay(t, state, left);
        } else {
            sp = 0;
        }

        DEBUG_PRINTF("ci->len=%zu, sp=%d, historyRequired=%u\n", ci->len, sp,
                     t->historyRequired);

        if (ci->len - sp + 1 < t->historyRequired) {
            // we'll end up safely in the history region.
            DEBUG_PRINTF("safely in history, skipping\n");
            storeRoseDelay(t, state, left, (s64a)ci->len - sp);
            return 1;
        }

        pushQueueAt(q, 0, MQE_START, sp);
        if (left->infix || ci->buf_offset + sp > 0) {
            loadStreamState(nfa, q, sp);
        } else {
            pushQueueAt(q, 1, MQE_TOP, sp);
            nfaQueueInitState(nfa, q);
        }
    } else {
        DEBUG_PRINTF("queue already active\n");
        if (q->end - q->cur == 1 && q_cur_type(q) == MQE_START) {
            DEBUG_PRINTF("empty queue, start loc=%lld\n", q_cur_loc(q));
            s64a last_loc = q_cur_loc(q);
            if (ci->len - last_loc + 1 < t->historyRequired) {
                // we'll end up safely in the history region.
                DEBUG_PRINTF("safely in history, saving state and skipping\n");
                saveStreamState(nfa, q, last_loc);
                storeRoseDelay(t, state, left, (s64a)ci->len - last_loc);
                return 1;
            }
        }
    }

    // Determine whether the byte before last_loc will be in the history
    // buffer on the next stream write.
    s64a last_loc = q_last_loc(q);
    s64a leftovers = ci->len - last_loc;
    if (leftovers + 1 >= t->historyRequired) {
        u32 catchup_offset = left->maxLag ? left->maxLag - 1 : 0;
        last_loc = (s64a)ci->len - catchup_offset;
    }

    if (left->infix) {
        if (infixTooOld(q, last_loc)) {
            DEBUG_PRINTF("infix died of old age\n");
            return 0;
        }
        reduceInfixQueue(q, last_loc, left->maxQueueLen, q->nfa->maxWidth);
    }

    DEBUG_PRINTF("end scan at %lld\n", last_loc);
    pushQueueNoMerge(q, MQE_END, last_loc);

#ifdef DEBUG
    debugQueue(q);
#endif

    char rv = nfaQueueExecRose(nfa, q, MO_INVALID_IDX);
    if (!rv) { /* nfa is dead */
        DEBUG_PRINTF("died catching up to stream boundary\n");
        return 0;
    } else {
        DEBUG_PRINTF("alive, saving stream state\n");
        if (nfaSupportsZombie(nfa) &&
            nfaGetZombieStatus(nfa, q, last_loc) == NFA_ZOMBIE_ALWAYS_YES) {
            DEBUG_PRINTF("not so fast - zombie\n");
            setAsZombie(t, state, left);
        } else {
            saveStreamState(nfa, q, last_loc);
            storeRoseDelay(t, state, left, (s64a)ci->len - last_loc);
        }
    }

    return 1;
}
Example #5
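roseHandleChainMatch() enqueues a top event on the MPV (chained NFA) queue, which is always queue 0. The queue is activated or re-initialised as needed; if the previous event has the same type and lies within top_squash_distance, its location is simply advanced rather than pushing a new event; and if the event lands exactly at the end of the scanned buffer, the queue is flushed immediately with an MQE_END and either reset or deactivated depending on whether the engine survives.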
hwlmcb_rv_t roseHandleChainMatch(const struct RoseEngine *t,
                                 struct hs_scratch *scratch, u32 event,
                                 u64a top_squash_distance, u64a end,
                                 char in_catchup) {
    assert(event == MQE_TOP || event >= MQE_TOP_FIRST);
    struct core_info *ci = &scratch->core_info;

    u8 *aa = getActiveLeafArray(t, scratch->core_info.state);
    u32 aaCount = t->activeArrayCount;
    struct fatbit *activeQueues = scratch->aqa;
    u32 qCount = t->queueCount;

    const u32 qi = 0; /* MPV is always queue 0 if it exists */
    struct mq *q = &scratch->queues[qi];
    const struct NfaInfo *info = getNfaInfoByQueue(t, qi);

    s64a loc = (s64a)end - ci->buf_offset;
    assert(loc <= (s64a)ci->len && loc >= -(s64a)ci->hlen);

    if (!mmbit_set(aa, aaCount, qi)) {
        initQueue(q, qi, t, scratch);
        nfaQueueInitState(q->nfa, q);
        pushQueueAt(q, 0, MQE_START, loc);
        fatbit_set(activeQueues, qCount, qi);
    } else if (info->no_retrigger) {
        DEBUG_PRINTF("yawn\n");
        /* nfa only needs one top; we can go home now */
        return HWLM_CONTINUE_MATCHING;
    } else if (!fatbit_set(activeQueues, qCount, qi)) {
        initQueue(q, qi, t, scratch);
        loadStreamState(q->nfa, q, 0);
        pushQueueAt(q, 0, MQE_START, 0);
    } else if (isQueueFull(q)) {
        DEBUG_PRINTF("queue %u full -> catching up nfas\n", qi);
        /* we know it is a chained nfa and the suffixes/outfixes must already
         * be known to be consistent */
        if (ensureMpvQueueFlushed(t, scratch, qi, loc, in_catchup)
            == HWLM_TERMINATE_MATCHING) {
            DEBUG_PRINTF("terminating...\n");
            return HWLM_TERMINATE_MATCHING;
        }
    }

    if (top_squash_distance) {
        assert(q->cur != q->end);
        struct mq_item *last = &q->items[q->end - 1];
        if (last->type == event
            && last->location >= loc - (s64a)top_squash_distance) {
            last->location = loc;
            goto event_enqueued;
        }
    }

    pushQueue(q, event, loc);

event_enqueued:
    if (q_cur_loc(q) == (s64a)ci->len) {
        /* we may not run the nfa; need to ensure state is fine  */
        DEBUG_PRINTF("empty run\n");
        pushQueueNoMerge(q, MQE_END, loc);
        char alive = nfaQueueExec(q->nfa, q, loc);
        if (alive) {
            scratch->tctxt.mpv_inactive = 0;
            q->cur = q->end = 0;
            pushQueueAt(q, 0, MQE_START, loc);
        } else {
            mmbit_unset(aa, aaCount, qi);
            fatbit_unset(scratch->aqa, qCount, qi);
        }
    }

    DEBUG_PRINTF("added mpv event at %lld\n", loc);
    scratch->tctxt.next_mpv_offset = 0; /* the top event may result in matches
                                         * earlier than expected */
    return HWLM_CONTINUE_MATCHING;
}