Example #1
void setSomFromSomAware(struct hs_scratch *scratch,
                        const struct som_operation *ri, u64a from_offset,
                        u64a to_offset) {
    assert(scratch);
    assert(ri);
    assert(to_offset);
    assert(ri->type == SOM_INTERNAL_LOC_SET_FROM
           || ri->type == SOM_INTERNAL_LOC_SET_FROM_IF_WRITABLE);

    struct core_info *ci = &scratch->core_info;
    const struct RoseEngine *rose = ci->rose;
    assert(rose->hasSom);

    const u32 som_store_count = rose->somLocationCount;
    u8 *som_store_valid = (u8 *)ci->state + rose->stateOffsets.somValid;
    u8 *som_store_writable = (u8 *)ci->state + rose->stateOffsets.somWritable;
    struct fatbit *som_set_now = scratch->som_set_now;
    struct fatbit *som_attempted_set = scratch->som_attempted_set;
    u64a *som_store = scratch->som_store;
    u64a *som_failed_store = scratch->som_attempted_store;

    if (to_offset != scratch->som_set_now_offset) {
        DEBUG_PRINTF("setting som_set_now_offset=%llu\n", to_offset);
        fatbit_clear(som_set_now);
        fatbit_clear(som_attempted_set);
        scratch->som_set_now_offset = to_offset;
    }

    if (ri->type == SOM_INTERNAL_LOC_SET_FROM) {
        DEBUG_PRINTF("SOM_INTERNAL_LOC_SET_FROM\n");
        mmbit_set(som_store_valid, som_store_count, ri->onmatch);
        setSomLoc(som_set_now, som_store, som_store_count, ri, from_offset);
    } else {
        DEBUG_PRINTF("SOM_INTERNAL_LOC_SET_FROM_IF_WRITABLE\n");
        if (ok_and_mark_if_write(som_store_valid, som_set_now,
                                 som_store_writable, som_store_count,
                                 ri->onmatch)) {
            setSomLoc(som_set_now, som_store, som_store_count, ri, from_offset);
            mmbit_unset(som_store_writable, som_store_count, ri->onmatch);
        } else {
            /* not writable, stash as an attempted write in case we are
             * racing our escape. */
            DEBUG_PRINTF("not writable, stashing attempt\n");
            assert(to_offset >= ri->aux.somDistance);
            u32 som_loc = ri->onmatch;

            if (!fatbit_set(som_attempted_set, som_store_count, som_loc)) {
                som_failed_store[som_loc] = from_offset;
            } else {
                LIMIT_TO_AT_MOST(&som_failed_store[som_loc], from_offset);
            }
            DEBUG_PRINTF("som_failed_store[%u] = %llu\n", som_loc,
                         som_failed_store[som_loc]);
        }
    }
}
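The writable/attempted-write dance above guards against a race between a SOM write and its escape. The standalone sketch below models that idiom with one plain struct instead of Hyperscan's mmbit/fatbit state; all names in it (SlotState, som_try_set) are hypothetical illustrations, not Hyperscan APIs.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
    uint64_t value;      /* stored start-of-match offset */
    uint64_t attempted;  /* earliest write attempted while locked */
    bool valid;          /* slot holds a value */
    bool writable;       /* slot may be overwritten */
    bool attempt_seen;   /* a blocked write has been stashed */
} SlotState;

/* Try to write `from` into the slot; if the slot is locked, remember the
 * earliest blocked attempt so a later "make writable" event can apply it. */
static void som_try_set(SlotState *s, uint64_t from) {
    if (!s->valid || s->writable) {
        s->value = from;
        s->valid = true;
        s->writable = false;
    } else if (!s->attempt_seen || from < s->attempted) {
        s->attempted = from; /* keep the earliest attempt, like
                              * LIMIT_TO_AT_MOST on som_failed_store */
        s->attempt_seen = true;
    }
}

int main(void) {
    SlotState s = {0};
    som_try_set(&s, 10); /* succeeds: slot was empty */
    som_try_set(&s, 7);  /* blocked: stashed as attempt */
    som_try_set(&s, 5);  /* blocked: attempt lowered to 5 */
    printf("value=%llu attempted=%llu\n",
           (unsigned long long)s.value, (unsigned long long)s.attempted);
    return 0;
}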
Example #2
File: eod.c  Project: 0x4e38/hyperscan
static really_inline
void initContext(const struct RoseEngine *t, u8 *state, u64a offset,
                 struct hs_scratch *scratch, RoseCallback callback,
                 RoseCallbackSom som_callback, void *ctx) {
    struct RoseRuntimeState *rstate = getRuntimeState(state);
    struct RoseContext *tctxt = &scratch->tctxt;
    tctxt->t = t;
    tctxt->depth = rstate->stored_depth;
    tctxt->groups = loadGroups(t, state); /* TODO: diff groups for eod */
    tctxt->lit_offset_adjust = scratch->core_info.buf_offset
                             - scratch->core_info.hlen
                             + 1; // index after last byte
    tctxt->delayLastEndOffset = offset;
    tctxt->lastEndOffset = offset;
    tctxt->filledDelayedSlots = 0;
    tctxt->state = state;
    tctxt->cb = callback;
    tctxt->cb_som = som_callback;
    tctxt->userCtx = ctx;
    tctxt->lastMatchOffset = 0;
    tctxt->minMatchOffset = 0;
    tctxt->minNonMpvMatchOffset = 0;
    tctxt->next_mpv_offset = 0;
    tctxt->curr_anchored_loc = MMB_INVALID;
    tctxt->curr_row_offset = 0;

    scratch->catchup_pq.qm_size = 0;
    scratch->al_log_sum = 0; /* clear the anchored logs */

    fatbit_clear(scratch->aqa);
}
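The lit_offset_adjust assignment is the one subtle computation here: it converts a 0-based index in the combined history-plus-buffer view into an absolute stream offset, phrased as "index after last byte". A small worked example with made-up standalone values (in Hyperscan these live in core_info):

#include <stdint.h>
#include <stdio.h>

int main(void) {
    uint64_t buf_offset = 100; /* absolute offset of current buffer */
    uint64_t hlen = 16;        /* bytes of history preceding it */
    uint64_t lit_offset_adjust = buf_offset - hlen + 1;

    /* a literal ending at combined index 15 (the last history byte,
     * absolute offset 99): "index after last byte" is 100 */
    uint64_t end_idx = 15;
    printf("absolute end offset = %llu\n",
           (unsigned long long)(end_idx + lit_offset_adjust)); /* 100 */
    return 0;
}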
Example #3
static really_inline
int clearSomLog(struct hs_scratch *scratch, u64a offset, struct fatbit *log,
                const u64a *starts) {
    DEBUG_PRINTF("at %llu\n", offset);
    struct core_info *ci = &scratch->core_info;
    const struct RoseEngine *rose = ci->rose;
    const u32 dkeyCount = rose->dkeyCount;
    const u32 *dkey_to_report = (const u32 *)
        ((const char *)rose + rose->invDkeyOffset);
    u32 flags = 0;
#ifndef RELEASE_BUILD
    if (scratch->deduper.current_report_offset != offset) {
        flags |= HS_MATCH_FLAG_ADJUSTED;
    }
#endif

    for (u32 it = fatbit_iterate(log, dkeyCount, MMB_INVALID);
             it != MMB_INVALID; it = fatbit_iterate(log, dkeyCount, it)) {
        u64a from_offset = starts[it];
        u32 onmatch = dkey_to_report[it];
        int halt = ci->userCallback(onmatch, from_offset, offset, flags,
                                    ci->userContext);
        if (halt) {
            ci->status |= STATUS_TERMINATED;
            return 1;
        }
    }
    fatbit_clear(log);
    return 0;
}
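clearSomLog is an instance of a recurring pattern: walk the set bits of a log, fire a callback per bit, and halt early without clearing if the callback says stop. A self-contained version over a single 64-bit word (the real code iterates a multi-word fatbit; names here are illustrative):

#include <stdint.h>
#include <stdio.h>

typedef int (*match_cb)(uint32_t key, void *ctx);

static int flush_log(uint64_t *log, match_cb cb, void *ctx) {
    for (uint64_t bits = *log; bits; bits &= bits - 1) {
        uint32_t key = (uint32_t)__builtin_ctzll(bits); /* lowest set bit */
        if (cb(key, ctx)) {
            return 1; /* caller told us to halt; log left as-is */
        }
    }
    *log = 0; /* the equivalent of fatbit_clear on success */
    return 0;
}

static int print_cb(uint32_t key, void *ctx) {
    (void)ctx;
    printf("report for key %u\n", key);
    return 0; /* continue matching */
}

int main(void) {
    uint64_t log = (1ULL << 3) | (1ULL << 17);
    return flush_log(&log, print_cb, NULL);
}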
Example #4
int flushStoredSomMatches_i(struct hs_scratch *scratch, u64a offset) {
    DEBUG_PRINTF("flush som matches\n");
    int halt = 0;

    assert(!told_to_stop_matching(scratch));

    if (scratch->deduper.current_report_offset == ~0ULL) {
        /* no matches recorded yet; just need to clear the logs */
        fatbit_clear(scratch->deduper.som_log[0]);
        fatbit_clear(scratch->deduper.som_log[1]);
        scratch->deduper.som_log_dirty = 0;
        return 0;
    }

    /* fire any reports from the logs and clear them */
    if (offset == scratch->deduper.current_report_offset + 1) {
        struct fatbit *done_log = scratch->deduper.som_log[offset % 2];
        u64a *done_starts = scratch->deduper.som_start_log[offset % 2];

        halt = clearSomLog(scratch, scratch->deduper.current_report_offset - 1,
                           done_log, done_starts);
        scratch->deduper.som_log_dirty >>= 1;
    } else {
        /* The rest of this function was truncated in the snippet source; the
         * completion below follows the structure implied above: reporting has
         * jumped past both logged offsets, so both log halves are stale and
         * must be flushed before the dirty bits are cleared. */
        u64a f_offset = scratch->deduper.current_report_offset - 1;
        u64a s_offset = scratch->deduper.current_report_offset;

        halt = clearSomLog(scratch, f_offset,
                           scratch->deduper.som_log[f_offset % 2],
                           scratch->deduper.som_start_log[f_offset % 2]);
        if (!halt) {
            halt = clearSomLog(scratch, s_offset,
                               scratch->deduper.som_log[s_offset % 2],
                               scratch->deduper.som_start_log[s_offset % 2]);
        }
        scratch->deduper.som_log_dirty = 0;
    }

    return halt;
}
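The offset % 2 indexing above (and in Example #11) is a two-slot scheme: matches ending at offset N are logged in slot N % 2, so that slot can be flushed and recycled once reporting passes N without disturbing matches still pending at N + 1. A toy model of the flush decision, with illustrative names only:

#include <stdint.h>
#include <stdio.h>

/* Pretend to flush the log half holding matches that ended at `at`. */
static void flush(uint64_t at) {
    printf("flush log[%llu] (matches from offset %llu)\n",
           (unsigned long long)(at % 2), (unsigned long long)at);
}

/* Reporting is caught up to cro; decide what to flush to reach `offset`. */
static void advance_to(uint64_t cro, uint64_t offset) {
    if (offset == cro + 1) {
        flush(cro - 1); /* only the half about to be reused */
    } else {
        flush(cro - 1); /* jumped further: both halves are stale */
        flush(cro);
    }
}

int main(void) {
    advance_to(41, 42); /* adjacent offset: one half flushed */
    advance_to(41, 50); /* gap: both halves flushed */
    return 0;
}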
Example #5
File: match.c  Project: 01org/hyperscan
/**
 * \brief Execute a boundary report program.
 *
 * Returns MO_HALT_MATCHING if the stream is exhausted or the user has
 * instructed us to halt, or MO_CONTINUE_MATCHING otherwise.
 */
int roseRunBoundaryProgram(const struct RoseEngine *rose, u32 program,
                           u64a stream_offset, struct hs_scratch *scratch) {
    DEBUG_PRINTF("running boundary program at offset %u\n", program);

    if (can_stop_matching(scratch)) {
        DEBUG_PRINTF("can stop matching\n");
        return MO_HALT_MATCHING;
    }

    if (rose->hasSom && scratch->deduper.current_report_offset == ~0ULL) {
        /* we cannot delay the initialization of the som deduper logs any longer
         * as we are reporting matches. This is done explicitly as we are
         * shortcutting the som handling in the vacuous repeats as we know they
         * all come from non-som patterns. */
        fatbit_clear(scratch->deduper.som_log[0]);
        fatbit_clear(scratch->deduper.som_log[1]);
        scratch->deduper.som_log_dirty = 0;
    }

    // Keep assertions in program report path happy. At offset zero, there can
    // have been no earlier reports. At EOD, all earlier reports should have
    // been handled and we will have been caught up to the stream offset by the
    // time we are running boundary report programs.
    scratch->tctxt.minMatchOffset = stream_offset;

    const u64a som = 0;
    const size_t match_len = 0;
    const u8 flags = 0;
    hwlmcb_rv_t rv = roseRunProgram(rose, scratch, program, som, stream_offset,
                                    match_len, flags);
    if (rv == HWLM_TERMINATE_MATCHING) {
        return MO_HALT_MATCHING;
    }

    return MO_CONTINUE_MATCHING;
}
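current_report_offset == ~0ULL serves as a "no report fired yet" sentinel, letting the deduper logs be initialized lazily on the first report. A minimal sketch of that idiom (names are illustrative, not Hyperscan's):

#include <stdint.h>
#include <stdio.h>

#define NO_REPORTS_YET (~0ULL)

static uint64_t current_report_offset = NO_REPORTS_YET;

static void ensure_logs_ready(void) {
    if (current_report_offset == NO_REPORTS_YET) {
        printf("first report: clearing deduper logs now\n");
        /* fatbit_clear(som_log[0]); fatbit_clear(som_log[1]); ... */
    }
}

int main(void) {
    ensure_logs_ready();          /* triggers one-time init */
    current_report_offset = 7;
    ensure_logs_ready();          /* no-op afterwards */
    return 0;
}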
Example #6
static never_inline
void processReportList(const struct RoseEngine *rose, u32 base_offset,
                       u64a stream_offset, hs_scratch_t *scratch) {
    DEBUG_PRINTF("running report list at offset %u\n", base_offset);

    if (told_to_stop_matching(scratch)) {
        DEBUG_PRINTF("matching has been terminated\n");
        return;
    }

    if (rose->hasSom && scratch->deduper.current_report_offset == ~0ULL) {
        /* we cannot delay the initialization of the som deduper logs any longer
         * as we are reporting matches. This is done explicitly as we are
         * shortcutting the som handling in the vacuous repeats as we know they
         * all come from non-som patterns. */

        fatbit_clear(scratch->deduper.som_log[0]);
        fatbit_clear(scratch->deduper.som_log[1]);
        scratch->deduper.som_log_dirty = 0;
    }

    const ReportID *report =
        (const ReportID *)((const char *)rose + base_offset);

    /* never required to do som as vacuous reports are always external */

    if (rose->simpleCallback) {
        for (; *report != MO_INVALID_IDX; report++) {
            roseSimpleAdaptor(stream_offset, *report, scratch);
        }
    } else {
        for (; *report != MO_INVALID_IDX; report++) {
            roseAdaptor(stream_offset, *report, scratch);
        }
    }
}
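The report list is a sentinel-terminated array: entries are consumed until MO_INVALID_IDX. A self-contained walk in the same shape, with a hypothetical stand-in sentinel:

#include <stdint.h>
#include <stdio.h>

#define REPORT_END 0xffffffffu /* plays the role of MO_INVALID_IDX */

static void process_report_list(const uint32_t *report) {
    for (; *report != REPORT_END; report++) {
        printf("firing report %u\n", *report);
    }
}

int main(void) {
    const uint32_t reports[] = {3, 7, 11, REPORT_END};
    process_report_list(reports);
    return 0;
}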
Example #7
File: match.c  Project: starius/hyperscan
static rose_inline
void recordAnchoredLiteralMatch(const struct RoseEngine *t,
                                struct hs_scratch *scratch, u32 literal_id,
                                u64a end) {
    assert(end);
    struct fatbit **anchoredLiteralRows = getAnchoredLiteralLog(scratch);

    DEBUG_PRINTF("record %u @ %llu\n", literal_id, end);

    if (!bf64_set(&scratch->al_log_sum, end - 1)) {
        // first time, clear row
        DEBUG_PRINTF("clearing %llu/%u\n", end - 1, t->anchored_count);
        fatbit_clear(anchoredLiteralRows[end - 1]);
    }

    u32 rel_idx = literal_id - t->anchored_base_id;
    DEBUG_PRINTF("record %u @ %llu index %u/%u\n", literal_id, end, rel_idx,
                 t->anchored_count);
    assert(rel_idx < t->anchored_count);
    fatbit_set(anchoredLiteralRows[end - 1], t->anchored_count, rel_idx);
}
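From the call site, bf64_set appears to set a bit in a 64-bit summary and report whether it was already set, which is what lets each anchored-log row be cleared lazily on first use. A sketch under that assumed semantics (bf64_set_sketch is a guess, not the real implementation):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Set bit `i`; return true if it was already set. */
static bool bf64_set_sketch(uint64_t *bits, uint32_t i) {
    bool was_set = (*bits >> i) & 1;
    *bits |= 1ULL << i;
    return was_set;
}

int main(void) {
    uint64_t al_log_sum = 0;
    for (int pass = 0; pass < 2; pass++) {
        if (!bf64_set_sketch(&al_log_sum, 5)) {
            printf("first write to row 5: clear it\n"); /* fatbit_clear(...) */
        } else {
            printf("row 5 already live\n");
        }
    }
    return 0;
}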
Example #8
File: stream.c  Project: 01org/hyperscan
static rose_inline
void roseStreamInitEod(const struct RoseEngine *t, u64a offset,
                       struct hs_scratch *scratch) {
    struct RoseContext *tctxt = &scratch->tctxt;
    /* TODO: diff groups for eod */
    tctxt->groups = loadGroups(t, scratch->core_info.state);
    tctxt->lit_offset_adjust = scratch->core_info.buf_offset
                             - scratch->core_info.hlen
                             + 1; // index after last byte
    tctxt->delayLastEndOffset = offset;
    tctxt->lastEndOffset = offset;
    tctxt->filledDelayedSlots = 0;
    tctxt->lastMatchOffset = 0;
    tctxt->minMatchOffset = offset;
    tctxt->minNonMpvMatchOffset = offset;
    tctxt->next_mpv_offset = offset;

    scratch->catchup_pq.qm_size = 0;
    scratch->al_log_sum = 0; /* clear the anchored logs */

    fatbit_clear(scratch->aqa);
}
Example #9
File: stream.c  Project: 01org/hyperscan
void roseStreamExec(const struct RoseEngine *t, struct hs_scratch *scratch) {
    DEBUG_PRINTF("OH HAI [%llu, %llu)\n", scratch->core_info.buf_offset,
                 scratch->core_info.buf_offset + (u64a)scratch->core_info.len);
    assert(t);
    assert(scratch->core_info.hbuf);
    assert(scratch->core_info.buf);

    // We should not have been called if we've already been told to terminate
    // matching.
    assert(!told_to_stop_matching(scratch));

    assert(mmbit_sparse_iter_state_size(t->rolesWithStateCount)
           < MAX_SPARSE_ITER_STATES);

    size_t length = scratch->core_info.len;
    u64a offset = scratch->core_info.buf_offset;

    // We may have a maximum width (for engines constructed entirely
    // of bi-anchored patterns). If this write would result in us progressing
    // beyond this point, we cannot possibly match.
    if (t->maxBiAnchoredWidth != ROSE_BOUND_INF
        && offset + length > t->maxBiAnchoredWidth) {
        DEBUG_PRINTF("bailing, write would progress beyond maxBAWidth\n");
        return;
    }

    char *state = scratch->core_info.state;

    struct RoseContext *tctxt = &scratch->tctxt;
    tctxt->mpv_inactive = 0;
    tctxt->groups = loadGroups(t, state);
    tctxt->lit_offset_adjust = offset + 1; // index after last byte
    tctxt->delayLastEndOffset = offset;
    tctxt->lastEndOffset = offset;
    tctxt->filledDelayedSlots = 0;
    tctxt->lastMatchOffset = 0;
    tctxt->minMatchOffset = offset;
    tctxt->minNonMpvMatchOffset = offset;
    tctxt->next_mpv_offset = 0;
    DEBUG_PRINTF("BEGIN: history len=%zu, buffer len=%zu groups=%016llx\n",
                 scratch->core_info.hlen, scratch->core_info.len, tctxt->groups);

    fatbit_clear(scratch->aqa);
    scratch->al_log_sum = 0;
    scratch->catchup_pq.qm_size = 0;

    if (t->outfixBeginQueue != t->outfixEndQueue) {
        streamInitSufPQ(t, state, scratch);
    }

    runEagerPrefixesStream(t, scratch);

    u32 alen = t->anchoredDistance > offset ?
        MIN(length + offset, t->anchoredDistance) - offset : 0;

    const struct anchored_matcher_info *atable = getALiteralMatcher(t);
    if (atable && alen) {
        DEBUG_PRINTF("BEGIN ANCHORED %zu/%u\n", scratch->core_info.hlen, alen);
        runAnchoredTableStream(t, atable, alen, offset, scratch);

        if (can_stop_matching(scratch)) {
            goto exit;
        }
    }

    const struct HWLM *ftable = getFLiteralMatcher(t);
    if (ftable) {
        if (t->noFloatingRoots && !roseHasInFlightMatches(t, state, scratch)) {
            DEBUG_PRINTF("skip FLOATING: no inflight matches\n");
            goto flush_delay_and_exit;
        }

        size_t flen = length;
        if (t->floatingDistance != ROSE_BOUND_INF) {
            flen = t->floatingDistance > offset ?
                MIN(t->floatingDistance, length + offset) - offset : 0;
        }

        size_t hlength = scratch->core_info.hlen;

        char rebuild = hlength &&
                       (scratch->core_info.status & STATUS_DELAY_DIRTY) &&
                       (t->maxFloatingDelayedMatch == ROSE_BOUND_INF ||
                        offset < t->maxFloatingDelayedMatch);
        DEBUG_PRINTF("**rebuild %hhd status %hhu mfdm %u, offset %llu\n",
                     rebuild, scratch->core_info.status,
                     t->maxFloatingDelayedMatch, offset);

        if (!flen) {
            if (rebuild) { /* rebuild floating delayed match stuff */
                do_rebuild(t, ftable, scratch);
            }
            goto flush_delay_and_exit;
        }

        if (rebuild) { /* rebuild floating delayed match stuff */
            do_rebuild(t, ftable, scratch);
        }

        if (flen + offset <= t->floatingMinDistance) {
            DEBUG_PRINTF("skip FLOATING: before floating min\n");
            goto flush_delay_and_exit;
        }

        size_t start = 0;
        if (offset < t->floatingMinDistance) {
            // This scan crosses the floating min distance, so we can use that
            // to set HWLM's "start" offset.
            start = t->floatingMinDistance - offset;
        }
        DEBUG_PRINTF("start=%zu\n", start);

        u8 *stream_state;
        if (t->floatingStreamState) {
            stream_state = getFloatingMatcherState(t, state);
        } else {
            stream_state = NULL;
        }

        DEBUG_PRINTF("BEGIN FLOATING (over %zu/%zu)\n", flen, length);
        hwlmExecStreaming(ftable, scratch, flen, start, roseFloatingCallback,
                          scratch, tctxt->groups & t->floating_group_mask,
                          stream_state);
    }

flush_delay_and_exit:
    DEBUG_PRINTF("flushing floating\n");
    if (cleanUpDelayed(t, scratch, length, offset) == HWLM_TERMINATE_MATCHING) {
        return;
    }

exit:
    DEBUG_PRINTF("CLEAN UP TIME\n");
    if (!can_stop_matching(scratch)) {
        ensureStreamNeatAndTidy(t, state, scratch, length, offset);
    }
    DEBUG_PRINTF("DONE STREAMING SCAN, status = %u\n",
                 scratch->core_info.status);
    return;
}
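Both the anchored and floating scan lengths above use the same clamp: intersect the write's absolute span [offset, offset + length) with a distance bound measured from the start of the stream. A worked example with made-up numbers:

#include <stdint.h>
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

static uint64_t clamp_len(uint64_t offset, uint64_t length, uint64_t bound) {
    /* mirrors: bound > offset ? MIN(length + offset, bound) - offset : 0 */
    return bound > offset ? MIN(length + offset, bound) - offset : 0;
}

int main(void) {
    printf("%llu\n", (unsigned long long)clamp_len(0, 100, 32));  /* 32 */
    printf("%llu\n", (unsigned long long)clamp_len(20, 100, 32)); /* 12 */
    printf("%llu\n", (unsigned long long)clamp_len(40, 100, 32)); /* 0 */
    return 0;
}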
Example #10
File: eod.c  Project: 0x4e38/hyperscan
static rose_inline
int roseEodRunIterator(const struct RoseEngine *t, u8 *state, u64a offset,
                       struct hs_scratch *scratch) {
    if (!t->eodIterOffset) {
        return MO_CONTINUE_MATCHING;
    }

    const struct RoseRole *roleTable = getRoleTable(t);
    const struct RosePred *predTable = getPredTable(t);
    const struct RoseIterMapping *iterMapBase
        = getByOffset(t, t->eodIterMapOffset);
    const struct mmbit_sparse_iter *it = getByOffset(t, t->eodIterOffset);
    assert(ISALIGNED(iterMapBase));
    assert(ISALIGNED(it));

    // Sparse iterator state was allocated earlier
    struct mmbit_sparse_state *s = scratch->sparse_iter_state;
    struct fatbit *handled_roles = scratch->handled_roles;

    const u32 numStates = t->rolesWithStateCount;

    void *role_state = getRoleState(state);
    u32 idx = 0;
    u32 i = mmbit_sparse_iter_begin(role_state, numStates, &idx, it, s);

    fatbit_clear(handled_roles);

    for (; i != MMB_INVALID;
           i = mmbit_sparse_iter_next(role_state, numStates, i, &idx, it, s)) {
        DEBUG_PRINTF("pred state %u (iter idx=%u) is on\n", i, idx);
        const struct RoseIterMapping *iterMap = iterMapBase + idx;
        const struct RoseIterRole *roles = getByOffset(t, iterMap->offset);
        assert(ISALIGNED(roles));

        DEBUG_PRINTF("%u roles to consider\n", iterMap->count);
        for (u32 j = 0; j != iterMap->count; j++) {
            u32 role = roles[j].role;
            assert(role < t->roleCount);
            DEBUG_PRINTF("checking role %u, pred %u:\n", role, roles[j].pred);
            const struct RoseRole *tr = roleTable + role;

            if (fatbit_isset(handled_roles, t->roleCount, role)) {
                DEBUG_PRINTF("role %u already handled by the walk, skip\n",
                             role);
                continue;
            }

            // Special case: if this role is a trivial case (pred type simple)
            // we don't need to check any history and we already know the pred
            // role is on.
            if (tr->flags & ROSE_ROLE_PRED_SIMPLE) {
                DEBUG_PRINTF("pred type is simple, no need for checks\n");
            } else {
                assert(roles[j].pred < t->predCount);
                const struct RosePred *tp = predTable + roles[j].pred;
                if (!roseCheckPredHistory(tp, offset)) {
                    continue;
                }
            }

            /* mark role as handled so we don't touch it again in this walk */
            fatbit_set(handled_roles, t->roleCount, role);

            DEBUG_PRINTF("fire report for role %u, report=%u\n", role,
                         tr->reportId);
            int rv = scratch->tctxt.cb(offset, tr->reportId,
                                       scratch->tctxt.userCtx);
            if (rv == MO_HALT_MATCHING) {
                return MO_HALT_MATCHING;
            }
        }
    }

    return MO_CONTINUE_MATCHING;
}
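handled_roles guarantees each role fires at most once per walk, even when several live predecessor states map to it. The same dedupe, reduced to a bool array standing in for the fatbit:

#include <stdbool.h>
#include <stdio.h>

#define ROLE_COUNT 8

int main(void) {
    bool handled[ROLE_COUNT] = {false};
    int candidates[] = {3, 5, 3, 5, 6}; /* roles reachable from live preds */

    for (unsigned i = 0; i < sizeof(candidates) / sizeof(candidates[0]); i++) {
        int role = candidates[i];
        if (handled[role]) {
            continue; /* already fired in this walk */
        }
        handled[role] = true;
        printf("fire report for role %d\n", role);
    }
    return 0;
}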
Example #11
static really_inline
int roseSomAdaptor_i(u64a from_offset, u64a to_offset, ReportID id,
                     void *context, char is_simple) {
    assert(id != MO_INVALID_IDX); // Should never get an invalid ID.

    u32 flags = 0;

    struct hs_scratch *scratch = (struct hs_scratch *)context;
    struct core_info *ci = &scratch->core_info;
    const struct RoseEngine *rose = ci->rose;
    const struct internal_report *ri = getInternalReport(rose, id);

    /* internal events should be handled by rose directly */
    assert(ri->type == EXTERNAL_CALLBACK);

    DEBUG_PRINTF("internal match at %llu: IID=%u type=%hhu RID=%u "
                 "offsetAdj=%d\n", to_offset, id, ri->type, ri->onmatch,
                 ri->offsetAdjust);

    if (unlikely(can_stop_matching(scratch))) {
        DEBUG_PRINTF("pre broken - halting\n");
        return MO_HALT_MATCHING;
    }

    if (!is_simple && ri->hasBounds) {
        assert(ri->minOffset || ri->minLength || ri->maxOffset < MAX_OFFSET);
        if (to_offset < ri->minOffset || to_offset > ri->maxOffset) {
            DEBUG_PRINTF("match fell outside valid range %llu !: [%llu,%llu]\n",
                         to_offset, ri->minOffset, ri->maxOffset);
            return MO_CONTINUE_MATCHING;
        }
    }

    int halt = 0;

    if (!is_simple && unlikely(isExhausted(ci->exhaustionVector, ri->ekey))) {
        DEBUG_PRINTF("ate exhausted match\n");
        goto do_return;
    }

#ifdef DEDUPE_MATCHES
    u64a offset = to_offset;
#endif

    to_offset += ri->offsetAdjust;
    assert(from_offset == HS_OFFSET_PAST_HORIZON || from_offset <= to_offset);

    if (!is_simple && ri->minLength) {
        if (from_offset != HS_OFFSET_PAST_HORIZON &&
                (to_offset - from_offset < ri->minLength)) {
            return MO_CONTINUE_MATCHING;
        }
        if (ri->quashSom) {
            from_offset = 0;
        }
    }

    DEBUG_PRINTF(">> reporting match @[%llu,%llu] for sig %u ctxt %p <<\n",
                 from_offset, to_offset, ri->onmatch, ci->userContext);

#ifndef RELEASE_BUILD
    if (ri->offsetAdjust != 0) {
        // alert testing tools that we've got adjusted matches
        flags |= HS_MATCH_FLAG_ADJUSTED;
    }
#endif

#ifdef DEDUPE_MATCHES
    u32 dkeyCount = rose->dkeyCount;

    if (offset != scratch->deduper.current_report_offset) {

        assert(scratch->deduper.current_report_offset == ~0ULL
               || scratch->deduper.current_report_offset < offset);
        if (offset == scratch->deduper.current_report_offset + 1) {
            fatbit_clear(scratch->deduper.log[offset % 2]);
        } else {
            fatbit_clear(scratch->deduper.log[0]);
            fatbit_clear(scratch->deduper.log[1]);
        }

        halt = flushStoredSomMatches(scratch, offset);
        if (halt) {
            goto do_return;
        }

        scratch->deduper.current_report_offset = offset;
    }

    u32 dkey = ri->dkey;
    if (dkey != MO_INVALID_IDX) {
        if (ri->quashSom) {
            DEBUG_PRINTF("checking dkey %u at offset %llu\n", dkey, to_offset);
            assert(ri->offsetAdjust == 0 || ri->offsetAdjust == -1);
            if (fatbit_set(scratch->deduper.log[to_offset % 2], dkeyCount,
                           dkey)) {
                /* we have already raised this report at this offset, squash
                 * dupe match. */
                DEBUG_PRINTF("dedupe\n");
                goto do_return;
            }
        } else {
            /* SOM external event */
            DEBUG_PRINTF("checking dkey %u at offset %llu\n", dkey, to_offset);
            assert(ri->offsetAdjust == 0 || ri->offsetAdjust == -1);
            u64a *starts = scratch->deduper.som_start_log[to_offset % 2];
            if (fatbit_set(scratch->deduper.som_log[to_offset % 2], dkeyCount,
                           dkey)) {
                starts[dkey] = MIN(starts[dkey], from_offset);
            } else {
                starts[dkey] = from_offset;
            }

            if (ri->offsetAdjust) {
                scratch->deduper.som_log_dirty |= 1;
            } else {
                scratch->deduper.som_log_dirty |= 2;
            }

            goto do_return;
        }
    }
#endif

    halt = ci->userCallback((unsigned int)ri->onmatch, from_offset, to_offset,
                            flags, ci->userContext);

    if (!is_simple) {
        markAsMatched(ci->exhaustionVector, ri->ekey);
    }

do_return:
    if (halt) {
        DEBUG_PRINTF("callback requested to terminate matches\n");

        setBroken(ci->state, BROKEN_FROM_USER);
        ci->broken = BROKEN_FROM_USER;

        return MO_HALT_MATCHING;
    }

    return MO_CONTINUE_MATCHING;
}
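In the SOM branch of the deduper above, a repeat match for the same dkey at the same end offset keeps the smaller start, so the longest match wins. A reduced version with fatbit_set's "already set" result modeled as a bool array (all names illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void) {
    uint64_t starts[4];
    bool seen[4] = {false};

    uint64_t events[][2] = {{2, 50}, {2, 30}, {2, 70}}; /* {dkey, from} */
    for (unsigned i = 0; i < 3; i++) {
        uint32_t dkey = (uint32_t)events[i][0];
        uint64_t from = events[i][1];
        if (seen[dkey]) {
            starts[dkey] = MIN(starts[dkey], from); /* keep earliest start */
        } else {
            seen[dkey] = true;
            starts[dkey] = from;
        }
    }
    printf("dkey 2 starts at %llu\n", (unsigned long long)starts[2]); /* 30 */
    return 0;
}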
Example #12
void handleSomInternal(struct hs_scratch *scratch,
                       const struct som_operation *ri, const u64a to_offset) {
    assert(scratch);
    assert(ri);
    DEBUG_PRINTF("-->som action required at %llu\n", to_offset);

    // SOM handling at scan time operates on data held in scratch. In
    // streaming mode, this data is read from / written out to stream state at
    // stream write boundaries.

    struct core_info *ci = &scratch->core_info;
    const struct RoseEngine *rose = ci->rose;
    assert(rose->hasSom);

    const u32 som_store_count = rose->somLocationCount;
    u8 *som_store_valid = (u8 *)ci->state + rose->stateOffsets.somValid;
    u8 *som_store_writable = (u8 *)ci->state + rose->stateOffsets.somWritable;
    struct fatbit *som_set_now = scratch->som_set_now;
    struct fatbit *som_attempted_set = scratch->som_attempted_set;
    u64a *som_store = scratch->som_store;
    u64a *som_failed_store = scratch->som_attempted_store;

    if (to_offset != scratch->som_set_now_offset) {
        assert(scratch->som_set_now_offset == ~0ULL
               || to_offset > scratch->som_set_now_offset);
        DEBUG_PRINTF("setting som_set_now_offset=%llu\n", to_offset);
        fatbit_clear(som_set_now);
        fatbit_clear(som_attempted_set);
        scratch->som_set_now_offset = to_offset;
    }

    switch (ri->type) {
    case SOM_INTERNAL_LOC_SET:
        DEBUG_PRINTF("SOM_INTERNAL_LOC_SET\n");
        mmbit_set(som_store_valid, som_store_count, ri->onmatch);
        setSomLoc(som_set_now, som_store, som_store_count, ri, to_offset);
        return;
    case SOM_INTERNAL_LOC_SET_IF_UNSET:
        DEBUG_PRINTF("SOM_INTERNAL_LOC_SET_IF_UNSET\n");
        if (ok_and_mark_if_unset(som_store_valid, som_set_now, som_store_count,
                                 ri->onmatch)) {
            setSomLoc(som_set_now, som_store, som_store_count, ri, to_offset);
        }
        return;
    case SOM_INTERNAL_LOC_SET_IF_WRITABLE: {
        u32 slot = ri->onmatch;
        DEBUG_PRINTF("SOM_INTERNAL_LOC_SET_IF_WRITABLE\n");
        if (ok_and_mark_if_write(som_store_valid, som_set_now,
                                 som_store_writable, som_store_count, slot)) {
            setSomLoc(som_set_now, som_store, som_store_count, ri, to_offset);
            mmbit_unset(som_store_writable, som_store_count, slot);
        } else {
            /* not writable, stash as an attempted write in case we are
             * racing our escape. */
            DEBUG_PRINTF("not writable, stashing attempt\n");
            assert(to_offset >= ri->aux.somDistance);
            u64a start_offset = to_offset - ri->aux.somDistance;

            if (!fatbit_set(som_attempted_set, som_store_count, slot)) {
                som_failed_store[slot] = start_offset;
            } else {
                LIMIT_TO_AT_MOST(&som_failed_store[slot], start_offset);
            }
            DEBUG_PRINTF("som_failed_store[%u] = %llu\n", slot,
                         som_failed_store[slot]);
        }
        return;
    }
    case SOM_INTERNAL_LOC_SET_REV_NFA:
        DEBUG_PRINTF("SOM_INTERNAL_LOC_SET_REV_NFA\n");
        mmbit_set(som_store_valid, som_store_count, ri->onmatch);
        setSomLocRevNfa(scratch, som_set_now, som_store, som_store_count, ri,
                        to_offset);
        return;
    case SOM_INTERNAL_LOC_SET_REV_NFA_IF_UNSET:
        DEBUG_PRINTF("SOM_INTERNAL_LOC_SET_REV_NFA_IF_UNSET\n");
        if (ok_and_mark_if_unset(som_store_valid, som_set_now, som_store_count,
                                 ri->onmatch)) {
            setSomLocRevNfa(scratch, som_set_now, som_store, som_store_count,
                            ri, to_offset);
        }
        return;
    case SOM_INTERNAL_LOC_SET_REV_NFA_IF_WRITABLE: {
        u32 slot = ri->onmatch;
        DEBUG_PRINTF("SOM_INTERNAL_LOC_SET_IF_WRITABLE\n");
        if (ok_and_mark_if_write(som_store_valid, som_set_now,
                                 som_store_writable, som_store_count, slot)) {
            setSomLocRevNfa(scratch, som_set_now, som_store, som_store_count,
                            ri, to_offset);
            mmbit_unset(som_store_writable, som_store_count, slot);
        } else {
            /* not writable, stash as an attempted write in case we are
             * racing our escape. */
            DEBUG_PRINTF("not writable, stashing attempt\n");

            u64a from_offset = 0;
            runRevNfa(scratch, ri, to_offset, &from_offset);

            if (!fatbit_set(som_attempted_set, som_store_count, slot)) {
                som_failed_store[slot] = from_offset;
            } else {
                LIMIT_TO_AT_MOST(&som_failed_store[slot], from_offset);
            }
            DEBUG_PRINTF("som_failed_store[%u] = %llu\n", slot,
                         som_failed_store[slot]);
        }
        return;
    }
    case SOM_INTERNAL_LOC_COPY: {
        u32 slot_in = ri->aux.somDistance;
        u32 slot_out = ri->onmatch;
        DEBUG_PRINTF("SOM_INTERNAL_LOC_COPY S[%u] = S[%u]\n", slot_out,
                     slot_in);
        assert(mmbit_isset(som_store_valid, som_store_count, slot_in));
        mmbit_set(som_store_valid, som_store_count, slot_out);
        fatbit_set(som_set_now, som_store_count, slot_out);
        som_store[slot_out] = som_store[slot_in];

        return;
    }
    case SOM_INTERNAL_LOC_COPY_IF_WRITABLE: {
        u32 slot_in = ri->aux.somDistance;
        u32 slot_out = ri->onmatch;
        DEBUG_PRINTF("SOM_INTERNAL_LOC_COPY_IF_WRITABLE S[%u] = S[%u]\n",
                     slot_out, slot_in);
        assert(mmbit_isset(som_store_valid, som_store_count, slot_in));
        if (ok_and_mark_if_write(som_store_valid, som_set_now,
                                 som_store_writable, som_store_count,
                                 slot_out)) {
            DEBUG_PRINTF("copy, set som_store[%u]=%llu\n", slot_out,
                         som_store[slot_in]);
            som_store[slot_out] = som_store[slot_in];
            fatbit_set(som_set_now, som_store_count, slot_out);
            mmbit_unset(som_store_writable, som_store_count, slot_out);
        } else {
            /* not writable, stash as an attempted write in case we are
             * racing our escape */
            DEBUG_PRINTF("not writable, stashing attempt\n");
            fatbit_set(som_attempted_set, som_store_count, slot_out);
            som_failed_store[slot_out] = som_store[slot_in];
            DEBUG_PRINTF("som_failed_store[%u] = %llu\n", slot_out,
                         som_failed_store[slot_out]);
        }
        return;
    }
    case SOM_INTERNAL_LOC_MAKE_WRITABLE: {
        u32 slot = ri->onmatch;
        DEBUG_PRINTF("SOM_INTERNAL_LOC_MAKE_WRITABLE\n");
        /* if just written to the loc, ignore the racing escape */
        if (fatbit_isset(som_set_now, som_store_count, slot)) {
            DEBUG_PRINTF("just written\n");
            return;
        }
        if (fatbit_isset(som_attempted_set, som_store_count, slot)) {
            /* writes were waiting for an escape to arrive */
            DEBUG_PRINTF("setting som_store[%u] = %llu from "
                         "som_failed_store[%u]\n", slot, som_failed_store[slot],
                         slot);
            som_store[slot] = som_failed_store[slot];
            fatbit_set(som_set_now, som_store_count, slot);
            return;
        }
        mmbit_set(som_store_writable, som_store_count, slot);
        return;
    }
    default:
        DEBUG_PRINTF("unknown report type!\n");
        break;
    }

    // All valid som_operation types should be handled and returned above.
    assert(0);
    return;
}
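The MAKE_WRITABLE case resolves the write-vs-escape race three ways: a write that landed at the current offset beats the escape, otherwise a stashed attempt is promoted, and otherwise the slot is merely unlocked for future writes. A compact model of that decision with plain flags (illustrative names only, not Hyperscan state):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
    uint64_t value, attempted;
    bool set_now;      /* written at the current offset (fatbit som_set_now) */
    bool attempt_seen; /* blocked write stashed (fatbit som_attempted_set) */
    bool writable;     /* mmbit som_store_writable */
} Slot;

static void make_writable(Slot *s) {
    if (s->set_now) {
        return;                  /* just written: ignore the racing escape */
    }
    if (s->attempt_seen) {
        s->value = s->attempted; /* promote the waiting write */
        s->set_now = true;
        return;
    }
    s->writable = true;          /* nothing pending: unlock the slot */
}

int main(void) {
    Slot s = {.attempted = 12, .attempt_seen = true};
    make_writable(&s);
    printf("value=%llu writable=%d\n", (unsigned long long)s.value,
           (int)s.writable); /* value=12 writable=0 */
    return 0;
}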