Example #1
static really_inline
char ok_and_mark_if_unset(u8 *som_store_valid, struct fatbit *som_set_now,
                          u32 som_store_count, u32 loc) {
    return !mmbit_set(som_store_valid, som_store_count, loc) /* unwritten */
        || fatbit_isset(som_set_now, som_store_count, loc); /* write here, need
                                                            * to resolve race */
}
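
In Hyperscan, mmbit_set and fatbit_set return the bit's previous value, which is what lets the function above mark the slot valid and decide whether the write may proceed in a single expression. A minimal sketch of the same set-and-test idiom over a toy single-word bitset (set_bit and is_set are hypothetical stand-ins, not the real multibit API):

#include <stdint.h>
#include <stdio.h>

/* Toy stand-ins: like mmbit_set/fatbit_isset above, set_bit returns the
 * bit's value before the write. */
static int set_bit(uint64_t *bits, unsigned i) {
    int was_set = (int)((*bits >> i) & 1);
    *bits |= (uint64_t)1 << i;
    return was_set;
}

static int is_set(const uint64_t *bits, unsigned i) {
    return (int)((*bits >> i) & 1);
}

/* Same shape as ok_and_mark_if_unset: allow the write if the slot was
 * never written, or if its first write happened at this same offset
 * (the "set now" bit), which resolves the race. */
static int ok_and_mark_if_unset_toy(uint64_t *valid, const uint64_t *set_now,
                                    unsigned slot) {
    return !set_bit(valid, slot) || is_set(set_now, slot);
}

int main(void) {
    uint64_t valid = 0, set_now = 0;
    printf("%d\n", ok_and_mark_if_unset_toy(&valid, &set_now, 3)); /* 1 */
    printf("%d\n", ok_and_mark_if_unset_toy(&valid, &set_now, 3)); /* 0 */
    return 0;
}
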
Example #2
// Saves out stream state for all our active suffix NFAs.
static rose_inline
void roseSaveNfaStreamState(const struct RoseEngine *t, char *state,
                            struct hs_scratch *scratch) {
    struct mq *queues = scratch->queues;
    u8 *aa = getActiveLeafArray(t, state);
    u32 aaCount = t->activeArrayCount;

    if (scratch->tctxt.mpv_inactive) {
        DEBUG_PRINTF("mpv is dead as a doornail\n");
        /* mpv if it exists is queue 0 */
        mmbit_unset(aa, aaCount, 0);
    }

    for (u32 qi = mmbit_iterate(aa, aaCount, MMB_INVALID); qi != MMB_INVALID;
         qi = mmbit_iterate(aa, aaCount, qi)) {
        DEBUG_PRINTF("saving stream state for qi=%u\n", qi);

        struct mq *q = queues + qi;

        // If it's active, it should have an active queue (as we should have
        // done some work!)
        assert(fatbit_isset(scratch->aqa, t->queueCount, qi));

        const struct NFA *nfa = getNfaByQueue(t, qi);
        saveStreamState(nfa, q, q_cur_loc(q));
    }
}
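
The for-loop above is the standard multibit iteration idiom: mmbit_iterate(aa, count, MMB_INVALID) yields the first set bit, and each subsequent call yields the next set bit after the given index, until MMB_INVALID signals exhaustion. A rough equivalent over a single 64-bit word (toy code assuming GCC/Clang's __builtin_ctzll; the real multibit scales to far larger bitsets):

#include <stdint.h>
#include <stdio.h>

#define INVALID 64u

/* Return the index of the first set bit strictly after 'prev' (or the
 * first set bit overall when prev == INVALID); INVALID when none. */
static unsigned next_set_bit(uint64_t bits, unsigned prev) {
    if (prev != INVALID) {
        if (prev >= 63) {
            return INVALID;
        }
        bits &= ~(((uint64_t)2 << prev) - 1); /* clear bits 0..prev */
    }
    if (!bits) {
        return INVALID;
    }
    return (unsigned)__builtin_ctzll(bits);
}

int main(void) {
    uint64_t active = (1ull << 2) | (1ull << 5) | (1ull << 40);
    for (unsigned qi = next_set_bit(active, INVALID); qi != INVALID;
         qi = next_set_bit(active, qi)) {
        printf("active queue %u\n", qi); /* 2, 5, 40 */
    }
    return 0;
}
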
Example #3
static rose_inline
void roseCheckEodSuffixes(const struct RoseEngine *t, u8 *state, u64a offset,
                          struct hs_scratch *scratch) {
    const u8 *aa = getActiveLeafArray(t, state);
    const u32 aaCount = t->activeArrayCount;
    UNUSED u32 qCount = t->queueCount;

    for (u32 qi = mmbit_iterate(aa, aaCount, MMB_INVALID); qi != MMB_INVALID;
         qi = mmbit_iterate(aa, aaCount, qi)) {
        const struct NfaInfo *info = getNfaInfoByQueue(t, qi);
        const struct NFA *nfa = getNfaByInfo(t, info);

        assert(nfaAcceptsEod(nfa));

        DEBUG_PRINTF("checking nfa %u\n", qi);

        assert(fatbit_isset(scratch->aqa, qCount, qi)); /* we have just been
                                                           triggered */

        char *fstate = scratch->fullState + info->fullStateOffset;
        const char *sstate = (const char *)state + info->stateOffset;

        struct mq *q = scratch->queues + qi;

        pushQueueNoMerge(q, MQE_END, scratch->core_info.len);

        q->context = NULL;
        /* rose exec is used as we don't want to / can't raise matches in the
         * history buffer. */
        char rv = nfaQueueExecRose(q->nfa, q, MO_INVALID_IDX);
        if (rv) { /* nfa is still alive */
            nfaCheckFinalState(nfa, fstate, sstate, offset, scratch->tctxt.cb,
                               scratch->tctxt.cb_som, scratch->tctxt.userCtx);
        }
    }
}
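
The queue handling here follows a pattern that recurs through these examples: MQE_START/MQE_TOP/MQE_END events are pushed onto a per-NFA queue between its cur and end cursors, and the engine is then executed over them. A toy sketch of that queue discipline (struct toy_mq and its event names are illustrative only, a much-simplified stand-in for struct mq):

#include <stdio.h>

enum { TOY_START, TOY_TOP, TOY_END };

/* Toy event queue in the spirit of struct mq: an array of (type, loc)
 * items bracketed by cur/end cursors. */
struct toy_item { int type; long long loc; };
struct toy_mq { struct toy_item items[8]; unsigned cur, end; };

static void push(struct toy_mq *q, int type, long long loc) {
    q->items[q->end].type = type;
    q->items[q->end].loc = loc;
    q->end++;
}

int main(void) {
    struct toy_mq q = { .cur = 0, .end = 0 };
    long long scan_len = 128;

    push(&q, TOY_START, 0);      /* cf. pushQueueAt(q, 0, MQE_START, 0) */
    push(&q, TOY_TOP, 0);        /* engine receives a top at offset 0 */
    push(&q, TOY_END, scan_len); /* cf. pushQueueNoMerge(q, MQE_END, len) */

    for (unsigned i = q.cur; i < q.end; i++) {
        printf("event %d at %lld\n", q.items[i].type, q.items[i].loc);
    }
    return 0;
}
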
Example #4
static rose_inline
void runEagerPrefixesStream(const struct RoseEngine *t,
                            struct hs_scratch *scratch) {
    if (!t->eagerIterOffset
        || scratch->core_info.buf_offset >= EAGER_STOP_OFFSET) {
        return;
    }

    char *state = scratch->core_info.state;
    u8 *ara = getActiveLeftArray(t, state); /* indexed by offsets into
                                             * left_table */
    const u32 arCount = t->activeLeftCount;
    const u32 qCount = t->queueCount;
    const struct LeftNfaInfo *left_table = getLeftTable(t);
    const struct mmbit_sparse_iter *it = getByOffset(t, t->eagerIterOffset);

    struct mmbit_sparse_state si_state[MAX_SPARSE_ITER_STATES];

    u32 idx = 0;
    u32 ri = mmbit_sparse_iter_begin(ara, arCount, &idx, it, si_state);
    for (; ri != MMB_INVALID;
           ri = mmbit_sparse_iter_next(ara, arCount, ri, &idx, it, si_state)) {
        const struct LeftNfaInfo *left = left_table + ri;
        u32 qi = ri + t->leftfixBeginQueue;
        DEBUG_PRINTF("leftfix %u of %u, maxLag=%u\n", ri, arCount, left->maxLag);

        assert(!fatbit_isset(scratch->aqa, qCount, qi));
        assert(left->eager);
        assert(!left->infix);

        struct mq *q = scratch->queues + qi;
        const struct NFA *nfa = getNfaByQueue(t, qi);
        s64a loc = MIN(scratch->core_info.len,
                       EAGER_STOP_OFFSET - scratch->core_info.buf_offset);

        fatbit_set(scratch->aqa, qCount, qi);
        initRoseQueue(t, qi, left, scratch);

        if (scratch->core_info.buf_offset) {
            s64a sp = left->transient ? -(s64a)scratch->core_info.hlen
                                      : -(s64a)loadRoseDelay(t, state, left);
            pushQueueAt(q, 0, MQE_START, sp);
            if (scratch->core_info.buf_offset + sp > 0) {
                loadStreamState(nfa, q, sp);
                /* if the leftfix is currently in a match state, we cannot
                 * advance it. */
                if (nfaInAnyAcceptState(nfa, q)) {
                    continue;
                }
                pushQueueAt(q, 1, MQE_END, loc);
            } else {
                pushQueueAt(q, 1, MQE_TOP, sp);
                pushQueueAt(q, 2, MQE_END, loc);
                nfaQueueInitState(q->nfa, q);
            }
        } else {
            pushQueueAt(q, 0, MQE_START, 0);
            pushQueueAt(q, 1, MQE_TOP, 0);
            pushQueueAt(q, 2, MQE_END, loc);
            nfaQueueInitState(nfa, q);
        }

        char alive = nfaQueueExecToMatch(q->nfa, q, loc);

        if (!alive) {
            DEBUG_PRINTF("queue %u dead, squashing\n", qi);
            mmbit_unset(ara, arCount, ri);
            fatbit_unset(scratch->aqa, qCount, qi);
            scratch->tctxt.groups &= left->squash_mask;
        } else if (q->cur == q->end) {
            assert(alive != MO_MATCHES_PENDING);
            /* unlike in block mode we cannot squash groups if there is no match
             * in this block as we need the groups on for later stream writes */
            /* TODO: investigate possibility of a method to suppress groups for
             * a single stream block. */
            DEBUG_PRINTF("queue %u finished, nfa lives\n", qi);
            q->cur = q->end = 0;
            pushQueueAt(q, 0, MQE_START, loc);
        } else {
            assert(alive == MO_MATCHES_PENDING);
            DEBUG_PRINTF("queue %u unfinished, nfa lives\n", qi);
            q->end--; /* remove end item */
        }
    }
}
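
Eager prefixes only run over the first EAGER_STOP_OFFSET bytes of a stream, which is why the function bails out early on buf_offset and why loc is clamped with MIN above. A small sketch of that clamp arithmetic (EAGER_STOP is a made-up stand-in value; the real constant lives in the Rose runtime):

#include <stdint.h>
#include <stdio.h>

#define EAGER_STOP 1024 /* hypothetical stand-in for EAGER_STOP_OFFSET */

static int64_t min64(int64_t a, int64_t b) { return a < b ? a : b; }

/* Eager scanning ends either at the end of this write or at the point
 * where the stream passes EAGER_STOP, whichever comes first. */
static int64_t eager_scan_end(uint64_t buf_offset, uint64_t len) {
    if (buf_offset >= EAGER_STOP) {
        return 0; /* nothing to do, mirrors the early return above */
    }
    return min64((int64_t)len, (int64_t)(EAGER_STOP - buf_offset));
}

int main(void) {
    printf("%lld\n", (long long)eager_scan_end(0, 4096));    /* 1024 */
    printf("%lld\n", (long long)eager_scan_end(1000, 4096)); /* 24 */
    printf("%lld\n", (long long)eager_scan_end(2048, 4096)); /* 0 */
    return 0;
}
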
Example #5
static really_inline
char roseCatchUpLeftfix(const struct RoseEngine *t, char *state,
                        struct hs_scratch *scratch, u32 qi,
                        const struct LeftNfaInfo *left) {
    assert(!left->transient); // active roses only

    struct core_info *ci = &scratch->core_info;
    const u32 qCount = t->queueCount;
    struct mq *q = scratch->queues + qi;
    const struct NFA *nfa = getNfaByQueue(t, qi);

    if (nfaSupportsZombie(nfa)
        && ci->buf_offset /* prefix can be alive with no q */
        && !fatbit_isset(scratch->aqa, qCount, qi)
        && isZombie(t, state, left)) {
        DEBUG_PRINTF("yawn - zombie\n");
        return 1;
    }

    if (left->stopTable) {
        enum MiracleAction mrv =
            roseScanForMiracles(t, state, scratch, qi, left, nfa);
        switch (mrv) {
        case MIRACLE_DEAD:
            return 0;
        case MIRACLE_SAVED:
            return 1;
        default:
            assert(mrv == MIRACLE_CONTINUE);
            break;
        }
    }

    if (!fatbit_set(scratch->aqa, qCount, qi)) {
        initRoseQueue(t, qi, left, scratch);

        s32 sp;
        if (ci->buf_offset) {
            sp = -(s32)loadRoseDelay(t, state, left);
        } else {
            sp = 0;
        }

        DEBUG_PRINTF("ci->len=%zu, sp=%d, historyRequired=%u\n", ci->len, sp,
                     t->historyRequired);

        if (ci->len - sp + 1 < t->historyRequired) {
            // we'll end up safely in the history region.
            DEBUG_PRINTF("safely in history, skipping\n");
            storeRoseDelay(t, state, left, (s64a)ci->len - sp);
            return 1;
        }

        pushQueueAt(q, 0, MQE_START, sp);
        if (left->infix || ci->buf_offset + sp > 0) {
            loadStreamState(nfa, q, sp);
        } else {
            pushQueueAt(q, 1, MQE_TOP, sp);
            nfaQueueInitState(nfa, q);
        }
    } else {
        DEBUG_PRINTF("queue already active\n");
        if (q->end - q->cur == 1 && q_cur_type(q) == MQE_START) {
            DEBUG_PRINTF("empty queue, start loc=%lld\n", q_cur_loc(q));
            s64a last_loc = q_cur_loc(q);
            if (ci->len - last_loc + 1 < t->historyRequired) {
                // we'll end up safely in the history region.
                DEBUG_PRINTF("safely in history, saving state and skipping\n");
                saveStreamState(nfa, q, last_loc);
                storeRoseDelay(t, state, left, (s64a)ci->len - last_loc);
                return 1;
            }
        }
    }

    // Determine whether the byte before last_loc will be in the history
    // buffer on the next stream write.
    s64a last_loc = q_last_loc(q);
    s64a leftovers = ci->len - last_loc;
    if (leftovers + 1 >= t->historyRequired) {
        u32 catchup_offset = left->maxLag ? left->maxLag - 1 : 0;
        last_loc = (s64a)ci->len - catchup_offset;
    }

    if (left->infix) {
        if (infixTooOld(q, last_loc)) {
            DEBUG_PRINTF("infix died of old age\n");
            return 0;
        }
        reduceInfixQueue(q, last_loc, left->maxQueueLen, q->nfa->maxWidth);
    }

    DEBUG_PRINTF("end scan at %lld\n", last_loc);
    pushQueueNoMerge(q, MQE_END, last_loc);

#ifdef DEBUG
    debugQueue(q);
#endif

    char rv = nfaQueueExecRose(nfa, q, MO_INVALID_IDX);
    if (!rv) { /* nfa is dead */
        DEBUG_PRINTF("died catching up to stream boundary\n");
        return 0;
    } else {
        DEBUG_PRINTF("alive, saving stream state\n");
        if (nfaSupportsZombie(nfa) &&
            nfaGetZombieStatus(nfa, q, last_loc) == NFA_ZOMBIE_ALWAYS_YES) {
            DEBUG_PRINTF("not so fast - zombie\n");
            setAsZombie(t, state, left);
        } else {
            saveStreamState(nfa, q, last_loc);
            storeRoseDelay(t, state, left, (s64a)ci->len - last_loc);
        }
    }

    return 1;
}
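
The two "safely in history" early-outs above rest on one piece of arithmetic: if every byte from just before the queue's current location to the end of this write will still be visible in the history buffer on the next stream write, catch-up can be deferred and those bytes replayed later. A toy version of that test, assuming the same meanings for len, last_loc and historyRequired:

#include <stdint.h>
#include <stdio.h>

/* True if the span from the byte before last_loc to the end of the
 * current write fits inside the history window, so the engine may
 * store a delay and skip catching up now. */
static int safely_in_history(int64_t len, int64_t last_loc,
                             uint32_t history_required) {
    return len - last_loc + 1 < (int64_t)history_required;
}

int main(void) {
    /* 100-byte write, queue parked at loc 95, 16 bytes of history kept:
     * only 6 bytes would need replaying, so defer. */
    printf("%d\n", safely_in_history(100, 95, 16)); /* 1 */
    /* parked at loc 10: 91 bytes outrun the history window, catch up now */
    printf("%d\n", safely_in_history(100, 10, 16)); /* 0 */
    return 0;
}
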
Example #6
static really_inline
enum MiracleAction roseScanForMiracles(const struct RoseEngine *t, char *state,
                                       struct hs_scratch *scratch, u32 qi,
                                       const struct LeftNfaInfo *left,
                                       const struct NFA *nfa) {
    struct core_info *ci = &scratch->core_info;
    const u32 qCount = t->queueCount;
    struct mq *q = scratch->queues + qi;

    const char q_active = fatbit_isset(scratch->aqa, qCount, qi);
    DEBUG_PRINTF("q_active=%d\n", q_active);

    const s64a begin_loc = q_active ? q_cur_loc(q) : 0;
    const s64a end_loc = ci->len;

    s64a miracle_loc;
    if (roseMiracleOccurs(t, left, ci, begin_loc, end_loc, &miracle_loc)) {
        goto found_miracle;
    }

    if (roseCountingMiracleOccurs(t, left, ci, begin_loc, end_loc,
                                  &miracle_loc)) {
        goto found_miracle;
    }

    DEBUG_PRINTF("no miracle\n");
    return MIRACLE_CONTINUE;

found_miracle:
    DEBUG_PRINTF("miracle at %lld\n", miracle_loc);

    if (left->infix) {
        if (!q_active) {
            DEBUG_PRINTF("killing infix\n");
            return MIRACLE_DEAD;
        }

        DEBUG_PRINTF("skip q forward, %lld to %lld\n", begin_loc, miracle_loc);
        q_skip_forward_to(q, miracle_loc);
        if (q_last_type(q) == MQE_START) {
            DEBUG_PRINTF("miracle caused infix to die\n");
            return MIRACLE_DEAD;
        }

        DEBUG_PRINTF("re-init infix state\n");
        assert(q->items[q->cur].type == MQE_START);
        q->items[q->cur].location = miracle_loc;
        nfaQueueInitState(q->nfa, q);
    } else {
        if (miracle_loc > end_loc - t->historyRequired) {
            char *streamState = state + getNfaInfoByQueue(t, qi)->stateOffset;
            u64a offset = ci->buf_offset + miracle_loc;
            u8 key = offset ? getByteBefore(ci, miracle_loc) : 0;
            DEBUG_PRINTF("init state, key=0x%02x, offset=%llu\n", key, offset);
            if (!nfaInitCompressedState(nfa, offset, streamState, key)) {
                return MIRACLE_DEAD;
            }
            storeRoseDelay(t, state, left, (s64a)ci->len - miracle_loc);
            return MIRACLE_SAVED;
        }

        DEBUG_PRINTF("re-init prefix (skip %lld->%lld)\n", begin_loc,
                     miracle_loc);
        if (!q_active) {
            fatbit_set(scratch->aqa, qCount, qi);
            initRoseQueue(t, qi, left, scratch);
        }
        q->cur = q->end = 0;
        pushQueueAt(q, 0, MQE_START, miracle_loc);
        pushQueueAt(q, 1, MQE_TOP, miracle_loc);
        nfaQueueInitState(q->nfa, q);
    }

    return MIRACLE_CONTINUE;
}
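
A "miracle" is a position the engine provably cannot be alive across, e.g. a byte outside everything the leftfix can match, so any state before it is irrelevant and the NFA can be re-initialised just after it. A deliberately simplified sketch of the idea (find_miracle is hypothetical and keys on a single literal byte; the real roseMiracleOccurs works from precomputed reach information):

#include <stdio.h>
#include <string.h>

/* Toy miracle scan: find the last byte in [begin, end) that the engine
 * could not survive (here: anything other than 'a'). Everything before
 * it is dead history; scanning can resume just after it. */
static int find_miracle(const char *buf, long long begin, long long end,
                        long long *miracle_loc) {
    for (long long i = end - 1; i >= begin; i--) {
        if (buf[i] != 'a') {
            *miracle_loc = i + 1; /* re-init state after the killing byte */
            return 1;
        }
    }
    return 0;
}

int main(void) {
    const char *buf = "aaaaXaaa";
    long long loc;
    if (find_miracle(buf, 0, (long long)strlen(buf), &loc)) {
        printf("miracle, resume at %lld\n", loc); /* 5 */
    }
    return 0;
}
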
Example #7
static rose_inline
int roseEodRunIterator(const struct RoseEngine *t, u8 *state, u64a offset,
                       struct hs_scratch *scratch) {
    if (!t->eodIterOffset) {
        return MO_CONTINUE_MATCHING;
    }

    const struct RoseRole *roleTable = getRoleTable(t);
    const struct RosePred *predTable = getPredTable(t);
    const struct RoseIterMapping *iterMapBase
        = getByOffset(t, t->eodIterMapOffset);
    const struct mmbit_sparse_iter *it = getByOffset(t, t->eodIterOffset);
    assert(ISALIGNED(iterMapBase));
    assert(ISALIGNED(it));

    // Sparse iterator state was allocated earlier
    struct mmbit_sparse_state *s = scratch->sparse_iter_state;
    struct fatbit *handled_roles = scratch->handled_roles;

    const u32 numStates = t->rolesWithStateCount;

    void *role_state = getRoleState(state);
    u32 idx = 0;
    u32 i = mmbit_sparse_iter_begin(role_state, numStates, &idx, it, s);

    fatbit_clear(handled_roles);

    for (; i != MMB_INVALID;
           i = mmbit_sparse_iter_next(role_state, numStates, i, &idx, it, s)) {
        DEBUG_PRINTF("pred state %u (iter idx=%u) is on\n", i, idx);
        const struct RoseIterMapping *iterMap = iterMapBase + idx;
        const struct RoseIterRole *roles = getByOffset(t, iterMap->offset);
        assert(ISALIGNED(roles));

        DEBUG_PRINTF("%u roles to consider\n", iterMap->count);
        for (u32 j = 0; j != iterMap->count; j++) {
            u32 role = roles[j].role;
            assert(role < t->roleCount);
            DEBUG_PRINTF("checking role %u, pred %u:\n", role, roles[j].pred);
            const struct RoseRole *tr = roleTable + role;

            if (fatbit_isset(handled_roles, t->roleCount, role)) {
                DEBUG_PRINTF("role %u already handled by the walk, skip\n",
                             role);
                continue;
            }

            // Special case: if this role is a trivial case (pred type simple)
            // we don't need to check any history and we already know the pred
            // role is on.
            if (tr->flags & ROSE_ROLE_PRED_SIMPLE) {
                DEBUG_PRINTF("pred type is simple, no need for checks\n");
            } else {
                assert(roles[j].pred < t->predCount);
                const struct RosePred *tp = predTable + roles[j].pred;
                if (!roseCheckPredHistory(tp, offset)) {
                    continue;
                }
            }

            /* mark role as handled so we don't touch it again in this walk */
            fatbit_set(handled_roles, t->roleCount, role);

            DEBUG_PRINTF("fire report for role %u, report=%u\n", role,
                         tr->reportId);
            int rv = scratch->tctxt.cb(offset, tr->reportId,
                                       scratch->tctxt.userCtx);
            if (rv == MO_HALT_MATCHING) {
                return MO_HALT_MATCHING;
            }
        }
    }

    return MO_CONTINUE_MATCHING;
}
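
The handled_roles fatbit above exists because the sparse-iterator walk can reach the same role through several different predecessor states, and each role should fire at most once per walk. A toy version of that dedupe using a single-word bitmap (roles_from_preds is invented data for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void) {
    /* Several predecessor states can map to the same role. */
    unsigned roles_from_preds[] = { 3, 7, 3, 9, 7 };
    uint64_t handled = 0;

    for (unsigned i = 0; i < 5; i++) {
        unsigned role = roles_from_preds[i];
        if (handled & ((uint64_t)1 << role)) {
            continue; /* already fired for this role in this walk */
        }
        handled |= (uint64_t)1 << role;
        printf("fire report for role %u\n", role); /* 3, 7, 9: once each */
    }
    return 0;
}
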
Example #8
void handleSomInternal(struct hs_scratch *scratch,
                       const struct som_operation *ri, const u64a to_offset) {
    assert(scratch);
    assert(ri);
    DEBUG_PRINTF("-->som action required at %llu\n", to_offset);

    // SOM handling at scan time operates on data held in scratch. In
    // streaming mode, this data is read from / written out to stream state at
    // stream write boundaries.

    struct core_info *ci = &scratch->core_info;
    const struct RoseEngine *rose = ci->rose;
    assert(rose->hasSom);

    const u32 som_store_count = rose->somLocationCount;
    u8 *som_store_valid = (u8 *)ci->state + rose->stateOffsets.somValid;
    u8 *som_store_writable = (u8 *)ci->state + rose->stateOffsets.somWritable;
    struct fatbit *som_set_now = scratch->som_set_now;
    struct fatbit *som_attempted_set = scratch->som_attempted_set;
    u64a *som_store = scratch->som_store;
    u64a *som_failed_store = scratch->som_attempted_store;

    if (to_offset != scratch->som_set_now_offset) {
        assert(scratch->som_set_now_offset == ~0ULL
               || to_offset > scratch->som_set_now_offset);
        DEBUG_PRINTF("setting som_set_now_offset=%llu\n", to_offset);
        fatbit_clear(som_set_now);
        fatbit_clear(som_attempted_set);
        scratch->som_set_now_offset = to_offset;
    }

    switch (ri->type) {
    case SOM_INTERNAL_LOC_SET:
        DEBUG_PRINTF("SOM_INTERNAL_LOC_SET\n");
        mmbit_set(som_store_valid, som_store_count, ri->onmatch);
        setSomLoc(som_set_now, som_store, som_store_count, ri, to_offset);
        return;
    case SOM_INTERNAL_LOC_SET_IF_UNSET:
        DEBUG_PRINTF("SOM_INTERNAL_LOC_SET_IF_UNSET\n");
        if (ok_and_mark_if_unset(som_store_valid, som_set_now, som_store_count,
                                 ri->onmatch)) {
            setSomLoc(som_set_now, som_store, som_store_count, ri, to_offset);
        }
        return;
    case SOM_INTERNAL_LOC_SET_IF_WRITABLE: {
        u32 slot = ri->onmatch;
        DEBUG_PRINTF("SOM_INTERNAL_LOC_SET_IF_WRITABLE\n");
        if (ok_and_mark_if_write(som_store_valid, som_set_now,
                                 som_store_writable, som_store_count, slot)) {
            setSomLoc(som_set_now, som_store, som_store_count, ri, to_offset);
            mmbit_unset(som_store_writable, som_store_count, slot);
        } else {
            /* not writable, stash as an attempted write in case we are
             * racing our escape. */
            DEBUG_PRINTF("not writable, stashing attempt\n");
            assert(to_offset >= ri->aux.somDistance);
            u64a start_offset = to_offset - ri->aux.somDistance;

            if (!fatbit_set(som_attempted_set, som_store_count, slot)) {
                som_failed_store[slot] = start_offset;
            } else {
                LIMIT_TO_AT_MOST(&som_failed_store[slot], start_offset);
            }
            DEBUG_PRINTF("som_failed_store[%u] = %llu\n", slot,
                         som_failed_store[slot]);
        }
        return;
    }
    case SOM_INTERNAL_LOC_SET_REV_NFA:
        DEBUG_PRINTF("SOM_INTERNAL_LOC_SET_REV_NFA\n");
        mmbit_set(som_store_valid, som_store_count, ri->onmatch);
        setSomLocRevNfa(scratch, som_set_now, som_store, som_store_count, ri,
                        to_offset);
        return;
    case SOM_INTERNAL_LOC_SET_REV_NFA_IF_UNSET:
        DEBUG_PRINTF("SOM_INTERNAL_LOC_SET_REV_NFA_IF_UNSET\n");
        if (ok_and_mark_if_unset(som_store_valid, som_set_now, som_store_count,
                                 ri->onmatch)) {
            setSomLocRevNfa(scratch, som_set_now, som_store, som_store_count,
                            ri, to_offset);
        }
        return;
    case SOM_INTERNAL_LOC_SET_REV_NFA_IF_WRITABLE: {
        u32 slot = ri->onmatch;
        DEBUG_PRINTF("SOM_INTERNAL_LOC_SET_IF_WRITABLE\n");
        if (ok_and_mark_if_write(som_store_valid, som_set_now,
                                 som_store_writable, som_store_count, slot)) {
            setSomLocRevNfa(scratch, som_set_now, som_store, som_store_count,
                            ri, to_offset);
            mmbit_unset(som_store_writable, som_store_count, slot);
        } else {
            /* not writable, stash as an attempted write in case we are
             * racing our escape. */
            DEBUG_PRINTF("not writable, stashing attempt\n");

            u64a from_offset = 0;
            runRevNfa(scratch, ri, to_offset, &from_offset);

            if (!fatbit_set(som_attempted_set, som_store_count, slot)) {
                som_failed_store[slot] = from_offset;
            } else {
                LIMIT_TO_AT_MOST(&som_failed_store[slot], from_offset);
            }
            DEBUG_PRINTF("som_failed_store[%u] = %llu\n", slot,
                         som_failed_store[slot]);
        }
        return;
    }
    case SOM_INTERNAL_LOC_COPY: {
        u32 slot_in = ri->aux.somDistance;
        u32 slot_out = ri->onmatch;
        DEBUG_PRINTF("SOM_INTERNAL_LOC_COPY S[%u] = S[%u]\n", slot_out,
                     slot_in);
        assert(mmbit_isset(som_store_valid, som_store_count, slot_in));
        mmbit_set(som_store_valid, som_store_count, slot_out);
        fatbit_set(som_set_now, som_store_count, slot_out);
        som_store[slot_out] = som_store[slot_in];

        return;
    }
    case SOM_INTERNAL_LOC_COPY_IF_WRITABLE: {
        u32 slot_in = ri->aux.somDistance;
        u32 slot_out = ri->onmatch;
        DEBUG_PRINTF("SOM_INTERNAL_LOC_COPY_IF_WRITABLE S[%u] = S[%u]\n",
                     slot_out, slot_in);
        assert(mmbit_isset(som_store_valid, som_store_count, slot_in));
        if (ok_and_mark_if_write(som_store_valid, som_set_now,
                                 som_store_writable, som_store_count,
                                 slot_out)) {
            DEBUG_PRINTF("copy, set som_store[%u]=%llu\n", slot_out,
                         som_store[slot_in]);
            som_store[slot_out] = som_store[slot_in];
            fatbit_set(som_set_now, som_store_count, slot_out);
            mmbit_unset(som_store_writable, som_store_count, slot_out);
        } else {
            /* not writable, stash as an attempted write in case we are
             * racing our escape */
            DEBUG_PRINTF("not writable, stashing attempt\n");
            fatbit_set(som_attempted_set, som_store_count, slot_out);
            som_failed_store[slot_out] = som_store[slot_in];
            DEBUG_PRINTF("som_failed_store[%u] = %llu\n", slot_out,
                         som_failed_store[slot_out]);
        }
        return;
    }
    case SOM_INTERNAL_LOC_MAKE_WRITABLE: {
        u32 slot = ri->onmatch;
        DEBUG_PRINTF("SOM_INTERNAL_LOC_MAKE_WRITABLE\n");
        /* if just written to the loc, ignore the racing escape */
        if (fatbit_isset(som_set_now, som_store_count, slot)) {
            DEBUG_PRINTF("just written\n");
            return;
        }
        if (fatbit_isset(som_attempted_set, som_store_count, slot)) {
            /* writes were waiting for an escape to arrive */
            DEBUG_PRINTF("setting som_store[%u] = %llu from "
                         "som_failed_store[%u]\n", slot, som_failed_store[slot],
                         slot);
            som_store[slot] = som_failed_store[slot];
            fatbit_set(som_set_now, som_store_count, slot);
            return;
        }
        mmbit_set(som_store_writable, som_store_count, slot);
        return;
    }
    default:
        DEBUG_PRINTF("unknown report type!\n");
        break;
    }

    // All valid som_operation types should be handled and returned above.
    assert(0);
    return;
}
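
The LIMIT_TO_AT_MOST calls above merge racing attempted writes to the same SOM slot by keeping the smallest offset, since start-of-match semantics want the earliest start. A minimal sketch of that merge rule (limit_to_at_most is a toy equivalent of the macro):

#include <stdint.h>
#include <stdio.h>

/* Keep the minimum: a later write to the slot only sticks if it names
 * an earlier start of match. */
static void limit_to_at_most(uint64_t *slot, uint64_t value) {
    if (value < *slot) {
        *slot = value;
    }
}

int main(void) {
    uint64_t failed_store = 500;           /* first attempted write */
    limit_to_at_most(&failed_store, 620);  /* later start: ignored */
    limit_to_at_most(&failed_store, 450);  /* earlier start: wins */
    printf("%llu\n", (unsigned long long)failed_store); /* 450 */
    return 0;
}
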