/**
 * Inserts a timer in the queue.
 *
 * The queue stores *relative* times: each node's ->time is the delta
 * from its predecessor, so only the head node's delta ever needs to be
 * decremented as real time passes.
 *
 * @param delay  - Number of seconds the timeout should happen in.
 * @param action - The function to call on timeout.
 * @param data   - Pointer to the function data to supply (ownership is
 *                 taken; timer_clearTimer() frees it).
 * @return the new timer's id, or -1 if allocation failed.
 */
int timer_setTimer(int delay, timer_f action, void *data) {
    struct timeOutQueue *ptr, *node, *prev;
    int i = 0;

    /* Create a node. No cast on malloc (unnecessary in C) and
     * sizeof *node ties the size to the variable's type. */
    node = malloc(sizeof *node);
    if (node == NULL) {
        my_log(LOG_WARNING, 0, "Malloc Failed in timer_settimer\n");
        return -1;
    }
    node->func = action;
    node->data = data;
    node->time = delay;
    node->next = NULL;
    node->id   = ++id;

    prev = ptr = queue;

    /* Insert the node in the queue. */
    if (!queue) {
        /* Queue is empty: the new node becomes the head. */
        queue = node;
    } else {
        /* Chase the pointer looking for the right place. */
        while (ptr) {
            if (delay < ptr->time) {
                /* Found the correct spot: link in before ptr, and
                 * shrink ptr's delta by the new node's delta so the
                 * absolute expiry times downstream are unchanged. */
                node->next = ptr;
                if (ptr == queue) {
                    queue = node;
                } else {
                    prev->next = node;
                }
                ptr->time -= node->time;
                my_log(LOG_DEBUG, 0, "Created timeout %d (#%d) - delay %d secs",
                       node->id, i, node->time);
                debugQueue();
                return node->id;
            } else {
                /* Continue to check nodes, consuming ptr's delta from
                 * the remaining delay as we pass it. */
                delay -= ptr->time;
                node->time = delay;
                prev = ptr;
                ptr = ptr->next;
            }
            i++;
        }
        /* Ran off the end of the queue: append. */
        prev->next = node;
    }
    my_log(LOG_DEBUG, 0, "Created timeout %d (#%d) - delay %d secs",
           node->id, i, node->time);
    debugQueue();
    return node->id;
}
/** * clears the associated timer. Returns 1 if succeeded. */ int timer_clearTimer(int timer_id) { struct timeOutQueue *ptr, *prev; int i = 0; if (!timer_id) return 0; prev = ptr = queue; /* * find the right node, delete it. the subsequent node's time * gets bumped up */ debugQueue(); while (ptr) { if (ptr->id == timer_id) { /* got the right node */ /* unlink it from the queue */ if (ptr == queue) queue = queue->next; else prev->next = ptr->next; /* increment next node if any */ if (ptr->next != 0) (ptr->next)->time += ptr->time; if (ptr->data) free(ptr->data); my_log(LOG_DEBUG, 0, "deleted timer %d (#%d)", ptr->id, i); free(ptr); debugQueue(); return 1; } prev = ptr; ptr = ptr->next; i++; } // If we get here, the timer was not deleted. my_log(LOG_DEBUG, 0, "failed to delete timer %d (#%d)", timer_id, i); debugQueue(); return 0; }
/*
 * Prepare the subengine queue q2 from the container's main queue q1:
 * reset q2, seed it with an MQE_START event at q1's current location,
 * and determine which subengine should run next. If the previously
 * active subengine differs from the newly selected one, q2 is closed
 * with MQE_END so the old subengine can be wound down first; otherwise
 * initSubQueue() finishes populating q2.
 *
 * NOTE(review): assumes the per-top id table lives immediately after
 * the Tamarama header (see baseTop below) — confirm against the
 * serialization layout.
 */
static void updateQueues(const struct Tamarama *t, struct mq *q1, struct mq *q2) {
    q2->cur = q2->end = 0;
    copyQueueProperties(q1, q2, t->activeIdxSize);

    const u32 numSubEngines = t->numSubEngines;
    /* Index of the subengine that was active at the end of the last
     * stream write, persisted in the stream state. */
    u32 lastActiveIdx = loadActiveIdx(q1->streamState, t->activeIdxSize);

#ifdef DEBUG
    DEBUG_PRINTF("external queue\n");
    debugQueue(q1);
#endif

    // Push MQE_START event to the subqueue
    s64a loc = q1->items[q1->cur].location;
    pushQueueAt(q2, 0, MQE_START, loc);
    char hasStart = 0;
    if (q1->items[q1->cur].type == MQE_START) {
        hasStart = 1;
        q1->cur++; /* consume the START so it isn't copied twice */
    }

    u32 activeIdx = lastActiveIdx;
    // If we have top events in the main queue, update current active id
    if (q1->cur < q1->end - 1) {
        /* The top-to-subengine mapping table is laid out directly after
         * the Tamarama header. */
        const u32 *baseTop = (const u32 *)((const char *)t + sizeof(struct Tamarama));
        u32 curTop = q1->items[q1->cur].type;
        activeIdx = findEngineForTop(baseTop, curTop, numSubEngines);
    }
    assert(activeIdx < numSubEngines);

    DEBUG_PRINTF("last id:%u, current id:%u, num of subengines:%u\n",
                 lastActiveIdx, activeIdx, numSubEngines);

    // Handle unfinished last alive subengine: close its queue with an
    // MQE_END so it is run to completion before switching engines.
    if (lastActiveIdx != activeIdx && lastActiveIdx != numSubEngines && hasStart) {
        loc = q1->items[q1->cur].location;
        pushQueueNoMerge(q2, MQE_END, loc);
        q2->nfa = getSubEngine(t, lastActiveIdx);
        return;
    }

    initSubQueue(t, q1, q2, lastActiveIdx, activeIdx);
    DEBUG_PRINTF("finish queues\n");
}
// After processing subqueue items for subengines, we need to copy back // remaining items in subqueue if there are any to Tamarama main queue static void copyBack(const struct Tamarama *t, struct mq *q, struct mq *q1) { DEBUG_PRINTF("copy back %u, %u\n", q1->cur, q1->end); q->report_current = q1->report_current; if (q->cur >= q->end && q1->cur >= q1->end) { return; } const u32 *baseTop = (const u32 *)((const char *)t + sizeof(struct Tamarama)); const u32 lastIdx = loadActiveIdx(q->streamState, t->activeIdxSize); u32 base = 0, event_base = 0; if (lastIdx != t->numSubEngines) { base = baseTop[lastIdx]; const struct NFA *sub = getSubEngine(t, lastIdx); event_base = isMultiTopType(sub->type) ? MQE_TOP_FIRST : MQE_TOP; } u32 numItems = q1->end > q1->cur + 1 ? q1->end - q1->cur - 1 : 1; // Also need to copy MQE_END if the main queue is empty if (q->cur == q->end) { numItems++; } u32 cur = q->cur - numItems; q->items[cur] = q1->items[q1->cur++]; q->items[cur].type = MQE_START; q->cur = cur++; for (u32 i = 0; i < numItems - 1; ++i) { u32 type = q1->items[q1->cur].type; if (type > MQE_END) { q1->items[q1->cur].type = type - event_base + base; } q->items[cur++] = q1->items[q1->cur++]; } #ifdef DEBUG DEBUG_PRINTF("external queue\n"); debugQueue(q); #endif }
/*
 * Catch a non-transient (active) leftfix engine up to the end of the
 * current stream write, so its state can be safely saved across the
 * stream boundary.
 *
 * Returns 1 if the leftfix is still alive (state saved or safely
 * deferrable), 0 if it died during catch-up.
 *
 * NOTE(review): the early-outs rely on the history buffer retaining
 * at least t->historyRequired bytes across writes — confirm against
 * the stream-state contract.
 */
static really_inline
char roseCatchUpLeftfix(const struct RoseEngine *t, char *state,
                        struct hs_scratch *scratch, u32 qi,
                        const struct LeftNfaInfo *left) {
    assert(!left->transient); // active roses only

    struct core_info *ci = &scratch->core_info;
    const u32 qCount = t->queueCount;
    struct mq *q = scratch->queues + qi;
    const struct NFA *nfa = getNfaByQueue(t, qi);

    /* A zombie engine with no active queue needs no catch-up work. */
    if (nfaSupportsZombie(nfa)
        && ci->buf_offset /* prefix can be alive with no q */
        && !fatbit_isset(scratch->aqa, qCount, qi)
        && isZombie(t, state, left)) {
        DEBUG_PRINTF("yawn - zombie\n");
        return 1;
    }

    /* Miracle scan may prove the engine dead or already saved. */
    if (left->stopTable) {
        enum MiracleAction mrv =
            roseScanForMiracles(t, state, scratch, qi, left, nfa);
        switch (mrv) {
        case MIRACLE_DEAD:
            return 0;
        case MIRACLE_SAVED:
            return 1;
        default:
            assert(mrv == MIRACLE_CONTINUE);
            break;
        }
    }

    if (!fatbit_set(scratch->aqa, qCount, qi)) {
        /* Queue was not yet active this write: initialise it. */
        initRoseQueue(t, qi, left, scratch);

        /* Start point: negative offset into history for mid-stream,
         * zero at stream start. */
        s32 sp;
        if (ci->buf_offset) {
            sp = -(s32)loadRoseDelay(t, state, left);
        } else {
            sp = 0;
        }

        DEBUG_PRINTF("ci->len=%zu, sp=%d, historyRequired=%u\n", ci->len, sp,
                     t->historyRequired);

        if (ci->len - sp + 1 < t->historyRequired) {
            // we'll end up safely in the history region.
            DEBUG_PRINTF("safely in history, skipping\n");
            storeRoseDelay(t, state, left, (s64a)ci->len - sp);
            return 1;
        }

        pushQueueAt(q, 0, MQE_START, sp);
        if (left->infix || ci->buf_offset + sp > 0) {
            /* Resume from previously saved stream state. */
            loadStreamState(nfa, q, sp);
        } else {
            /* Fresh prefix at stream start: top it and init state. */
            pushQueueAt(q, 1, MQE_TOP, sp);
            nfaQueueInitState(nfa, q);
        }
    } else {
        DEBUG_PRINTF("queue already active\n");

        /* An active queue holding only a START event can defer
         * catch-up if its location stays inside the history region. */
        if (q->end - q->cur == 1 && q_cur_type(q) == MQE_START) {
            DEBUG_PRINTF("empty queue, start loc=%lld\n", q_cur_loc(q));
            s64a last_loc = q_cur_loc(q);
            if (ci->len - last_loc + 1 < t->historyRequired) {
                // we'll end up safely in the history region.
                DEBUG_PRINTF("safely in history, saving state and skipping\n");
                saveStreamState(nfa, q, last_loc);
                storeRoseDelay(t, state, left, (s64a)ci->len - last_loc);
                return 1;
            }
        }
    }

    // Determine whether the byte before last_loc will be in the history
    // buffer on the next stream write.
    s64a last_loc = q_last_loc(q);
    s64a leftovers = ci->len - last_loc;
    if (leftovers + 1 >= t->historyRequired) {
        /* Not in history next time: run as far as maxLag allows. */
        u32 catchup_offset = left->maxLag ? left->maxLag - 1 : 0;
        last_loc = (s64a)ci->len - catchup_offset;
    }

    if (left->infix) {
        if (infixTooOld(q, last_loc)) {
            DEBUG_PRINTF("infix died of old age\n");
            return 0;
        }
        /* Trim events the infix can no longer be affected by. */
        reduceInfixQueue(q, last_loc, left->maxQueueLen, q->nfa->maxWidth);
    }

    DEBUG_PRINTF("end scan at %lld\n", last_loc);
    pushQueueNoMerge(q, MQE_END, last_loc);

#ifdef DEBUG
    debugQueue(q);
#endif

    char rv = nfaQueueExecRose(nfa, q, MO_INVALID_IDX);
    if (!rv) {
        /* nfa is dead */
        DEBUG_PRINTF("died catching up to stream boundary\n");
        return 0;
    } else {
        DEBUG_PRINTF("alive, saving stream state\n");
        if (nfaSupportsZombie(nfa)
            && nfaGetZombieStatus(nfa, q, last_loc) == NFA_ZOMBIE_ALWAYS_YES) {
            /* Zombie engines need no saved state, just the flag. */
            DEBUG_PRINTF("not so fast - zombie\n");
            setAsZombie(t, state, left);
        } else {
            saveStreamState(nfa, q, last_loc);
            storeRoseDelay(t, state, left, (s64a)ci->len - last_loc);
        }
    }
    return 1;
}