/** \test storage id registered with a NULL alloc callback: the getter must
 *        return NULL until a pointer is set with FlowSetStorageById(),
 *        after which the getter must return that same pointer. */
static int FlowStorageTest02(void)
{
    Flow *f = NULL;

    StorageInit();

    int id1 = FlowStorageRegister("test", sizeof(void *), NULL, StorageTestFree);
    if (id1 < 0)
        goto error;

    if (StorageFinalize() < 0)
        goto error;

    FlowInitConfig(FLOW_QUIET);
    f = FlowAlloc();
    if (f == NULL) {
        goto error;
    }

    void *ptr = FlowGetStorageById(f, id1);
    if (ptr != NULL) {
        goto error;
    }

    void *ptr1a = SCMalloc(128);
    if (ptr1a == NULL) {
        goto error;
    }
    FlowSetStorageById(f, id1, ptr1a);

    void *ptr1b = FlowGetStorageById(f, id1);
    if (ptr1a != ptr1b) {
        goto error;
    }

    FlowClearMemory(f, 0);
    FlowFree(f);
    FlowShutdown();
    StorageCleanup();
    return 1;
error:
    if (f != NULL) {
        FlowClearMemory(f, 0);
        FlowFree(f);
    }
    FlowShutdown();
    StorageCleanup();
    return 0;
}
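/* The tests in this file reference StorageTestAlloc() and StorageTestFree()
 * as the per-id alloc/free callbacks passed to FlowStorageRegister(). They
 * are not part of this excerpt; below is a minimal sketch of what such
 * callbacks might look like. The callback signatures are an assumption
 * based on how the tests use them, not verbatim from the tree. */
static void *StorageTestAlloc(unsigned int size)
{
    /* plain heap allocation; FlowAllocStorageById() would invoke this
     * on demand for ids registered with a non-NULL alloc callback */
    return SCMalloc(size);
}

static void StorageTestFree(void *ptr)
{
    /* invoked when the flow's storage is cleared, e.g. via
     * FlowClearMemory(); must tolerate the pointer it was handed */
    if (ptr != NULL)
        SCFree(ptr);
}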
/** \internal
 *  \brief Get a flow from the hash directly.
 *
 *  Called in conditions where the spare queue is empty and memcap is reached.
 *
 *  Walks the hash until a flow can be freed. Timeouts are disregarded, use_cnt
 *  is adhered to. "flow_prune_idx" atomic int makes sure we don't start at the
 *  top each time since that would clear the top of the hash leading to longer
 *  and longer search times under high pressure (observed).
 *
 *  \retval f flow or NULL
 */
static Flow *FlowGetUsedFlow(void)
{
    uint32_t idx = SC_ATOMIC_GET(flow_prune_idx) % flow_config.hash_size;
    uint32_t cnt = flow_config.hash_size;

    while (cnt--) {
        if (++idx >= flow_config.hash_size)
            idx = 0;

        FlowBucket *fb = &flow_hash[idx];
        if (FBLOCK_TRYLOCK(fb) != 0)
            continue;

        Flow *f = fb->tail;
        if (f == NULL) {
            FBLOCK_UNLOCK(fb);
            continue;
        }

        if (FLOWLOCK_TRYWRLOCK(f) != 0) {
            FBLOCK_UNLOCK(fb);
            continue;
        }

        /** never prune a flow that is used by a packet or stream msg
         *  we are currently processing in one of the threads */
        if (SC_ATOMIC_GET(f->use_cnt) > 0) {
            FBLOCK_UNLOCK(fb);
            FLOWLOCK_UNLOCK(f);
            continue;
        }

        /* remove from the hash */
        if (f->hprev != NULL)
            f->hprev->hnext = f->hnext;
        if (f->hnext != NULL)
            f->hnext->hprev = f->hprev;
        if (fb->head == f)
            fb->head = f->hnext;
        if (fb->tail == f)
            fb->tail = f->hprev;

        f->hnext = NULL;
        f->hprev = NULL;
        f->fb = NULL;
        FBLOCK_UNLOCK(fb);

        FlowClearMemory(f, f->protomap);
        FLOWLOCK_UNLOCK(f);

        (void) SC_ATOMIC_ADD(flow_prune_idx, (flow_config.hash_size - cnt));
        return f;
    }

    return NULL;
}
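/* The "flow_prune_idx" trick above generalizes to any hash-table eviction
 * scan: resume each scan where the previous one stopped, so repeated scans
 * under pressure don't keep evicting from (and re-searching) the same
 * leading buckets. A simplified standalone sketch of the pattern using C11
 * atomics; TABLE_SIZE, prune_idx, scan_for_victim() and try_evict() are
 * illustrative names, not engine API. */
#include <stdatomic.h>

#define TABLE_SIZE 1024

static atomic_uint prune_idx;  /* shared cursor, zero-initialized */

static int scan_for_victim(int (*try_evict)(unsigned int slot))
{
    /* start just past where the previous scan left off, wrapping around */
    unsigned int idx = atomic_load(&prune_idx) % TABLE_SIZE;
    unsigned int cnt = TABLE_SIZE;

    while (cnt--) {
        if (++idx >= TABLE_SIZE)
            idx = 0;

        if (try_evict(idx)) {
            /* advance the cursor by the number of slots we walked,
             * so the next scan resumes after this victim */
            atomic_fetch_add(&prune_idx, TABLE_SIZE - cnt);
            return 1;
        }
    }
    return 0;  /* every slot was busy or empty */
}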
/** \internal
 *  \brief Get a flow from the hash directly.
 *
 *  Called in conditions where the spare queue is empty and memcap is reached.
 *
 *  Walks the hash until a flow can be freed. Timeouts are disregarded, use_cnt
 *  is adhered to. "flow_prune_idx" atomic int makes sure we don't start at the
 *  top each time since that would clear the top of the hash leading to longer
 *  and longer search times under high pressure (observed).
 *
 *  \param tv thread vars
 *  \param dtv decode thread vars (for flow log api thread data)
 *
 *  \retval f flow or NULL
 */
static Flow *FlowGetUsedFlow(ThreadVars *tv, DecodeThreadVars *dtv)
{
    uint32_t idx = SC_ATOMIC_GET(flow_prune_idx) % flow_config.hash_size;
    uint32_t cnt = flow_config.hash_size;

    while (cnt--) {
        if (++idx >= flow_config.hash_size)
            idx = 0;

        FlowBucket *fb = &flow_hash[idx];
        if (FBLOCK_TRYLOCK(fb) != 0)
            continue;

        Flow *f = fb->tail;
        if (f == NULL) {
            FBLOCK_UNLOCK(fb);
            continue;
        }

        if (FLOWLOCK_TRYWRLOCK(f) != 0) {
            FBLOCK_UNLOCK(fb);
            continue;
        }

        /** never prune a flow that is used by a packet or stream msg
         *  we are currently processing in one of the threads */
        if (SC_ATOMIC_GET(f->use_cnt) > 0) {
            FBLOCK_UNLOCK(fb);
            FLOWLOCK_UNLOCK(f);
            continue;
        }

        /* remove from the hash */
        if (f->hprev != NULL)
            f->hprev->hnext = f->hnext;
        if (f->hnext != NULL)
            f->hnext->hprev = f->hprev;
        if (fb->head == f)
            fb->head = f->hnext;
        if (fb->tail == f)
            fb->tail = f->hprev;

        f->hnext = NULL;
        f->hprev = NULL;
        f->fb = NULL;

        SC_ATOMIC_SET(fb->next_ts, 0);
        FBLOCK_UNLOCK(fb);

        int state = SC_ATOMIC_GET(f->flow_state);
        if (state == FLOW_STATE_NEW)
            f->flow_end_flags |= FLOW_END_FLAG_STATE_NEW;
        else if (state == FLOW_STATE_ESTABLISHED)
            f->flow_end_flags |= FLOW_END_FLAG_STATE_ESTABLISHED;
        else if (state == FLOW_STATE_CLOSED)
            f->flow_end_flags |= FLOW_END_FLAG_STATE_CLOSED;
        else if (state == FLOW_STATE_CAPTURE_BYPASSED)
            f->flow_end_flags |= FLOW_END_FLAG_STATE_BYPASSED;
        else if (state == FLOW_STATE_LOCAL_BYPASSED)
            f->flow_end_flags |= FLOW_END_FLAG_STATE_BYPASSED;

        f->flow_end_flags |= FLOW_END_FLAG_FORCED;

        if (SC_ATOMIC_GET(flow_flags) & FLOW_EMERGENCY)
            f->flow_end_flags |= FLOW_END_FLAG_EMERGENCY;

        /* invoke flow log api */
        if (dtv && dtv->output_flow_thread_data)
            (void)OutputFlowLog(tv, dtv->output_flow_thread_data, f);

        FlowClearMemory(f, f->protomap);

        FlowUpdateState(f, FLOW_STATE_NEW);

        FLOWLOCK_UNLOCK(f);

        (void) SC_ATOMIC_ADD(flow_prune_idx, (flow_config.hash_size - cnt));
        return f;
    }

    return NULL;
}
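/* Context sketch (assumed, simplified): FlowGetUsedFlow() is the fallback
 * in the flow-get path. When the spare queue yields nothing and the flow
 * memcap does not allow a fresh allocation, a used flow is recycled from
 * the hash instead. FlowDequeue(), flow_spare_q and FLOW_CHECK_MEMCAP()
 * follow the engine's naming; the emergency-mode handling that surrounds
 * this in the real code is omitted, and FlowGetNewSketch() is a
 * hypothetical name for illustration only. */
static Flow *FlowGetNewSketch(ThreadVars *tv, DecodeThreadVars *dtv)
{
    /* try the spare queue first */
    Flow *f = FlowDequeue(&flow_spare_q);
    if (f == NULL) {
        if (!(FLOW_CHECK_MEMCAP(sizeof(Flow) + FlowStorageSize()))) {
            /* memcap reached: steal the least recently used flow whose
             * use_cnt is 0; may fail if every candidate is busy */
            f = FlowGetUsedFlow(tv, dtv);
            if (f == NULL)
                return NULL;
        } else {
            f = FlowAlloc();
        }
    }
    return f;
}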
/** \test register three storage ids of different sizes, verify the getter
 *        returns NULL before allocation, then allocate all three with
 *        FlowAllocStorageById() and verify the getter returns the same
 *        pointers. */
static int FlowStorageTest01(void)
{
    Flow *f = NULL;

    StorageInit();

    int id1 = FlowStorageRegister("test", 8, StorageTestAlloc, StorageTestFree);
    if (id1 < 0)
        goto error;
    int id2 = FlowStorageRegister("variable", 24, StorageTestAlloc, StorageTestFree);
    if (id2 < 0)
        goto error;
    int id3 = FlowStorageRegister("store", sizeof(void *), StorageTestAlloc, StorageTestFree);
    if (id3 < 0)
        goto error;

    if (StorageFinalize() < 0)
        goto error;

    FlowInitConfig(FLOW_QUIET);
    f = FlowAlloc();
    if (f == NULL) {
        goto error;
    }

    void *ptr = FlowGetStorageById(f, id1);
    if (ptr != NULL) {
        goto error;
    }
    ptr = FlowGetStorageById(f, id2);
    if (ptr != NULL) {
        goto error;
    }
    ptr = FlowGetStorageById(f, id3);
    if (ptr != NULL) {
        goto error;
    }

    void *ptr1a = FlowAllocStorageById(f, id1);
    if (ptr1a == NULL) {
        goto error;
    }
    void *ptr2a = FlowAllocStorageById(f, id2);
    if (ptr2a == NULL) {
        goto error;
    }
    void *ptr3a = FlowAllocStorageById(f, id3);
    if (ptr3a == NULL) {
        goto error;
    }

    void *ptr1b = FlowGetStorageById(f, id1);
    if (ptr1a != ptr1b) {
        goto error;
    }
    void *ptr2b = FlowGetStorageById(f, id2);
    if (ptr2a != ptr2b) {
        goto error;
    }
    void *ptr3b = FlowGetStorageById(f, id3);
    if (ptr3a != ptr3b) {
        goto error;
    }

    FlowClearMemory(f, 0);
    FlowFree(f);
    FlowShutdown();
    StorageCleanup();
    return 1;
error:
    if (f != NULL) {
        FlowClearMemory(f, 0);
        FlowFree(f);
    }
    FlowShutdown();
    StorageCleanup();
    return 0;
}
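/* Sketch of how these unit tests would typically be hooked into the
 * unittest runner. UtRegisterTest() is the standard registration entry
 * point; the wrapper name below is an assumption for illustration, and
 * very old trees passed an expected return value as a third argument. */
static void FlowStorageRegisterTests(void)
{
    UtRegisterTest("FlowStorageTest01", FlowStorageTest01);
    UtRegisterTest("FlowStorageTest02", FlowStorageTest02);
}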