/** \internal
 *  \brief Get a host from the hash directly.
 *
 *  Called in conditions where the spare queue is empty and memcap is reached.
 *
 *  Walks the hash until a host can be freed. The "host_prune_idx" atomic int
 *  makes sure we don't start at the top each time, since that would clear the
 *  top of the hash, leading to longer and longer search times under high
 *  pressure (observed).
 *
 *  \retval h host or NULL
 */
static Host *HostGetUsedHost(void)
{
    uint32_t idx = SC_ATOMIC_GET(host_prune_idx) % host_config.hash_size;
    uint32_t cnt = host_config.hash_size;

    while (cnt--) {
        if (++idx >= host_config.hash_size)
            idx = 0;

        HostHashRow *hb = &host_hash[idx];
        if (hb == NULL)
            continue;

        if (HRLOCK_TRYLOCK(hb) != 0)
            continue;

        Host *h = hb->tail;
        if (h == NULL) {
            HRLOCK_UNLOCK(hb);
            continue;
        }

        if (SCMutexTrylock(&h->m) != 0) {
            HRLOCK_UNLOCK(hb);
            continue;
        }

        /** never prune a host that is used by a packet
         *  we are currently processing in one of the threads */
        if (SC_ATOMIC_GET(h->use_cnt) > 0) {
            HRLOCK_UNLOCK(hb);
            SCMutexUnlock(&h->m);
            continue;
        }

        /* remove from the hash */
        if (h->hprev != NULL)
            h->hprev->hnext = h->hnext;
        if (h->hnext != NULL)
            h->hnext->hprev = h->hprev;
        if (hb->head == h)
            hb->head = h->hnext;
        if (hb->tail == h)
            hb->tail = h->hprev;

        h->hnext = NULL;
        h->hprev = NULL;
        HRLOCK_UNLOCK(hb);

        HostClearMemory(h);

        SCMutexUnlock(&h->m);

        (void) SC_ATOMIC_ADD(host_prune_idx, (host_config.hash_size - cnt));
        return h;
    }

    return NULL;
}
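/* Illustrative sketch only (not part of the original file): one way the
 * allocation path described above can end up calling HostGetUsedHost().
 * The spare queue is tried first; if it is empty and the memcap would be
 * exceeded, a host is recycled straight from the hash. The function name
 * HostGetNewSketch and the exact memcap check are assumptions; the real
 * allocation code may differ. */
static Host *HostGetNewSketch(void)
{
    /* try the spare queue first */
    Host *h = HostDequeue(&host_spare_q);
    if (h != NULL)
        return h;

    /* spare queue empty: only allocate fresh memory while under the memcap,
     * otherwise recycle an unused host directly from the hash */
    if (SC_ATOMIC_GET(host_memuse) + sizeof(Host) + HostStorageSize() <=
            host_config.memcap) {
        h = HostAllocSketch();  /* assumed allocator, see sketch after HostFree() */
    } else {
        h = HostGetUsedHost();
    }

    return h;   /* may be NULL if nothing could be freed up */
}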
void HostFree(Host *h)
{
    if (h != NULL) {
        HostClearMemory(h);
        SC_ATOMIC_DESTROY(h->use_cnt);
        SCMutexDestroy(&h->m);
        SCFree(h);
        (void) SC_ATOMIC_SUB(host_memuse, (sizeof(Host) + HostStorageSize()));
    }
}
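/* Illustrative sketch only (not part of the original file): an allocation
 * counterpart to HostFree(). It shows why HostFree() subtracts
 * sizeof(Host) + HostStorageSize() from host_memuse: the allocator is
 * assumed to have added the same amount when the host was created. The
 * name HostAllocSketch is hypothetical; the real allocator may differ. */
static Host *HostAllocSketch(void)
{
    size_t size = sizeof(Host) + HostStorageSize();

    Host *h = SCMalloc(size);
    if (h == NULL)
        return NULL;
    memset(h, 0x00, size);

    SCMutexInit(&h->m, NULL);
    SC_ATOMIC_INIT(h->use_cnt);

    /* account for the full allocation so HostFree() can subtract the
     * exact same amount on teardown */
    (void) SC_ATOMIC_ADD(host_memuse, size);
    return h;
}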
/**
 *  \internal
 *
 *  \brief check all hosts in a hash row for timing out
 *
 *  \param hb host hash row *LOCKED*
 *  \param h last host in the hash row
 *  \param ts timestamp
 *
 *  \retval cnt timed out hosts
 */
static uint32_t HostHashRowTimeout(HostHashRow *hb, Host *h, struct timeval *ts)
{
    uint32_t cnt = 0;

    do {
        if (SCMutexTrylock(&h->m) != 0) {
            h = h->hprev;
            continue;
        }

        Host *next_host = h->hprev;

        /* check if the host is fully timed out and
         * ready to be discarded. */
        if (HostHostTimedOut(h, ts) == 1) {
            /* remove from the hash */
            if (h->hprev != NULL)
                h->hprev->hnext = h->hnext;
            if (h->hnext != NULL)
                h->hnext->hprev = h->hprev;
            if (hb->head == h)
                hb->head = h->hnext;
            if (hb->tail == h)
                hb->tail = h->hprev;

            h->hnext = NULL;
            h->hprev = NULL;

            HostClearMemory(h);

            /* no one is referring to this host, use_cnt 0, removed from hash
             * so we can unlock it and move it back to the spare queue. */
            SCMutexUnlock(&h->m);

            /* move to spare list */
            HostMoveToSpare(h);

            cnt++;
        } else {
            SCMutexUnlock(&h->m);
        }

        h = next_host;
    } while (h != NULL);

    return cnt;
}
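/* Illustrative sketch only (not part of the original file): how
 * HostHashRowTimeout() can be driven from a periodic timeout pass over the
 * whole hash. Each row is try-locked so a busy row is simply skipped until
 * the next pass; the row function is handed the tail and walks towards the
 * head via hprev. The name HostTimeoutHashSketch is hypothetical; the real
 * timeout handling may be organized differently. */
static uint32_t HostTimeoutHashSketch(struct timeval *ts)
{
    uint32_t cnt = 0;
    uint32_t idx;

    for (idx = 0; idx < host_config.hash_size; idx++) {
        HostHashRow *hb = &host_hash[idx];

        if (HRLOCK_TRYLOCK(hb) != 0)
            continue;   /* row is busy: skip it this pass */

        if (hb->tail != NULL)
            cnt += HostHashRowTimeout(hb, hb->tail, ts);

        HRLOCK_UNLOCK(hb);
    }

    return cnt;
}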
/** \brief Cleanup the host engine
 *
 *  Cleanup the host engine from tag and threshold.
 */
void HostCleanup(void)
{
    Host *h;
    uint32_t u;

    if (host_hash != NULL) {
        for (u = 0; u < host_config.hash_size; u++) {
            h = host_hash[u].head;
            HostHashRow *hb = &host_hash[u];
            HRLOCK_LOCK(hb);
            while (h) {
                if ((SC_ATOMIC_GET(h->use_cnt) > 0) && (h->iprep != NULL)) {
                    /* iprep is attached to the host: only clear tag and threshold */
                    if (h->tag != NULL) {
                        DetectTagDataListFree(h->tag);
                        h->tag = NULL;
                    }
                    if (h->threshold != NULL) {
                        ThresholdListFree(h->threshold);
                        h->threshold = NULL;
                    }
                    h = h->hnext;
                } else {
                    Host *n = h->hnext;
                    /* remove from the hash */
                    if (h->hprev != NULL)
                        h->hprev->hnext = h->hnext;
                    if (h->hnext != NULL)
                        h->hnext->hprev = h->hprev;
                    if (hb->head == h)
                        hb->head = h->hnext;
                    if (hb->tail == h)
                        hb->tail = h->hprev;

                    h->hnext = NULL;
                    h->hprev = NULL;
                    HostClearMemory(h);
                    HostMoveToSpare(h);
                    h = n;
                }
            }
            HRLOCK_UNLOCK(hb);
        }
    }

    return;
}
/** \brief shutdown the host engine
 *  \warning Not thread safe */
void HostShutdown(void)
{
    Host *h;
    uint32_t u;

    HostPrintStats();

    /* free spare queue */
    while ((h = HostDequeue(&host_spare_q))) {
        BUG_ON(SC_ATOMIC_GET(h->use_cnt) > 0);
        HostFree(h);
    }

    /* clear and free the hash */
    if (host_hash != NULL) {
        for (u = 0; u < host_config.hash_size; u++) {
            Host *h = host_hash[u].head;
            while (h) {
                Host *n = h->hnext;
                HostClearMemory(h);
                HostFree(h);
                h = n;
            }

            HRLOCK_DESTROY(&host_hash[u]);
        }
        SCFree(host_hash);
        host_hash = NULL;
    }
    (void) SC_ATOMIC_SUB(host_memuse, host_config.hash_size * sizeof(HostHashRow));
    HostQueueDestroy(&host_spare_q);

    SC_ATOMIC_DESTROY(host_prune_idx);
    SC_ATOMIC_DESTROY(host_memuse);
    SC_ATOMIC_DESTROY(host_counter);
    return;
}