/**
 * \brief Tear down flow-hash debug statistics: emit one final stats line,
 *        close the output file and destroy the stats lock.
 *
 * Compiled to a no-op unless FLOW_DEBUG_STATS is defined.
 */
void FlowHashDebugDeinit(void)
{
#ifdef FLOW_DEBUG_STATS
    struct timeval tv;
    memset(&tv, 0, sizeof(tv));
    TimeGet(&tv);

    /* flush a last data point stamped with the current time */
    FlowHashDebugPrint((uint32_t)tv.tv_sec);

    if (flow_hash_count_fp != NULL)
        fclose(flow_hash_count_fp);

    SCSpinDestroy(&flow_hash_count_lock);
#endif /* FLOW_DEBUG_STATS */
}
/**
 * \brief Destroy a RingBuffer8: release its atomics, spinlock and (when
 *        mutex-wait support is compiled in) its wait mutex/condvar, then
 *        free the structure itself.
 *
 * \param rb ring buffer to destroy; NULL is accepted and ignored
 */
void RingBuffer8Destroy(RingBuffer8 *rb)
{
    if (rb == NULL)
        return;

    SC_ATOMIC_DESTROY(rb->write);
    SC_ATOMIC_DESTROY(rb->read);
    SCSpinDestroy(&rb->spin);

#ifdef RINGBUFFER_MUTEX_WAIT
    SCMutexDestroy(&rb->wait_mutex);
    SCCondDestroy(&rb->wait_cond);
#endif /* RINGBUFFER_MUTEX_WAIT */

    SCFree(rb);
}
/**
 * \brief Test Spinlock Macros
 *
 * Exercises init -> lock -> trylock-on-held -> unlock -> destroy and
 * expects trylock on a held lock to fail with EBUSY (EDEADLK on OpenBSD).
 *
 * Valgrind's DRD tool (valgrind-3.5.0-Debian) reports:
 *
 * ==31156== Recursive locking not allowed: mutex 0x7fefff97c, recursion count 1, owner 1.
 * ==31156== at 0x4C2C77E: pthread_spin_trylock (drd_pthread_intercepts.c:829)
 * ==31156== by 0x40EB3E: ThreadMacrosTest02Spinlocks (threads.c:40)
 * ==31156== by 0x532E8A: UtRunTests (util-unittest.c:182)
 * ==31156== by 0x4065C3: main (suricata.c:789)
 *
 * This looks like a false positive: the whole point of "trylock" is to
 * probe whether a spinlock is currently held.
 *
 * \retval 1 on success, 0 on failure
 */
int ThreadMacrosTest02Spinlocks(void)
{
    SCSpinlock lock;
    int rc = 0;

    rc |= SCSpinInit(&lock, 0);
    rc |= SCSpinLock(&lock);

    /* trylock on a lock we already hold must report it as busy */
#ifndef __OpenBSD__
    rc |= (SCSpinTrylock(&lock) == EBUSY) ? 0 : 1;
#else
    rc |= (SCSpinTrylock(&lock) == EDEADLK) ? 0 : 1;
#endif

    rc |= SCSpinUnlock(&lock);
    rc |= SCSpinDestroy(&lock);

    return (rc == 0) ? 1 : 0;
}
static int ProfilingGenericTicksTest01(void) { #define TEST_RUNS 1024 uint64_t ticks_start = 0; uint64_t ticks_end = 0; void *ptr[TEST_RUNS]; int i; ticks_start = UtilCpuGetTicks(); for (i = 0; i < TEST_RUNS; i++) { ptr[i] = SCMalloc(1024); } ticks_end = UtilCpuGetTicks(); printf("malloc(1024) %"PRIu64"\n", (ticks_end - ticks_start)/TEST_RUNS); ticks_start = UtilCpuGetTicks(); for (i = 0; i < TEST_RUNS; i++) { SCFree(ptr[i]); } ticks_end = UtilCpuGetTicks(); printf("SCFree(1024) %"PRIu64"\n", (ticks_end - ticks_start)/TEST_RUNS); SCMutex m[TEST_RUNS]; ticks_start = UtilCpuGetTicks(); for (i = 0; i < TEST_RUNS; i++) { SCMutexInit(&m[i], NULL); } ticks_end = UtilCpuGetTicks(); printf("SCMutexInit() %"PRIu64"\n", (ticks_end - ticks_start)/TEST_RUNS); ticks_start = UtilCpuGetTicks(); for (i = 0; i < TEST_RUNS; i++) { SCMutexLock(&m[i]); } ticks_end = UtilCpuGetTicks(); printf("SCMutexLock() %"PRIu64"\n", (ticks_end - ticks_start)/TEST_RUNS); ticks_start = UtilCpuGetTicks(); for (i = 0; i < TEST_RUNS; i++) { SCMutexUnlock(&m[i]); } ticks_end = UtilCpuGetTicks(); printf("SCMutexUnlock() %"PRIu64"\n", (ticks_end - ticks_start)/TEST_RUNS); ticks_start = UtilCpuGetTicks(); for (i = 0; i < TEST_RUNS; i++) { SCMutexDestroy(&m[i]); } ticks_end = UtilCpuGetTicks(); printf("SCMutexDestroy() %"PRIu64"\n", (ticks_end - ticks_start)/TEST_RUNS); SCSpinlock s[TEST_RUNS]; ticks_start = UtilCpuGetTicks(); for (i = 0; i < TEST_RUNS; i++) { SCSpinInit(&s[i], 0); } ticks_end = UtilCpuGetTicks(); printf("SCSpinInit() %"PRIu64"\n", (ticks_end - ticks_start)/TEST_RUNS); ticks_start = UtilCpuGetTicks(); for (i = 0; i < TEST_RUNS; i++) { SCSpinLock(&s[i]); } ticks_end = UtilCpuGetTicks(); printf("SCSpinLock() %"PRIu64"\n", (ticks_end - ticks_start)/TEST_RUNS); ticks_start = UtilCpuGetTicks(); for (i = 0; i < TEST_RUNS; i++) { SCSpinUnlock(&s[i]); } ticks_end = UtilCpuGetTicks(); printf("SCSpinUnlock() %"PRIu64"\n", (ticks_end - ticks_start)/TEST_RUNS); ticks_start = UtilCpuGetTicks(); for (i = 
0; i < TEST_RUNS; i++) { SCSpinDestroy(&s[i]); } ticks_end = UtilCpuGetTicks(); printf("SCSpinDestroy() %"PRIu64"\n", (ticks_end - ticks_start)/TEST_RUNS); SC_ATOMIC_DECL_AND_INIT(unsigned int, test); ticks_start = UtilCpuGetTicks(); for (i = 0; i < TEST_RUNS; i++) { (void) SC_ATOMIC_ADD(test,1); } ticks_end = UtilCpuGetTicks(); printf("SC_ATOMIC_ADD %"PRIu64"\n", (ticks_end - ticks_start)/TEST_RUNS); ticks_start = UtilCpuGetTicks(); for (i = 0; i < TEST_RUNS; i++) { SC_ATOMIC_CAS(&test,i,i+1); } ticks_end = UtilCpuGetTicks(); printf("SC_ATOMIC_CAS %"PRIu64"\n", (ticks_end - ticks_start)/TEST_RUNS); return 1; }
void TimeDeinit(void) { SCSpinDestroy(¤t_time_spinlock); }