/** \brief Return packet to Packet pool */
void PacketPoolReturnPacket(Packet *p)
{
    PktPool *my_pool = GetThreadPacketPool();

    PACKET_RELEASE_REFS(p);

    PktPool *pool = p->pool;
    if (pool == NULL) {
        PacketFree(p);
        return;
    }
#ifdef DEBUG_VALIDATION
    BUG_ON(pool->initialized == 0);
    BUG_ON(pool->destroyed == 1);
    BUG_ON(my_pool->initialized == 0);
    BUG_ON(my_pool->destroyed == 1);
#endif /* DEBUG_VALIDATION */

    if (pool == my_pool) {
        /* Push back onto this thread's own stack, so no locking. */
        p->next = my_pool->head;
        my_pool->head = p;
    } else {
        PktPool *pending_pool = my_pool->pending_pool;
        if (pending_pool == NULL) {
            /* No pending packet, so store the current packet. */
            my_pool->pending_pool = pool;
            my_pool->pending_head = p;
            my_pool->pending_tail = p;
            my_pool->pending_count = 1;
        } else if (pending_pool == pool) {
            /* Another packet for the pending pool list. */
            p->next = my_pool->pending_head;
            my_pool->pending_head = p;
            my_pool->pending_count++;
            if (SC_ATOMIC_GET(pool->return_stack.sync_now) ||
                    my_pool->pending_count > MAX_PENDING_RETURN_PACKETS) {
                /* Return the entire list of pending packets. */
                SCMutexLock(&pool->return_stack.mutex);
                my_pool->pending_tail->next = pool->return_stack.head;
                pool->return_stack.head = my_pool->pending_head;
                SC_ATOMIC_RESET(pool->return_stack.sync_now);
                SCMutexUnlock(&pool->return_stack.mutex);
                SCCondSignal(&pool->return_stack.cond);
                /* Clear the list of pending packets to return. */
                my_pool->pending_pool = NULL;
                my_pool->pending_head = NULL;
                my_pool->pending_tail = NULL;
                my_pool->pending_count = 0;
            }
        } else {
            /* Push onto return stack for this pool */
            SCMutexLock(&pool->return_stack.mutex);
            p->next = pool->return_stack.head;
            pool->return_stack.head = p;
            SC_ATOMIC_RESET(pool->return_stack.sync_now);
            SCMutexUnlock(&pool->return_stack.mutex);
            SCCondSignal(&pool->return_stack.cond);
        }
    }
}
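/* A minimal sketch of the per-thread pool state that PacketPoolReturnPacket()
 * manipulates, inferred only from the accesses above; the real declarations
 * live in the packet pool header and the names/types here are assumptions for
 * illustration. The fast path pushes onto the lock-free thread-local stack
 * (head), while returns destined for another thread's pool are batched in the
 * pending_* fields and flushed into that pool's mutex-protected return_stack. */
#if 0   /* illustrative only, not the real declarations */
typedef struct PktPoolReturnStack_ {
    SCMutex mutex;                     /* protects head for cross-thread pushes */
    SCCondT cond;                      /* signalled when packets are returned */
    SC_ATOMIC_DECLARE(int, sync_now);  /* owner asks returners to flush immediately */
    Packet *head;                      /* singly linked list of returned packets */
} PktPoolReturnStack;

typedef struct PktPool_ {
    Packet *head;                      /* thread-local free list, no locking needed */
    PktPoolReturnStack return_stack;   /* where other threads return packets */
    /* batch of packets waiting to be returned to one foreign pool */
    struct PktPool_ *pending_pool;
    Packet *pending_head, *pending_tail;
    uint32_t pending_count;            /* flushed once > MAX_PENDING_RETURN_PACKETS */
    int initialized, destroyed;        /* DEBUG_VALIDATION sanity flags */
} PktPool;
#endif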
/**
 * \brief Force reassembly for all the flows that have unprocessed segments.
 */
void FlowForceReassembly(void)
{
    /* Do remember: packet acquisition must be disabled by now. */

    /** ----- Part 1 ----- */
    /* Flush out unattended packets */
    FlowForceReassemblyFlushPendingPseudoPackets();

    /** ----- Part 2 ----- */
    /* Check if all threads are idle. We need this so that all packets
     * have been freed. As a consequence, no flows are in use. */

    SCMutexLock(&tv_root_lock);

    /* all receive threads are part of packet processing threads */
    ThreadVars *tv = tv_root[TVT_PPT];

    /* we are doing this in order: receive -> decode -> ... -> log */
    while (tv != NULL) {
        if (tv->inq != NULL) {
            /* we wait till we dry out all the inq packets, before we
             * kill this thread. Do note that you should have disabled
             * packet acquisition by now using TmThreadDisableReceiveThreads() */
            if (!(strlen(tv->inq->name) == strlen("packetpool") &&
                  strcasecmp(tv->inq->name, "packetpool") == 0)) {
                PacketQueue *q = &trans_q[tv->inq->id];
                while (q->len != 0) {
                    usleep(100);
                }

                TmThreadsSetFlag(tv, THV_PAUSE);
                if (tv->inq->q_type == 0)
                    SCCondSignal(&trans_q[tv->inq->id].cond_q);
                else
                    SCCondSignal(&data_queues[tv->inq->id].cond_q);

                while (!TmThreadsCheckFlag(tv, THV_PAUSED)) {
                    if (tv->inq->q_type == 0)
                        SCCondSignal(&trans_q[tv->inq->id].cond_q);
                    else
                        SCCondSignal(&data_queues[tv->inq->id].cond_q);
                    usleep(100);
                }

                TmThreadsUnsetFlag(tv, THV_PAUSE);
            }
        }
        tv = tv->next;
    }

    SCMutexUnlock(&tv_root_lock);

    /** ----- Part 3 ----- */
    /* Carry out flow reassembly for unattended flows */
    FlowForceReassemblyForHash();

    return;
}
/**
 * \brief Kill a thread.
 *
 * \param tv A ThreadVars instance corresponding to the thread that has to be
 *           killed.
 */
void TmThreadKillThread(ThreadVars *tv)
{
    int i = 0;

    if (tv == NULL)
        return;

    /* set the thread flag informing the thread that it needs to be
     * terminated */
    TmThreadsSetFlag(tv, THV_KILL);

    if (tv->inq != NULL) {
        /* signal the queue for the number of users */
        if (tv->InShutdownHandler != NULL) {
            tv->InShutdownHandler(tv);
        }

        for (i = 0; i < (tv->inq->reader_cnt + tv->inq->writer_cnt); i++)
            SCCondSignal(&trans_q[tv->inq->id].cond_q);

        /* to be sure, signal more */
        while (1) {
            if (TmThreadsCheckFlag(tv, THV_CLOSED)) {
                break;
            }

            if (tv->InShutdownHandler != NULL) {
                tv->InShutdownHandler(tv);
            }

            for (i = 0; i < (tv->inq->reader_cnt + tv->inq->writer_cnt); i++)
                SCCondSignal(&trans_q[tv->inq->id].cond_q);

            usleep(100);
        }
    }

    if (tv->cond != NULL) {
        while (1) {
            if (TmThreadsCheckFlag(tv, THV_CLOSED)) {
                break;
            }

            pthread_cond_broadcast(tv->cond);
            usleep(100);
        }
    }

    return;
}
/**
 * \brief put a ptr in the RingBuffer.
 *
 * As we support multiple writers we need to protect 2 things:
 *  1. writing the ptr to the array
 *  2. incrementing the rb->write idx
 *
 * We can't do both at the same time in one atomic operation, so
 * we need to (spin) lock it. We do increment rb->write atomically
 * after that, so that we don't need to use the lock in our *Get
 * function.
 *
 * \param rb the ringbuffer
 * \param ptr ptr to store
 *
 * \retval 0 ok
 * \retval -1 wait loop interrupted because of engine flags
 */
int RingBufferMrMwPut(RingBuffer16 *rb, void *ptr)
{
    SCLogDebug("ptr %p", ptr);

    /* buffer is full, wait... */
retry:
    while ((unsigned short)(SC_ATOMIC_GET(rb->write) + 1) == SC_ATOMIC_GET(rb->read)) {
        /* break out if the engine wants to shutdown */
        if (rb->shutdown != 0)
            return -1;

        RingBufferDoWait(rb);
    }

    /* get our lock */
    SCSpinLock(&rb->spin);
    /* if the buffer changed while we were getting our lock, we need to retry */
    if ((unsigned short)(SC_ATOMIC_GET(rb->write) + 1) == SC_ATOMIC_GET(rb->read)) {
        SCSpinUnlock(&rb->spin);
        goto retry;
    }

    SCLogDebug("rb->write %u, ptr %p", SC_ATOMIC_GET(rb->write), ptr);

    /* update the ring buffer */
    rb->array[SC_ATOMIC_GET(rb->write)] = ptr;
    (void) SC_ATOMIC_ADD(rb->write, 1);
    SCSpinUnlock(&rb->spin);
    SCLogDebug("ptr %p, done", ptr);
#ifdef RINGBUFFER_MUTEX_WAIT
    SCCondSignal(&rb->wait_cond);
#endif
    return 0;
}
/**
 * \brief get the next ptr from the ring buffer
 *
 * Because we allow for multiple readers we take great care in making sure
 * that the threads don't interfere with one another.
 *
 * This version does NOT enter a wait loop if the buffer is empty.
 *
 * \retval ptr pointer to the data, or NULL if buffer is empty
 */
void *RingBufferMrMwGetNoWait(RingBuffer16 *rb)
{
    void *ptr;
    /** local read index to handle data races. If the compare-and-swap
     *  (SC_ATOMIC_CAS) fails we increase our local array idx to try the next
     *  array member until we succeed. Or, when the buffer is empty again,
     *  we jump back to the waiting loop. */
    unsigned short readp;

    /* buffer is empty, bail out... */
retry:
    while (SC_ATOMIC_GET(rb->write) == SC_ATOMIC_GET(rb->read)) {
        /* break if buffer is empty */
        return NULL;
    }

    /* atomically update rb->read */
    readp = SC_ATOMIC_GET(rb->read) - 1;
    do {
        /* with multiple readers we can get in the situation that we exited
         * from the wait loop but the rb is empty again once we get here. */
        if (SC_ATOMIC_GET(rb->write) == SC_ATOMIC_GET(rb->read))
            goto retry;

        readp++;
        ptr = rb->array[readp];
    } while (!(SC_ATOMIC_CAS(&rb->read, readp, (readp + 1))));

    SCLogDebug("ptr %p", ptr);
#ifdef RINGBUFFER_MUTEX_WAIT
    SCCondSignal(&rb->wait_cond);
#endif
    return ptr;
}
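/* A minimal usage sketch of the multi-writer/multi-reader pair above, assuming
 * a RingBuffer16 that has already been allocated and initialized elsewhere.
 * ProduceWorkItem(), ConsumeWorkItem() and EngineWantsShutdown() are
 * hypothetical placeholders, not functions from this code base. The writer
 * relies on the spin-locked put; the reader uses the non-blocking get and does
 * its own back-off when the buffer is empty. */
static void *WorkerWriteLoopSketch(void *arg)
{
    RingBuffer16 *rb = (RingBuffer16 *)arg;   /* assumed set up elsewhere */
    void *work;

    while ((work = ProduceWorkItem()) != NULL) {
        /* returns -1 only when the engine flags a shutdown */
        if (RingBufferMrMwPut(rb, work) == -1)
            break;
    }
    return NULL;
}

static void *WorkerReadLoopSketch(void *arg)
{
    RingBuffer16 *rb = (RingBuffer16 *)arg;

    while (!EngineWantsShutdown()) {
        void *work = RingBufferMrMwGetNoWait(rb);
        if (work == NULL) {
            usleep(10);                       /* buffer empty, back off briefly */
            continue;
        }
        ConsumeWorkItem(work);
    }
    return NULL;
}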
/**
 * \brief select the queue to output to in a round robin fashion.
 *
 * \param tv thread vars
 * \param p packet
 */
void TmqhOutputFlowRoundRobin(ThreadVars *tv, Packet *p)
{
    int32_t qid = 0;

    TmqhFlowCtx *ctx = (TmqhFlowCtx *)tv->outctx;

    /* if no flow we use the first queue,
     * should be rare */
    if (p->flow != NULL) {
        qid = SC_ATOMIC_GET(p->flow->autofp_tmqh_flow_qid);
        if (qid == -1) {
            qid = SC_ATOMIC_ADD(ctx->round_robin_idx, 1);
            if (qid >= ctx->size) {
                SC_ATOMIC_RESET(ctx->round_robin_idx);
                qid = 0;
            }
            (void) SC_ATOMIC_ADD(ctx->queues[qid].total_flows, 1);
            (void) SC_ATOMIC_SET(p->flow->autofp_tmqh_flow_qid, qid);
        }
    } else {
        qid = ctx->last++;

        if (ctx->last == ctx->size)
            ctx->last = 0;
    }

    (void) SC_ATOMIC_ADD(ctx->queues[qid].total_packets, 1);

    PacketQueue *q = ctx->queues[qid].q;
    SCMutexLock(&q->mutex_q);
    PacketEnqueue(q, p);
    SCCondSignal(&q->cond_q);
    SCMutexUnlock(&q->mutex_q);

    return;
}
void TmqhOutputVerdictNfq(ThreadVars *t, Packet *p)
{
    /* XXX not scaling */
#if 0
    PacketQueue *q = &trans_q[p->verdict_q_id];

    SCMutexLock(&q->mutex_q);
    PacketEnqueue(q, p);
    SCCondSignal(&q->cond_q);
    SCMutexUnlock(&q->mutex_q);
#endif
}
/**
 * \brief select the queue to output to based on queue lengths.
 *
 * \param tv thread vars
 * \param p packet
 */
void TmqhOutputFlowActivePackets(ThreadVars *tv, Packet *p)
{
    int32_t qid = 0;

    TmqhFlowCtx *ctx = (TmqhFlowCtx *)tv->outctx;

    /* if no flow we use the first queue,
     * should be rare */
    if (p->flow != NULL) {
        qid = SC_ATOMIC_GET(p->flow->autofp_tmqh_flow_qid);
        if (qid == -1) {
            uint16_t i = 0;
            int lowest_id = 0;
            TmqhFlowMode *queues = ctx->queues;
            uint32_t lowest = queues[i].q->len;
            for (i = 1; i < ctx->size; i++) {
                if (queues[i].q->len < lowest) {
                    lowest = queues[i].q->len;
                    lowest_id = i;
                }
            }
            qid = lowest_id;
            (void) SC_ATOMIC_SET(p->flow->autofp_tmqh_flow_qid, lowest_id);
            (void) SC_ATOMIC_ADD(ctx->queues[qid].total_flows, 1);
        }
    } else {
        qid = ctx->last++;

        if (ctx->last == ctx->size)
            ctx->last = 0;
    }

    (void) SC_ATOMIC_ADD(ctx->queues[qid].total_packets, 1);

    PacketQueue *q = ctx->queues[qid].q;
    SCMutexLock(&q->mutex_q);
    PacketEnqueue(q, p);
#ifdef __tile__
    q->cond_q = 1;
#else
    SCCondSignal(&q->cond_q);
#endif
    SCMutexUnlock(&q->mutex_q);

    return;
}
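/* For context, a rough sketch of the consuming side of the handshake that both
 * TmqhOutputFlowRoundRobin() and TmqhOutputFlowActivePackets() perform: the
 * receiving thread sleeps on q->cond_q under q->mutex_q and is woken by the
 * SCCondSignal() calls above. This is an illustration of the pattern only, not
 * the actual tmqh-flow input handler; it assumes SCCondWait() wraps
 * pthread_cond_wait() and ignores shutdown handling for brevity. */
static Packet *FlowQueueGetSketch(PacketQueue *q)
{
    SCMutexLock(&q->mutex_q);
    while (q->len == 0) {
        /* released while waiting, re-acquired before we check q->len again */
        SCCondWait(&q->cond_q, &q->mutex_q);
    }
    Packet *p = PacketDequeue(q);
    SCMutexUnlock(&q->mutex_q);
    return p;
}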
int RingBufferSrSwPut(RingBuffer16 *rb, void *ptr)
{
    /* buffer is full, wait... */
    while ((unsigned short)(SC_ATOMIC_GET(rb->write) + 1) == SC_ATOMIC_GET(rb->read)) {
        /* break out if the engine wants to shutdown */
        if (rb->shutdown != 0)
            return -1;

        RingBufferDoWait(rb);
    }

    rb->array[SC_ATOMIC_GET(rb->write)] = ptr;
    (void) SC_ATOMIC_ADD(rb->write, 1);

#ifdef RINGBUFFER_MUTEX_WAIT
    SCCondSignal(&rb->wait_cond);
#endif
    return 0;
}
void *RingBufferSrSwGet(RingBuffer16 *rb)
{
    void *ptr = NULL;

    /* buffer is empty, wait... */
    while (SC_ATOMIC_GET(rb->write) == SC_ATOMIC_GET(rb->read)) {
        /* break out if the engine wants to shutdown */
        if (rb->shutdown != 0)
            return NULL;

        RingBufferDoWait(rb);
    }

    ptr = rb->array[SC_ATOMIC_GET(rb->read)];
    (void) SC_ATOMIC_ADD(rb->read, 1);

#ifdef RINGBUFFER_MUTEX_WAIT
    SCCondSignal(&rb->wait_cond);
#endif
    return ptr;
}
/** \brief tell the ringbuffer to shut down
 *
 *  \param rb ringbuffer
 */
void RingBuffer8Shutdown(RingBuffer8 *rb)
{
    rb->shutdown = 1;

#ifdef RINGBUFFER_MUTEX_WAIT
    SCCondSignal(&rb->wait_cond);
#endif
}
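/* A small sketch of how the shutdown flag cooperates with the blocking
 * single-reader/single-writer calls above: once shutdown is set (as
 * RingBuffer8Shutdown() does for the 8-slot variant), RingBufferSrSwPut()
 * returns -1 and RingBufferSrSwGet() returns NULL instead of waiting, so both
 * ends can unwind. Setting rb->shutdown directly here is for illustration
 * only; a RingBuffer16 counterpart of the Shutdown helper is assumed, and
 * FreeWorkItem() is a hypothetical placeholder. */
static void DrainAndStopSketch(RingBuffer16 *rb)
{
    rb->shutdown = 1;             /* unblocks the put and get wait loops */
#ifdef RINGBUFFER_MUTEX_WAIT
    SCCondSignal(&rb->wait_cond); /* mirror what the 8-slot shutdown does */
#endif

    void *ptr;
    /* drain whatever is still queued; Get returns NULL once empty + shutdown */
    while ((ptr = RingBufferSrSwGet(rb)) != NULL) {
        FreeWorkItem(ptr);
    }
}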
/**
 * \internal
 * \brief Forces reassembly for a flow if it needs it.
 *
 *        The function requires the flow to be locked beforehand.
 *
 * \param f Pointer to the flow.
 * \param server action required for server: 1 or 2
 * \param client action required for client: 1 or 2
 *
 * \retval 0 This flow doesn't need any reassembly processing; 1 otherwise.
 */
int FlowForceReassemblyForFlowV2(Flow *f, int server, int client)
{
    Packet *p1 = NULL, *p2 = NULL, *p3 = NULL;
    TcpSession *ssn;

    /* looks like we have no flows in this queue */
    if (f == NULL) {
        return 0;
    }

    /* Get the tcp session for the flow */
    ssn = (TcpSession *)f->protoctx;
    if (ssn == NULL) {
        return 0;
    }

    /* The packets we use are based on what segments in what direction are
     * unprocessed.
     * p1 if we have client segments for reassembly purposes only. If we
     * have no server segments, p2 can be a toserver packet with dummy
     * seq/ack, and if we have server segments, p2 has to carry out reassembly
     * for the server segments as well, in which case we will also need a p3
     * in the toclient direction, which is now dummy since all we need it for
     * is detection. */

    /* insert a pseudo packet in the toserver direction */
    if (client == 1) {
        p1 = FlowForceReassemblyPseudoPacketGet(1, f, ssn, 0);
        if (p1 == NULL) {
            return 1;
        }
        PKT_SET_SRC(p1, PKT_SRC_FFR_V2);

        if (server == 1) {
            p2 = FlowForceReassemblyPseudoPacketGet(0, f, ssn, 0);
            if (p2 == NULL) {
                FlowDeReference(&p1->flow);
                TmqhOutputPacketpool(NULL, p1);
                return 1;
            }
            PKT_SET_SRC(p2, PKT_SRC_FFR_V2);

            p3 = FlowForceReassemblyPseudoPacketGet(1, f, ssn, 1);
            if (p3 == NULL) {
                FlowDeReference(&p1->flow);
                TmqhOutputPacketpool(NULL, p1);
                FlowDeReference(&p2->flow);
                TmqhOutputPacketpool(NULL, p2);
                return 1;
            }
            PKT_SET_SRC(p3, PKT_SRC_FFR_V2);
        } else {
            p2 = FlowForceReassemblyPseudoPacketGet(0, f, ssn, 1);
            if (p2 == NULL) {
                FlowDeReference(&p1->flow);
                TmqhOutputPacketpool(NULL, p1);
                return 1;
            }
            PKT_SET_SRC(p2, PKT_SRC_FFR_V2);
        }
    } else if (client == 2) {
        if (server == 1) {
            p1 = FlowForceReassemblyPseudoPacketGet(0, f, ssn, 0);
            if (p1 == NULL) {
                return 1;
            }
            PKT_SET_SRC(p1, PKT_SRC_FFR_V2);

            p2 = FlowForceReassemblyPseudoPacketGet(1, f, ssn, 1);
            if (p2 == NULL) {
                FlowDeReference(&p1->flow);
                TmqhOutputPacketpool(NULL, p1);
                return 1;
            }
            PKT_SET_SRC(p2, PKT_SRC_FFR_V2);
        } else {
            p1 = FlowForceReassemblyPseudoPacketGet(0, f, ssn, 1);
            if (p1 == NULL) {
                return 1;
            }
            PKT_SET_SRC(p1, PKT_SRC_FFR_V2);

            if (server == 2) {
                p2 = FlowForceReassemblyPseudoPacketGet(1, f, ssn, 1);
                if (p2 == NULL) {
                    FlowDeReference(&p1->flow);
                    TmqhOutputPacketpool(NULL, p1);
                    return 1;
                }
                PKT_SET_SRC(p2, PKT_SRC_FFR_V2);
            }
        }
    } else {
        if (server == 1) {
            p1 = FlowForceReassemblyPseudoPacketGet(0, f, ssn, 0);
            if (p1 == NULL) {
                return 1;
            }
            PKT_SET_SRC(p1, PKT_SRC_FFR_V2);

            p2 = FlowForceReassemblyPseudoPacketGet(1, f, ssn, 1);
            if (p2 == NULL) {
                FlowDeReference(&p1->flow);
                TmqhOutputPacketpool(NULL, p1);
                return 1;
            }
            PKT_SET_SRC(p2, PKT_SRC_FFR_V2);
        } else if (server == 2) {
            p1 = FlowForceReassemblyPseudoPacketGet(1, f, ssn, 1);
            if (p1 == NULL) {
                return 1;
            }
            PKT_SET_SRC(p1, PKT_SRC_FFR_V2);
        } else {
            /* impossible */
            BUG_ON(1);
        }
    }

    f->flags |= FLOW_TIMEOUT_REASSEMBLY_DONE;

    SCMutexLock(&stream_pseudo_pkt_decode_tm_slot->slot_post_pq.mutex_q);
    PacketEnqueue(&stream_pseudo_pkt_decode_tm_slot->slot_post_pq, p1);
    if (p2 != NULL)
        PacketEnqueue(&stream_pseudo_pkt_decode_tm_slot->slot_post_pq, p2);
    if (p3 != NULL)
        PacketEnqueue(&stream_pseudo_pkt_decode_tm_slot->slot_post_pq, p3);
    SCMutexUnlock(&stream_pseudo_pkt_decode_tm_slot->slot_post_pq.mutex_q);

    if (stream_pseudo_pkt_decode_TV->inq != NULL) {
        SCCondSignal(&trans_q[stream_pseudo_pkt_decode_TV->inq->id].cond_q);
    }

    return 1;
}
void TmThreadKillThreads(void)
{
    ThreadVars *tv = NULL;
    int i = 0;

    for (i = 0; i < TVT_MAX; i++) {
        tv = tv_root[i];

        while (tv) {
            TmThreadsSetFlag(tv, THV_KILL);
            SCLogDebug("told thread %s to stop", tv->name);

            if (tv->inq != NULL) {
                int i;

                //printf("TmThreadKillThreads: (t->inq->reader_cnt + t->inq->writer_cnt) %" PRIu32 "\n", (t->inq->reader_cnt + t->inq->writer_cnt));

                /* make sure our packet pending counter doesn't block */
                //SCCondSignal(&cond_pending);

                /* signal the queue for the number of users */
                if (tv->InShutdownHandler != NULL) {
                    tv->InShutdownHandler(tv);
                }

                for (i = 0; i < (tv->inq->reader_cnt + tv->inq->writer_cnt); i++) {
                    if (tv->inq->q_type == 0)
                        SCCondSignal(&trans_q[tv->inq->id].cond_q);
                    else
                        SCCondSignal(&data_queues[tv->inq->id].cond_q);
                }

                /* to be sure, signal more */
                int cnt = 0;
                while (1) {
                    if (TmThreadsCheckFlag(tv, THV_CLOSED)) {
                        SCLogDebug("signalled the thread %" PRId32 " times", cnt);
                        break;
                    }

                    cnt++;

                    if (tv->InShutdownHandler != NULL) {
                        tv->InShutdownHandler(tv);
                    }

                    for (i = 0; i < (tv->inq->reader_cnt + tv->inq->writer_cnt); i++) {
                        if (tv->inq->q_type == 0)
                            SCCondSignal(&trans_q[tv->inq->id].cond_q);
                        else
                            SCCondSignal(&data_queues[tv->inq->id].cond_q);
                    }

                    usleep(100);
                }

                SCLogDebug("signalled tv->inq->id %" PRIu32 "", tv->inq->id);
            }

            if (tv->cond != NULL) {
                int cnt = 0;
                while (1) {
                    if (TmThreadsCheckFlag(tv, THV_CLOSED)) {
                        SCLogDebug("signalled the thread %" PRId32 " times", cnt);
                        break;
                    }

                    cnt++;

                    pthread_cond_broadcast(tv->cond);
                    usleep(100);
                }
            }

            /* join it */
            pthread_join(tv->t, NULL);
            SCLogDebug("thread %s stopped", tv->name);

            tv = tv->next;
        }
    }
}