Example #1
/** \brief check the ringbuffer is full (no more data will fit)
 *
 *  \param rb ringbuffer
 *
 *  \retval 1 full
 *  \retval 0 not full
 */
int RingBuffer8IsFull(RingBuffer8 *rb) {
    if ((unsigned char)(SC_ATOMIC_GET(rb->write) + 1) == SC_ATOMIC_GET(rb->read)) {
        return 1;
    }

    return 0;
}
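
As a side note on the check above: the "full" condition relies on unsigned-char arithmetic wrapping modulo 256, so a write index exactly one step behind the read index (after wrap-around) means every slot but one is occupied. The following standalone sketch, plain ISO C and independent of the SC_ATOMIC_* macros, just demonstrates that wrap-around behaviour:

#include <assert.h>
#include <stdio.h>

/* Standalone illustration of the 8-bit wrap-around "full" test used above:
 * a 256-slot buffer indexed by an unsigned char is considered full when
 * advancing the write index by one would collide with the read index. */
int main(void)
{
    unsigned char write = 255, read = 0;
    assert((unsigned char)(write + 1) == read);  /* 255 + 1 wraps to 0 -> full */

    write = 10; read = 12;
    assert((unsigned char)(write + 1) != read);  /* one slot still free -> not full */

    printf("wrap-around full check behaves as expected\n");
    return 0;
}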
Example #2
static int RingBuffer8SrSwInit01 (void) {
    int result = 0;

    RingBuffer8 *rb = NULL;

    rb = RingBuffer8Init();
    if (rb == NULL) {
        printf("rb == NULL: ");
        goto end;
    }

    int r = SCSpinLock(&rb->spin);
    if (r != 0) {
        printf("r = %d, expected %d: ", r, 0);
        goto end;
    }
    SCSpinUnlock(&rb->spin);

    if (SC_ATOMIC_GET(rb->read) != 0) {
        printf("read %u, expected 0: ", SC_ATOMIC_GET(rb->read));
        goto end;
    }

    if (SC_ATOMIC_GET(rb->write) != 0) {
        printf("write %u, expected 0: ", SC_ATOMIC_GET(rb->write));
        goto end;
    }

    result = 1;
end:
    if (rb != NULL) {
        RingBuffer8Destroy(rb);
    }
    return result;
}
Example #3
static int SCAtomicTest01(void)
{
    int result = 0;
    int a = 10;
    int b = 20;
    int *temp_int = NULL;

    SC_ATOMIC_DECL_AND_INIT(void *, temp);

    temp_int = SC_ATOMIC_GET(temp);
    if (temp_int != NULL)
        goto end;

    (void)SC_ATOMIC_SET(temp, &a);
    temp_int = SC_ATOMIC_GET(temp);
    if (temp_int == NULL)
        goto end;
    if (*temp_int != a)
        goto end;

    (void)SC_ATOMIC_SET(temp, &b);
    temp_int = SC_ATOMIC_GET(temp);
    if (temp_int == NULL)
        goto end;
    if (*temp_int != b)
        goto end;

    result = 1;

 end:
    return result;
}
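
For readers unfamiliar with the SC_ATOMIC_* wrappers, the pattern this test exercises maps directly onto C11 atomics. The sketch below is an illustration only, not Suricata's implementation of the macros; it assumes nothing beyond <stdatomic.h>:

#include <stdatomic.h>
#include <stdio.h>

/* Illustration only: the declare/get/set pattern from the unit test above,
 * written with C11 atomics instead of Suricata's SC_ATOMIC_* wrappers. */
int main(void)
{
    int a = 10, b = 20;
    _Atomic(void *) temp = NULL;         /* ~ SC_ATOMIC_DECL_AND_INIT(void *, temp) */

    atomic_store(&temp, &a);             /* ~ SC_ATOMIC_SET(temp, &a) */
    int *temp_int = atomic_load(&temp);  /* ~ SC_ATOMIC_GET(temp) */
    printf("%d\n", *temp_int);           /* prints 10 */

    atomic_store(&temp, &b);
    temp_int = atomic_load(&temp);
    printf("%d\n", *temp_int);           /* prints 20 */
    return 0;
}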
Example #4
static void PcapCallbackLoop(char *user, struct pcap_pkthdr *h, u_char *pkt)
{
    SCEnter();

    PcapThreadVars *ptv = (PcapThreadVars *)user;
    Packet *p = PacketGetFromQueueOrAlloc();
    struct timeval current_time;

    if (unlikely(p == NULL)) {
        SCReturn;
    }

    PKT_SET_SRC(p, PKT_SRC_WIRE);
    p->ts.tv_sec = h->ts.tv_sec;
    p->ts.tv_usec = h->ts.tv_usec;
    SCLogDebug("p->ts.tv_sec %"PRIuMAX"", (uintmax_t)p->ts.tv_sec);
    p->datalink = ptv->datalink;

    ptv->pkts++;
    ptv->bytes += h->caplen;
    (void) SC_ATOMIC_ADD(ptv->livedev->pkts, 1);
    p->livedev = ptv->livedev;

    if (unlikely(PacketCopyData(p, pkt, h->caplen))) {
        TmqhOutputPacketpool(ptv->tv, p);
        SCReturn;
    }

    switch (ptv->checksum_mode) {
        case CHECKSUM_VALIDATION_AUTO:
            if (ptv->livedev->ignore_checksum) {
                p->flags |= PKT_IGNORE_CHECKSUM;
            } else if (ChecksumAutoModeCheck(ptv->pkts,
                        SC_ATOMIC_GET(ptv->livedev->pkts),
                        SC_ATOMIC_GET(ptv->livedev->invalid_checksums))) {
                ptv->livedev->ignore_checksum = 1;
                p->flags |= PKT_IGNORE_CHECKSUM;
            }
            break;
        case CHECKSUM_VALIDATION_DISABLE:
            p->flags |= PKT_IGNORE_CHECKSUM;
            break;
        default:
            break;
    }

    if (TmThreadsSlotProcessPkt(ptv->tv, ptv->slot, p) != TM_ECODE_OK) {
        pcap_breakloop(ptv->pcap_handle);
        ptv->cb_result = TM_ECODE_FAILED;
    }

    /* Trigger one dump of stats every second */
    TimeGet(&current_time);
    if (current_time.tv_sec != ptv->last_stats_dump) {
        PcapDumpCounters(ptv);
        ptv->last_stats_dump = current_time.tv_sec;
    }

    SCReturn;
}
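
For context, a callback with this shape is normally driven from the capture loop via pcap_dispatch(), which hands the user pointer back to the handler for every captured packet. The driver below is a hedged sketch of that wiring; the name PcapReadLoop and the batch size are hypothetical, and Suricata's real receive loop adds more error handling:

#include <pcap/pcap.h>

/* Hypothetical driver loop: pcap_dispatch() calls PcapCallbackLoop() once per
 * packet; a negative return means error (-1) or pcap_breakloop() (-2), which
 * is exactly what the callback above triggers on a failed slot process. */
static void PcapReadLoop(PcapThreadVars *ptv)
{
    while (ptv->cb_result == TM_ECODE_OK) {
        int r = pcap_dispatch(ptv->pcap_handle, 64 /* packets per batch */,
                              (pcap_handler)PcapCallbackLoop, (u_char *)ptv);
        if (r < 0)
            break;
    }
}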
Example #5
/** \brief check the ringbuffer is full (no more data will fit)
 *
 *  \param rb ringbuffer
 *
 *  \retval 1 full
 *  \retval 0 not full
 */
int RingBufferIsFull(RingBuffer16 *rb) {
    if ((unsigned short)(SC_ATOMIC_GET(rb->write) + 1) == SC_ATOMIC_GET(rb->read)) {
        return 1;
    }

    return 0;
}
Example #6
/**
 *  \brief get the next ptr from the ring buffer
 *
 *  Because we allow for multiple readers we take great care in making sure
 *  that the threads don't interfere with one another.
 *
 *  This version does NOT enter a wait loop if the buffer is empty.
 *
 *  \retval ptr pointer to the data, or NULL if buffer is empty
 */
void *RingBufferMrMwGetNoWait(RingBuffer16 *rb) {
    void *ptr;
    /** local copy of the read index. If the atomic compare-and-swap (CAS)
     *  fails we increase this local index to try the next array member
     *  until we succeed, or jump back to the empty check when the buffer
     *  is empty again. */
    unsigned short readp;

    /* no waiting in this variant: if the buffer is empty, give up right away */
retry:
    if (SC_ATOMIC_GET(rb->write) == SC_ATOMIC_GET(rb->read)) {
        return NULL;
    }

    /* atomically update rb->read */
    readp = SC_ATOMIC_GET(rb->read) - 1;
    do {
        /* with multiple readers we can get in the situation that we passed
         * the empty check above but the rb is empty again once we get here. */
        if (SC_ATOMIC_GET(rb->write) == SC_ATOMIC_GET(rb->read))
            goto retry;

        readp++;
        ptr = rb->array[readp];
    } while (!(SC_ATOMIC_CAS(&rb->read, readp, (readp + 1))));

    SCLogDebug("ptr %p", ptr);

#ifdef RINGBUFFER_MUTEX_WAIT
    SCCondSignal(&rb->wait_cond);
#endif
    return ptr;
}
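
The core of the multi-reader guarantee is the CAS claim loop: a reader only keeps the slot it read if it can atomically advance rb->read from the exact value it observed; otherwise another reader won the race and it retries. The sketch below restates just that claim pattern with C11 atomics; it is an illustration only, the buffer-empty recheck is omitted, and the names are hypothetical:

#include <stdatomic.h>

/* Illustration of the claim pattern: snapshot the read index, pick the slot,
 * and keep it only if the index can be advanced from that exact snapshot.
 * On failure atomic_compare_exchange_weak() reloads the current index into
 * 'expected', so the loser simply retries on the next slot. */
static void *claim_slot(_Atomic unsigned short *read_idx, void **array)
{
    unsigned short expected = atomic_load(read_idx);
    void *ptr;
    do {
        ptr = array[expected];
    } while (!atomic_compare_exchange_weak(read_idx, &expected,
                                           (unsigned short)(expected + 1)));
    return ptr;
}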
Example #7
/** \brief check the ringbuffer is empty (no data in it)
 *
 *  \param rb ringbuffer
 *
 *  \retval 1 empty
 *  \retval 0 not empty
 */
int RingBufferIsEmpty(RingBuffer16 *rb) {
    if (SC_ATOMIC_GET(rb->write) == SC_ATOMIC_GET(rb->read)) {
        return 1;
    }

    return 0;
}
Example #8
/** \internal
 *  \brief Get a flow from the hash directly.
 *
 *  Called in conditions where the spare queue is empty and memcap is reached.
 *
 *  Walks the hash until a flow can be freed. Timeouts are disregarded, use_cnt
 *  is adhered to. "flow_prune_idx" atomic int makes sure we don't start at the
 *  top each time since that would clear the top of the hash leading to longer
 *  and longer search times under high pressure (observed).
 *
 *  \retval f flow or NULL
 */
static Flow *FlowGetUsedFlow(void)
{
    uint32_t idx = SC_ATOMIC_GET(flow_prune_idx) % flow_config.hash_size;
    uint32_t cnt = flow_config.hash_size;

    while (cnt--) {
        if (++idx >= flow_config.hash_size)
            idx = 0;

        FlowBucket *fb = &flow_hash[idx];

        if (FBLOCK_TRYLOCK(fb) != 0)
            continue;

        Flow *f = fb->tail;
        if (f == NULL) {
            FBLOCK_UNLOCK(fb);
            continue;
        }

        if (FLOWLOCK_TRYWRLOCK(f) != 0) {
            FBLOCK_UNLOCK(fb);
            continue;
        }

        /** never prune a flow that is used by a packet or stream msg
         *  we are currently processing in one of the threads */
        if (SC_ATOMIC_GET(f->use_cnt) > 0) {
            FBLOCK_UNLOCK(fb);
            FLOWLOCK_UNLOCK(f);
            continue;
        }

        /* remove from the hash */
        if (f->hprev != NULL)
            f->hprev->hnext = f->hnext;
        if (f->hnext != NULL)
            f->hnext->hprev = f->hprev;
        if (fb->head == f)
            fb->head = f->hnext;
        if (fb->tail == f)
            fb->tail = f->hprev;

        f->hnext = NULL;
        f->hprev = NULL;
        f->fb = NULL;
        FBLOCK_UNLOCK(fb);

        FlowClearMemory(f, f->protomap);

        FLOWLOCK_UNLOCK(f);

        (void) SC_ATOMIC_ADD(flow_prune_idx, (flow_config.hash_size - cnt));
        return f;
    }

    return NULL;
}
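
The only non-obvious bookkeeping here is on flow_prune_idx: adding (hash_size - cnt), i.e. the number of rows walked, makes the next pruning attempt start roughly where this one stopped instead of re-scanning the top of the hash. A toy, non-atomic sketch of that rotation, with hypothetical names and not Suricata code:

#include <stdio.h>

/* Toy illustration of the rotating prune start index used above: each scan
 * records how many rows it walked so the next scan resumes further along. */
int main(void)
{
    unsigned int hash_size = 8, prune_idx = 0;
    for (int scan = 0; scan < 3; scan++) {
        unsigned int start = prune_idx % hash_size;
        unsigned int rows_walked = 3;     /* pretend we freed an entry after 3 rows */
        prune_idx += rows_walked;         /* ~ SC_ATOMIC_ADD(flow_prune_idx, ...) */
        printf("scan %d starts at row %u, next scan starts at row %u\n",
               scan, start, prune_idx % hash_size);
    }
    return 0;
}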
Example #9
/** \internal
 *  \brief Get a host from the hash directly.
 *
 *  Called in conditions where the spare queue is empty and memcap is reached.
 *
 *  Walks the hash until a host can be freed. "host_prune_idx" atomic int makes
 *  sure we don't start at the top each time since that would clear the top of
 *  the hash leading to longer and longer search times under high pressure (observed).
 *
 *  \retval h host or NULL
 */
static Host *HostGetUsedHost(void) {
    uint32_t idx = SC_ATOMIC_GET(host_prune_idx) % host_config.hash_size;
    uint32_t cnt = host_config.hash_size;

    while (cnt--) {
        if (++idx >= host_config.hash_size)
            idx = 0;

        HostHashRow *hb = &host_hash[idx];
        if (hb == NULL)
            continue;

        if (HRLOCK_TRYLOCK(hb) != 0)
            continue;

        Host *h = hb->tail;
        if (h == NULL) {
            HRLOCK_UNLOCK(hb);
            continue;
        }

        if (SCMutexTrylock(&h->m) != 0) {
            HRLOCK_UNLOCK(hb);
            continue;
        }

        /** never prune a host that is used by a packet
         *  we are currently processing in one of the threads */
        if (SC_ATOMIC_GET(h->use_cnt) > 0) {
            HRLOCK_UNLOCK(hb);
            SCMutexUnlock(&h->m);
            continue;
        }

        /* remove from the hash */
        if (h->hprev != NULL)
            h->hprev->hnext = h->hnext;
        if (h->hnext != NULL)
            h->hnext->hprev = h->hprev;
        if (hb->head == h)
            hb->head = h->hnext;
        if (hb->tail == h)
            hb->tail = h->hprev;

        h->hnext = NULL;
        h->hprev = NULL;
        HRLOCK_UNLOCK(hb);

        HostClearMemory (h);

        SCMutexUnlock(&h->m);

        (void) SC_ATOMIC_ADD(host_prune_idx, (host_config.hash_size - cnt));
        return h;
    }

    return NULL;
}
Example #10
/** \internal
 *  \brief Get a tracker from the hash directly.
 *
 *  Called in conditions where the spare queue is empty and memcap is reached.
 *
 *  Walks the hash until a tracker can be freed. "defragtracker_prune_idx" atomic int makes
 *  sure we don't start at the top each time since that would clear the top of
 *  the hash leading to longer and longer search times under high pressure (observed).
 *
 *  \retval dt tracker or NULL
 */
static DefragTracker *DefragTrackerGetUsedDefragTracker(void)
{
    uint32_t idx = SC_ATOMIC_GET(defragtracker_prune_idx) % defrag_config.hash_size;
    uint32_t cnt = defrag_config.hash_size;

    while (cnt--) {
        if (++idx >= defrag_config.hash_size)
            idx = 0;

        DefragTrackerHashRow *hb = &defragtracker_hash[idx];

        if (DRLOCK_TRYLOCK(hb) != 0)
            continue;

        DefragTracker *dt = hb->tail;
        if (dt == NULL) {
            DRLOCK_UNLOCK(hb);
            continue;
        }

        if (SCMutexTrylock(&dt->lock) != 0) {
            DRLOCK_UNLOCK(hb);
            continue;
        }

        /** never prune a tracker that is used by a packet
         *  we are currently processing in one of the threads */
        if (SC_ATOMIC_GET(dt->use_cnt) > 0) {
            DRLOCK_UNLOCK(hb);
            SCMutexUnlock(&dt->lock);
            continue;
        }

        /* remove from the hash */
        if (dt->hprev != NULL)
            dt->hprev->hnext = dt->hnext;
        if (dt->hnext != NULL)
            dt->hnext->hprev = dt->hprev;
        if (hb->head == dt)
            hb->head = dt->hnext;
        if (hb->tail == dt)
            hb->tail = dt->hprev;

        dt->hnext = NULL;
        dt->hprev = NULL;
        DRLOCK_UNLOCK(hb);

        DefragTrackerClearMemory(dt);

        SCMutexUnlock(&dt->lock);

        (void) SC_ATOMIC_ADD(defragtracker_prune_idx, (defrag_config.hash_size - cnt));
        return dt;
    }

    return NULL;
}
Example #11
/**
 * \brief Pfring Packet Process function.
 *
 * This function fills in our packet structure from libpfring.
 * From here the packets are picked up by the  DecodePfring thread.
 *
 * \param user pointer to PfringThreadVars
 * \param h pointer to pfring packet header
 * \param p pointer to the current packet
 */
static inline void PfringProcessPacket(void *user, struct pfring_pkthdr *h, Packet *p)
{

    PfringThreadVars *ptv = (PfringThreadVars *)user;

    ptv->bytes += h->caplen;
    ptv->pkts++;
    p->livedev = ptv->livedev;

    /* PF_RING may fail to set timestamp */
    if (h->ts.tv_sec == 0) {
        gettimeofday((struct timeval *)&h->ts, NULL);
    }

    p->ts.tv_sec = h->ts.tv_sec;
    p->ts.tv_usec = h->ts.tv_usec;

    /* PF_RING marks all packets as link type ethernet,
     * so that is what we use here. */
    p->datalink = LINKTYPE_ETHERNET;

    /* Get the VLAN id from the header. We check that vlan_id is non-zero even
     * though the header comment claims NO_VLAN is used when there is no VLAN:
     * NO_VLAN is neither defined nor used in the PF_RING code, and the PF_RING
     * kernel code sets vlan_id to 0 when there is no VLAN. */
    if ((!ptv->vlan_disabled) && h->extended_hdr.parsed_pkt.vlan_id) {
        p->vlan_id[0] = h->extended_hdr.parsed_pkt.vlan_id & 0x0fff;
        p->vlan_idx = 1;
        p->vlanh[0] = NULL;
    }

    switch (ptv->checksum_mode) {
        case CHECKSUM_VALIDATION_RXONLY:
            if (h->extended_hdr.rx_direction == 0) {
                p->flags |= PKT_IGNORE_CHECKSUM;
            }
            break;
        case CHECKSUM_VALIDATION_DISABLE:
            p->flags |= PKT_IGNORE_CHECKSUM;
            break;
        case CHECKSUM_VALIDATION_AUTO:
            if (ptv->livedev->ignore_checksum) {
                p->flags |= PKT_IGNORE_CHECKSUM;
            } else if (ChecksumAutoModeCheck(ptv->pkts,
                        SC_ATOMIC_GET(ptv->livedev->pkts),
                        SC_ATOMIC_GET(ptv->livedev->invalid_checksums))) {
                ptv->livedev->ignore_checksum = 1;
                p->flags |= PKT_IGNORE_CHECKSUM;
            }
            break;
        default:
            break;
    }

    SET_PKT_LEN(p, h->caplen);
}
Example #12
File: ippair.c  Project: bmeeks8/suricata
/** \brief print some ippair stats
 *  \warning Not thread safe */
void IPPairPrintStats (void)
{
#ifdef IPPAIRBITS_STATS
    SCLogPerf("ippairbits added: %" PRIu32 ", removed: %" PRIu32 ", max memory usage: %" PRIu32 "",
        ippairbits_added, ippairbits_removed, ippairbits_memuse_max);
#endif /* IPPAIRBITS_STATS */
    SCLogPerf("ippair memory usage: %"PRIu64" bytes, maximum: %"PRIu64,
            SC_ATOMIC_GET(ippair_memuse), SC_ATOMIC_GET(ippair_config.memcap));
    return;
}
Example #13
/**
 * \brief Search tags for src and dst. Update entries of the tag, remove if necessary
 *
 * \param de_ctx Detect context
 * \param det_ctx Detect thread context
 * \param p packet
 *
 */
void TagHandlePacket(DetectEngineCtx *de_ctx,
        DetectEngineThreadCtx *det_ctx, Packet *p)
{
    /* If there's no tag, get out of here */
    unsigned int current_tags = SC_ATOMIC_GET(num_tags);
    if (current_tags == 0)
        return;

    /* First update and get session tags */
    if (p->flow != NULL) {
        FLOWLOCK_WRLOCK(p->flow);
        TagHandlePacketFlow(p->flow, p);
        FLOWLOCK_UNLOCK(p->flow);
    }

    Host *src = HostLookupHostFromHash(&p->src);
    if (src) {
        if (src->tag != NULL) {
            TagHandlePacketHost(src,p);
        }
        HostRelease(src);
    }
    Host *dst = HostLookupHostFromHash(&p->dst);
    if (dst) {
        if (dst->tag != NULL) {
            TagHandlePacketHost(dst,p);
        }
        HostRelease(dst);
    }
}
Example #14
/**
 * \brief select the queue to output in a round robin fashion.
 *
 * \param tv thread vars
 * \param p packet
 */
void TmqhOutputFlowRoundRobin(ThreadVars *tv, Packet *p)
{
    int32_t qid = 0;

    TmqhFlowCtx *ctx = (TmqhFlowCtx *)tv->outctx;

    /* if the packet has no flow we round-robin over the
     * queues directly; should be rare */
    if (p->flow != NULL) {
        qid = SC_ATOMIC_GET(p->flow->autofp_tmqh_flow_qid);
        if (qid == -1) {
            qid = SC_ATOMIC_ADD(ctx->round_robin_idx, 1);
            if (qid >= ctx->size) {
                SC_ATOMIC_RESET(ctx->round_robin_idx);
                qid = 0;
            }
            (void) SC_ATOMIC_ADD(ctx->queues[qid].total_flows, 1);
            (void) SC_ATOMIC_SET(p->flow->autofp_tmqh_flow_qid, qid);
        }
    } else {
        qid = ctx->last++;

        if (ctx->last == ctx->size)
            ctx->last = 0;
    }
    (void) SC_ATOMIC_ADD(ctx->queues[qid].total_packets, 1);

    PacketQueue *q = ctx->queues[qid].q;
    SCMutexLock(&q->mutex_q);
    PacketEnqueue(q, p);
    SCCondSignal(&q->cond_q);
    SCMutexUnlock(&q->mutex_q);

    return;
}
Example #15
/**
 *  \brief Check if alloc'ing "size" would mean we're over memcap
 *
 *  \retval 1 if in bounds
 *  \retval 0 if not in bounds
 */
int HTPCheckMemcap(uint64_t size)
{
    if (htp_config_memcap == 0 || size + SC_ATOMIC_GET(htp_memuse) <= htp_config_memcap)
        return 1;
    (void) SC_ATOMIC_ADD(htp_memcap, 1);
    return 0;
}
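
A typical caller checks the memcap first and only then allocates, bumping the memuse counter on success. The wrapper below is a hypothetical sketch of that pattern; HTPMallocSketch and the direct malloc() call are illustrative, not the project's actual allocator:

#include <stdlib.h>

/* Hypothetical allocation wrapper built on the memcap check above: refuse the
 * allocation when it would exceed the cap, otherwise account for the memory. */
static void *HTPMallocSketch(size_t size)
{
    if (HTPCheckMemcap((uint64_t)size) == 0)
        return NULL;                             /* over memcap: fail the request */

    void *ptr = malloc(size);
    if (ptr != NULL)
        (void) SC_ATOMIC_ADD(htp_memuse, size);  /* track usage for later checks */
    return ptr;
}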
Example #16
/** \brief Return packet to Packet pool
 *
 */
void PacketPoolReturnPacket(Packet *p)
{
    PktPool *my_pool = GetThreadPacketPool();

    PACKET_RELEASE_REFS(p);

    PktPool *pool = p->pool;
    if (pool == NULL) {
        PacketFree(p);
        return;
    }
#ifdef DEBUG_VALIDATION
    BUG_ON(pool->initialized == 0);
    BUG_ON(pool->destroyed == 1);
    BUG_ON(my_pool->initialized == 0);
    BUG_ON(my_pool->destroyed == 1);
#endif /* DEBUG_VALIDATION */

    if (pool == my_pool) {
        /* Push back onto this thread's own stack, so no locking. */
        p->next = my_pool->head;
        my_pool->head = p;
    } else {
        PktPool *pending_pool = my_pool->pending_pool;
        if (pending_pool == NULL) {
            /* No pending packet, so store the current packet. */
            my_pool->pending_pool = pool;
            my_pool->pending_head = p;
            my_pool->pending_tail = p;
            my_pool->pending_count = 1;
        } else if (pending_pool == pool) {
            /* Another packet for the pending pool list. */
            p->next = my_pool->pending_head;
            my_pool->pending_head = p;
            my_pool->pending_count++;
            if (SC_ATOMIC_GET(pool->return_stack.sync_now) || my_pool->pending_count > MAX_PENDING_RETURN_PACKETS) {
                /* Return the entire list of pending packets. */
                SCMutexLock(&pool->return_stack.mutex);
                my_pool->pending_tail->next = pool->return_stack.head;
                pool->return_stack.head = my_pool->pending_head;
                SC_ATOMIC_RESET(pool->return_stack.sync_now);
                SCMutexUnlock(&pool->return_stack.mutex);
                SCCondSignal(&pool->return_stack.cond);
                /* Clear the list of pending packets to return. */
                my_pool->pending_pool = NULL;
                my_pool->pending_head = NULL;
                my_pool->pending_tail = NULL;
                my_pool->pending_count = 0;
            }
        } else {
            /* Push onto return stack for this pool */
            SCMutexLock(&pool->return_stack.mutex);
            p->next = pool->return_stack.head;
            pool->return_stack.head = p;
            SC_ATOMIC_RESET(pool->return_stack.sync_now);
            SCMutexUnlock(&pool->return_stack.mutex);
            SCCondSignal(&pool->return_stack.cond);
        }
    }
}
Example #17
/** \internal
 *  \brief See if we can really discard this host. Check use_cnt reference.
 *
 *  \param h host
 *  \param ts timestamp
 *
 *  \retval 0 not timed out just yet
 *  \retval 1 fully timed out, lets kill it
 */
static int HostHostTimedOut(Host *h, struct timeval *ts) {
    int tags = 0;
    int thresholds = 0;

    /** never prune a host that is used by a packet
     *  we are currently processing in one of the threads */
    if (SC_ATOMIC_GET(h->use_cnt) > 0) {
        return 0;
    }

    if (h->iprep) {
        if (SRepHostTimedOut(h) == 0)
            return 0;

        SCLogDebug("host %p reputation timed out", h);
    }

    if (h->tag && TagTimeoutCheck(h, ts) == 0) {
        tags = 1;
    }
    if (h->threshold && ThresholdTimeoutCheck(h, ts) == 0) {
        thresholds = 1;
    }

    if (tags || thresholds)
        return 0;

    SCLogDebug("host %p timed out", h);
    return 1;
}
Example #18
/**
 * \brief Destroy the tag context hash tables
 */
void TagDestroyCtx(void)
{
#ifdef DEBUG
    BUG_ON(SC_ATOMIC_GET(num_tags) != 0);
#endif
    SC_ATOMIC_DESTROY(num_tags);
}
Example #19
static TmEcode FlowWorkerThreadDeinit(ThreadVars *tv, void *data)
{
    FlowWorkerThreadData *fw = data;

    DecodeThreadVarsFree(tv, fw->dtv);

    /* free TCP */
    StreamTcpThreadDeinit(tv, (void *)fw->stream_thread);

    /* free DETECT */
    void *detect_thread = SC_ATOMIC_GET(fw->detect_thread);
    if (detect_thread != NULL) {
        DetectEngineThreadCtxDeinit(tv, detect_thread);
        SC_ATOMIC_SET(fw->detect_thread, NULL);
    }

    /* Free output. */
    OutputLoggerThreadDeinit(tv, fw->output_thread);

    /* free pq */
    BUG_ON(fw->pq.len);
    SCMutexDestroy(&fw->pq.mutex_q);

    SC_ATOMIC_DESTROY(fw->detect_thread);
    SCFree(fw);
    return TM_ECODE_OK;
}
Example #20
static int RingBuffer8SrSwPut02 (void) {
    int result = 0;
    RingBuffer8 *rb = NULL;

    int array[255];
    int cnt = 0;
    for (cnt = 0; cnt < 255; cnt++) {
        array[cnt] = cnt;
    }

    rb = RingBuffer8Init();
    if (rb == NULL) {
        printf("rb == NULL: ");
        goto end;
    }

    for (cnt = 0; cnt < 255; cnt++) {
        RingBufferSrSw8Put(rb, (void *)&array[cnt]);

        if (SC_ATOMIC_GET(rb->read) != 0) {
            printf("read %u, expected 0: ", SC_ATOMIC_GET(rb->read));
            goto end;
        }

        if (SC_ATOMIC_GET(rb->write) != (unsigned char)(cnt+1)) {
            printf("write %u, expected %u: ", SC_ATOMIC_GET(rb->write), (unsigned char)(cnt+1));
            goto end;
        }

        if (rb->array[cnt] != (void *)&array[cnt]) {
            printf("ptr is %p, expected %p: ", rb->array[cnt], (void *)&array[cnt]);
            goto end;
        }
    }

    if (!(RingBuffer8IsFull(rb))) {
        printf("ringbuffer should be full, isn't: ");
        goto end;
    }

    result = 1;
end:
    if (rb != NULL) {
        RingBuffer8Destroy(rb);
    }
    return result;
}
Example #21
File: flow-hash.c  Project: norg/suricata
/**
 *  \brief Get a new flow
 *
 *  Get a new flow. We're checking memcap first and will try to make room
 *  if the memcap is reached.
 *
 *  \param tv thread vars
 *  \param dtv decode thread vars (for flow log api thread data)
 *
 *  \retval f *LOCKED* flow on success, NULL on error.
 */
static Flow *FlowGetNew(ThreadVars *tv, DecodeThreadVars *dtv, const Packet *p)
{
    Flow *f = NULL;

    if (FlowCreateCheck(p) == 0) {
        return NULL;
    }

    /* get a flow from the spare queue */
    f = FlowDequeue(&flow_spare_q);
    if (f == NULL) {
        /* If we reached the max memcap, we get a used flow */
        if (!(FLOW_CHECK_MEMCAP(sizeof(Flow) + FlowStorageSize()))) {
            /* declare state of emergency */
            if (!(SC_ATOMIC_GET(flow_flags) & FLOW_EMERGENCY)) {
                SC_ATOMIC_OR(flow_flags, FLOW_EMERGENCY);

                FlowTimeoutsEmergency();

                /* under high load, waking up the flow mgr each time leads
                 * to high cpu usage. Flows are not timed out much faster if
                 * we check a thousand times a second. */
                FlowWakeupFlowManagerThread();
            }

            f = FlowGetUsedFlow(tv, dtv);
            if (f == NULL) {
                /* max memcap reached, so increment the counter */
                if (tv != NULL && dtv != NULL) {
                    StatsIncr(tv, dtv->counter_flow_memcap);
                }

                /* very rare, but we can fail. Just giving up */
                return NULL;
            }

            /* freed a flow, but it's unlocked */
        } else {
            /* now see if we can alloc a new flow */
            f = FlowAlloc();
            if (f == NULL) {
                if (tv != NULL && dtv != NULL) {
                    StatsIncr(tv, dtv->counter_flow_memcap);
                }
                return NULL;
            }

            /* flow is initialized but *unlocked* */
        }
    } else {
        /* flow has been recycled before it went into the spare queue */

        /* flow is initialized (recycled) but *unlocked* */
    }

    FLOWLOCK_WRLOCK(f);
    FlowUpdateCounter(tv, dtv, p->proto);
    return f;
}
Example #22
/**
 * \brief Pfring Packet Process function.
 *
 * This function fills in our packet structure from libpfring.
 * From here the packets are picked up by the  DecodePfring thread.
 *
 * \param user pointer to PfringThreadVars
 * \param h pointer to pfring packet header
 * \param p pointer to the current packet
 */
static inline void PfringProcessPacket(void *user, struct pfring_pkthdr *h, Packet *p) {

    PfringThreadVars *ptv = (PfringThreadVars *)user;

    ptv->bytes += h->caplen;
    ptv->pkts++;
    p->livedev = ptv->livedev;

    /* PF_RING may fail to set timestamp */
    if (h->ts.tv_sec == 0) {
        gettimeofday((struct timeval *)&h->ts, NULL);
    }

    p->ts.tv_sec = h->ts.tv_sec;
    p->ts.tv_usec = h->ts.tv_usec;

    /* PF_RING marks all packets as link type ethernet,
     * so that is what we use here. */
    p->datalink = LINKTYPE_ETHERNET;

    switch (ptv->checksum_mode) {
        case CHECKSUM_VALIDATION_RXONLY:
            if (h->extended_hdr.rx_direction == 0) {
                p->flags |= PKT_IGNORE_CHECKSUM;
            }
            break;
        case CHECKSUM_VALIDATION_DISABLE:
            p->flags |= PKT_IGNORE_CHECKSUM;
            break;
        case CHECKSUM_VALIDATION_AUTO:
            if (ptv->livedev->ignore_checksum) {
                p->flags |= PKT_IGNORE_CHECKSUM;
            } else if (ChecksumAutoModeCheck(ptv->pkts,
                        SC_ATOMIC_GET(ptv->livedev->pkts),
                        SC_ATOMIC_GET(ptv->livedev->invalid_checksums))) {
                ptv->livedev->ignore_checksum = 1;
                p->flags |= PKT_IGNORE_CHECKSUM;
            }
            break;
        default:
            break;
    }

    SET_PKT_LEN(p, h->caplen);
}
Example #23
int RingBufferSrSwPut(RingBuffer16 *rb, void *ptr) {
    /* buffer is full, wait... */
    while ((unsigned short)(SC_ATOMIC_GET(rb->write) + 1) == SC_ATOMIC_GET(rb->read)) {
        /* break out if the engine wants to shutdown */
        if (rb->shutdown != 0)
            return -1;

        RingBufferDoWait(rb);
    }

    rb->array[SC_ATOMIC_GET(rb->write)] = ptr;
    (void) SC_ATOMIC_ADD(rb->write, 1);

#ifdef RINGBUFFER_MUTEX_WAIT
    SCCondSignal(&rb->wait_cond);
#endif
    return 0;
}
Example #24
File: ippair.c  Project: bmeeks8/suricata
/**
 *  \brief Update memcap value
 *
 *  \param size new memcap value
 *
 *  \retval 1 memcap updated
 *  \retval 0 memcap not updated: the new value is lower than current memory use
 */
int IPPairSetMemcap(uint64_t size)
{
    if ((uint64_t)SC_ATOMIC_GET(ippair_memuse) < size) {
        SC_ATOMIC_SET(ippair_config.memcap, size);
        return 1;
    }

    return 0;
}
Example #25
/**
 * Function doing a lookup in the expectation list and updating the Flow if needed.
 *
 * This function looks up an existing expectation that could match the Flow.
 * If one is found and the expectation contains data, the data is stored in the
 * expectation storage of the Flow.
 *
 * \return an AppProto value if found
 * \return ALPROTO_UNKNOWN if not found
 */
AppProto AppLayerExpectationHandle(Flow *f, int direction)
{
    AppProto alproto = ALPROTO_UNKNOWN;
    IPPair *ipp = NULL;
    Expectation *lexp = NULL;
    Expectation *pexp = NULL;

    int x = SC_ATOMIC_GET(expectation_count);
    if (x == 0) {
        return ALPROTO_UNKNOWN;
    }

    /* Call will take reference of the ip pair in 'ipp' */
    Expectation *exp = AppLayerExpectationLookup(f, direction, &ipp);
    if (exp == NULL)
        goto out;

    time_t ctime = f->lastts.tv_sec;

    pexp = NULL;
    while (exp) {
        lexp = exp->next;
        if ( (exp->direction & direction) &&
             ((exp->sp == 0) || (exp->sp == f->sp)) &&
             ((exp->dp == 0) || (exp->dp == f->dp))) {
            alproto = exp->alproto;
            f->alproto_ts = alproto;
            f->alproto_tc = alproto;
            void *fdata = FlowGetStorageById(f, g_expectation_data_id);
            if (fdata) {
                /* We already have an expectation so let's clean this one */
                ExpectationDataFree(exp->data);
            } else {
                /* Transfer ownership of Expectation data to the Flow */
                if (FlowSetStorageById(f, g_expectation_data_id, exp->data) != 0) {
                    SCLogDebug("Unable to set flow storage");
                }
            }
            exp->data = NULL;
            exp = RemoveExpectationAndGetNext(ipp, pexp, exp, lexp);
            continue;
        }
        /* Clean up: remove old/expired entries */
        if (exp && (ctime > exp->ts.tv_sec + EXPECTATION_TIMEOUT)) {
            exp = RemoveExpectationAndGetNext(ipp, pexp, exp, lexp);
            continue;
        }
        pexp = exp;
        exp = lexp;
    }

out:
    if (ipp)
        IPPairRelease(ipp);
    return alproto;
}
Example #26
void TmqhOutputFlowFreeCtx(void *ctx)
{
    int i;
    TmqhFlowCtx *fctx = (TmqhFlowCtx *)ctx;

    SCLogInfo("AutoFP - Total flow handler queues - %" PRIu16,
              fctx->size);
    for (i = 0; i < fctx->size; i++) {
        SCLogInfo("AutoFP - Queue %-2"PRIu32 " - pkts: %-12"PRIu64" flows: %-12"PRIu64, i,
                SC_ATOMIC_GET(fctx->queues[i].total_packets),
                SC_ATOMIC_GET(fctx->queues[i].total_flows));
        SC_ATOMIC_DESTROY(fctx->queues[i].total_packets);
        SC_ATOMIC_DESTROY(fctx->queues[i].total_flows);
    }

    SCFree(fctx->queues);

    return;
}
Example #27
/**
 *  \internal
 *
 *  \brief Store the waldo file based on the file_id
 *
 *  \param path full path for the waldo file
 */
static void LogFilestoreLogStoreWaldo(const char *path) {
    char line[16] = "";

    if (SC_ATOMIC_GET(file_id) == 0) {
        SCReturn;
    }

    FILE *fp = fopen(path, "w");
    if (fp == NULL) {
        SCLogInfo("couldn't open waldo: %s", strerror(errno));
        SCReturn;
    }

    snprintf(line, sizeof(line), "%u\n", SC_ATOMIC_GET(file_id));
    if (fwrite(line, strlen(line), 1, fp) != 1) {
        SCLogError(SC_ERR_FWRITE, "fwrite failed: %s", strerror(errno));
    }
    fclose(fp);
}
Example #28
/** \brief print some host stats
 *  \warning Not thread safe */
void HostPrintStats (void)
{
#ifdef HOSTBITS_STATS
    SCLogInfo("hostbits added: %" PRIu32 ", removed: %" PRIu32 ", max memory usage: %" PRIu32 "",
        hostbits_added, hostbits_removed, hostbits_memuse_max);
#endif /* HOSTBITS_STATS */
    SCLogInfo("host memory usage: %"PRIu64" bytes, maximum: %"PRIu64,
            SC_ATOMIC_GET(host_memuse), host_config.memcap);
    return;
}
Example #29
void *RingBufferSrSwGet(RingBuffer16 *rb) {
    void *ptr = NULL;

    /* buffer is empty, wait... */
    while (SC_ATOMIC_GET(rb->write) == SC_ATOMIC_GET(rb->read)) {
        /* break out if the engine wants to shutdown */
        if (rb->shutdown != 0)
            return NULL;

        RingBufferDoWait(rb);
    }

    ptr = rb->array[SC_ATOMIC_GET(rb->read)];
    (void) SC_ATOMIC_ADD(rb->read, 1);

#ifdef RINGBUFFER_MUTEX_WAIT
    SCCondSignal(&rb->wait_cond);
#endif
    return ptr;
}
Example #30
void ReceivePcapFileThreadExitStats(ThreadVars *tv, void *data) {
    SCEnter();
    PcapFileThreadVars *ptv = (PcapFileThreadVars *)data;

    if (pcap_g.conf_checksum_mode == CHECKSUM_VALIDATION_AUTO &&
            pcap_g.cnt < CHECKSUM_SAMPLE_COUNT &&
            SC_ATOMIC_GET(pcap_g.invalid_checksums)) {
        uint64_t chrate = pcap_g.cnt / SC_ATOMIC_GET(pcap_g.invalid_checksums);
        if (chrate < CHECKSUM_INVALID_RATIO)
            SCLogWarning(SC_ERR_INVALID_CHECKSUM,
                         "1/%" PRIu64 "th of packets have an invalid checksum,"
                         " consider setting pcap-file.checksum-checks variable to no"
                         " or use '-k none' option on command line.",
                         chrate);
        else
            SCLogInfo("1/%" PRIu64 "th of packets have an invalid checksum",
                      chrate);
    }
    SCLogNotice("Pcap-file module read %" PRIu32 " packets, %" PRIu64 " bytes", ptv->pkts, ptv->bytes);
    return;
}