Example No. 1
/**
 * \brief select the queue to output in a round robin fashion.
 *
 * \param tv thread vars
 * \param p packet
 */
void TmqhOutputFlowRoundRobin(ThreadVars *tv, Packet *p)
{
    int32_t qid = 0;

    TmqhFlowCtx *ctx = (TmqhFlowCtx *)tv->outctx;

    /* if no flow we use the first queue,
     * should be rare */
    if (p->flow != NULL) {
        qid = SC_ATOMIC_GET(p->flow->autofp_tmqh_flow_qid);
        if (qid == -1) {
            qid = SC_ATOMIC_ADD(ctx->round_robin_idx, 1);
            if (qid >= ctx->size) {
                SC_ATOMIC_RESET(ctx->round_robin_idx);
                qid = 0;
            }
            (void) SC_ATOMIC_ADD(ctx->queues[qid].total_flows, 1);
            (void) SC_ATOMIC_SET(p->flow->autofp_tmqh_flow_qid, qid);
        }
    } else {
        qid = ctx->last++;

        if (ctx->last == ctx->size)
            ctx->last = 0;
    }
    (void) SC_ATOMIC_ADD(ctx->queues[qid].total_packets, 1);

    PacketQueue *q = ctx->queues[qid].q;
    SCMutexLock(&q->mutex_q);
    PacketEnqueue(q, p);
    SCCondSignal(&q->cond_q);
    SCMutexUnlock(&q->mutex_q);

    return;
}
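Note: a minimal standalone sketch of the same round-robin selection idea, using standard C11 atomics instead of Suricata's SC_ATOMIC_* wrappers (an assumption about their semantics) and a modulo wrap in place of the reset-at-size logic above; the queue count is an illustrative constant.

#include <stdatomic.h>
#include <stdio.h>

#define NQUEUES 4   /* illustrative queue count */

static atomic_uint round_robin_idx;

/* pick the next output queue id, wrapping at NQUEUES */
static unsigned PickQueueRoundRobin(void)
{
    return atomic_fetch_add(&round_robin_idx, 1) % NQUEUES;
}

int main(void)
{
    for (int i = 0; i < 10; i++)
        printf("packet %d -> queue %u\n", i, PickQueueRoundRobin());
    return 0;
}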
Example No. 2
static int NFQCallBack(struct nfq_q_handle *qh, struct nfgenmsg *nfmsg,
                       struct nfq_data *nfa, void *data)
{
    NFQThreadVars *ntv = (NFQThreadVars *)data;
    ThreadVars *tv = ntv->tv;
    int ret;

    /* grab a packet */
    Packet *p = PacketGetFromQueueOrAlloc();
    if (p == NULL) {
        return -1;
    }
    PKT_SET_SRC(p, PKT_SRC_WIRE);

    p->nfq_v.nfq_index = ntv->nfq_index;
    /* if bypass mask is set then we may want to bypass so set pointer */
    if (nfq_config.bypass_mask) {
        p->BypassPacketsFlow = NFQBypassCallback;
    }
    ret = NFQSetupPkt(p, qh, (void *)nfa);
    if (ret == -1) {
#ifdef COUNTERS
        NFQQueueVars *q = NFQGetQueue(ntv->nfq_index);
        q->errs++;
        q->pkts++;
        q->bytes += GET_PKT_LEN(p);
#endif /* COUNTERS */
        (void) SC_ATOMIC_ADD(ntv->livedev->pkts, 1);

        /* NFQSetupPkt is issuing a verdict
           so we only recycle Packet and leave */
        TmqhOutputPacketpool(tv, p);
        return 0;
    }

    p->ReleasePacket = NFQReleasePacket;

#ifdef COUNTERS
    NFQQueueVars *q = NFQGetQueue(ntv->nfq_index);
    q->pkts++;
    q->bytes += GET_PKT_LEN(p);
#endif /* COUNTERS */
    (void) SC_ATOMIC_ADD(ntv->livedev->pkts, 1);

    if (ntv->slot) {
        if (TmThreadsSlotProcessPkt(tv, ntv->slot, p) != TM_ECODE_OK) {
            TmqhOutputPacketpool(ntv->tv, p);
            return -1;
        }
    } else {
        /* pass on... */
        tv->tmqh_out(tv, p);
    }

    return 0;
}
Example No. 3
/**
* \brief extract information from config file
*
* The returned structure will be freed by the thread init function.
* It is thus necessary to either copy the structure before giving it
* to the thread, or to reparse the file for each thread (and thus have
* a new structure).
*
* \return a NetmapIfaceConfig corresponding to the interface name
*/
static void *ParseNetmapConfig(const char *iface_name)
{
    ConfNode *if_root = NULL;
    ConfNode *if_default = NULL;
    ConfNode *netmap_node;
    char *out_iface = NULL;

    if (iface_name == NULL) {
        return NULL;
    }

    NetmapIfaceConfig *aconf = SCMalloc(sizeof(*aconf));
    if (unlikely(aconf == NULL)) {
        return NULL;
    }

    memset(aconf, 0, sizeof(*aconf));
    aconf->DerefFunc = NetmapDerefConfig;
    strlcpy(aconf->iface_name, iface_name, sizeof(aconf->iface_name));
    SC_ATOMIC_INIT(aconf->ref);
    (void) SC_ATOMIC_ADD(aconf->ref, 1);

    /* Find initial node */
    netmap_node = ConfGetNode("netmap");
    if (netmap_node == NULL) {
        SCLogInfo("Unable to find netmap config using default value");
    } else {
        if_root = ConfFindDeviceConfig(netmap_node, aconf->iface_name);
        if_default = ConfFindDeviceConfig(netmap_node, "default");
    }

    /* parse settings for capture iface */
    ParseNetmapSettings(&aconf->in, aconf->iface_name, if_root, if_default);

    /* if we have a copy iface, parse that as well */
    if (netmap_node != NULL) {
        if (ConfGetChildValueWithDefault(if_root, if_default, "copy-iface", &out_iface) == 1) {
            if (strlen(out_iface) > 0) {
                if_root = ConfFindDeviceConfig(netmap_node, out_iface);
                ParseNetmapSettings(&aconf->out, out_iface, if_root, if_default);
            }
        }
    }

    SC_ATOMIC_RESET(aconf->ref);
    (void) SC_ATOMIC_ADD(aconf->ref, aconf->in.threads);
    SCLogPerf("Using %d threads for interface %s", aconf->in.threads,
            aconf->iface_name);

    return aconf;
}
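Note: a small standalone sketch of the reference counting ParseNetmapConfig sets up on aconf->ref: the count is set to the number of capture threads and each thread drops one reference when it finishes, so the last one frees the structure. The names below are illustrative, not the real netmap source API; C11 atomics stand in for SC_ATOMIC_*.

#include <stdatomic.h>
#include <stdlib.h>

typedef struct IfaceConfigSketch_ {
    _Atomic int ref;      /* one reference per capture thread */
    int threads;
} IfaceConfigSketch;

static IfaceConfigSketch *ConfigNew(int threads)
{
    IfaceConfigSketch *c = calloc(1, sizeof(*c));
    if (c == NULL)
        return NULL;
    c->threads = threads;
    atomic_store(&c->ref, threads);
    return c;
}

static void ConfigDeref(IfaceConfigSketch *c)
{
    /* the thread that drops the last reference frees the config */
    if (atomic_fetch_sub(&c->ref, 1) == 1)
        free(c);
}

int main(void)
{
    IfaceConfigSketch *c = ConfigNew(2);
    if (c != NULL) {
        ConfigDeref(c);   /* thread 1 done */
        ConfigDeref(c);   /* thread 2 done, config is freed here */
    }
    return 0;
}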
Example No. 4
static void PcapCallbackLoop(char *user, struct pcap_pkthdr *h, u_char *pkt)
{
    SCEnter();

    PcapThreadVars *ptv = (PcapThreadVars *)user;
    Packet *p = PacketGetFromQueueOrAlloc();
    struct timeval current_time;

    if (unlikely(p == NULL)) {
        SCReturn;
    }

    PKT_SET_SRC(p, PKT_SRC_WIRE);
    p->ts.tv_sec = h->ts.tv_sec;
    p->ts.tv_usec = h->ts.tv_usec;
    SCLogDebug("p->ts.tv_sec %"PRIuMAX"", (uintmax_t)p->ts.tv_sec);
    p->datalink = ptv->datalink;

    ptv->pkts++;
    ptv->bytes += h->caplen;
    (void) SC_ATOMIC_ADD(ptv->livedev->pkts, 1);
    p->livedev = ptv->livedev;

    if (unlikely(PacketCopyData(p, pkt, h->caplen))) {
        TmqhOutputPacketpool(ptv->tv, p);
        SCReturn;
    }

    switch (ptv->checksum_mode) {
        case CHECKSUM_VALIDATION_AUTO:
            if (ptv->livedev->ignore_checksum) {
                p->flags |= PKT_IGNORE_CHECKSUM;
            } else if (ChecksumAutoModeCheck(ptv->pkts,
                        SC_ATOMIC_GET(ptv->livedev->pkts),
                        SC_ATOMIC_GET(ptv->livedev->invalid_checksums))) {
                ptv->livedev->ignore_checksum = 1;
                p->flags |= PKT_IGNORE_CHECKSUM;
            }
            break;
        case CHECKSUM_VALIDATION_DISABLE:
            p->flags |= PKT_IGNORE_CHECKSUM;
            break;
        default:
            break;
    }

    if (TmThreadsSlotProcessPkt(ptv->tv, ptv->slot, p) != TM_ECODE_OK) {
        pcap_breakloop(ptv->pcap_handle);
        ptv->cb_result = TM_ECODE_FAILED;
    }

    /* Trigger one dump of stats every second */
    TimeGet(&current_time);
    if (current_time.tv_sec != ptv->last_stats_dump) {
        PcapDumpCounters(ptv);
        ptv->last_stats_dump = current_time.tv_sec;
    }

    SCReturn;
}
Example No. 5
/**
 *  \brief put a ptr in the RingBuffer.
 *
 *  As we support multiple writers we need to protect 2 things:
 *   1. writing the ptr to the array
 *   2. incrementing the rb->write idx
 *
 *  We can't do both at the same time in one atomic operation, so
 *  we need to (spin) lock it. We do increment rb->write atomically
 *  after that, so that we don't need to use the lock in our *Get
 *  function.
 *
 *  \param rb the ringbuffer
 *  \param ptr ptr to store
 *
 *  \retval 0 ok
 *  \retval -1 wait loop interrupted because of engine flags
 */
int RingBufferMrMwPut(RingBuffer16 *rb, void *ptr) {
    SCLogDebug("ptr %p", ptr);

    /* buffer is full, wait... */
retry:
    while ((unsigned short)(SC_ATOMIC_GET(rb->write) + 1) == SC_ATOMIC_GET(rb->read)) {
        /* break out if the engine wants to shutdown */
        if (rb->shutdown != 0)
            return -1;

        RingBufferDoWait(rb);
    }

    /* get our lock */
    SCSpinLock(&rb->spin);
    /* if while we got our lock the buffer changed, we need to retry */
    if ((unsigned short)(SC_ATOMIC_GET(rb->write) + 1) == SC_ATOMIC_GET(rb->read)) {
        SCSpinUnlock(&rb->spin);
        goto retry;
    }

    SCLogDebug("rb->write %u, ptr %p", SC_ATOMIC_GET(rb->write), ptr);

    /* update the ring buffer */
    rb->array[SC_ATOMIC_GET(rb->write)] = ptr;
    (void) SC_ATOMIC_ADD(rb->write, 1);
    SCSpinUnlock(&rb->spin);
    SCLogDebug("ptr %p, done", ptr);

#ifdef RINGBUFFER_MUTEX_WAIT
    SCCondSignal(&rb->wait_cond);
#endif
    return 0;
}
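Note: a standalone sketch of the "lock, re-check the full condition, store the pointer, then atomically bump the write index" sequence described in the comment above. It uses a 256-slot ring with 8-bit indices and a pthread mutex standing in for SCSpinLock; the wait/retry loop is left to the caller.

#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>

#define RB_SIZE 256

typedef struct RingBuffer8_ {
    _Atomic uint8_t write;               /* next slot to write */
    _Atomic uint8_t read;                /* next slot to read */
    void *array[RB_SIZE];
    pthread_mutex_t lock;                /* serializes concurrent writers */
} RingBuffer8;

/* returns 0 on success, -1 if the buffer is full */
static int RingBufferMwPut(RingBuffer8 *rb, void *ptr)
{
    pthread_mutex_lock(&rb->lock);
    /* full when advancing write would catch up with read */
    if ((uint8_t)(atomic_load(&rb->write) + 1) == atomic_load(&rb->read)) {
        pthread_mutex_unlock(&rb->lock);
        return -1;                       /* caller decides whether to wait/retry */
    }
    rb->array[atomic_load(&rb->write)] = ptr;   /* 1. store the pointer */
    atomic_fetch_add(&rb->write, 1);            /* 2. then publish it   */
    pthread_mutex_unlock(&rb->lock);
    return 0;
}

int main(void)
{
    static RingBuffer8 rb = { .lock = PTHREAD_MUTEX_INITIALIZER };
    int value = 42;
    return RingBufferMwPut(&rb, &value) == 0 ? 0 : 1;
}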
Example No. 6
/**
 *  \brief Check if alloc'ing "size" would mean we're over memcap
 *
 *  \retval 1 if in bounds
 *  \retval 0 if not in bounds
 */
int HTPCheckMemcap(uint64_t size)
{
    if (htp_config_memcap == 0 || size + SC_ATOMIC_GET(htp_memuse) <= htp_config_memcap)
        return 1;
    (void) SC_ATOMIC_ADD(htp_memcap, 1);
    return 0;
}
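Note: a standalone sketch of the same memcap check pattern, assuming SC_ATOMIC_GET / SC_ATOMIC_ADD map onto a plain atomic load and fetch-add. A cap of 0 is treated as "unlimited", as above, and the hit counter mirrors the SC_ATOMIC_ADD(htp_memcap, 1) bookkeeping.

#include <inttypes.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t memcap = 1024;            /* configured limit, 0 = unlimited */
static _Atomic uint64_t memuse;           /* current usage */
static _Atomic uint64_t memcap_hits;      /* times the cap blocked a request */

/* returns 1 if "size" still fits under the memcap, 0 otherwise */
static int CheckMemcap(uint64_t size)
{
    if (memcap == 0 || size + atomic_load(&memuse) <= memcap)
        return 1;
    atomic_fetch_add(&memcap_hits, 1);    /* count the rejection */
    return 0;
}

int main(void)
{
    printf("512 fits? %d\n", CheckMemcap(512));
    atomic_fetch_add(&memuse, 900);
    printf("512 fits now? %d (hits=%" PRIu64 ")\n", CheckMemcap(512),
           atomic_load(&memcap_hits));
    return 0;
}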
Example No. 7
static inline void PfringDumpCounters(PfringThreadVars *ptv)
{
    pfring_stat pfring_s;
    if (likely((pfring_stats(ptv->pd, &pfring_s) >= 0))) {
        /* The pfring counter is per socket and is not cleared after a read.
         * So to get the number of packets on the interface we take the
         * packets and drops newly seen by this thread and add them to the
         * interface counters. */
        uint64_t th_pkts = StatsGetLocalCounterValue(ptv->tv, ptv->capture_kernel_packets);
        uint64_t th_drops = StatsGetLocalCounterValue(ptv->tv, ptv->capture_kernel_drops);
        SC_ATOMIC_ADD(ptv->livedev->pkts, pfring_s.recv - th_pkts);
        SC_ATOMIC_ADD(ptv->livedev->drop, pfring_s.drop - th_drops);
        StatsSetUI64(ptv->tv, ptv->capture_kernel_packets, pfring_s.recv);
        StatsSetUI64(ptv->tv, ptv->capture_kernel_drops, pfring_s.drop);
    }
}
Example No. 8
PktProfiling *SCProfilePacketStart(void)
{
    uint64_t sample = SC_ATOMIC_ADD(samples, 1);
    if (sample % rate == 0)
        return SCCalloc(1, sizeof(PktProfiling));
    else
        return NULL;
}
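Note: a standalone sketch of the 1-in-N sampling SCProfilePacketStart performs with the shared samples counter. Whether the SC_ATOMIC_ADD wrapper returns the pre- or post-increment value only shifts which packets get picked, not the 1-in-rate behaviour; the rate value and the struct below are placeholders.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static _Atomic uint64_t samples;
static const uint64_t rate = 100;         /* profile 1 out of every 100 packets */

typedef struct PktProfilingSketch_ { uint64_t ticks_spent; } PktProfilingSketch;

static PktProfilingSketch *ProfileStart(void)
{
    uint64_t sample = atomic_fetch_add(&samples, 1);
    if (sample % rate == 0)
        return calloc(1, sizeof(PktProfilingSketch));  /* this packet is sampled */
    return NULL;                                       /* not sampled: no allocation */
}

int main(void)
{
    int sampled = 0;
    for (int i = 0; i < 1000; i++) {
        PktProfilingSketch *p = ProfileStart();
        if (p != NULL) {
            sampled++;
            free(p);
        }
    }
    printf("sampled %d of 1000 packets\n", sampled);
    return 0;
}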
Example No. 9
/** \internal
 *  \brief Get a host from the hash directly.
 *
 *  Called in conditions where the spare queue is empty and memcap is reached.
 *
 *  Walks the hash until a host can be freed. "host_prune_idx" atomic int makes
 *  sure we don't start at the top each time since that would clear the top of
 *  the hash leading to longer and longer search times under high pressure (observed).
 *
 *  \retval h host or NULL
 */
static Host *HostGetUsedHost(void) {
    uint32_t idx = SC_ATOMIC_GET(host_prune_idx) % host_config.hash_size;
    uint32_t cnt = host_config.hash_size;

    while (cnt--) {
        if (++idx >= host_config.hash_size)
            idx = 0;

        HostHashRow *hb = &host_hash[idx];
        if (hb == NULL)
            continue;

        if (HRLOCK_TRYLOCK(hb) != 0)
            continue;

        Host *h = hb->tail;
        if (h == NULL) {
            HRLOCK_UNLOCK(hb);
            continue;
        }

        if (SCMutexTrylock(&h->m) != 0) {
            HRLOCK_UNLOCK(hb);
            continue;
        }

        /** never prune a host that is used by a packet
         *  we are currently processing in one of the threads */
        if (SC_ATOMIC_GET(h->use_cnt) > 0) {
            HRLOCK_UNLOCK(hb);
            SCMutexUnlock(&h->m);
            continue;
        }

        /* remove from the hash */
        if (h->hprev != NULL)
            h->hprev->hnext = h->hnext;
        if (h->hnext != NULL)
            h->hnext->hprev = h->hprev;
        if (hb->head == h)
            hb->head = h->hnext;
        if (hb->tail == h)
            hb->tail = h->hprev;

        h->hnext = NULL;
        h->hprev = NULL;
        HRLOCK_UNLOCK(hb);

        HostClearMemory (h);

        SCMutexUnlock(&h->m);

        (void) SC_ATOMIC_ADD(host_prune_idx, (host_config.hash_size - cnt));
        return h;
    }

    return NULL;
}
Example No. 10
/** \internal
 *  \brief Get a flow from the hash directly.
 *
 *  Called in conditions where the spare queue is empty and memcap is reached.
 *
 *  Walks the hash until a flow can be freed. Timeouts are disregarded, use_cnt
 *  is adhered to. "flow_prune_idx" atomic int makes sure we don't start at the
 *  top each time since that would clear the top of the hash leading to longer
 *  and longer search times under high pressure (observed).
 *
 *  \retval f flow or NULL
 */
static Flow *FlowGetUsedFlow(void)
{
    uint32_t idx = SC_ATOMIC_GET(flow_prune_idx) % flow_config.hash_size;
    uint32_t cnt = flow_config.hash_size;

    while (cnt--) {
        if (++idx >= flow_config.hash_size)
            idx = 0;

        FlowBucket *fb = &flow_hash[idx];

        if (FBLOCK_TRYLOCK(fb) != 0)
            continue;

        Flow *f = fb->tail;
        if (f == NULL) {
            FBLOCK_UNLOCK(fb);
            continue;
        }

        if (FLOWLOCK_TRYWRLOCK(f) != 0) {
            FBLOCK_UNLOCK(fb);
            continue;
        }

        /** never prune a flow that is used by a packet or stream msg
         *  we are currently processing in one of the threads */
        if (SC_ATOMIC_GET(f->use_cnt) > 0) {
            FBLOCK_UNLOCK(fb);
            FLOWLOCK_UNLOCK(f);
            continue;
        }

        /* remove from the hash */
        if (f->hprev != NULL)
            f->hprev->hnext = f->hnext;
        if (f->hnext != NULL)
            f->hnext->hprev = f->hprev;
        if (fb->head == f)
            fb->head = f->hnext;
        if (fb->tail == f)
            fb->tail = f->hprev;

        f->hnext = NULL;
        f->hprev = NULL;
        f->fb = NULL;
        FBLOCK_UNLOCK(fb);

        FlowClearMemory(f, f->protomap);

        FLOWLOCK_UNLOCK(f);

        (void) SC_ATOMIC_ADD(flow_prune_idx, (flow_config.hash_size - cnt));
        return f;
    }

    return NULL;
}
Example No. 11
/**
 * \brief select the queue to output to based on queue lengths.
 *
 * \param tv thread vars
 * \param p packet
 */
void TmqhOutputFlowActivePackets(ThreadVars *tv, Packet *p)
{
    int32_t qid = 0;

    TmqhFlowCtx *ctx = (TmqhFlowCtx *)tv->outctx;

    /* if no flow we use the first queue,
     * should be rare */
    if (p->flow != NULL) {
        qid = SC_ATOMIC_GET(p->flow->autofp_tmqh_flow_qid);
        if (qid == -1) {
            uint16_t i = 0;
            int lowest_id = 0;
            TmqhFlowMode *queues = ctx->queues;
            uint32_t lowest = queues[i].q->len;
            for (i = 1; i < ctx->size; i++) {
                if (queues[i].q->len < lowest) {
                    lowest = queues[i].q->len;
                    lowest_id = i;
                }
            }
            qid = lowest_id;
            (void) SC_ATOMIC_SET(p->flow->autofp_tmqh_flow_qid, lowest_id);
            (void) SC_ATOMIC_ADD(ctx->queues[qid].total_flows, 1);
        }
    } else {
        qid = ctx->last++;

        if (ctx->last == ctx->size)
            ctx->last = 0;
    }
    (void) SC_ATOMIC_ADD(ctx->queues[qid].total_packets, 1);

    PacketQueue *q = ctx->queues[qid].q;
    SCMutexLock(&q->mutex_q);
    PacketEnqueue(q, p);
#ifdef __tile__
    q->cond_q = 1;
#else
    SCCondSignal(&q->cond_q);
#endif
    SCMutexUnlock(&q->mutex_q);

    return;
}
Example No. 12
/** \internal
 *  \brief Get a tracker from the hash directly.
 *
 *  Called in conditions where the spare queue is empty and memcap is reached.
 *
 *  Walks the hash until a tracker can be freed. "defragtracker_prune_idx" atomic int makes
 *  sure we don't start at the top each time since that would clear the top of
 *  the hash leading to longer and longer search times under high pressure (observed).
 *
 *  \retval dt tracker or NULL
 */
static DefragTracker *DefragTrackerGetUsedDefragTracker(void)
{
    uint32_t idx = SC_ATOMIC_GET(defragtracker_prune_idx) % defrag_config.hash_size;
    uint32_t cnt = defrag_config.hash_size;

    while (cnt--) {
        if (++idx >= defrag_config.hash_size)
            idx = 0;

        DefragTrackerHashRow *hb = &defragtracker_hash[idx];

        if (DRLOCK_TRYLOCK(hb) != 0)
            continue;

        DefragTracker *dt = hb->tail;
        if (dt == NULL) {
            DRLOCK_UNLOCK(hb);
            continue;
        }

        if (SCMutexTrylock(&dt->lock) != 0) {
            DRLOCK_UNLOCK(hb);
            continue;
        }

        /** never prune a tracker that is used by a packet
         *  we are currently processing in one of the threads */
        if (SC_ATOMIC_GET(dt->use_cnt) > 0) {
            DRLOCK_UNLOCK(hb);
            SCMutexUnlock(&dt->lock);
            continue;
        }

        /* remove from the hash */
        if (dt->hprev != NULL)
            dt->hprev->hnext = dt->hnext;
        if (dt->hnext != NULL)
            dt->hnext->hprev = dt->hprev;
        if (hb->head == dt)
            hb->head = dt->hnext;
        if (hb->tail == dt)
            hb->tail = dt->hprev;

        dt->hnext = NULL;
        dt->hprev = NULL;
        DRLOCK_UNLOCK(hb);

        DefragTrackerClearMemory(dt);

        SCMutexUnlock(&dt->lock);

        (void) SC_ATOMIC_ADD(defragtracker_prune_idx, (defrag_config.hash_size - cnt));
        return dt;
    }

    return NULL;
}
Example No. 13
void PacketPoolWait(void)
{
    PktPool *my_pool = GetThreadPacketPool();

    if (PacketPoolIsEmpty(my_pool)) {
        SCMutexLock(&my_pool->return_stack.mutex);
        SC_ATOMIC_ADD(my_pool->return_stack.sync_now, 1);
        SCCondWait(&my_pool->return_stack.cond, &my_pool->return_stack.mutex);
        SCMutexUnlock(&my_pool->return_stack.mutex);
    }

    while(PacketPoolIsEmpty(my_pool))
        cc_barrier();
}
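Note: a standalone sketch of the wait/wake handshake used above: a consumer that finds its pool empty bumps an atomic sync_now counter and blocks on the return-stack condition variable, and a thread returning packets signals it. The names and fields are illustrative, not the real packet pool API, and the sketch re-checks in a loop instead of following the wait with the busy loop the original uses.

#include <pthread.h>
#include <stdatomic.h>

typedef struct ReturnStack_ {
    pthread_mutex_t mutex;
    pthread_cond_t cond;
    _Atomic int sync_now;                 /* consumers waiting for packets */
    int depth;                            /* packets available to pick up  */
} ReturnStack;

/* consumer side: wait until at least one packet has been returned */
static void PoolWait(ReturnStack *rs)
{
    pthread_mutex_lock(&rs->mutex);
    atomic_fetch_add(&rs->sync_now, 1);
    while (rs->depth == 0)                /* re-check guards spurious wakeups */
        pthread_cond_wait(&rs->cond, &rs->mutex);
    atomic_fetch_sub(&rs->sync_now, 1);
    pthread_mutex_unlock(&rs->mutex);
}

/* producer side: return a packet and wake a waiter if one asked for sync */
static void PoolReturn(ReturnStack *rs)
{
    pthread_mutex_lock(&rs->mutex);
    rs->depth++;
    if (atomic_load(&rs->sync_now) > 0)
        pthread_cond_signal(&rs->cond);
    pthread_mutex_unlock(&rs->mutex);
}

static ReturnStack g_rs = {
    .mutex = PTHREAD_MUTEX_INITIALIZER,
    .cond = PTHREAD_COND_INITIALIZER,
};

static void *producer(void *arg)
{
    (void)arg;
    PoolReturn(&g_rs);                    /* hand one packet back */
    return NULL;
}

int main(void)
{
    pthread_t t;
    pthread_create(&t, NULL, producer, NULL);
    PoolWait(&g_rs);                      /* blocks until the producer returns one */
    pthread_join(t, NULL);
    return 0;
}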
Example No. 14
/**
 * \brief This function is used to add a tag to a session (type session)
 *        or update it if it's already installed. The number of times to
 *        allow an update is limited by DETECT_TAG_MATCH_LIMIT. This way
 *        repetitive matches to the same rule are limited of setting tags,
 *        to avoid DOS attacks
 *
 * \param p pointer to the current packet
 * \param tde pointer to the new DetectTagDataEntry
 *
 * \retval 0 if the tde was added successfully
 * \retval 1 if an entry for this sid/gid already existed and was updated
 */
int TagFlowAdd(Packet *p, DetectTagDataEntry *tde) {
    uint8_t updated = 0;
    uint16_t num_tags = 0;
    DetectTagDataEntry *iter = NULL;

    if (p->flow == NULL)
        return 1;

    FLOWLOCK_WRLOCK(p->flow);

    if (p->flow->tag_list != NULL) {
        iter = p->flow->tag_list;

        /* First iterate installed entries searching a duplicated sid/gid */
        for (; iter != NULL; iter = iter->next) {
            num_tags++;

            if (iter->sid == tde->sid && iter->gid == tde->gid) {
                iter->cnt_match++;

                /* If so, update data, unless the maximum MATCH limit is
                 * reached. This prevents possible DoS attacks */
                if (iter->cnt_match < DETECT_TAG_MATCH_LIMIT) {
                    /* Reset time and counters */
                    iter->first_ts = iter->last_ts = tde->first_ts;
                    iter->packets = 0;
                    iter->bytes = 0;
                }
                updated = 1;
                break;
            }
        }
    }

    /* If there was no entry of this rule, prepend the new tde */
    if (updated == 0 && num_tags < DETECT_TAG_MAX_TAGS) {
        DetectTagDataEntry *new_tde = DetectTagDataCopy(tde);
        if (new_tde != NULL) {
            new_tde->next = p->flow->tag_list;
            p->flow->tag_list = new_tde;
            (void) SC_ATOMIC_ADD(num_tags, 1);
        }
    } else if (num_tags == DETECT_TAG_MAX_TAGS) {
        SCLogDebug("Max tags for sessions reached (%"PRIu16")", num_tags);
    }

    FLOWLOCK_UNLOCK(p->flow);
    return updated;
}
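Note: a standalone sketch of the capped, prepend-only tag list handling above (flow locking and the atomic global counter are omitted; the limit and the entry fields are placeholders, not the real DetectTagDataEntry layout).

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_TAGS 50

typedef struct TagEntry_ {
    unsigned sid, gid;
    unsigned cnt_match;
    struct TagEntry_ *next;
} TagEntry;

/* returns 1 if an existing sid/gid entry was updated, 0 if a new one was added */
static int TagListAdd(TagEntry **list, const TagEntry *tde)
{
    unsigned num = 0;

    for (TagEntry *it = *list; it != NULL; it = it->next) {
        num++;
        if (it->sid == tde->sid && it->gid == tde->gid) {
            it->cnt_match++;             /* refresh the existing entry */
            return 1;
        }
    }

    if (num < MAX_TAGS) {
        TagEntry *copy = malloc(sizeof(*copy));
        if (copy != NULL) {
            memcpy(copy, tde, sizeof(*copy));
            copy->next = *list;          /* prepend the new entry */
            *list = copy;
        }
    }
    return 0;
}

int main(void)
{
    TagEntry *list = NULL;
    TagEntry tde = { .sid = 1, .gid = 1, .cnt_match = 0, .next = NULL };

    printf("first add updated? %d\n", TagListAdd(&list, &tde));   /* 0: added */
    printf("second add updated? %d\n", TagListAdd(&list, &tde));  /* 1: updated */
    return 0;
}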
Example No. 15
static inline void PfringDumpCounters(PfringThreadVars *ptv)
{
    pfring_stat pfring_s;
    if (likely((pfring_stats(ptv->pd, &pfring_s) >= 0))) {
        /* The pfring counter is per socket and is not cleared after a read.
         * So to get the number of packets on the interface we take the
         * packets and drops newly seen by this thread and add them to the
         * interface counters. */
        uint64_t th_pkts = StatsGetLocalCounterValue(ptv->tv, ptv->capture_kernel_packets);
        uint64_t th_drops = StatsGetLocalCounterValue(ptv->tv, ptv->capture_kernel_drops);
        SC_ATOMIC_ADD(ptv->livedev->pkts, pfring_s.recv - th_pkts);
        SC_ATOMIC_ADD(ptv->livedev->drop, pfring_s.drop - th_drops);
        StatsSetUI64(ptv->tv, ptv->capture_kernel_packets, pfring_s.recv);
        StatsSetUI64(ptv->tv, ptv->capture_kernel_drops, pfring_s.drop);

#ifdef HAVE_PF_RING_FLOW_OFFLOAD
        if (ptv->flags & PFRING_FLAGS_BYPASS) {
            uint64_t th_bypassed = StatsGetLocalCounterValue(ptv->tv, ptv->capture_bypassed);
            SC_ATOMIC_ADD(ptv->livedev->bypassed, pfring_s.shunt - th_bypassed);
            StatsSetUI64(ptv->tv, ptv->capture_bypassed, pfring_s.shunt);
        }
#endif
    }
}
Example No. 16
/* see if we want to profile rules for this packet */
int SCProfileRuleStart(Packet *p)
{
#ifdef PROFILE_LOCKING
    if (p->profile != NULL) {
        p->flags |= PKT_PROFILE;
        return 1;
    }
#else
    uint64_t sample = SC_ATOMIC_ADD(samples, 1);
    if (sample % rate == 0) {
        p->flags |= PKT_PROFILE;
        return 1;
    }
#endif
    return 0;
}
Example No. 17
int RingBufferSrSwPut(RingBuffer16 *rb, void *ptr) {
    /* buffer is full, wait... */
    while ((unsigned short)(SC_ATOMIC_GET(rb->write) + 1) == SC_ATOMIC_GET(rb->read)) {
        /* break out if the engine wants to shutdown */
        if (rb->shutdown != 0)
            return -1;

        RingBufferDoWait(rb);
    }

    rb->array[SC_ATOMIC_GET(rb->write)] = ptr;
    (void) SC_ATOMIC_ADD(rb->write, 1);

#ifdef RINGBUFFER_MUTEX_WAIT
    SCCondSignal(&rb->wait_cond);
#endif
    return 0;
}
Example No. 18
/**
 *  \brief Get a new ippair
 *
 *  Get a new ippair. We're checking memcap first and will try to make room
 *  if the memcap is reached.
 *
 *  \retval h *LOCKED* ippair on success, NULL on error.
 */
static IPPair *IPPairGetNew(Address *a, Address *b)
{
    IPPair *h = NULL;

    /* get a ippair from the spare queue */
    h = IPPairDequeue(&ippair_spare_q);
    if (h == NULL) {
        /* If we reached the max memcap, we get a used ippair */
        if (!(IPPAIR_CHECK_MEMCAP(g_ippair_size))) {
            /* declare state of emergency */
            //if (!(SC_ATOMIC_GET(ippair_flags) & IPPAIR_EMERGENCY)) {
            //    SC_ATOMIC_OR(ippair_flags, IPPAIR_EMERGENCY);

                /* under high load, waking up the flow mgr each time leads
                 * to high cpu usage. Flows are not timed out much faster if
                 * we check 1000 times a second. */
            //    FlowWakeupFlowManagerThread();
            //}

            h = IPPairGetUsedIPPair();
            if (h == NULL) {
                return NULL;
            }

            /* freed a ippair, but it's unlocked */
        } else {
            /* now see if we can alloc a new ippair */
            h = IPPairNew(a,b);
            if (h == NULL) {
                return NULL;
            }

            /* ippair is initialized but *unlocked* */
        }
    } else {
        /* ippair has been recycled before it went into the spare queue */

        /* ippair is initialized (recycled) but *unlocked* */
    }

    (void) SC_ATOMIC_ADD(ippair_counter, 1);
    SCMutexLock(&h->m);
    return h;
}
Example No. 19
/**
 *  \brief Get a new defrag tracker
 *
 *  Get a new defrag tracker. We're checking memcap first and will try to make room
 *  if the memcap is reached.
 *
 *  \retval dt *LOCKED* tracker on success, NULL on error.
 */
static DefragTracker *DefragTrackerGetNew(Packet *p)
{
    DefragTracker *dt = NULL;

    /* get a tracker from the spare queue */
    dt = DefragTrackerDequeue(&defragtracker_spare_q);
    if (dt == NULL) {
        /* If we reached the max memcap, we get a used tracker */
        if (!(DEFRAG_CHECK_MEMCAP(sizeof(DefragTracker)))) {
            /* declare state of emergency */
            //if (!(SC_ATOMIC_GET(defragtracker_flags) & DEFRAG_EMERGENCY)) {
            //    SC_ATOMIC_OR(defragtracker_flags, DEFRAG_EMERGENCY);

                /* under high load, waking up the flow mgr each time leads
                 * to high cpu usage. Flows are not timed out much faster if
                 * we check 1000 times a second. */
            //    FlowWakeupFlowManagerThread();
            //}

            dt = DefragTrackerGetUsedDefragTracker();
            if (dt == NULL) {
                return NULL;
            }

            /* freed a tracker, but it's unlocked */
        } else {
            /* now see if we can alloc a new tracker */
            dt = DefragTrackerAlloc();
            if (dt == NULL) {
                return NULL;
            }

            /* tracker is initialized but *unlocked* */
        }
    } else {
        /* tracker has been recycled before it went into the spare queue */

        /* tracker is initialized (recycled) but *unlocked* */
    }

    (void) SC_ATOMIC_ADD(defragtracker_counter, 1);
    SCMutexLock(&dt->lock);
    return dt;
}
Example No. 20
/** \brief allocate a flow
 *
 *  We check against the memuse counter. If it passes that check we increment
 *  the counter first, then we try to alloc.
 *
 *  \retval f the flow or NULL on out of memory
 */
Flow *FlowAlloc(void)
{
    Flow *f;

    if (!(FLOW_CHECK_MEMCAP(sizeof(Flow)))) {
        return NULL;
    }

    (void) SC_ATOMIC_ADD(flow_memuse, sizeof(Flow));

    f = SCMalloc(sizeof(Flow));
    if (f == NULL) {
        (void) SC_ATOMIC_SUB(flow_memuse, sizeof(Flow));
        return NULL;
    }

    FLOW_INITIALIZE(f);
    return f;
}
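Note: a standalone sketch of the "reserve memuse first, roll back on failed malloc" pattern FlowAlloc uses, with C11 atomics standing in for SC_ATOMIC_ADD/SC_ATOMIC_SUB. As in the original, the check and the add are not a single atomic step.

#include <stdatomic.h>
#include <stdint.h>
#include <stdlib.h>

static uint64_t memcap = 1 << 20;         /* 1 MiB cap for the sketch */
static _Atomic uint64_t memuse;

static void *MemcapAlloc(size_t size)
{
    /* check the cap before reserving (not atomic together with the add,
     * mirroring the original) */
    if (size + atomic_load(&memuse) > memcap)
        return NULL;

    /* account for the allocation first... */
    atomic_fetch_add(&memuse, size);

    void *p = malloc(size);
    if (p == NULL) {
        /* ...and undo the accounting if the allocation itself failed */
        atomic_fetch_sub(&memuse, size);
        return NULL;
    }
    return p;
}

int main(void)
{
    void *p = MemcapAlloc(4096);
    if (p != NULL) {
        free(p);
        atomic_fetch_sub(&memuse, 4096);  /* release the accounting too */
    }
    return 0;
}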
Example No. 21
/**
 *  \brief Get a new host
 *
 *  Get a new host. We're checking memcap first and will try to make room
 *  if the memcap is reached.
 *
 *  \retval h *LOCKED* host on success, NULL on error.
 */
static Host *HostGetNew(Address *a) {
    Host *h = NULL;

    /* get a host from the spare queue */
    h = HostDequeue(&host_spare_q);
    if (h == NULL) {
        /* If we reached the max memcap, we get a used host */
        if (!(HOST_CHECK_MEMCAP(sizeof(Host)))) {
            /* declare state of emergency */
            //if (!(SC_ATOMIC_GET(host_flags) & HOST_EMERGENCY)) {
            //    SC_ATOMIC_OR(host_flags, HOST_EMERGENCY);

                /* under high load, waking up the flow mgr each time leads
                 * to high cpu usage. Flows are not timed out much faster if
                 * we check 1000 times a second. */
            //    FlowWakeupFlowManagerThread();
            //}

            h = HostGetUsedHost();
            if (h == NULL) {
                return NULL;
            }

            /* freed a host, but it's unlocked */
        } else {
            /* now see if we can alloc a new host */
            h = HostNew(a);
            if (h == NULL) {
                return NULL;
            }

            /* host is initialized but *unlocked* */
        }
    } else {
        /* host has been recycled before it went into the spare queue */

        /* host is initialized (recycled) but *unlocked* */
    }

    (void) SC_ATOMIC_ADD(host_counter, 1);
    SCMutexLock(&h->m);
    return h;
}
Example No. 22
Host *HostAlloc(void) {
    if (!(HOST_CHECK_MEMCAP(sizeof(Host)))) {
        return NULL;
    }

    (void) SC_ATOMIC_ADD(host_memuse, sizeof(Host));

    Host *h = SCMalloc(sizeof(Host));
    if (unlikely(h == NULL))
        goto error;

    memset(h, 0x00, sizeof(Host));

    SCMutexInit(&h->m, NULL);
    SC_ATOMIC_INIT(h->use_cnt);
    return h;

error:
    return NULL;
}
Example No. 23
void *RingBufferSrSwGet(RingBuffer16 *rb) {
    void *ptr = NULL;

    /* buffer is empty, wait... */
    while (SC_ATOMIC_GET(rb->write) == SC_ATOMIC_GET(rb->read)) {
        /* break out if the engine wants to shutdown */
        if (rb->shutdown != 0)
            return NULL;

        RingBufferDoWait(rb);
    }

    ptr = rb->array[SC_ATOMIC_GET(rb->read)];
    (void) SC_ATOMIC_ADD(rb->read, 1);

#ifdef RINGBUFFER_MUTEX_WAIT
    SCCondSignal(&rb->wait_cond);
#endif
    return ptr;
}
Example No. 24
/** \brief allocate a flow
 *
 *  We check against the memuse counter. If it passes that check we increment
 *  the counter first, then we try to alloc.
 *
 *  \retval f the flow or NULL on out of memory
 */
Flow *FlowAlloc(void)
{
    Flow *f;
    size_t size = sizeof(Flow) + FlowStorageSize();

    if (!(FLOW_CHECK_MEMCAP(size))) {
        return NULL;
    }

    (void) SC_ATOMIC_ADD(flow_memuse, size);

    f = SCMalloc(size);
    if (unlikely(f == NULL)) {
        (void)SC_ATOMIC_SUB(flow_memuse, size);
        return NULL;
    }
    memset(f, 0, size);

    FLOW_INITIALIZE(f);
    return f;
}
Example No. 25
static DefragTracker *DefragTrackerAlloc(void)
{
    if (!(DEFRAG_CHECK_MEMCAP(sizeof(DefragTracker)))) {
        return NULL;
    }

    (void) SC_ATOMIC_ADD(defrag_memuse, sizeof(DefragTracker));

    DefragTracker *dt = SCMalloc(sizeof(DefragTracker));
    if (unlikely(dt == NULL))
        goto error;

    memset(dt, 0x00, sizeof(DefragTracker));

    SCMutexInit(&dt->lock, NULL);
    SC_ATOMIC_INIT(dt->use_cnt);
    return dt;

error:
    return NULL;
}
Example No. 26
IPPair *IPPairAlloc(void)
{
    if (!(IPPAIR_CHECK_MEMCAP(g_ippair_size))) {
        return NULL;
    }

    (void) SC_ATOMIC_ADD(ippair_memuse, g_ippair_size);

    IPPair *h = SCMalloc(g_ippair_size);
    if (unlikely(h == NULL))
        goto error;

    memset(h, 0x00, g_ippair_size);

    SCMutexInit(&h->m, NULL);
    SC_ATOMIC_INIT(h->use_cnt);
    return h;

error:
    return NULL;
}
Example No. 27
/** \brief Wait until we have the requested amount of packets in the pool
 *
 *  In some cases waiting for packets is undesirable. Especially when
 *  a wait would happen under a lock of some kind, other parts of the
 *  engine could have to wait.
 *
 *  This function only returns when at least N packets are in our pool.
 *
 *  \param n number of packets needed
 */
void PacketPoolWaitForN(int n)
{
    PktPool *my_pool = GetThreadPacketPool();
    Packet *p = NULL;

    while (1) {
        int i = 0;
        PacketPoolWait();

        /* count packets in our stack */
        p = my_pool->head;
        while (p != NULL) {
            if (++i == n)
                return;

            p = p->next;
        }

        /* continue counting in the return stack */
        if (my_pool->return_stack.head != NULL) {
            SCMutexLock(&my_pool->return_stack.mutex);
            p = my_pool->return_stack.head;
            while (p != NULL) {
                if (++i == n) {
                    SCMutexUnlock(&my_pool->return_stack.mutex);
                    return;
                }
                p = p->next;
            }
            SCMutexUnlock(&my_pool->return_stack.mutex);

        /* or signal that we need packets and wait */
        } else {
            SCMutexLock(&my_pool->return_stack.mutex);
            SC_ATOMIC_ADD(my_pool->return_stack.sync_now, 1);
            SCCondWait(&my_pool->return_stack.cond, &my_pool->return_stack.mutex);
            SCMutexUnlock(&my_pool->return_stack.mutex);
        }
    }
}
Example No. 28
/**
 * Create an entry in the expectation list
 *
 * Create an expectation from an existing Flow. Currently, only Flows between
 * the two original IP addresses are supported.
 *
 * \param f a pointer to the original Flow
 * \param direction the direction of the data in the expectation flow
 * \param src source port of the expected flow, use 0 for any
 * \param dst destination port of the expected flow, use 0 for any
 * \param alproto the protocol that needs to be set on the expected flow
 * \param data pointer to data that will be attached to the expected flow
 *
 * \return -1 if error
 * \return 0 if success
 */
int AppLayerExpectationCreate(Flow *f, int direction, Port src, Port dst,
                              AppProto alproto, void *data)
{
    Expectation *iexp = NULL;
    IPPair *ipp;
    Address ip_src, ip_dst;

    Expectation *exp = SCCalloc(1, sizeof(*exp));
    if (exp == NULL)
        return -1;

    exp->sp = src;
    exp->dp = dst;
    exp->alproto = alproto;
    exp->ts = f->lastts;
    exp->data = data;
    exp->direction = direction;

    if (GetFlowAddresses(f, &ip_src, &ip_dst) == -1)
        goto error;
    ipp = IPPairGetIPPairFromHash(&ip_src, &ip_dst);
    if (ipp == NULL)
        goto error;

    iexp = IPPairGetStorageById(ipp, g_expectation_id);
    exp->next = iexp;
    IPPairSetStorageById(ipp, g_expectation_id, exp);

    SC_ATOMIC_ADD(expectation_count, 1);
    /* As we are creating the expectation, we release the lock on the IPPair
     * without setting the ref count to 0. This way the IPPair will be kept
     * until cleanup */
    IPPairUnlock(ipp);
    return 0;

error:
    SCFree(exp);
    return -1;
}
Example No. 29
/**
 * \brief select the queue to output to based on address hash.
 *
 * \param tv thread vars.
 * \param p packet.
 */
void TmqhOutputFlowHash(ThreadVars *tv, Packet *p)
{
    int32_t qid = 0;

    TmqhFlowCtx *ctx = (TmqhFlowCtx *)tv->outctx;

    /* if no flow we use the first queue,
     * should be rare */
    if (p->flow != NULL) {
        qid = SC_ATOMIC_GET(p->flow->autofp_tmqh_flow_qid);
        if (qid == -1) {
#if __WORDSIZE == 64
            uint64_t addr = (uint64_t)p->flow;
#else
            uint32_t addr = (uint32_t)p->flow;
#endif
            addr >>= 7;

            /* we don't have to worry about possible overflow, since
             * ctx->size will certainly be less than 2 ** 31 */
            qid = addr % ctx->size;
            (void) SC_ATOMIC_SET(p->flow->autofp_tmqh_flow_qid, qid);
            (void) SC_ATOMIC_ADD(ctx->queues[qid].total_flows, 1);
        }
    } else {
        qid = ctx->last++;

        if (ctx->last == ctx->size)
            ctx->last = 0;
    }
    (void) SC_ATOMIC_ADD(ctx->queues[qid].total_packets, 1);

    PacketQueue *q = ctx->queues[qid].q;
    SCMutexLock(&q->mutex_q);
    PacketEnqueue(q, p);
    SCCondSignal(&q->cond_q);
    SCMutexUnlock(&q->mutex_q);

    return;
}
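Note: a standalone sketch of the address-derived queue choice above: take the flow pointer's value, shift off the low bits that are identical for all allocations, and reduce it modulo the queue count. uintptr_t replaces the __WORDSIZE dispatch, and the queue count is an illustrative parameter.

#include <stdint.h>
#include <stdio.h>

static unsigned PickQueueByAddress(const void *flow, unsigned nqueues)
{
    uintptr_t addr = (uintptr_t)flow;
    addr >>= 7;                     /* drop low bits shared by all allocations */
    return (unsigned)(addr % nqueues);
}

int main(void)
{
    int flows[4];
    for (int i = 0; i < 4; i++)
        printf("flow %p -> queue %u\n", (void *)&flows[i],
               PickQueueByAddress(&flows[i], 3));
    return 0;
}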
Example No. 30
static int
ProfilingGenericTicksTest01(void)
{
#define TEST_RUNS 1024
    uint64_t ticks_start = 0;
    uint64_t ticks_end = 0;
    void *ptr[TEST_RUNS];
    int i;

    ticks_start = UtilCpuGetTicks();
    for (i = 0; i < TEST_RUNS; i++) {
        ptr[i] = SCMalloc(1024);
    }
    ticks_end = UtilCpuGetTicks();
    printf("malloc(1024) %"PRIu64"\n", (ticks_end - ticks_start)/TEST_RUNS);

    ticks_start = UtilCpuGetTicks();
    for (i = 0; i < TEST_RUNS; i++) {
        SCFree(ptr[i]);
    }
    ticks_end = UtilCpuGetTicks();
    printf("SCFree(1024) %"PRIu64"\n", (ticks_end - ticks_start)/TEST_RUNS);

    SCMutex m[TEST_RUNS];

    ticks_start = UtilCpuGetTicks();
    for (i = 0; i < TEST_RUNS; i++) {
        SCMutexInit(&m[i], NULL);
    }
    ticks_end = UtilCpuGetTicks();
    printf("SCMutexInit() %"PRIu64"\n", (ticks_end - ticks_start)/TEST_RUNS);

    ticks_start = UtilCpuGetTicks();
    for (i = 0; i < TEST_RUNS; i++) {
        SCMutexLock(&m[i]);
    }
    ticks_end = UtilCpuGetTicks();
    printf("SCMutexLock() %"PRIu64"\n", (ticks_end - ticks_start)/TEST_RUNS);

    ticks_start = UtilCpuGetTicks();
    for (i = 0; i < TEST_RUNS; i++) {
        SCMutexUnlock(&m[i]);
    }
    ticks_end = UtilCpuGetTicks();
    printf("SCMutexUnlock() %"PRIu64"\n", (ticks_end - ticks_start)/TEST_RUNS);

    ticks_start = UtilCpuGetTicks();
    for (i = 0; i < TEST_RUNS; i++) {
        SCMutexDestroy(&m[i]);
    }
    ticks_end = UtilCpuGetTicks();
    printf("SCMutexDestroy() %"PRIu64"\n", (ticks_end - ticks_start)/TEST_RUNS);

    SCSpinlock s[TEST_RUNS];

    ticks_start = UtilCpuGetTicks();
    for (i = 0; i < TEST_RUNS; i++) {
        SCSpinInit(&s[i], 0);
    }
    ticks_end = UtilCpuGetTicks();
    printf("SCSpinInit() %"PRIu64"\n", (ticks_end - ticks_start)/TEST_RUNS);

    ticks_start = UtilCpuGetTicks();
    for (i = 0; i < TEST_RUNS; i++) {
        SCSpinLock(&s[i]);
    }
    ticks_end = UtilCpuGetTicks();
    printf("SCSpinLock() %"PRIu64"\n", (ticks_end - ticks_start)/TEST_RUNS);

    ticks_start = UtilCpuGetTicks();
    for (i = 0; i < TEST_RUNS; i++) {
        SCSpinUnlock(&s[i]);
    }
    ticks_end = UtilCpuGetTicks();
    printf("SCSpinUnlock() %"PRIu64"\n", (ticks_end - ticks_start)/TEST_RUNS);

    ticks_start = UtilCpuGetTicks();
    for (i = 0; i < TEST_RUNS; i++) {
        SCSpinDestroy(&s[i]);
    }
    ticks_end = UtilCpuGetTicks();
    printf("SCSpinDestroy() %"PRIu64"\n", (ticks_end - ticks_start)/TEST_RUNS);

    SC_ATOMIC_DECL_AND_INIT(unsigned int, test);
    ticks_start = UtilCpuGetTicks();
    for (i = 0; i < TEST_RUNS; i++) {
        (void) SC_ATOMIC_ADD(test,1);
    }
    ticks_end = UtilCpuGetTicks();
    printf("SC_ATOMIC_ADD %"PRIu64"\n", (ticks_end - ticks_start)/TEST_RUNS);

    ticks_start = UtilCpuGetTicks();
    for (i = 0; i < TEST_RUNS; i++) {
        SC_ATOMIC_CAS(&test,i,i+1);
    }
    ticks_end = UtilCpuGetTicks();
    printf("SC_ATOMIC_CAS %"PRIu64"\n", (ticks_end - ticks_start)/TEST_RUNS);
    return 1;
}
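Note: a standalone sketch of the atomic part of this micro-benchmark, rewritten with C11 atomics and clock_gettime() in place of UtilCpuGetTicks(); it reports rough nanoseconds per operation rather than CPU ticks, so the numbers are indicative only.

#define _POSIX_C_SOURCE 200809L
#include <inttypes.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define RUNS 1024

static uint64_t now_ns(void)
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
}

int main(void)
{
    _Atomic unsigned int test = 0;
    unsigned int expected;

    uint64_t start = now_ns();
    for (unsigned int i = 0; i < RUNS; i++)
        atomic_fetch_add(&test, 1);
    printf("atomic_fetch_add %" PRIu64 " ns/op\n", (now_ns() - start) / RUNS);

    atomic_store(&test, 0);
    start = now_ns();
    for (unsigned int i = 0; i < RUNS; i++) {
        expected = i;                     /* always matches, so every CAS succeeds */
        atomic_compare_exchange_strong(&test, &expected, i + 1);
    }
    printf("compare_exchange %" PRIu64 " ns/op\n", (now_ns() - start) / RUNS);
    return 0;
}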