Example #1
/* append an item to the queue without locking (not thread-safe) */
uint32_t q_append_nolock(queue_t *q, blob_t *b) {
    if (q->head == NULL)
        q->head = b;
    else
        BLOB_NEXT_set(q->tail,b);

    q->tail = b;
    BLOB_NEXT_set(b,NULL);
    return ++(q->count);
}
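For context, every helper in these examples assumes the same layout: a queue is a singly linked list of blobs threaded through a next pointer, with the queue itself tracking head, tail, and a count. A minimal sketch of that layout with illustrative field names (the real definitions and the BLOB_* accessor macros live in the project's own headers):

/* Illustrative sketch only: the real queue_t/blob_t definitions and the
 * BLOB_* accessor macros come from the project's own headers. */
typedef struct blob blob_t;
struct blob {
    blob_t *next;      /* what BLOB_NEXT()/BLOB_NEXT_set() read and write */
    void   *ref;       /* shared, refcounted payload behind BLOB_REF_PTR() */
    /* received time, buffer size, etc. sit behind the other BLOB_* macros */
};

typedef struct queue {
    blob_t  *head;     /* first blob, NULL when the queue is empty */
    blob_t  *tail;     /* last blob, meaningful only while head != NULL */
    uint32_t count;    /* number of blobs currently linked in */
} queue_t;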
Example #2
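/* Move every blob on the private queue to the spill queue, hand the whole
 * batch to the disk writer, and return the number of blobs moved. */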
static stats_count_t spill_all(socket_worker_t * self, queue_t * private_queue, queue_t * spill_queue)
{
    blob_t *cur_blob = private_queue->head;

    if (!cur_blob)
        return 0;

    stats_count_t spilled = 0;

    spill_queue->head = cur_blob;
    spill_queue->count = 1;
    while (BLOB_NEXT(cur_blob)) {
        cur_blob = BLOB_NEXT(cur_blob);
        spill_queue->count++;
    }
    spill_queue->tail = cur_blob;
    private_queue->head = BLOB_NEXT(cur_blob);
    private_queue->count -= spill_queue->count;
    BLOB_NEXT_set(cur_blob, NULL);

    spilled += spill_queue->count;

    enqueue_queue_for_disk_writing(self, spill_queue);

    return spilled;
}
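A hedged usage sketch: a worker that wants to push everything it is currently holding to the disk writer, for example during shutdown or overload, could drain its private queue like this (the wrapper function and its name are hypothetical):

/* Sketch only: this wrapper and its name are hypothetical. */
static void flush_private_queue(socket_worker_t *self, queue_t *private_queue)
{
    queue_t spill_queue;            /* spill_all() fills in head, tail, count */
    stats_count_t n = spill_all(self, private_queue, &spill_queue);
    (void) n;                       /* e.g. feed into the worker's statistics */
}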
Example #3
/* blob= b_new(size) - create a new empty blob with space for size bytes */
INLINE blob_t * b_new(size_t size) {
    blob_t *b;

    b = malloc_or_die(sizeof(blob_t));
    BLOB_NEXT_set(b, NULL);
    BLOB_REF_PTR_set(b, malloc_or_die(sizeof(_refcnt_blob_t) + size));
    BLOB_REFCNT_set(b, 1); /* overwritten in enqueue_blob_for_transmision */
    BLOB_BUF_SIZE_set(b, size);
    (void)get_time(&BLOB_RECEIVED_TIME(b));

    return b;
}
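A hedged example of filling a freshly created blob and queueing it (BLOB_BUF() is an assumed accessor for the payload buffer; the project's real accessor may be named differently):

/* Sketch only: BLOB_BUF() is an assumed payload accessor; needs <string.h>. */
static void append_copy_nolock(queue_t *q, const void *bytes, size_t len)
{
    blob_t *b = b_new(len);         /* refcount starts at 1 */
    memcpy(BLOB_BUF(b), bytes, len);
    q_append_nolock(q, b);          /* caller must already hold q's lock */
}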
Example #4
/* blob= b_clone_no_refcnt_inc(a_blob) - lightweight clone of the original.
 * Note that the refcount of the underlying _refcnt_blob_t is NOT
 * incremented; that must be done externally. */
INLINE blob_t * b_clone_no_refcnt_inc(blob_t *b) {
    blob_t *clone;

    clone= malloc_or_die(sizeof(blob_t));
    BLOB_NEXT_set(clone, NULL);

    /* Note we assume that BLOB_REFCNT(b) is set up externally,
     * so we do NOT set the refcnt when we do this.
     *
     * This also avoids unnecessary lock churn.
     */
    BLOB_REF_PTR_set(clone, BLOB_REF_PTR(b));

    return clone;
}
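Because the clone shares the underlying _refcnt_blob_t, whoever fans a blob out has to account for all the references itself, which is what the b_new() comment above about the refcount being overwritten hints at. A hedged sketch of that pattern (the fan-out function and its arguments are hypothetical):

/* Sketch only: hand one clone of `b` to each of `nqueues` queues. The
 * refcount is set up front to the number of consumers, so no per-clone
 * atomic increment is needed. */
static void fan_out_nolock(blob_t *b, queue_t **queues, int nqueues)
{
    BLOB_REFCNT_set(b, nqueues);
    for (int i = 0; i < nqueues; i++)
        q_append_nolock(queues[i], b_clone_no_refcnt_inc(b));
    /* the wrapper `b` itself is still owned by the caller */
}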
Example #5
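/* splice the whole queue `tail` onto the end of `q` without locking;
 * empties `tail` and returns the new count of `q` */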
uint32_t q_append_q_nolock(queue_t *q, queue_t *tail) {

    if (q->head == NULL)
        q->head = tail->head;
    else
        BLOB_NEXT_set(q->tail,tail->head);

    q->tail = tail->tail;
    q->count += tail->count;

    tail->head= NULL;
    tail->tail= NULL;
    tail->count= 0;

    return q->count;
}
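The _nolock suffix on both append helpers means the caller is expected to serialize access itself. A hedged sketch of a locking wrapper (the pthread mutex is an assumption; the project has its own locking layer):

/* Sketch only: assumes the queue is protected by a caller-supplied
 * pthread mutex. Needs <pthread.h>. */
uint32_t q_append_locked(queue_t *q, blob_t *b, pthread_mutex_t *lock)
{
    pthread_mutex_lock(lock);
    uint32_t count = q_append_nolock(q, b);
    pthread_mutex_unlock(lock);
    return count;
}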
Example #6
/* Peels off all the blobs that have been in the input queue for longer
 * than the spill limit, moves them to the spill queue, and enqueues
 * them for eventual spilling or dropping.
 *
 * Note that the "spill queue" is used either for actual spilling (to the disk)
 * or dropping.
 *
 * Returns the number of (eventually) spilled (if spill enabled) or
 * dropped (if spill disabled) items. */
static stats_count_t spill_by_age(socket_worker_t * self, int spill_enabled, queue_t * private_queue,
                                  queue_t * spill_queue, uint64_t spill_microsec, struct timeval *now)
{
    blob_t *cur_blob = private_queue->head;

    if (!cur_blob)
        return 0;

    /* If spill is disabled, this really counts the dropped packets. */
    stats_count_t spilled = 0;

    if (elapsed_usec(&BLOB_RECEIVED_TIME(cur_blob), now) >= spill_microsec) {
        spill_queue->head = cur_blob;
        spill_queue->count = 1;
        while (BLOB_NEXT(cur_blob)
               && elapsed_usec(&BLOB_RECEIVED_TIME(BLOB_NEXT(cur_blob)), now) >= spill_microsec) {
            cur_blob = BLOB_NEXT(cur_blob);
            spill_queue->count++;
        }
        spill_queue->tail = cur_blob;
        private_queue->head = BLOB_NEXT(cur_blob);
        private_queue->count -= spill_queue->count;
        BLOB_NEXT_set(cur_blob, NULL);

        spilled += spill_queue->count;

        if (spill_enabled) {
            RELAY_ATOMIC_INCREMENT(self->counters.spilled_count, spill_queue->count);
        } else {
            RELAY_ATOMIC_INCREMENT(self->counters.dropped_count, spill_queue->count);
        }

        enqueue_queue_for_disk_writing(self, spill_queue);
    }

    return spilled;
}
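A hedged sketch of how the age-based spill might be driven from a worker loop (the wrapper name, the spill_millisec parameter, and the millisecond-to-microsecond conversion are assumptions about how spilling is configured):

/* Sketch only: the wrapper and its spill_millisec parameter are assumed. */
static void maybe_spill_by_age(socket_worker_t *self, int spill_enabled,
                               queue_t *private_queue, queue_t *spill_queue,
                               uint64_t spill_millisec)
{
    struct timeval now;
    (void) get_time(&now);          /* same clock used to stamp blobs in b_new() */
    (void) spill_by_age(self, spill_enabled, private_queue, spill_queue,
                        spill_millisec * 1000, &now);
}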