Example #1
nitro_frame_t *nitro_queue_pull(nitro_queue_t *q,
    int wait) {
    nitro_frame_t *ptr = NULL;
    pthread_mutex_lock(&q->lock);

    while (q->count == 0) {
        if (!wait) {
            pthread_mutex_unlock(&q->lock);
            nitro_set_error(NITRO_ERR_EAGAIN);
            return NULL;
        }
        pthread_cond_wait(&q->trigger, &q->lock);
    }

    ptr = *q->head;
    q->head++;
    q->count--;
    /* Wrap? */
    if (q->head == q->end) {
        q->head = q->q;
    }

    /* callbacks see the count as it was before this pop */
    nitro_queue_issue_callbacks(q, q->count + 1);

    /* the queue was full a moment ago; wake any writers blocked in push */
    if (q->capacity && q->count == (q->capacity - 1)) {
        pthread_cond_broadcast(&q->trigger);
    }
    pthread_mutex_unlock(&q->lock);
    return ptr;
}
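Note: with wait set to 0, nitro_queue_pull returns NULL and records NITRO_ERR_EAGAIN as soon as the queue is empty, so a caller can drain without blocking. A minimal sketch of that pattern (drain_queue is a hypothetical helper, not part of the library; nitro_frame_destroy is the release call used in Examples #5 and #8):

static void drain_queue(nitro_queue_t *q) {
    nitro_frame_t *f;

    /* non-blocking pull: NULL means the queue is empty right now */
    while ((f = nitro_queue_pull(q, 0)) != NULL) {
        nitro_frame_destroy(f);
    }
}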
Example #2
int nitro_queue_push(nitro_queue_t *q, 
    nitro_frame_t *f, int wait) {
    pthread_mutex_lock(&q->lock);
    while (q->capacity && q->count == q->capacity) {
        if (!wait) {
            pthread_mutex_unlock(&q->lock);
            return nitro_set_error(NITRO_ERR_EAGAIN);
        }
        pthread_cond_wait(&q->trigger, &q->lock);
    }

    /* the backing array is full (but still under capacity); grow it */
    if (q->count == q->size) {
        nitro_queue_grow(q, 0);
    }

    /* tail points at the next empty slot */
    *q->tail = f;
    q->tail++;
    q->count++;
    if (q->tail == q->end) {
        q->tail = q->q;
    }

    /* callbacks see the count as it was before this push */
    nitro_queue_issue_callbacks(q, q->count - 1);

    /* the queue just went from empty to non-empty; wake any readers blocked in pull */
    if (q->count == 1) {
        pthread_cond_broadcast(&q->trigger);
    }

    pthread_mutex_unlock(&q->lock);

    return 0;
}
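Note: when a bounded queue is full and wait is 0, nitro_queue_push returns through nitro_set_error(NITRO_ERR_EAGAIN) without taking the frame, so the caller still owns it. A minimal sketch of a non-blocking producer, assuming (as the Sinproc_check_opt(s) < 0 checks in Examples #4 and #7 imply) that nitro_set_error yields a negative return value (try_enqueue is hypothetical):

static int try_enqueue(nitro_queue_t *q, nitro_frame_t *f) {
    if (nitro_queue_push(q, f, 0) < 0) {
        /* queue is at capacity; the frame was not enqueued and is still ours */
        return -1;
    }

    return 0;
}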
Example #3
static int Sinproc_check_opt(nitro_inproc_socket_t *s) {
    if (s->opt->want_eventfd || s->opt->secure || s->opt->has_remote_ident) {
        return nitro_set_error(NITRO_ERR_BAD_INPROC_OPT);
    }

    return 0;
}
Example #4
int Sinproc_socket_bind(nitro_inproc_socket_t *s, char *location) {
    if (Sinproc_check_opt(s) < 0) {
        return -1;
    }

    Sinproc_create_queues(s);

    pthread_mutex_lock(&the_runtime->l_inproc);
    nitro_inproc_socket_t *match;

    /* look up the location before touching the socket's state, so a failed
       bind leaves the socket unchanged */
    HASH_FIND(hh, the_runtime->inprocs, location, strlen(location), match);

    if (match) {
        pthread_mutex_unlock(&the_runtime->l_inproc);
        return nitro_set_error(NITRO_ERR_INPROC_ALREADY_BOUND);
    }

    s->bound = 1;
    s->bind_counter = nitro_counted_buffer_new(s, free_inproc, NULL);
    pthread_rwlock_init(&s->link_lock, NULL);
    s->current = NULL;

    HASH_ADD_KEYPTR(hh, the_runtime->inprocs,
                    s->given_location,
                    strlen(s->given_location),
                    s);
    HASH_FIND(hh, the_runtime->inprocs, location, strlen(location), match);
    assert(match == s);

    pthread_mutex_unlock(&the_runtime->l_inproc);
    return 0;
}
Example #5
int nitro_queue_fd_write_encrypted(nitro_queue_t *q, int fd, 
    nitro_frame_t *partial,
    nitro_frame_t **remain, 
    nitro_queue_encrypt_frame_cb encrypt, void *enc_baton) {
    *remain = NULL;
    int res = 0;

    nitro_frame_t *current = partial;

    if (!current) {
        nitro_frame_t *clear = nitro_queue_pull(q, 0);
        if (clear) {
            current = encrypt(clear, enc_baton);
            if (!current) {
                res = -1;
            }
        }
    }

    while (current) {
        int num;
        struct iovec *f_vs = nitro_frame_iovs(current, &num);
        int bwrite = writev(fd, f_vs, num);
        if (bwrite == -1) {
            if (!OKAY_ERRNO) {
                res = -1;
                nitro_set_error(NITRO_ERR_ERRNO);
                nitro_frame_destroy(current);
            } else {
                *remain = current;
            }
            break;
        }

        // advance the frame's iovecs past the bytes writev() consumed
        res += bwrite;
        int i;
        int done = 0;
        for (i = 0; bwrite > 0 && !done; ++i) {
            // modify in place; we own this frame privately
            bwrite -= nitro_frame_iovs_advance(
                current, current->iovs, i, bwrite, &done);
        }
        if (done) {
            nitro_frame_destroy(current);
            nitro_frame_t *clear = nitro_queue_pull(q, 0);
            if (clear) {
                current = encrypt(clear, enc_baton);
                if (!current) {
                    res = -1;
                }
            } else {
                current = NULL;
            }
        }
    }

    return res;
}
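Note: the encrypt callback takes ownership of the pulled clear frame and returns the frame to be written, with NULL treated as an encryption failure. A pass-through sketch of that shape, based only on how the callback is invoked above (encrypt_passthrough is hypothetical):

static nitro_frame_t *encrypt_passthrough(nitro_frame_t *clear, void *enc_baton) {
    (void)enc_baton;   /* unused in this sketch */

    /* a real callback would build and return an encrypted frame here */
    return clear;
}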
Example #6
static int Sinproc_socket_send_general(nitro_inproc_socket_t *s, nitro_frame_t *fr, int flags) {
    int ret = -1;

    if (s->bound) {
        pthread_rwlock_rdlock(&s->link_lock);

        volatile nitro_inproc_socket_t *try = s->current;

        if (try == NULL) {
            nitro_set_error(NITRO_ERR_INPROC_NO_CONNECTIONS);
        } else {
Example #7
int Sinproc_socket_connect(nitro_inproc_socket_t *s, char *location) {
    if (Sinproc_check_opt(s) < 0) {
        return -1;
    }

    Sinproc_create_queues(s);

    pthread_mutex_lock(&the_runtime->l_inproc);
    nitro_inproc_socket_t *match;

    HASH_FIND(hh, the_runtime->inprocs, location, strlen(location), match);

    if (!match) {
        pthread_mutex_unlock(&the_runtime->l_inproc);
        return nitro_set_error(NITRO_ERR_INPROC_NOT_BOUND);
    }

    pthread_rwlock_init(&s->link_lock, NULL);
    Sinproc_socket_bound_add_conn(match, s);
    pthread_mutex_unlock(&the_runtime->l_inproc);

    return 0;
}
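Note: together with Example #4, the ordering is that a location must be bound before a connect to it can succeed; otherwise the connector gets NITRO_ERR_INPROC_NOT_BOUND. A rough wiring sketch, assuming both sockets were created and configured elsewhere and that these entry points are visible to the caller (wire_inproc_pair is hypothetical):

static int wire_inproc_pair(nitro_inproc_socket_t *server,
                            nitro_inproc_socket_t *client,
                            char *location) {
    if (Sinproc_socket_bind(server, location) < 0) {
        return -1;   /* e.g. NITRO_ERR_INPROC_ALREADY_BOUND */
    }

    if (Sinproc_socket_connect(client, location) < 0) {
        return -1;   /* NITRO_ERR_INPROC_NOT_BOUND if the bind is missing */
    }

    return 0;
}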
Example #8
/* "internal" functions, mass population and eviction */
int nitro_queue_fd_write(nitro_queue_t *q, int fd, 
    nitro_frame_t *partial,
    nitro_frame_t **remain) {
    /* Does gather IO to avoid copying buffers around */
    pthread_mutex_lock(&q->lock);
    int actual_iovs = 0;
    int accum_bytes = 0;
    int ret = 0;
    nitro_frame_t **iter = q->head;
    struct iovec vectors[NITRO_MAX_IOV];

    if (partial) {
        int num;
        struct iovec *f_vs = nitro_frame_iovs(partial, &num);
        memcpy(&(vectors[0]), f_vs, num * sizeof(struct iovec));
        accum_bytes += IOV_TOTAL(f_vs);
        actual_iovs += num;
    }

    int old_count = q->count;
    int temp_count = old_count;
    int byte_target = q->send_target + QUEUE_FD_BUFFER_PADDING;

    while (accum_bytes < byte_target && actual_iovs < (NITRO_MAX_IOV - 5) && temp_count) {
        int num;
        struct iovec *f_vs = nitro_frame_iovs(*iter, &num);
        memcpy(&(vectors[actual_iovs]), f_vs, num * sizeof(struct iovec));
        accum_bytes += IOV_TOTAL(f_vs);
        actual_iovs += num;
        ++iter;
        --temp_count;
        if (iter == q->end)
            iter = q->q;
    }

    if (!accum_bytes) {
        goto out;
    }

    int actual_bytes = writev(fd, (const struct iovec *)vectors, actual_iovs);

    /* On error, we don't move the queue pointers at all. 
       We'll let the caller sort out the errno. */
    if (actual_bytes == -1) {
        nitro_set_error(NITRO_ERR_ERRNO);
        ret = -1;
        goto out;
    }

    ret = actual_bytes;

    /* Sweep up *wholly* sent things by destroying the frames and
       advancing the queue; if a frame is left partially sent
       at the end, update its iovectors to represent the fractional
       state and return it as a "remainder" (but still pop it off
       this queue) */
    int i = 0, r = 0, done = 0;
    *remain = NULL;
    if (partial) {
        i = 0;
        do {
            r = nitro_frame_iovs_advance(partial, partial->iovs, i++, actual_bytes, &done);
            actual_bytes -= r;
        } while (actual_bytes && !done);

        if (done) {
            nitro_frame_destroy(partial);
        } else {
            assert(!actual_bytes);
            *remain = partial;
        }
    }
    while (actual_bytes) {
        nitro_frame_t *fr = *q->head;
        struct iovec scratch[4];
        memcpy(scratch, fr->iovs, sizeof(scratch));
        i = 0;
        do {
            r = nitro_frame_iovs_advance(fr, scratch, i++, actual_bytes, &done);
            actual_bytes -= r;
        } while (actual_bytes && !done);

        if (done) {
            nitro_frame_destroy(fr);
        } else {
            assert(!actual_bytes);
            *remain = nitro_frame_copy_partial(fr, scratch);
        }

        q->head++;
        if (q->head == q->end)
            q->head = q->q;
        q->count--;
    }

    nitro_queue_issue_callbacks(q, old_count);
    if (q->count && ret > 0) {
        q->send_target = ret > QUEUE_FD_BUFFER_GUESS ? QUEUE_FD_BUFFER_GUESS : ret;
    }

out:
    pthread_mutex_unlock(&q->lock);
    return ret;
}
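Note: a caller typically drives this from a write-ready event, carrying the frame returned through remain back in as partial on the next call; on a writev error nothing is consumed from the queue. A minimal sketch of that loop step (flush_on_writable is hypothetical):

static int flush_on_writable(nitro_queue_t *q, int fd, nitro_frame_t **partial) {
    nitro_frame_t *remain = NULL;
    int sent = nitro_queue_fd_write(q, fd, *partial, &remain);

    if (sent < 0) {
        /* nothing was consumed; keep *partial as-is and inspect errno */
        return -1;
    }

    *partial = remain;   /* NULL once everything queued has been written */
    return sent;
}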