Example #1
int ringbuffer_memcpy(ringbuffer_t *dst, ringbuffer_t *src,
                      lcb_size_t nbytes)
{
    ringbuffer_t copy = *src;
    struct lcb_iovec_st iov[2];
    int ii = 0;
    lcb_size_t towrite = nbytes;
    lcb_size_t toread, nb;

    if (nbytes > ringbuffer_get_nbytes(src)) {
        /* EINVAL */
        return -1;
    }

    if (!ringbuffer_ensure_capacity(dst, nbytes)) {
        /* Failed to allocate space */
        return -1;
    }

    ringbuffer_get_iov(dst, RINGBUFFER_WRITE, iov);
    toread = minimum(iov[ii].iov_len, nbytes);
    do {
        assert(ii < 2);
        nb = ringbuffer_read(&copy, iov[ii].iov_base, toread);
        toread -= nb;
        towrite -= nb;
        ++ii;
    } while (towrite > 0);
    ringbuffer_produced(dst, nbytes);
    return 0;
}
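
A minimal usage sketch (not from the library sources) of how ringbuffer_memcpy() might be driven. It assumes the internal "ringbuffer.h" header and the ringbuffer_initialize()/ringbuffer_write()/ringbuffer_destruct() signatures visible in the other examples on this page; copy_pending() itself is hypothetical.

#include "ringbuffer.h" /* assumed: libcouchbase's internal ringbuffer header */

/* Sketch only: copy every pending byte from one ring buffer to another
 * without consuming it from the source. */
static int copy_pending(void)
{
    ringbuffer_t src, dst;

    if (!ringbuffer_initialize(&src, 128)) {
        return -1;
    }
    if (!ringbuffer_initialize(&dst, 128)) {
        ringbuffer_destruct(&src);
        return -1;
    }

    ringbuffer_write(&src, "hello", 5);

    /* src still reports 5 pending bytes after the copy */
    if (ringbuffer_memcpy(&dst, &src, ringbuffer_get_nbytes(&src)) != 0) {
        /* asked for more bytes than are pending, or allocation failed */
    }

    ringbuffer_destruct(&src);
    ringbuffer_destruct(&dst);
    return 0;
}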
Example #2
void libcouchbase_server_buffer_start_packet(libcouchbase_server_t *c,
                                             const void *command_cookie,
                                             ringbuffer_t *buff,
                                             ringbuffer_t *buff_cookie,
                                             const void *data,
                                             libcouchbase_size_t size)
{
    struct libcouchbase_command_data_st ct;
    memset(&ct, 0, sizeof(struct libcouchbase_command_data_st));
    /* @todo we don't want to call gethrtime for each operation, */
    /* so I need to pass it down the chain so that a large */
    /* multiget can reuse the same timer... */
    ct.start = gethrtime();
    ct.cookie = command_cookie;

    if (ringbuffer_get_nbytes(buff_cookie) == 0) {
        c->next_timeout = ct.start;
        libcouchbase_update_timer(c->instance);
    }

    if (!ringbuffer_ensure_capacity(buff, size) ||
            !ringbuffer_ensure_capacity(&c->cmd_log, size) ||
            !ringbuffer_ensure_capacity(buff_cookie, sizeof(ct)) ||
            ringbuffer_write(buff, data, size) != size ||
            ringbuffer_write(&c->cmd_log, data, size) != size ||
            ringbuffer_write(buff_cookie, &ct, sizeof(ct)) != sizeof(ct)) {
        abort();
    }
}
Example #3
int ringbuffer_ensure_alignment(ringbuffer_t *c)
{
#if defined(__hpux__) || defined(__hpux) || defined(__sparc__) || defined(__sparc)
    intptr_t addr = (intptr_t)c->read_head;

    if (addr % 8 != 0) {
        ringbuffer_t copy;
        if (ringbuffer_initialize(&copy, c->size) == 0 ||
                ringbuffer_memcpy(&copy, c, ringbuffer_get_nbytes(c)) == -1) {
            return -1;
        }
        ringbuffer_destruct(c);
        *c = copy;
    }
#else
    (void)c;
#endif
    return 0;
}
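
On the strict-alignment platforms named in the #if above (HP-UX, SPARC) the function reallocates the buffer so the read head lands on an 8-byte boundary; everywhere else it is a no-op. A hedged sketch of a call site (prepare_aligned_read() is hypothetical, not a library function):

/* Sketch only: call before casting the read head to a packed struct on
 * platforms that fault on misaligned loads. */
static int prepare_aligned_read(ringbuffer_t *buf)
{
    if (ringbuffer_ensure_alignment(buf) != 0) {
        return -1; /* reallocation failed; *buf is left as it was */
    }
    return 0;
}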
Example #4
void libcouchbase_server_buffer_retry_packet(libcouchbase_server_t *c,
                                             struct libcouchbase_command_data_st *ct,
                                             ringbuffer_t *buff,
                                             ringbuffer_t *buff_cookie,
                                             const void *data,
                                             libcouchbase_size_t size)
{
    libcouchbase_size_t ct_size = sizeof(struct libcouchbase_command_data_st);
    if (ringbuffer_get_nbytes(buff_cookie) == 0) {
        c->next_timeout = ct->start;
        libcouchbase_update_timer(c->instance);
    }

    if (!ringbuffer_ensure_capacity(buff, size) ||
            !ringbuffer_ensure_capacity(&c->cmd_log, size) ||
            !ringbuffer_ensure_capacity(buff_cookie, ct_size) ||
            ringbuffer_write(buff, data, size) != size ||
            ringbuffer_write(&c->cmd_log, data, size) != size ||
            ringbuffer_write(buff_cookie, ct, ct_size) != ct_size) {
        abort();
    }
}
Example #5
/**
 * Extended version of observe command. This allows us to service
 * various forms of higher level operations which use observe in one way
 * or another
 */
lcb_error_t lcb_observe_ex(lcb_t instance,
                           const void *command_cookie,
                           lcb_size_t num,
                           const void *const *items,
                           lcb_observe_type_t type)
{
    lcb_size_t ii;
    lcb_size_t maxix;
    lcb_uint32_t opaque;
    struct lcb_command_data_st ct;
    struct observe_requests_st reqs;

    memset(&reqs, 0, sizeof(reqs));

    if (instance->type != LCB_TYPE_BUCKET) {
        return lcb_synchandler_return(instance, LCB_EBADHANDLE);
    }

    if (instance->config.handle == NULL) {
        return lcb_synchandler_return(instance, LCB_CLIENT_ETMPFAIL);
    }

    if (instance->config.dist_type != VBUCKET_DISTRIBUTION_VBUCKET) {
        return lcb_synchandler_return(instance, LCB_NOT_SUPPORTED);
    }

    opaque = ++instance->seqno;
    ct.cookie = command_cookie;
    maxix = instance->config.nreplicas;

    if (type == LCB_OBSERVE_TYPE_CHECK) {
        maxix = 0;

    } else {
        if (type == LCB_OBSERVE_TYPE_DURABILITY) {
            ct.flags = LCB_CMD_F_OBS_DURABILITY | LCB_CMD_F_OBS_BCAST;

        } else {
            ct.flags = LCB_CMD_F_OBS_BCAST;
        }
    }

    reqs.nrequests = instance->nservers;
    reqs.requests = calloc(reqs.nrequests, sizeof(*reqs.requests));

    for (ii = 0; ii < num; ii++) {
        const void *key, *hashkey;
        lcb_size_t nkey, nhashkey;
        int vbid, jj;

        if (type == LCB_OBSERVE_TYPE_DURABILITY) {
            const lcb_durability_entry_t *ent = items[ii];
            key = ent->request.v.v0.key;
            nkey = ent->request.v.v0.nkey;
            hashkey = ent->request.v.v0.hashkey;
            nhashkey = ent->request.v.v0.nhashkey;
        } else {
            const lcb_observe_cmd_t *ocmd = items[ii];
            key = ocmd->v.v0.key;
            nkey = ocmd->v.v0.nkey;
            hashkey = ocmd->v.v0.hashkey;
            nhashkey = ocmd->v.v0.nhashkey;
        }
        if (!nhashkey) {
            hashkey = key;
            nhashkey = nkey;
        }

        vbid = vbucket_get_vbucket_by_key(instance->config.handle,
                                          hashkey, nhashkey);

        for (jj = -1; jj < (int)maxix; jj++) {
            struct observe_st *rr;

            int idx = vbucket_get_replica(instance->config.handle,
                                          vbid, jj);

            if (idx < 0 || idx > (int)instance->nservers) {
                if (jj == -1) {
                    destroy_requests(&reqs);
                    return lcb_synchandler_return(instance, LCB_NO_MATCHING_SERVER);
                }
                continue;
            }
            lcb_assert(idx < (int)reqs.nrequests);
            rr = reqs.requests + idx;

            if (!rr->allocated) {
                if (!init_request(rr)) {
                    destroy_requests(&reqs);
                    return lcb_synchandler_return(instance, LCB_CLIENT_ENOMEM);
                }
            }

            {
                lcb_uint16_t vb = htons((lcb_uint16_t)vbid);
                lcb_uint16_t len = htons((lcb_uint16_t)nkey);

                rr->packet.message.header.request.magic = PROTOCOL_BINARY_REQ;
                rr->packet.message.header.request.opcode = CMD_OBSERVE;
                rr->packet.message.header.request.datatype = PROTOCOL_BINARY_RAW_BYTES;
                rr->packet.message.header.request.opaque = opaque;

                ringbuffer_ensure_capacity(&rr->body,
                                           sizeof(vb) + sizeof(len) + nkey);
                rr->nbody += ringbuffer_write(&rr->body, &vb, sizeof(vb));
                rr->nbody += ringbuffer_write(&rr->body, &len, sizeof(len));
                rr->nbody += ringbuffer_write(&rr->body, key, nkey);
            }
        }
    }

    for (ii = 0; ii < reqs.nrequests; ii++) {
        struct observe_st *rr = reqs.requests + ii;
        struct lcb_server_st *server = instance->servers + ii;
        char *tmp;

        if (!rr->allocated) {
            continue;
        }

        rr->packet.message.header.request.bodylen = ntohl((lcb_uint32_t)rr->nbody);
        ct.start = gethrtime();

        lcb_server_start_packet_ct(server, &ct, rr->packet.bytes,
                                   sizeof(rr->packet.bytes));

        if (ringbuffer_is_continous(&rr->body, RINGBUFFER_READ, rr->nbody)) {
            tmp = ringbuffer_get_read_head(&rr->body);
            TRACE_OBSERVE_BEGIN(&rr->packet, server->authority, tmp, rr->nbody);
            lcb_server_write_packet(server, tmp, rr->nbody);
        } else {
            tmp = malloc(ringbuffer_get_nbytes(&rr->body));
            if (!tmp) {
                /* FIXME: by this time some of the requests might already be scheduled */
                destroy_requests(&reqs);
                return lcb_synchandler_return(instance, LCB_CLIENT_ENOMEM);
            } else {
                ringbuffer_read(&rr->body, tmp, rr->nbody);
                TRACE_OBSERVE_BEGIN(&rr->packet, server->authority, tmp, rr->nbody);
                lcb_server_write_packet(server, tmp, rr->nbody);
            }
        }
        lcb_server_end_packet(server);
        lcb_server_send_packets(server);
    }

    destroy_requests(&reqs);
    return lcb_synchandler_return(instance, LCB_SUCCESS);
}
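
Each key contributes a fixed-layout entry to the observe request body: a 16-bit vbucket id and a 16-bit key length, both in network byte order, followed by the raw key bytes (the block that writes vb, len and key into rr->body above). A minimal sketch of the same packing into a flat buffer; pack_observe_entry() is illustrative, not a library function.

#include <arpa/inet.h>  /* htons */
#include <stddef.h>
#include <stdint.h>
#include <string.h>     /* memcpy */

/* Sketch only: mirrors the vb/len/key layout written into rr->body above.
 * The caller must provide at least 4 + nkey bytes in `out`; returns the
 * number of bytes written. */
static size_t pack_observe_entry(char *out, uint16_t vbid,
                                 const void *key, uint16_t nkey)
{
    uint16_t vb = htons(vbid);
    uint16_t len = htons(nkey);

    memcpy(out, &vb, sizeof(vb));
    memcpy(out + sizeof(vb), &len, sizeof(len));
    memcpy(out + sizeof(vb) + sizeof(len), key, nkey);
    return sizeof(vb) + sizeof(len) + (size_t)nkey;
}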
Example #6
void ringbuffer_reset(ringbuffer_t *buffer)
{
    ringbuffer_consumed(buffer,
                        ringbuffer_get_nbytes(buffer));
}
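
A reset simply marks every pending byte as consumed; the backing allocation is kept. A small sketch (reset_example() is hypothetical), reusing the signatures seen in the examples above:

#include <assert.h>

/* Sketch only: after the reset the buffer reports zero pending bytes,
 * but its storage is still allocated and reusable. */
static void reset_example(void)
{
    ringbuffer_t buf;

    if (!ringbuffer_initialize(&buf, 64)) {
        return; /* allocation failed */
    }
    ringbuffer_write(&buf, "abc", 3);

    ringbuffer_reset(&buf);
    assert(ringbuffer_get_nbytes(&buf) == 0); /* everything consumed */

    ringbuffer_destruct(&buf);
}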
Example #7
static void purge_single_server(lcb_server_t *server, lcb_error_t error,
                                hrtime_t min_nonstale,
                                hrtime_t *tmo_next)
{
    protocol_binary_request_header req;
    struct lcb_command_data_st ct;
    lcb_size_t nr;
    char *packet;
    lcb_size_t packetsize;
    char *keyptr;
    ringbuffer_t rest;
    ringbuffer_t *stream = &server->cmd_log;
    ringbuffer_t *cookies;
    ringbuffer_t *mirror = NULL; /* mirror buffer should be purged with main stream */
    lcb_connection_t conn = &server->connection;
    lcb_size_t send_size = 0;
    lcb_size_t stream_size = ringbuffer_get_nbytes(stream);
    hrtime_t now = gethrtime();

    if (server->connection_ready) {
        cookies = &server->output_cookies;
    } else {
        cookies = &server->pending_cookies;
        mirror = &server->pending;
    }

    if (conn->output) {
        /* This will usually be false for v1 */
        send_size = ringbuffer_get_nbytes(conn->output);
    }

    lcb_assert(ringbuffer_initialize(&rest, 1024));


    do {
        int allocated = 0;
        lcb_uint32_t headersize;
        lcb_uint16_t nkey;

        nr = ringbuffer_peek(cookies, &ct, sizeof(ct));
        if (nr != sizeof(ct)) {
            break;
        }
        nr = ringbuffer_peek(stream, req.bytes, sizeof(req));
        if (nr != sizeof(req)) {
            break;
        }
        packetsize = (lcb_uint32_t)sizeof(req) + ntohl(req.request.bodylen);
        if (stream->nbytes < packetsize) {
            break;
        }
        if (min_nonstale && ct.start >= min_nonstale) {
            lcb_log(LOGARGS(server, INFO),
                    "Still have %d ms remaining for command",
                    (ct.start - min_nonstale) / 1000000);

            if (tmo_next) {
                *tmo_next = (ct.start - min_nonstale) + 1;
            }
            break;
        }

        lcb_log(LOGARGS(server, INFO),
                "Command with cookie=%p timed out from server %s:%s",
                ct.cookie,
                server->curhost.host,
                server->curhost.port);

        ringbuffer_consumed(cookies, sizeof(ct));

        lcb_assert(nr == sizeof(req));
        packet = stream->read_head;

        if (server->instance->histogram) {
            lcb_record_metrics(server->instance, now - ct.start,
                               req.request.opcode);
        }

        if (server->connection_ready &&
                stream_size > send_size && (stream_size - packetsize) < send_size) {
            /* Copy the rest of the current packet into the
               temporary stream */

            /* I do believe I have some IOV functions to do that? */
            lcb_size_t nbytes = packetsize - (stream_size - send_size);
            lcb_assert(ringbuffer_memcpy(&rest,
                                         conn->output,
                                         nbytes) == 0);
            ringbuffer_consumed(conn->output, nbytes);
            send_size -= nbytes;
        }
        stream_size -= packetsize;
        headersize = (lcb_uint32_t)sizeof(req) + req.request.extlen + htons(req.request.keylen);
        if (!ringbuffer_is_continous(stream, RINGBUFFER_READ, headersize)) {
            packet = malloc(headersize);
            if (packet == NULL) {
                lcb_error_handler(server->instance, LCB_CLIENT_ENOMEM, NULL);
                abort();
            }

            nr = ringbuffer_peek(stream, packet, headersize);
            if (nr != headersize) {
                lcb_error_handler(server->instance, LCB_EINTERNAL, NULL);
                free(packet);
                abort();
            }
            allocated = 1;
        }

        keyptr = packet + sizeof(req) + req.request.extlen;
        nkey = ntohs(req.request.keylen);

        failout_single_request(server, &req, &ct, error, keyptr, nkey, packet);

        if (allocated) {
            free(packet);
        }
        ringbuffer_consumed(stream, packetsize);
        if (mirror) {
            ringbuffer_consumed(mirror, packetsize);
        }
    } while (1); /* CONSTCOND */

    if (server->connection_ready && conn->output) {
        /* Preserve the rest of the stream */
        lcb_size_t nbytes = ringbuffer_get_nbytes(stream);
        send_size = ringbuffer_get_nbytes(conn->output);

        if (send_size >= nbytes) {
            ringbuffer_consumed(conn->output, send_size - nbytes);
            lcb_assert(ringbuffer_memcpy(&rest, conn->output, nbytes) == 0);
        }
        ringbuffer_reset(conn->output);
        ringbuffer_append(&rest, conn->output);
    }

    ringbuffer_destruct(&rest);
    lcb_maybe_breakout(server->instance);
}
Example #8
LIBCOUCHBASE_API
lcb_error_t lcb_observe(lcb_t instance,
                        const void *command_cookie,
                        lcb_size_t num,
                        const lcb_observe_cmd_t *const *items)
{
    int vbid, idx, jj;
    lcb_size_t ii;
    lcb_uint32_t opaque;
    struct observe_st *requests;

    /* we need a vbucket config before we can start getting data... */
    if (instance->vbucket_config == NULL) {
        switch (instance->type) {
        case LCB_TYPE_CLUSTER:
            return lcb_synchandler_return(instance, LCB_EBADHANDLE);
        case LCB_TYPE_BUCKET:
        default:
            return lcb_synchandler_return(instance, LCB_CLIENT_ETMPFAIL);
        }
    }

    if (instance->dist_type != VBUCKET_DISTRIBUTION_VBUCKET) {
        return lcb_synchandler_return(instance, LCB_NOT_SUPPORTED);
    }

    /* the list of pointers to body buffers for each server */
    requests = calloc(instance->nservers, sizeof(struct observe_st));
    opaque = ++instance->seqno;
    for (ii = 0; ii < num; ++ii) {
        const void *key = items[ii]->v.v0.key;
        lcb_size_t nkey = items[ii]->v.v0.nkey;
        const void *hashkey = items[ii]->v.v0.hashkey;
        lcb_size_t nhashkey = items[ii]->v.v0.nhashkey;

        if (nhashkey == 0) {
            hashkey = key;
            nhashkey = nkey;
        }

        vbid = vbucket_get_vbucket_by_key(instance->vbucket_config, hashkey,
                                          nhashkey);
        for (jj = -1; jj < instance->nreplicas; ++jj) {
            struct observe_st *rr;
            /* it will increment jj to get server index, so (-1 + 1) = 0 (master) */
            idx = vbucket_get_replica(instance->vbucket_config, vbid, jj);
            if ((idx < 0 || idx > (int)instance->nservers)) {
                /* the config says that there is no server yet at that position (-1) */
                if (jj == -1) {
                    /* master node must be available */
                    destroy_requests(requests, instance->nservers);
                    return lcb_synchandler_return(instance, LCB_NETWORK_ERROR);
                } else {
                    continue;
                }
            }
            rr = requests + idx;
            if (!rr->allocated) {
                if (!init_request(rr)) {
                    destroy_requests(requests, instance->nservers);
                    return lcb_synchandler_return(instance, LCB_CLIENT_ENOMEM);
                }
                rr->req.message.header.request.magic = PROTOCOL_BINARY_REQ;
                rr->req.message.header.request.opcode = CMD_OBSERVE;
                rr->req.message.header.request.datatype = PROTOCOL_BINARY_RAW_BYTES;
                rr->req.message.header.request.opaque = opaque;
            }

            {
                lcb_uint16_t vb = htons((lcb_uint16_t)vbid);
                lcb_uint16_t len = htons((lcb_uint16_t)nkey);
                ringbuffer_ensure_capacity(&rr->body, sizeof(vb) + sizeof(len) + nkey);
                rr->nbody += ringbuffer_write(&rr->body, &vb, sizeof(vb));
                rr->nbody += ringbuffer_write(&rr->body, &len, sizeof(len));
                rr->nbody += ringbuffer_write(&rr->body, key, nkey);
            }
        }
    }

    for (ii = 0; ii < instance->nservers; ++ii) {
        struct observe_st *rr = requests + ii;
        lcb_server_t *server = instance->servers + ii;

        if (rr->allocated) {
            char *tmp;
            rr->req.message.header.request.bodylen = ntohl((lcb_uint32_t)rr->nbody);
            lcb_server_start_packet(server, command_cookie, rr->req.bytes, sizeof(rr->req.bytes));
            if (ringbuffer_is_continous(&rr->body, RINGBUFFER_READ, rr->nbody)) {
                tmp = ringbuffer_get_read_head(&rr->body);
                TRACE_OBSERVE_BEGIN(&rr->req, server->authority, tmp, rr->nbody);
                lcb_server_write_packet(server, tmp, rr->nbody);
            } else {
                tmp = malloc(ringbuffer_get_nbytes(&rr->body));
                if (!tmp) {
                    /* FIXME: by this time some of the requests might already be scheduled */
                    destroy_requests(requests, instance->nservers);
                    return lcb_synchandler_return(instance, LCB_CLIENT_ENOMEM);
                } else {
                    ringbuffer_read(&rr->body, tmp, rr->nbody);
                    TRACE_OBSERVE_BEGIN(&rr->req, server->authority, tmp, rr->nbody);
                    lcb_server_write_packet(server, tmp, rr->nbody);
                }
            }
            lcb_server_end_packet(server);
            lcb_server_send_packets(server);
        }
    }

    destroy_requests(requests, instance->nservers);
    return lcb_synchandler_return(instance, LCB_SUCCESS);
}
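
A hedged usage sketch of the public entry point: build one lcb_observe_cmd_t per key and pass an array of pointers, exactly as the loop above consumes them through items[ii]->v.v0.key / nkey. observe_one_key() is hypothetical, and the <libcouchbase/couchbase.h> include and an already-connected instance with an observe callback installed are assumed; results are delivered through that callback, which is not shown here.

#include <libcouchbase/couchbase.h> /* assumed public header */
#include <string.h>

/* Sketch only: schedule an observe for a single key on a connected handle. */
static lcb_error_t observe_one_key(lcb_t instance)
{
    lcb_observe_cmd_t cmd;
    const lcb_observe_cmd_t *cmds[1];

    memset(&cmd, 0, sizeof(cmd));
    cmd.v.v0.key = "some_key";
    cmd.v.v0.nkey = 8; /* strlen("some_key") */
    cmds[0] = &cmd;

    return lcb_observe(instance, NULL, 1, cmds);
}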