static void test_set2(void)
{
    lcb_error_t err;
    struct rvbuf rv;
    lcb_size_t ii;
    lcb_store_cmd_t cmd;
    const lcb_store_cmd_t *cmds[] = { &cmd };

    memset(&cmd, 0, sizeof(cmd));
    cmd.v.v0.key = "foo";
    cmd.v.v0.nkey = strlen(cmd.v.v0.key);
    cmd.v.v0.bytes = "bar";
    cmd.v.v0.nbytes = strlen(cmd.v.v0.bytes);
    cmd.v.v0.operation = LCB_SET;
    (void)lcb_set_store_callback(session, mstore_callback);

    rv.errors = 0;
    rv.counter = 0;
    for (ii = 0; ii < 10; ++ii, ++rv.counter) {
        err = lcb_store(session, &rv, 1, cmds);
        lcb_assert(err == LCB_SUCCESS);
    }
    io->v.v0.run_event_loop(io);
    lcb_assert(rv.errors == 0);
}
static void version_callback(lcb_t instance, const void *cookie,
                             lcb_error_t error,
                             const lcb_server_version_resp_t *resp)
{
    const char *server_endpoint = resp->v.v0.server_endpoint;
    const char *vstring = resp->v.v0.vstring;
    lcb_size_t nvstring = resp->v.v0.nvstring;
    struct rvbuf *rv = (struct rvbuf *)cookie;
    char *str;

    rv->error = error;
    lcb_assert(error == LCB_SUCCESS);

    if (server_endpoint == NULL) {
        lcb_assert(rv->counter == 0);
        io->v.v0.stop_event_loop(io);
        return;
    }

    rv->counter--;

    /* Copy the version string to an allocated buffer to ensure that reading
     * from vstring will not segfault */
    str = malloc(nvstring);
    memcpy(str, vstring, nvstring);
    free(str);

    (void)instance;
}
int genhash_store(genhash_t *h, const void *k, lcb_size_t klen,
                  const void *v, lcb_size_t vlen)
{
    lcb_size_t n = 0;
    struct genhash_entry_t *p;

    lcb_assert(h != NULL);

    n = h->ops.hashfunc(k, klen) % h->size;
    lcb_assert(n < h->size);

    p = calloc(1, sizeof(struct genhash_entry_t));
    if (!p) {
        return -1;
    }

    p->key = dup_key(h, k, klen);
    p->nkey = klen;
    p->value = dup_value(h, v, vlen);
    p->nvalue = vlen;

    p->next = h->buckets[n];
    h->buckets[n] = p;
    return 0;
}
int genhash_delete(genhash_t *h, const void *k, lcb_size_t klen)
{
    struct genhash_entry_t *deleteme = NULL;
    lcb_size_t n = 0;
    int rv = 0;

    lcb_assert(h != NULL);

    n = h->ops.hashfunc(k, klen) % h->size;
    lcb_assert(n < h->size);

    if (h->buckets[n] != NULL) {
        /* Special case the first one */
        if (h->ops.hasheq(h->buckets[n]->key, h->buckets[n]->nkey, k, klen)) {
            deleteme = h->buckets[n];
            h->buckets[n] = deleteme->next;
        } else {
            struct genhash_entry_t *p = NULL;
            for (p = h->buckets[n]; deleteme == NULL && p->next != NULL; p = p->next) {
                if (h->ops.hasheq(p->next->key, p->next->nkey, k, klen)) {
                    deleteme = p->next;
                    p->next = deleteme->next;
                }
            }
        }
    }
    if (deleteme != NULL) {
        free_item(h, deleteme);
        rv++;
    }

    return rv;
}
static void refresh_nodes(clconfig_provider *pb,
                          const hostlist_t newnodes,
                          VBUCKET_CONFIG_HANDLE newconfig)
{
    unsigned int ii;
    http_provider *http = (http_provider *)pb;

    hostlist_clear(http->nodes);
    if (!newconfig) {
        for (ii = 0; ii < newnodes->nentries; ii++) {
            hostlist_add_host(http->nodes, newnodes->entries + ii);
        }
        goto GT_DONE;
    }

    for (ii = 0; (int)ii < vbucket_config_get_num_servers(newconfig); ii++) {
        lcb_error_t status;
        const char *ss = vbucket_config_get_rest_api_server(newconfig, ii);
        lcb_assert(ss != NULL);
        status = hostlist_add_stringz(http->nodes, ss, LCB_CONFIG_HTTP_PORT);
        lcb_assert(status == LCB_SUCCESS);
    }

    GT_DONE:
    if (PROVIDER_SETTING(pb, randomize_bootstrap_nodes)) {
        hostlist_randomize(http->nodes);
    }
}
static void timer_callback(lcb_socket_t sock, short which, void *arg)
{
    lcb_timer_t timer = arg;
    lcb_t instance = timer->instance;

    lcb_assert(TMR_IS_ARMED(timer));
    lcb_assert(!TMR_IS_DESTROYED(timer));

    timer->state |= LCB_TIMER_S_ENTERED;
    lcb_timer_disarm(timer);
    timer->callback(timer, instance, timer->cookie);

    if (TMR_IS_DESTROYED(timer) == 0 && TMR_IS_PERIODIC(timer) != 0) {
        lcb_timer_rearm(timer, timer->usec_);
        return;
    }

    if (!TMR_IS_STANDALONE(timer)) {
        lcb_aspend_del(&instance->pendops, LCB_PENDTYPE_TIMER, timer);
        lcb_maybe_breakout(instance);
    }

    if (TMR_IS_DESTROYED(timer)) {
        destroy_timer(timer);
    } else {
        timer->state &= ~LCB_TIMER_S_ENTERED;
    }

    (void)sock;
    (void)which;
}
int main(int argc, char **argv)
{
    char str_node_count[16];
    int is_mock = 1;
    const char *args[] = {"--nodes", "", "--buckets=default::memcache", NULL};

    if (getenv("LCB_VERBOSE_TESTS") == NULL) {
        FILE *unused = freopen("/dev/null", "w", stdout);
        (void)unused;
    }

    setup_test_timeout_handler();

    total_node_count = 5;
    snprintf(str_node_count, 16, "%d", total_node_count);
    args[1] = str_node_count;

    setup((char **)args, "default", NULL, "default");

    /* first time it's initialized */
    is_mock = mock->is_mock;

    test_set1();
    test_set2();
    test_get1();
    test_get2();
    test_version1();
    teardown();

    args[2] = NULL;
    setup((char **)args, "default", NULL, "default");
    test_set1();
    test_set2();
    test_get1();
    test_get2();
    test_touch1();
    test_version1();
    teardown();

    if (is_mock) {
        lcb_assert(test_connect((char **)args, "missing", NULL, "missing") == LCB_BUCKET_ENOENT);
        args[2] = "--buckets=protected:secret";
        lcb_assert(test_connect((char **)args, "protected", "incorrect", "protected") == LCB_AUTH_ERROR);
        setup((char **)args, "protected", "secret", "protected");
        test_spurious_saslerr();
        teardown();
    } else {
        fprintf(stderr, "FIXME: Skipping bad auth tests in real cluster\n");
    }

    (void)argc;
    (void)argv;
    return EXIT_SUCCESS;
}
static void setup(char **argv, const char *username, const char *password,
                  const char *bucket)
{
    const char *endpoint;
    struct lcb_create_st options;

    lcb_assert(session == NULL);
    lcb_assert(mock == NULL);
    lcb_assert(io == NULL);

    if (lcb_create_io_ops(&io, NULL) != LCB_SUCCESS) {
        fprintf(stderr, "Failed to create IO instance\n");
        exit(1);
    }

    mock = start_test_server(argv);
    if (mock == NULL) {
        err_exit("Failed to start mock server");
    }

    endpoint = get_mock_http_server(mock);
    memset(&options, 0, sizeof(options));

    if (!mock->is_mock) {
        username = mock->username;
        password = mock->password;
        bucket = mock->bucket;
    }

    options.version = 2;
    options.v.v2.host = endpoint;
    options.v.v2.user = username;
    options.v.v2.passwd = password;
    options.v.v2.bucket = bucket;
    options.v.v2.io = io;
    options.v.v2.transports = enabled_transports;

    if (lcb_create(&session, &options) != LCB_SUCCESS) {
        err_exit("Failed to create libcouchbase session");
    }

    (void)lcb_set_error_callback(session, error_callback);

    if (lcb_connect(session) != LCB_SUCCESS) {
        err_exit("Failed to connect to server");
    }
    lcb_wait(session);

    if (!mock->is_mock) {
        const char *const *servers;
        total_node_count = 0;
        servers = lcb_get_server_list(session);
        for (; *servers; servers++, total_node_count++);
    }
}
static lcb_error_t test_connect(char **argv, const char *username,
                                const char *password, const char *bucket)
{
    const char *endpoint;
    lcb_error_t rc;
    struct lcb_create_st options;

    lcb_assert(session == NULL);
    lcb_assert(mock == NULL);
    lcb_assert(io == NULL);

    if (lcb_create_io_ops(&io, NULL) != LCB_SUCCESS) {
        fprintf(stderr, "Failed to create IO instance\n");
        exit(1);
    }

    mock = start_test_server(argv);
    if (mock == NULL) {
        err_exit("Failed to start mock server");
    }

    endpoint = get_mock_http_server(mock);
    memset(&options, 0, sizeof(options));
    options.version = 2;
    options.v.v2.host = endpoint;
    options.v.v2.user = username;
    options.v.v2.passwd = password;
    options.v.v2.bucket = bucket;
    options.v.v2.io = io;
    options.v.v2.transports = enabled_transports;

    if (lcb_create(&session, &options) != LCB_SUCCESS) {
        err_exit("Failed to create libcouchbase session");
    }

    (void)lcb_set_error_callback(session, error_callback2);

    if (lcb_connect(session) != LCB_SUCCESS) {
        err_exit("Failed to connect to server");
    }
    lcb_wait(session);
    rc = global_error;

    lcb_destroy(session);
    lcb_destroy_io_ops(io);
    session = NULL;
    io = NULL;
    shutdown_mock_server(mock);
    mock = NULL;

    return rc;
}
int ringbuffer_append(ringbuffer_t *src, ringbuffer_t *dest)
{
    char buffer[1024];
    lcb_size_t nr, nw;

    while ((nr = ringbuffer_read(src, buffer, sizeof(buffer))) != 0) {
        lcb_assert(ringbuffer_ensure_capacity(dest, nr));
        nw = ringbuffer_write(dest, buffer, nr);
        lcb_assert(nw == nr);
    }

    return 1;
}
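/*
 * Illustrative sketch (not part of the original source): shows how
 * ringbuffer_append() above drains one buffer into another. It assumes the
 * ringbuffer_initialize()/ringbuffer_destruct() helpers declared alongside
 * the other ringbuffer_* routines used here, and that ringbuffer_initialize()
 * returns nonzero on success.
 */
static void ringbuffer_append_example(void)
{
    ringbuffer_t src, dest;
    char out[4];

    lcb_assert(ringbuffer_initialize(&src, 16));
    lcb_assert(ringbuffer_initialize(&dest, 16));

    /* Put a few bytes into the source buffer */
    lcb_assert(ringbuffer_write(&src, "abc", 3) == 3);

    /* Move everything from src into dest; src is left empty */
    lcb_assert(ringbuffer_append(&src, &dest) == 1);
    lcb_assert(ringbuffer_get_nbytes(&src) == 0);
    lcb_assert(ringbuffer_read(&dest, out, 3) == 3);

    ringbuffer_destruct(&src);
    ringbuffer_destruct(&dest);
}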
static void free_bufinfo_common(struct lcb_buf_info *bi)
{
    if (bi->root || bi->ringbuffer) {
        lcb_assert((void *)bi->root != (void *)bi->ringbuffer);
    }

    lcb_assert((bi->ringbuffer == NULL && bi->root == NULL) ||
               (bi->root && bi->ringbuffer));

    lcb_mem_free(bi->root);
    lcb_mem_free(bi->ringbuffer);

    bi->root = NULL;
    bi->ringbuffer = NULL;
}
static struct genhash_entry_t *genhash_find_entry(genhash_t *h,
                                                  const void *k,
                                                  lcb_size_t klen)
{
    lcb_size_t n = 0;
    struct genhash_entry_t *p;

    lcb_assert(h != NULL);

    n = h->ops.hashfunc(k, klen) % h->size;
    lcb_assert(n < h->size);

    for (p = h->buckets[n]; p && !h->ops.hasheq(k, klen, p->key, p->nkey);
         p = p->next);
    return p;
}
int genhash_size(genhash_t *h)
{
    int rv = 0;
    lcb_assert(h != NULL);
    genhash_iter(h, count_entries, &rv);
    return rv;
}
/**
 * All-purpose callback dispatcher.
 */
static void timer_callback(lcb_socket_t sock, short which, void *arg)
{
    lcb_DURSET *dset = arg;
    hrtime_t ns_now = gethrtime();
    lcb_U32 us_now = (lcb_U32)(ns_now / 1000);

    if (us_now >= (dset->us_timeout - 50)) {
        dset->next_state = STATE_TIMEOUT;
    }

    switch (dset->next_state) {
    case STATE_OBSPOLL:
        poll_once(dset, 0);
        break;

    case STATE_TIMEOUT: {
        if (us_now >= (dset->us_timeout - 50)) {
            purge_entries(dset, LCB_ETIMEDOUT);
        } else {
            timer_schedule(dset, dset->us_timeout - us_now, STATE_TIMEOUT);
        }
        break;
    }

    case STATE_IGNORE:
        break;

    default:
        lcb_assert("unexpected state" && 0);
        break;
    }

    (void)sock;
    (void)which;
}
static void connect_done_handler(lcb_connection_t conn, lcb_error_t err)
{
    http_provider *http = (http_provider *)conn->data;
    const lcb_host_t *host = lcb_connection_get_host(conn);

    if (err != LCB_SUCCESS) {
        lcb_log(LOGARGS(http, ERR),
                "Connection to REST API @%s:%s failed with code=0x%x",
                host->host, host->port, err);
        io_error(http, err);
        return;
    }

    lcb_log(LOGARGS(http, DEBUG),
            "Successfully connected to REST API %s:%s",
            host->host, host->port);

    lcb_connection_reset_buffers(conn);
    ringbuffer_strcat(conn->output, http->request_buf);
    lcb_assert(conn->output->nbytes > 0);

    lcb_sockrw_set_want(conn, LCB_RW_EVENT, 0);
    lcb_sockrw_apply_want(conn);
    lcb_timer_rearm(http->io_timer,
                    PROVIDER_SETTING(&http->base, config_node_timeout));
}
void lcb_server_connected(lcb_server_t *server)
{
    lcb_connection_t conn = &server->connection;
    server->connection_ready = 1;

    if (server->pending.nbytes > 0) {
        /*
        ** @todo we might want to do this a bit more optimal later on..
        ** We're only using the pending ringbuffer while we're
        ** doing the SASL auth, so it shouldn't contain that
        ** much data..
        */
        ringbuffer_t copy = server->pending;
        ringbuffer_reset(&server->cmd_log);
        ringbuffer_reset(&server->output_cookies);
        ringbuffer_reset(conn->output);

        if (!ringbuffer_append(&server->pending, conn->output) ||
            !ringbuffer_append(&server->pending_cookies, &server->output_cookies) ||
            !ringbuffer_append(&copy, &server->cmd_log)) {
            ringbuffer_reset(&server->cmd_log);
            ringbuffer_reset(&server->output_cookies);
            lcb_server_release_connection(server, LCB_CLIENT_ENOMEM);
            lcb_connection_cleanup(conn);
            lcb_error_handler(server->instance, LCB_CLIENT_ENOMEM, NULL);
            return;
        }

        ringbuffer_reset(&server->pending);
        ringbuffer_reset(&server->pending_cookies);

        lcb_assert(conn->output->nbytes);
        lcb_server_send_packets(server);
    }
}
void lcb_clconfig_cccp_enable(clconfig_provider *pb, lcb_t instance)
{
    cccp_provider *cccp = (cccp_provider *)pb;
    lcb_assert(pb->type == LCB_CLCONFIG_CCCP);
    cccp->instance = instance;
    pb->enabled = 1;
}
int ringbuffer_memcpy(ringbuffer_t *dst, ringbuffer_t *src, lcb_size_t nbytes)
{
    ringbuffer_t copy = *src;
    struct lcb_iovec_st iov[2];
    int ii = 0;
    lcb_size_t towrite = nbytes;
    lcb_size_t toread, nb;

    if (nbytes > ringbuffer_get_nbytes(src)) {
        /* EINVAL */
        return -1;
    }

    if (!ringbuffer_ensure_capacity(dst, nbytes)) {
        /* Failed to allocate space */
        return -1;
    }

    ringbuffer_get_iov(dst, RINGBUFFER_WRITE, iov);
    toread = minimum(iov[ii].iov_len, nbytes);
    do {
        lcb_assert(ii < 2);
        nb = ringbuffer_read(&copy, iov[ii].iov_base, toread);
        toread -= nb;
        towrite -= nb;
        ++ii;
    } while (towrite > 0);

    ringbuffer_produced(dst, nbytes);
    return 0;
}
void ringbuffer_get_iov(ringbuffer_t *buffer,
                        ringbuffer_direction_t direction,
                        struct lcb_iovec_st *iov)
{
    iov[1].iov_base = buffer->root;
    iov[1].iov_len = 0;

    if (direction == RINGBUFFER_READ) {
        iov[0].iov_base = buffer->read_head;
        iov[0].iov_len = buffer->nbytes;
        if (buffer->read_head >= buffer->write_head) {
            ptrdiff_t chunk = buffer->root + buffer->size - buffer->read_head;
            if (buffer->nbytes > (lcb_size_t)chunk) {
                iov[0].iov_len = (lcb_size_t)chunk;
                iov[1].iov_len = buffer->nbytes - (lcb_size_t)chunk;
            }
        }
    } else {
        lcb_assert(direction == RINGBUFFER_WRITE);
        iov[0].iov_base = buffer->write_head;
        iov[0].iov_len = buffer->size - buffer->nbytes;
        if (buffer->write_head >= buffer->read_head) {
            /* I may write all the way to the end! */
            iov[0].iov_len = (lcb_size_t)((buffer->root + buffer->size) - buffer->write_head);
            /* And all the way up to the read head */
            iov[1].iov_len = (lcb_size_t)(buffer->read_head - buffer->root);
        }
    }
}
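/*
 * Illustrative sketch (not part of the original source): for the READ
 * direction, the two IOV segments produced above always cover exactly the
 * bytes currently stored, whether or not the region wraps around the end of
 * the ring; iov[1].iov_len is zero in the non-wrapping case. This
 * hypothetical helper just makes that invariant explicit, e.g. when sizing a
 * writev()-style call.
 */
static lcb_size_t ringbuffer_readable_iov_total(ringbuffer_t *buffer)
{
    struct lcb_iovec_st iov[2];

    ringbuffer_get_iov(buffer, RINGBUFFER_READ, iov);
    lcb_assert(iov[0].iov_len + iov[1].iov_len == buffer->nbytes);
    return iov[0].iov_len + iov[1].iov_len;
}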
/**
 * Try to parse the piece of data we've got available to see if we got all
 * the data for this "chunk"
 * @param vbs the vbucket stream state containing the data
 * @return LCB_SUCCESS if we got all the data we need, LCB_BUSY otherwise
 */
static lcb_error_t parse_chunk(struct htvb_st *vbs)
{
    lcb_string *chunk = &vbs->chunk;
    lcb_assert(vbs->chunk_size != 0);

    if (vbs->chunk_size == (lcb_size_t)-1) {
        char *ptr = strstr(chunk->base, "\r\n");
        long val;
        if (ptr == NULL) {
            /* We need more data! */
            return LCB_BUSY;
        }

        ptr += 2;
        val = strtol(chunk->base, NULL, 16);
        val += 2;
        vbs->chunk_size = (lcb_size_t)val;

        lcb_string_erase_beginning(chunk, ptr - chunk->base);
    }

    if (chunk->nused < vbs->chunk_size) {
        /* need more data! */
        return LCB_BUSY;
    }

    return LCB_SUCCESS;
}
void lcb_sockrw_v1_onwrite_common(lcb_sockdata_t *sock,
                                  lcb_io_writebuf_t *wbuf,
                                  ringbuffer_t **dst)
{
    struct lcb_buf_info *bi = &wbuf->buffer;
    lcb_io_opt_t io = sock->parent;

    if (*dst) {
        lcb_assert(*dst != bi->ringbuffer);
        /**
         * We can't overwrite the existing buffer, so just return
         */
        io->v.v1.release_writebuf(io, sock, wbuf);
        return;
    }

    *dst = bi->ringbuffer;
    ringbuffer_reset(*dst);

    bi->ringbuffer = NULL;
    bi->root = NULL;

    io->v.v1.release_writebuf(io, sock, wbuf);
    (void)sock;
}
int genhash_size_for_key(genhash_t *h, const void *k, lcb_size_t klen)
{
    int rv = 0;
    lcb_assert(h != NULL);
    genhash_iter_key(h, k, klen, count_entries, &rv);
    return rv;
}
static void free_item(genhash_t *h, struct genhash_entry_t *i)
{
    lcb_assert(i);
    free_key(h, i->key);
    free_value(h, i->value);
    free(i);
}
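/*
 * Illustrative sketch (not part of the original source): exercises the
 * genhash_store()/genhash_delete() pair defined above on a caller-supplied
 * table. Creating the table itself is left out; `h` is assumed to come from
 * the library's usual genhash constructor, and only functions shown in this
 * section are called.
 */
static void genhash_store_delete_example(genhash_t *h)
{
    const char *key = "example-key";
    const char *val = "example-value";

    /* Insert one entry; genhash_store() returns 0 on success */
    lcb_assert(genhash_store(h, key, strlen(key), val, strlen(val)) == 0);
    lcb_assert(genhash_size_for_key(h, key, strlen(key)) == 1);

    /* Remove it again; the return value is the number of entries removed */
    lcb_assert(genhash_delete(h, key, strlen(key)) == 1);
    lcb_assert(genhash_size_for_key(h, key, strlen(key)) == 0);
}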
int lcb_clconfig_file_set_filename(clconfig_provider *p, const char *f, int ro)
{
    file_provider *provider = (file_provider *)p;
    lcb_assert(provider->base.type == LCB_CLCONFIG_FILE);
    provider->base.enabled = 1;

    if (provider->filename) {
        free(provider->filename);
    }

    provider->filename = mkcachefile(f, p->parent->settings->bucket);

    if (ro) {
        FILE *fp_tmp;
        provider->ro_mode = 1;

        fp_tmp = fopen(provider->filename, "r");
        if (!fp_tmp) {
            lcb_log(LOGARGS(provider, ERROR),
                    LOGFMT "Couldn't open for reading: %s",
                    LOGID(provider), strerror(errno));
            return -1;
        } else {
            fclose(fp_tmp);
        }
    }

    return 0;
}
static void destroy_cursock(lcbio_CONNSTART *cs)
{
    lcbio_SOCKET *s = cs->sock;
    lcbio_TABLE *iot = s->io;

    if (cs->ai) {
        cs->ai = cs->ai->ai_next;
    }

    if (!cs->ai) {
        return;
    }

    if (IOT_IS_EVENT(iot)) {
        if (cs->ev_active) {
            lcb_assert(s->u.fd != INVALID_SOCKET);
            IOT_V0EV(iot).cancel(IOT_ARG(iot), s->u.fd, cs->event);
            cs->ev_active = 0;
        }
        IOT_V0IO(iot).close(IOT_ARG(iot), s->u.fd);
        s->u.fd = INVALID_SOCKET;
    } else {
        if (s->u.sd) {
            IOT_V1(iot).close(IOT_ARG(iot), s->u.sd);
            s->u.sd = NULL;
        }
    }
}
static void close_cb(lcbio_SOCKET *s, int reusable, void *arg)
{
    *(lcbio_SOCKET **)arg = s;
    lcbio_ref(s);
    lcb_assert(reusable);
}
static void options_from_info(struct lcb_create_io_ops_st *opts,
                              const plugin_info *info)
{
    void *cookie;

    switch (opts->version) {
    case 0:
        cookie = opts->v.v0.cookie;
        break;
    case 1:
        cookie = opts->v.v1.cookie;
        break;
    case 2:
        cookie = opts->v.v2.cookie;
        break;
    default:
        lcb_assert("unknown options version" && 0);
        cookie = NULL;
    }

    if (info->create) {
        opts->version = 2;
        opts->v.v2.create = info->create;
        opts->v.v2.cookie = cookie;
        return;
    }

    opts->version = 1;
    opts->v.v1.sofile = info->soname;
    opts->v.v1.symbol = info->symbol;
    opts->v.v1.cookie = cookie;
}
/**
 * This one is asynchronously triggered, so as to ensure we don't have any
 * silly re-entrancy issues.
 */
static void socket_closing_cb(uv_idle_t *idle, int status)
{
    my_sockdata_t *sock = idle->data;

    uv_idle_stop(idle);
    uv_close((uv_handle_t *)idle, generic_close_cb);

    if (sock->pending.read) {
        /**
         * UV doesn't invoke read callbacks once the handle has been closed
         * so we must track this ourselves.
         */
        lcb_assert(sock->pending.read == 1);
        uv_read_stop((uv_stream_t *)&sock->tcp);
        sock->pending.read--;
        decref_sock(sock);
    }

#ifdef DEBUG
    if (sock->pending.read || sock->pending.write) {
        sock_dump_pending(sock);
    }
#endif

    decref_sock(sock);
    sock_do_uv_close(sock);

    (void)status;
}
/**
 * This one is called from uv_close
 */
static void socket_closed_callback(uv_handle_t *handle)
{
    my_sockdata_t *sock = PTR_FROM_FIELD(my_sockdata_t, handle, tcp);
    my_iops_t *io = (my_iops_t *)sock->base.parent;

    lcb_assert(sock->refcount == 0);

    free_bufinfo_common(&sock->base.read_buffer);

    lcb_assert(sock->base.read_buffer.root == NULL);
    lcb_assert(sock->base.read_buffer.ringbuffer == NULL);

    memset(sock, 0xEE, sizeof(*sock));
    free(sock);

    decref_iops(&io->base);
}
static void touch_callback(lcb_t instance, const void *cookie,
                           lcb_error_t error, const lcb_touch_resp_t *resp)
{
    struct rvbuf *rv = (struct rvbuf *)cookie;
    rv->error = error;
    lcb_assert(error == LCB_SUCCESS);
    rv->key = resp->v.v0.key;
    rv->nkey = resp->v.v0.nkey;

    rv->counter--;
    if (rv->counter <= 0) {
        lcb_assert(io);
        io->v.v0.stop_event_loop(io);
    }

    (void)instance;
}