/**
 * Invoked when the connection attempt to the REST endpoint completes.
 *
 * On failure the error is logged and forwarded to io_error(). On success
 * the prebuilt HTTP request (http->request_buf) is queued into the output
 * ring buffer, read/write interest is registered, and the per-node I/O
 * timeout is re-armed.
 */
static void connect_done_handler(lcb_connection_t conn, lcb_error_t err)
{
    http_provider *http = (http_provider *)conn->data;
    const lcb_host_t *host = lcb_connection_get_host(conn);

    if (err != LCB_SUCCESS) {
        lcb_log(LOGARGS(http, ERR),
                "Connection to REST API @%s:%s failed with code=0x%x",
                host->host, host->port, err);
        io_error(http, err);
        return;
    }

    /* Fix: corrected misspelling "Successfuly" in the log message */
    lcb_log(LOGARGS(http, DEBUG), "Successfully connected to REST API %s:%s",
            host->host, host->port);

    lcb_connection_reset_buffers(conn);
    ringbuffer_strcat(conn->output, http->request_buf);
    /* The request header must have been prepared before connecting */
    lcb_assert(conn->output->nbytes > 0);

    lcb_sockrw_set_want(conn, LCB_RW_EVENT, 0);
    lcb_sockrw_apply_want(conn);
    lcb_timer_rearm(http->io_timer,
                    PROVIDER_SETTING(&http->base, config_node_timeout));
}
/**
 * Timer callback fired when the HTTP provider has waited too long for I/O.
 * The timeout is only honored while this provider is the active one inside
 * an ongoing refresh; otherwise it is deferred until explicitly requested.
 */
static void timeout_handler(lcb_timer_t tm, lcb_t i, const void *cookie)
{
    http_provider *http = (http_provider *)cookie;
    const lcb_host_t *curhost = lcb_connection_get_host(&http->connection);
    int is_current, is_refreshing;

    lcb_log(LOGARGS(http, ERR),
            "HTTP Provider timed out on host %s:%s waiting for I/O",
            curhost->host, curhost->port);

    is_current = (&http->base == http->base.parent->cur_provider);
    is_refreshing = (lcb_confmon_is_refreshing(http->base.parent) != 0);

    if (is_current && is_refreshing) {
        io_error(http, LCB_ETIMEDOUT);
    } else {
        lcb_log(LOGARGS(http, DEBUG),
                "Ignoring timeout because we're either not in a refresh "
                "or not the current provider");
    }

    (void)tm;
    (void)i;
}
/******************************************************************************
 ******************************************************************************
 ** Higher Level SSL_CTX Wrappers                                            **
 ******************************************************************************
 ******************************************************************************/

/**
 * OpenSSL info callback: surfaces interesting TLS state transitions into
 * the library log. Only alerts, handshake start/end, and failed exits are
 * logged; routine low-level state changes are suppressed.
 *
 * @param ssl the SSL object (its app data holds our lcbio_SOCKET)
 * @param where SSL_CB_* bitmask describing why the callback fired
 * @param ret value associated with the event (alert code or step result)
 */
static void log_callback(const SSL *ssl, int where, int ret)
{
    const char *retstr = "";
    int should_log = 0;
    lcbio_SOCKET *sock = SSL_get_app_data(ssl);

    /* Ignore low-level SSL stuff */
    if (where & SSL_CB_ALERT) {
        should_log = 1;
    }
    if (where == SSL_CB_HANDSHAKE_START || where == SSL_CB_HANDSHAKE_DONE) {
        should_log = 1;
    }
    if ((where & SSL_CB_EXIT) && ret == 0) {
        /* A connect/accept step failed outright */
        should_log = 1;
    }
    if (!should_log) {
        return;
    }

    /* NOTE(review): SSL_alert_type_string() is primarily meaningful for
     * SSL_CB_ALERT events; for other events the string is informational. */
    retstr = SSL_alert_type_string(ret);
    lcb_log(LOGARGS(ssl, LCB_LOG_TRACE), "sock=%p: ST(0x%x). %s. R(0x%x)%s",
        (void*)sock, where, SSL_state_string_long(ssl), ret, retstr);

    if (where == SSL_CB_HANDSHAKE_DONE) {
        /* Handshake complete: record the negotiated version and cipher */
        lcb_log(LOGARGS(ssl, LCB_LOG_DEBUG),
            "sock=%p. Using SSL version %s. Cipher=%s",
            (void*)sock, SSL_get_version(ssl), SSL_get_cipher_name(ssl));
    }
}
/**
 * Common function to handle parsing the HTTP stream for both v0 and v1 io
 * implementations.
 *
 * Appends newly received bytes to the stream chunk, feeds them to the
 * vbucket-stream parser, and promotes a new configuration when the stream
 * generation counter advances. Parser errors other than LCB_BUSY abort the
 * connection via io_error().
 */
static void read_common(http_provider *http)
{
    lcb_error_t err;
    lcb_connection_t conn = &http->connection;
    int old_generation = http->stream.generation;

    /* Fix: nbytes is not an int; cast and use %lu so the format specifier
     * matches the argument type (a mismatch is undefined behavior in
     * printf-style APIs). */
    lcb_log(LOGARGS(http, TRACE), "Received %lu bytes on HTTP stream",
            (unsigned long)conn->input->nbytes);

    /* Any activity counts as progress; push the I/O deadline forward */
    lcb_timer_rearm(http->io_timer,
                    PROVIDER_SETTING(&http->base, config_node_timeout));

    lcb_string_rbappend(&http->stream.chunk, conn->input, 1);
    err = htvb_parse(&http->stream, http->base.parent->settings->conntype);

    if (http->stream.generation != old_generation) {
        /* A complete new config chunk was parsed */
        lcb_log(LOGARGS(http, DEBUG), "Generation %d -> %d",
                old_generation, http->stream.generation);
        set_new_config(http);
    } else {
        lcb_log(LOGARGS(http, TRACE), "HTTP not yet done. Err=0x%x", err);
    }

    /* LCB_BUSY simply means "need more data"; anything else is fatal */
    if (err != LCB_BUSY && err != LCB_SUCCESS) {
        io_error(http, err);
        return;
    }

    lcb_sockrw_set_want(conn, LCB_READ_EVENT, 1);
    lcb_sockrw_apply_want(conn);
}
/**
 * Kick off (or restart) the HTTP configuration provider by connecting to
 * the next node in the list and preparing the request header.
 *
 * @return LCB_SUCCESS if the connection was scheduled, or an error code.
 */
static lcb_error_t connect_next(http_provider *http)
{
    char *errinfo = NULL;
    lcb_error_t err;
    lcb_conn_params params;
    lcb_connection_t conn = &http->connection;

    /* Drop any existing connection/parser state before reconnecting */
    close_current(http);
    reset_stream_state(http);

    params.handler = connect_done_handler;
    params.timeout = PROVIDER_SETTING(&http->base, config_node_timeout);

    lcb_log(LOGARGS(http, TRACE),
            "Starting HTTP Configuration Provider %p", http);

    /* Fix: the address-of operator was garbled into the mojibake "¶ms";
     * restored to "&params" so the code compiles. */
    err = lcb_connection_cycle_nodes(conn, http->nodes, &params, &errinfo);

    if (err == LCB_SUCCESS) {
        err = setup_request_header(http);
    } else {
        lcb_log(LOGARGS(http, ERROR),
                "%p: Couldn't schedule connection (0x%x)", http, err);
    }

    return err;
}
/**
 * This function is where the configuration actually takes place. We ensure
 * in other functions that this is only ever called directly from an event
 * loop stack frame (or one of the small mini functions here) so that we
 * don't accidentally end up destroying resources underneath us.
 */
static void config_callback(clconfig_listener *listener, clconfig_event_t event,
    clconfig_info *info)
{
    struct lcb_bootstrap_st *bs = (struct lcb_bootstrap_st *)listener;
    lcb_t instance = bs->parent;

    if (event != CLCONFIG_EVENT_GOT_NEW_CONFIG) {
        /* If every provider was tried and we still have no config, the
         * initial bootstrap has failed. */
        if (event == CLCONFIG_EVENT_PROVIDERS_CYCLED) {
            if (!instance->vbucket_config) {
                initial_bootstrap_error(instance, LCB_ERROR,
                    "No more bootstrap providers remain");
            }
        }
        return;
    }

    instance->last_error = LCB_SUCCESS;
    bs->active = 0;
    /** Ensure we're not called directly twice again */
    listener->callback = async_step_callback;

    if (bs->timer) {
        lcb_timer_destroy(instance, bs->timer);
        bs->timer = NULL;
    }

    lcb_log(LOGARGS(instance, DEBUG), "Instance configured!");

    if (instance->type != LCB_TYPE_CLUSTER) {
        lcb_update_vbconfig(instance, info);
    }

    if (!bs->bootstrapped) {
        bs->bootstrapped = 1;
        if (instance->type == LCB_TYPE_BUCKET &&
                instance->dist_type == VBUCKET_DISTRIBUTION_KETAMA) {
            lcb_log(LOGARGS(instance, INFO),
                "Reverting to HTTP Config for memcached buckets");
            /** Memcached bucket: CCCP is not supported, so force the HTTP
             * provider to stay on and disable CCCP. */
            lcb_clconfig_set_http_always_on(
                lcb_confmon_get_provider(instance->confmon, LCB_CLCONFIG_HTTP));
            lcb_confmon_set_provider_active(
                instance->confmon, LCB_CLCONFIG_HTTP, 1);
            /* Fix: this call was duplicated verbatim in the original;
             * deactivating CCCP once is sufficient. */
            lcb_confmon_set_provider_active(
                instance->confmon, LCB_CLCONFIG_CCCP, 0);
        }
    }

    lcb_maybe_breakout(instance);
}
/**
 * Handler invoked to deliver final status for a connection. This will invoke
 * the user supplied callback with the relevant status (if it has not been
 * cancelled) and then free the CONNSTART object.
 */
static void cs_handler(void *cookie)
{
    lcbio_CONNSTART *cs = cookie;
    lcb_error_t err;
    lcbio_SOCKET *s = cs->sock;

    /* Stop watching the socket and release its event object first */
    if (s && cs->event) {
        cs_unwatch(cs);
        IOT_V0EV(s->io).destroy(IOT_ARG(s->io), cs->event);
    }

    /* Derive the final error from the CONNSTART state */
    if (cs->state == CS_PENDING) {
        /* state was not changed since initial scheduling */
        err = LCB_ETIMEDOUT;
    } else if (cs->state == CS_CONNECTED) {
        /* clear pending error */
        err = LCB_SUCCESS;
    } else {
        if (s != NULL && cs->pending == LCB_CONNECT_ERROR) {
            /* Translate the OS-level errno into an lcb error code */
            err = lcbio_mklcberr(cs->syserr, s->settings);
        } else {
            err = cs->pending;
        }
    }

    if (cs->state == CS_CANCELLED) {
        /* ignore everything. Clean up resources */
        goto GT_DTOR;
    }

    if (s) {
        lcbio__load_socknames(s);
        if (err == LCB_SUCCESS) {
            lcb_log(LOGARGS(s, INFO), CSLOGFMT "Connected ", CSLOGID(s));
        } else {
            lcb_log(LOGARGS(s, ERR),
                CSLOGFMT "Failed: lcb_err=0x%x, os_errno=%u",
                CSLOGID(s), err, cs->syserr);
        }
    }

    /** Handler section: pass NULL for the socket on failure */
    cs->in_uhandler = 1;
    cs->handler(err == LCB_SUCCESS ? s : NULL, cs->arg, err, cs->syserr);

    GT_DTOR:
    /* Tear down auxiliary resources regardless of outcome */
    if (cs->async) {
        lcbio_timer_destroy(cs->async);
    }
    if (cs->sock) {
        lcbio_unref(cs->sock);
    }
    if (cs->ai_root) {
        freeaddrinfo(cs->ai_root);
    }
    free(cs);
}
/**
 * Begin (or re-run) the configuration bootstrap process.
 *
 * Lazily allocates the bootstrap state on first use, applies the refresh
 * throttle when requested, and starts the configuration monitor.
 *
 * @param instance the library handle
 * @param options one of the LCB_BS_REFRESH_* flags/values
 * @return LCB_SUCCESS, LCB_CLIENT_ENOMEM, or the result of
 *         lcb_confmon_start()
 */
lcb_error_t lcb_bootstrap_common(lcb_t instance, int options)
{
    struct lcb_BOOTSTRAP *bs = instance->bootstrap;
    hrtime_t now = gethrtime();

    /* First call: create and register the bootstrap state */
    if (!bs) {
        bs = calloc(1, sizeof(*instance->bootstrap));
        if (!bs) {
            return LCB_CLIENT_ENOMEM;
        }
        bs->tm = lcbio_timer_new(instance->iotable, bs, initial_timeout);
        instance->bootstrap = bs;
        bs->parent = instance;
        lcb_confmon_add_listener(instance->confmon, &bs->listener);
    }

    /* A refresh is already in flight; nothing more to do */
    if (lcb_confmon_is_refreshing(instance->confmon)) {
        return LCB_SUCCESS;
    }

    if (options & LCB_BS_REFRESH_THROTTLE) {
        /* Refresh throttle requested. This is not true if options == ALWAYS */
        hrtime_t next_ts;
        unsigned errthresh = LCBT_SETTING(instance, weird_things_threshold);

        if (options & LCB_BS_REFRESH_INCRERR) {
            bs->errcounter++;
        }
        next_ts = bs->last_refresh;
        next_ts += LCB_US2NS(LCBT_SETTING(instance, weird_things_delay));
        /* Suppress the refresh unless the grace period elapsed or enough
         * errors accumulated */
        if (now < next_ts && bs->errcounter < errthresh) {
            lcb_log(LOGARGS(instance, INFO),
                "Not requesting a config refresh because of throttling parameters. Next refresh possible in %ums or %u errors. "
                "See LCB_CNTL_CONFDELAY_THRESH and LCB_CNTL_CONFERRTHRESH to modify the throttling settings",
                LCB_NS2US(next_ts-now)/1000, (unsigned)errthresh-bs->errcounter);
            return LCB_SUCCESS;
        }
    }

    if (options == LCB_BS_REFRESH_INITIAL) {
        /* Initial bootstrap: arm the overall config timeout and keep the
         * instance "pending" until configuration completes */
        lcb_confmon_prepare(instance->confmon);
        bs->listener.callback = config_callback;
        lcbio_timer_set_target(bs->tm, initial_timeout);
        lcbio_timer_rearm(bs->tm, LCBT_SETTING(instance, config_timeout));
        lcb_aspend_add(&instance->pendops, LCB_PENDTYPE_COUNTER, NULL);
    } else {
        /** No initial timer */
        bs->listener.callback = async_step_callback;
    }

    /* Reset the counters */
    bs->errcounter = 0;
    if (options != LCB_BS_REFRESH_INITIAL) {
        bs->last_refresh = now;
    }
    return lcb_confmon_start(instance->confmon);
}
/**
 * Invoked when a server's I/O timer fires. Fails out commands older than
 * the server timeout, then reschedules the timer for the oldest command
 * still pending (or for a full timeout interval if none remain).
 */
void lcb_timeout_server(lcb_server_t *server)
{
    hrtime_t now, min_valid, next_ns = 0;
    lcb_uint32_t next_us;

    LOG(server, ERR, "Server timed out");
    lcb_bootstrap_errcount_incr(server->instance);

    /* If we never connected, fail everything immediately */
    if (!server->connection_ready) {
        lcb_failout_server(server, LCB_ETIMEDOUT);
        return;
    }

    now = gethrtime();
    /** The oldest valid command timestamp */
    min_valid = now - ((hrtime_t)MCSERVER_TIMEOUT(server)) * 1000;
    purge_single_server(server, LCB_ETIMEDOUT, min_valid, &next_ns);

    if (next_ns) {
        next_us = (lcb_uint32_t) (next_ns / 1000);
    } else {
        next_us = MCSERVER_TIMEOUT(server);
    }

    /* Fix: use %u for the unsigned next_us value and cast the pointer to
     * void* for %p (both required for well-defined printf behavior). */
    lcb_log(LOGARGS(server, INFO), "%p, Scheduling next timeout for %u ms",
            (void *)server, next_us / 1000);

    lcb_timer_rearm(server->io_timer, next_us);
    lcb_maybe_breakout(server->instance);
}
/**
 * Configure the cache file used by the file provider.
 *
 * @param p the provider (must be the FILE provider)
 * @param f user-supplied filename, combined with the bucket name
 * @param ro nonzero to open the file read-only (it must already exist)
 * @return 0 on success, -1 if read-only mode was requested but the file
 *         could not be opened for reading
 */
int lcb_clconfig_file_set_filename(clconfig_provider *p, const char *f, int ro)
{
    file_provider *provider = (file_provider *)p;

    lcb_assert(provider->base.type == LCB_CLCONFIG_FILE);
    provider->base.enabled = 1;

    /* Replace any previously configured path. free(NULL) is a no-op, so no
     * guard is needed. */
    free(provider->filename);
    provider->filename = mkcachefile(f, p->parent->settings->bucket);

    if (!ro) {
        return 0;
    }

    provider->ro_mode = 1;

    /* Read-only mode requires the file to exist and be readable now */
    {
        FILE *fp_tmp = fopen(provider->filename, "r");
        if (fp_tmp == NULL) {
            lcb_log(LOGARGS(provider, ERROR),
                    LOGFMT "Couldn't open for reading: %s",
                    LOGID(provider), strerror(errno));
            return -1;
        }
        fclose(fp_tmp);
    }
    return 0;
}
/**
 * Create a new I/O context bound to the given socket.
 *
 * Takes a reference on @p sock. Initializes the read buffer via the
 * settings-provided allocator factory, creates the async error timer, and
 * caches either the fd (event-style I/O) or the socket descriptor
 * (completion-style I/O) depending on the I/O model.
 *
 * NOTE(review): calloc() is not checked here; an OOM will crash on the
 * first dereference — confirm whether callers expect a NULL return.
 */
lcbio_CTX *
lcbio_ctx_new(lcbio_SOCKET *sock, void *data, const lcbio_EASYPROCS *procs)
{
    lcbio_CTX *ctx = calloc(1, sizeof(*ctx));

    ctx->sock = sock;
    sock->ctx = ctx;
    ctx->io = sock->io;
    ctx->data = data;
    ctx->procs = *procs;
    ctx->state = ES_ACTIVE;
    ctx->as_err = lcbio_timer_new(ctx->io, ctx, err_handler);
    ctx->subsys = "unknown";
    rdb_init(&ctx->ior, sock->settings->allocator_factory());
    lcbio_ref(sock);

    if (IOT_IS_EVENT(ctx->io)) {
        ctx->event = IOT_V0EV(ctx->io).create(IOT_ARG(ctx->io));
        ctx->fd = sock->u.fd;
    } else {
        ctx->sd = sock->u.sd;
    }

    /* Fix: removed a verbatim duplicate of the ctx->procs and ctx->state
     * assignments which appeared a second time here in the original. */

    lcb_log(LOGARGS(ctx, DEBUG), CTX_LOGFMT "Pairing with SOCK=%p",
            CTX_LOGID(ctx), (void*)sock);
    return ctx;
}
/**
 * Handle an I/O error on the CCCP connection: log it, release the socket
 * (discarding rather than pooling it when the server does not support
 * CCCP), and advance to the next candidate node.
 *
 * @return the result of scheduling the next request
 */
static lcb_error_t mcio_error(cccp_provider *cccp, lcb_error_t err)
{
    lcb_log(LOGARGS(cccp, ERR), "Got I/O Error=0x%x", err);
    release_socket(cccp, err == LCB_NOT_SUPPORTED);
    return schedule_next_request(cccp, err, 0);
}
/**
 * Drain and log OpenSSL's thread-local error queue.
 *
 * Each queued error is logged; the first one encountered is also mapped to
 * a library error code stored in xs->errcode (later errors never overwrite
 * an already-set code).
 */
void iotssl_log_errors(lcbio_XSSL *xs)
{
    unsigned long curerr;

    /* ERR_get_error() pops entries until the queue is empty (returns 0) */
    while ((curerr = ERR_get_error())) {
        char errbuf[4096];
        ERR_error_string_n(curerr, errbuf, sizeof errbuf);
        lcb_log(LOGARGS(xs->ssl, LCB_LOG_ERROR), "%s", errbuf);

        if (xs->errcode != LCB_SUCCESS) {
            continue; /* Already set */
        }

        /* Only SSL-library errors are mapped; others keep LCB_SUCCESS
         * until a later SSL-library entry (if any) sets a code. */
        if (ERR_GET_LIB(curerr) == ERR_LIB_SSL) {
            switch (ERR_GET_REASON(curerr)) {
            /* Certificate problems -> verification failure */
            case SSL_R_CERTIFICATE_VERIFY_FAILED:
            case SSL_R_MISSING_VERIFY_MESSAGE:
                xs->errcode = LCB_SSL_CANTVERIFY;
                break;

            /* Version/protocol negotiation problems -> protocol error */
            case SSL_R_BAD_PROTOCOL_VERSION_NUMBER:
            case SSL_R_UNKNOWN_PROTOCOL:
            case SSL_R_WRONG_VERSION_NUMBER:
            case SSL_R_UNKNOWN_SSL_VERSION:
            case SSL_R_UNSUPPORTED_SSL_VERSION:
                xs->errcode = LCB_PROTOCOL_ERROR;
                break;

            /* Anything else is a generic SSL error */
            default:
                xs->errcode = LCB_SSL_ERROR;
            }
        }
    }
}
/**
 * Record a "weird thing" (suspicious event) and trigger a configuration
 * refresh once either the grace period since the last refresh has elapsed
 * or the error counter reaches the configured threshold.
 */
void lcb_bootstrap_errcount_incr(lcb_t instance)
{
    hrtime_t now = gethrtime();
    int refresh = 0;

    instance->weird_things++;

    if (now - instance->bootstrap->last_refresh >
            LCB_US2NS(instance->settings.weird_things_delay)) {
        lcb_log(LOGARGS(instance, INFO),
                "Max grace period for refresh exceeded");
        refresh = 1;
    }

    if (instance->weird_things == instance->settings.weird_things_threshold) {
        refresh = 1;
    }

    if (refresh) {
        /* Reset the counter so subsequent events start a fresh tally */
        instance->weird_things = 0;
        lcb_bootstrap_refresh(instance);
    }
}
/**
 * Log a human-readable summary of a vbucket configuration diff: the number
 * of modified vbuckets, whether the sequence changed, and each server that
 * was added or removed.
 */
static void log_vbdiff(lcb_t instance, VBUCKET_CONFIG_DIFF *diff)
{
    char **srv;

    lcb_log(LOGARGS(instance, INFO),
            "Config Diff: [ vBuckets Modified=%d ], [Sequence Changed=%d]",
            diff->n_vb_changes, diff->sequence_changed);

    /* The added/removed lists may be NULL; fold the check into the loop */
    for (srv = diff->servers_added; srv && *srv; srv++) {
        lcb_log(LOGARGS(instance, INFO), "Detected server %s added", *srv);
    }
    for (srv = diff->servers_removed; srv && *srv; srv++) {
        lcb_log(LOGARGS(instance, INFO), "Detected server %s removed", *srv);
    }
}
/**
 * Handle an HTTP/REST protocol-level error. Errors that indicate user
 * misconfiguration (auth, protocol, missing bucket) permanently fail the
 * provider; network-style errors are retried via io_error().
 */
static void protocol_error(http_provider *http, lcb_error_t err)
{
    int can_retry = 1;
    lcb_log(LOGARGS(http, ERROR), "Got protocol-level error 0x%x", err);
    PROVIDER_SET_ERROR(&http->base, err);

    /**
     * XXX: We only want to retry on some errors. Things which signify an
     * obvious user error should be left out here; we only care about
     * actual "network" errors
     */
    if (err == LCB_AUTH_ERROR || err == LCB_PROTOCOL_ERROR ||
            err == LCB_BUCKET_ENOENT) {
        can_retry = 0;
    }

    /* retry_on_missing overrides the non-retry decision for auth/bucket
     * errors (e.g. the bucket may simply not exist *yet*) */
    if (http->retry_on_missing &&
            (err == LCB_BUCKET_ENOENT || err == LCB_AUTH_ERROR)) {
        LOG(http, INFO, "Retrying on AUTH||BUCKET_ENOENT");
        can_retry = 1;
    }

    if (!can_retry) {
        close_current(http);
        lcb_confmon_provider_failed(&http->base, err);
    } else {
        /* NOTE(review): io_error() is invoked with a single argument here,
         * while other handlers in this codebase call io_error(http, err) —
         * confirm this matches the local io_error() signature. */
        io_error(http);
    }
}
/**
 * Invoked when get a NOT_MY_VBUCKET response. If the response contains a JSON
 * payload then we refresh the configuration with it.
 *
 * This function returns 1 if the operation was successfully rescheduled;
 * otherwise it returns 0. If it returns 0 then we give the error back to the
 * user.
 */
static int handle_nmv(mc_SERVER *oldsrv, packet_info *resinfo,
    mc_PACKET *oldpkt)
{
    mc_PACKET *newpkt;
    protocol_binary_request_header hdr;
    lcb_error_t err = LCB_ERROR;
    lcb_t instance = oldsrv->instance;
    lcb_U16 vbid;
    int tmpix;
    clconfig_provider *cccp = lcb_confmon_get_provider(instance->confmon,
        LCB_CLCONFIG_CCCP);

    mcreq_read_hdr(oldpkt, &hdr);
    vbid = ntohs(hdr.request.vbucket);
    lcb_log(LOGARGS(oldsrv, WARN),
        LOGFMT "NOT_MY_VBUCKET. Packet=%p (S=%u). VBID=%u",
        LOGID(oldsrv), (void*)oldpkt, oldpkt->opaque, vbid);

    /* Notify of new map: heuristically remap the vbucket away from the
     * server that rejected it */
    tmpix = lcb_vbguess_remap(LCBT_VBCONFIG(instance), instance->vbguess,
        vbid, oldsrv->pipeline.index);
    if (tmpix > -1 && tmpix != oldsrv->pipeline.index) {
        lcb_log(LOGARGS(oldsrv, TRACE),
            LOGFMT "Heuristically set IX=%d as master for VBID=%u",
            LOGID(oldsrv), tmpix, vbid);
    }

    /* If the response body carries an inline config and CCCP is enabled,
     * apply the new config directly */
    if (PACKET_NBODY(resinfo) && cccp->enabled) {
        lcb_string s;
        lcb_string_init(&s);
        lcb_string_append(&s, PACKET_VALUE(resinfo), PACKET_NVALUE(resinfo));
        err = lcb_cccp_update(cccp, mcserver_get_host(oldsrv), &s);
        lcb_string_release(&s);
    }

    /* No usable inline config: request a full refresh instead */
    if (err != LCB_SUCCESS) {
        lcb_bootstrap_common(instance, LCB_BS_REFRESH_ALWAYS);
    }

    if (!lcb_should_retry(oldsrv->settings, oldpkt, LCB_NOT_MY_VBUCKET)) {
        return 0;
    }

    /** Reschedule the packet again .. */
    newpkt = mcreq_renew_packet(oldpkt);
    newpkt->flags &= ~MCREQ_STATE_FLAGS;
    lcb_retryq_add(instance->retryq, (mc_EXPACKET*)newpkt,
        LCB_NOT_MY_VBUCKET);
    return 1;
}
/**
 * Record an error on the session request, keeping only the *first* error:
 * later calls are logged but do not overwrite an already-set failure code.
 *
 * @param sreq the session (SASL negotiation) request
 * @param err the error code to record
 * @param msg human-readable context for the log line
 */
static void set_error_ex(mc_pSESSREQ sreq, lcb_error_t err, const char *msg)
{
    lcb_log(LOGARGS(sreq, ERR), SESSREQ_LOGFMT "Error: 0x%x, %s",
        SESSREQ_LOGID(sreq), err, msg);
    if (sreq->err == LCB_SUCCESS) {
        sreq->err = err;
    }
}
/**
 * Close an I/O context, optionally offering the underlying socket for
 * reuse and deferring the final flush callback to a destructor hook.
 *
 * Teardown order matters here: events and timers are destroyed first, the
 * reuse callback (if any) runs while the socket is still alive, and the
 * socket is only shut down if the callback did not take a new reference.
 * The context itself is freed immediately only when no operations are
 * pending and we are not inside a handler.
 *
 * @param ctx the context to close
 * @param cb invoked with a "reusable" verdict for the socket (may be NULL)
 * @param arg cookie for @p cb
 * @param dtor deferred flush-ready replacement callback (may be NULL)
 * @param dtor_arg becomes ctx->data when @p dtor is set
 */
void lcbio_ctx_close_ex(lcbio_CTX *ctx, lcbio_CTXCLOSE_cb cb, void *arg,
    lcbio_CTXDTOR_cb dtor, void *dtor_arg)
{
    unsigned oldrc;
    ctx->state = ES_DETACHED;
    assert(ctx->sock);

    if (ctx->event) {
        deactivate_watcher(ctx);
        IOT_V0EV(CTX_IOT(ctx)).destroy(IOT_ARG(CTX_IOT(ctx)), ctx->event);
        ctx->event = NULL;
    }

    if (ctx->as_err) {
        lcbio_timer_destroy(ctx->as_err);
        ctx->as_err = NULL;
    }

    /* Snapshot the refcount so we can tell whether cb took a reference */
    oldrc = ctx->sock->refcount;
    lcb_log(LOGARGS(ctx, DEBUG),
        CTX_LOGFMT "Destroying. PND=%d,ENT=%d,SORC=%d",
        CTX_LOGID(ctx), (int)ctx->npending, (int)ctx->entered, oldrc);

    if (cb) {
        /* The socket is only reusable when it is completely quiescent */
        int reusable =
            ctx->npending == 0 && /* no pending events */
            ctx->err == LCB_SUCCESS && /* no socket errors */
            ctx->rdwant == 0 && /* no expected input */
            ctx->wwant == 0 && /* no expected output */
            (ctx->output == NULL || ctx->output->rb.nbytes == 0);
        cb(ctx->sock, reusable, arg);
    }

    /* If cb did not grab a reference, the socket is ours to shut down */
    if (oldrc == ctx->sock->refcount) {
        lcbio_shutdown(ctx->sock);
    }

    if (ctx->output) {
        ringbuffer_destruct(&ctx->output->rb);
        free(ctx->output);
        ctx->output = NULL;
    }

    ctx->fd = INVALID_SOCKET;
    ctx->sd = NULL;

    /* Re-route a future flush-ready notification to the destructor hook */
    if (dtor) {
        ctx->data = dtor_arg;
        ctx->procs.cb_flush_ready = dtor;
    } else {
        ctx->procs.cb_flush_ready = NULL;
    }

    /* Free now only if nothing is in flight; otherwise the last pending
     * operation is responsible for freeing */
    if (ctx->npending == 0 && ctx->entered == 0) {
        free_ctx(ctx);
    }
}
/**
 * Common function to handle parsing the advertised SASL mechanism list and
 * selecting (or forcing) the mechanism to authenticate with.
 *
 * On success *data/*ndata receive the initial client response from
 * cbsasl_client_start() and ctx->mech holds a copy of the chosen mechanism
 * name (ctx->nmech its length).
 *
 * @return 0 to continue authentication, 1 if no authentication needed, or
 * -1 on error.
 */
static int set_chosen_mech(mc_pSESSREQ sreq, lcb_string *mechlist,
    const char **data, unsigned int *ndata)
{
    cbsasl_error_t saslerr;
    const char *chosenmech;
    mc_pSESSINFO ctx = sreq->inner;

    lcb_assert(sreq->inner);

    /* If a mechanism was forced via settings, it must appear in the list
     * advertised by the server; then narrow the list down to just it. */
    if (ctx->settings->sasl_mech_force) {
        char *forcemech = ctx->settings->sasl_mech_force;
        if (!strstr(mechlist->base, forcemech)) {
            /** Requested mechanism not found */
            set_error_ex(sreq, LCB_SASLMECH_UNAVAILABLE, mechlist->base);
            return -1;
        }
        lcb_string_clear(mechlist);
        if (lcb_string_appendz(mechlist, forcemech)) {
            set_error_ex(sreq, LCB_CLIENT_ENOMEM, NULL);
            return -1;
        }
    }

    saslerr = cbsasl_client_start(ctx->sasl, mechlist->base,
        NULL, data, ndata, &chosenmech);

    switch (saslerr) {
    case SASL_OK:
        /* Keep our own copy of the mechanism name */
        ctx->nmech = strlen(chosenmech);
        if (! (ctx->mech = strdup(chosenmech)) ) {
            set_error_ex(sreq, LCB_CLIENT_ENOMEM, NULL);
            return -1;
        }
        return 0;

    case SASL_NOMECH:
        lcb_log(LOGARGS(sreq, INFO),
            SESSREQ_LOGFMT "Server does not support SASL (no mechanisms supported)",
            SESSREQ_LOGID(sreq));
        /* Fix: removed an unreachable "break" that followed this return */
        return 1;

    default:
        lcb_log(LOGARGS(sreq, INFO),
            SESSREQ_LOGFMT "cbsasl_client_start returned %d",
            SESSREQ_LOGID(sreq), saslerr);
        set_error_ex(sreq, LCB_EINTERNAL, "Couldn't start SASL client");
        return -1;
    }
}
/**
 * Drain OpenSSL's error queue and log each entry at ERROR severity.
 * Unlike iotssl_log_errors() this variant has no per-connection context,
 * so it logs against the global settings object.
 */
static void log_global_errors(lcb_settings *settings)
{
    unsigned long curerr;

    /* ERR_get_error() pops entries until the queue is empty (returns 0) */
    while ((curerr = ERR_get_error())) {
        char errbuf[4096];
        ERR_error_string_n(curerr, errbuf, sizeof errbuf);
        /* Fix: curerr is unsigned long, so the correct specifier is %lu
         * (the original used %ld, a signed/unsigned mismatch). */
        lcb_log(settings, "SSL", LCB_LOG_ERROR, __FILE__, __LINE__,
                "SSL Error: %lu, %s", curerr, errbuf);
    }
}
static void async_update(void *arg) { bc_MCRAW *mcr = arg; if (!mcr->config) { lcb_log(LOGARGS(mcr, WARN), "No current config set. Not setting configuration"); return; } lcb_confmon_provider_success(&mcr->base, mcr->config); }
/**
 * Invoked when get a NOT_MY_VBUCKET response. If the response contains a JSON
 * payload then we refresh the configuration with it.
 *
 * This function returns 1 if the operation was successfully rescheduled;
 * otherwise it returns 0. If it returns 0 then we give the error back to the
 * user.
 */
static int handle_nmv(mc_SERVER *oldsrv, packet_info *resinfo,
    mc_PACKET *oldpkt)
{
    mc_PACKET *newpkt;
    protocol_binary_request_header hdr;
    lcb_error_t err = LCB_ERROR;
    lcb_t instance = oldsrv->instance;
    lcb_U16 vbid;
    clconfig_provider *cccp = lcb_confmon_get_provider(instance->confmon,
        LCB_CLCONFIG_CCCP);

    mcreq_read_hdr(oldpkt, &hdr);
    vbid = ntohs(hdr.request.vbucket);
    lcb_log(LOGARGS(oldsrv, WARN),
        LOGFMT "NOT_MY_VBUCKET. Packet=%p (S=%u). VBID=%u",
        LOGID(oldsrv), (void*)oldpkt, oldpkt->opaque, vbid);

    /* Notify of new map: heuristically remap the vbucket away from the
     * rejecting server */
    lcb_vbguess_remap(instance, vbid, oldsrv->pipeline.index);

    /* If the response body carries an inline config and CCCP is enabled,
     * apply the new config directly */
    if (PACKET_NBODY(resinfo) && cccp->enabled) {
        lcb_string s;
        lcb_string_init(&s);
        lcb_string_append(&s, PACKET_VALUE(resinfo), PACKET_NVALUE(resinfo));
        err = lcb_cccp_update(cccp, mcserver_get_host(oldsrv), &s);
        lcb_string_release(&s);
    }

    /* No usable inline config: fall back to a monitor-driven refresh */
    if (err != LCB_SUCCESS) {
        int bs_options;
        if (instance->cur_configinfo->origin == LCB_CLCONFIG_CCCP) {
            /**
             * XXX: Not enough to see if cccp was enabled, since cccp might
             * be requested by a user, but would still not actually be active
             * for clusters < 2.5 If our current config is from CCCP
             * then we can be fairly certain that CCCP is indeed working.
             *
             * For this reason, we don't use if (cccp->enabled) {...}
             */
            bs_options = LCB_BS_REFRESH_THROTTLE;
        } else {
            bs_options = LCB_BS_REFRESH_ALWAYS;
        }
        lcb_bootstrap_common(instance, bs_options);
    }

    if (!lcb_should_retry(oldsrv->settings, oldpkt, LCB_NOT_MY_VBUCKET)) {
        return 0;
    }

    /** Reschedule the packet again .. */
    newpkt = mcreq_renew_packet(oldpkt);
    newpkt->flags &= ~MCREQ_STATE_FLAGS;
    lcb_retryq_nmvadd(instance->retryq, (mc_EXPACKET*)newpkt);
    return 1;
}
/****************************************************************************** ****************************************************************************** ** Higher Level SSL_CTX Wrappers ** ****************************************************************************** ******************************************************************************/ static void log_callback(const SSL *ssl, int where, int ret) { const char *retstr = ""; lcbio_SOCKET *sock = SSL_get_app_data(ssl); if (where & SSL_CB_ALERT) { retstr = SSL_alert_type_string(ret); } lcb_log(LOGARGS(ssl, LCB_LOG_TRACE), "sock=%p: ST(0x%x). %s. R(0x%x)%s", (void*)sock, where, SSL_state_string_long(ssl), ret, retstr); }
/**
 * Advance the CCCP provider to the next node in its list and issue a
 * GET_CLUSTER_CONFIG request to it.
 *
 * If the node corresponds to an already-connected data server, the command
 * is piggybacked on that server's existing connection; otherwise a new
 * connection is requested from the memcached socket pool. When the node
 * list is exhausted (and rollover is not allowed) the provider is failed.
 *
 * @param cccp the provider
 * @param err the error to report if no further nodes remain
 * @param can_rollover whether hostlist_shift_next may wrap to the start
 * @return LCB_SUCCESS if a request was scheduled, otherwise @p err
 */
static lcb_error_t schedule_next_request(cccp_provider *cccp,
    lcb_error_t err, int can_rollover)
{
    lcb_server_t *server = NULL;
    lcb_size_t ii;

    lcb_host_t *next_host = hostlist_shift_next(cccp->nodes, can_rollover);
    if (!next_host) {
        /* All nodes tried: give up and report the failure */
        lcb_timer_disarm(cccp->timer);
        lcb_confmon_provider_failed(&cccp->base, err);
        cccp->server_active = 0;
        return err;
    }

    /** See if we can find a server with an existing connection to reuse */
    for (ii = 0; ii < cccp->instance->nservers; ii++) {
        lcb_server_t *cur = cccp->instance->servers + ii;
        if (lcb_host_equals(&cur->curhost, next_host)) {
            server = cur;
            break;
        }
    }

    if (server) {
        protocol_binary_request_get_cluster_config req;
        cccp_cookie *cookie = calloc(1, sizeof(*cookie));

        lcb_log(LOGARGS(cccp, INFO),
            "Re-Issuing CCCP Command on server struct %p", server);
        cookie->parent = cccp;

        /* Build and send the GET_CLUSTER_CONFIG packet inline */
        memset(&req, 0, sizeof(req));
        req.message.header.request.magic = PROTOCOL_BINARY_REQ;
        req.message.header.request.opcode = CMD_GET_CLUSTER_CONFIG;
        req.message.header.request.opaque = ++cccp->instance->seqno;

        lcb_server_start_packet(server, cookie, &req, sizeof(req.bytes));
        lcb_server_end_packet(server);
        lcb_server_send_packets(server);
        lcb_timer_rearm(cccp->timer,
            PROVIDER_SETTING(&cccp->base, config_node_timeout));

    } else {
        /* No existing connection: request one from the socket pool; the
         * request continues in socket_connected() */
        cccp->cur_connreq = calloc(1, sizeof(*cccp->cur_connreq));
        connmgr_req_init(cccp->cur_connreq, next_host->host,
            next_host->port, socket_connected);
        cccp->cur_connreq->data = cccp;
        connmgr_get(cccp->instance->memd_sockpool, cccp->cur_connreq,
            PROVIDER_SETTING(&cccp->base, config_node_timeout));
    }

    cccp->server_active = 1;
    return LCB_SUCCESS;
}
/**
 * set_next listener callback which schedules an async call to our config
 * callback. Skips scheduling when a refresh is already queued on the timer.
 */
static void async_step_callback(clconfig_listener *listener,
    clconfig_event_t event, clconfig_info *info)
{
    struct lcb_BOOTSTRAP *bs = (struct lcb_BOOTSTRAP *)listener;

    (void)info;

    /* Only new-configuration notifications are interesting here */
    if (event != CLCONFIG_EVENT_GOT_NEW_CONFIG) {
        return;
    }

    /* Don't double-schedule: a refresh may already be pending */
    if (lcbio_timer_armed(bs->tm) &&
            lcbio_timer_get_target(bs->tm) == async_refresh) {
        lcb_log(LOGARGS(bs->parent, DEBUG), "Timer already present..");
        return;
    }

    lcb_log(LOGARGS(bs->parent, INFO), "Got async step callback..");
    lcbio_timer_set_target(bs->tm, async_refresh);
    lcbio_async_signal(bs->tm);
}
/**
 * Handle an I/O error on the CCCP connection.
 *
 * An authentication failure before any configuration was ever obtained is
 * treated as fatal for this provider (cycling nodes would not help);
 * otherwise the socket is released (discarded rather than pooled when the
 * server lacks CCCP support) and the next candidate node is tried.
 *
 * @return @p err if the provider was failed, otherwise the result of
 *         scheduling the next request
 */
static lcb_error_t mcio_error(cccp_provider *cccp, lcb_error_t err)
{
    lcb_log(LOGARGS(cccp, ERR), "Got I/O Error=0x%x", err);
    if (err == LCB_AUTH_ERROR && cccp->base.parent->config == NULL) {
        lcb_confmon_provider_failed(&cccp->base, err);
        return err;
    }
    release_socket(cccp, err == LCB_NOT_SUPPORTED);
    return schedule_next_request(cccp, err, 0);
}
/**
 * Persist the given vbucket configuration (as JSON, followed by the cache
 * magic trailer) to the provider's cache file. Does nothing when no cache
 * path is configured or the provider is in read-only mode.
 */
static void write_to_file(file_provider *provider, lcbvb_CONFIG *cfg)
{
    FILE *fp;
    char *json;

    if (provider->filename == NULL || provider->ro_mode) {
        return;
    }

    fp = fopen(provider->filename, "w");
    if (fp == NULL) {
        /* Capture errno before any other call can clobber it */
        int save_errno = errno;
        lcb_log(LOGARGS(provider, ERROR),
                LOGFMT "Couldn't open file for writing: %s",
                LOGID(provider), strerror(save_errno));
        return;
    }

    json = lcbvb_save_json(cfg);
    lcb_log(LOGARGS(provider, INFO),
            LOGFMT "Writing configuration to file", LOGID(provider));
    fprintf(fp, "%s%s", json, CONFIG_CACHE_MAGIC);
    fclose(fp);
    free(json);
}
/**
 * Timer callback which tears down the HTTP provider: closes the current
 * connection, stops the I/O timer, and resets the parser state.
 */
static void delayed_disconn(lcb_timer_t tm, lcb_t instance, const void *cookie)
{
    http_provider *http = (http_provider *)cookie;

    (void)tm;
    (void)instance;

    lcb_log(LOGARGS(http, DEBUG), "Stopping HTTP provider %p", http);

    /** closes the connection and cleans up the timer */
    close_current(http);
    lcb_timer_disarm(http->io_timer);
    reset_stream_state(http);
}
/**
 * All-purpose callback dispatcher.
 *
 * Drives the durability-set state machine: checks for deadline expiry,
 * then either polls the servers again or, on timeout, finishes every
 * still-pending item with a timeout (or the last observed) error.
 */
static void timer_callback(lcb_socket_t sock, short which, void *arg)
{
    lcb_DURSET *dset = arg;
    hrtime_t now = gethrtime();

    /* Deadline passed: force the TIMEOUT state regardless of what was
     * scheduled next */
    if (dset->ns_timeout && now > dset->ns_timeout) {
        dset->next_state = LCBDUR_STATE_TIMEOUT;
    }

    switch (dset->next_state) {
    case LCBDUR_STATE_OBSPOLL:
    case LCBDUR_STATE_INIT:
        poll_once(dset);
        break;

    case LCBDUR_STATE_TIMEOUT: {
        lcb_size_t ii;
        /* Prefer a previously observed error over the generic timeout */
        lcb_error_t err = dset->lasterr ? dset->lasterr : LCB_ETIMEDOUT;
        dset->ns_timeout = 0;
        dset->next_state = LCBDUR_STATE_IGNORE;

        lcb_log(LOGARGS(dset, WARN), "Polling durability timed out!");

        /* Hold a reference while finishing entries, since callbacks may
         * release the set */
        lcbdur_ref(dset);

        for (ii = 0; ii < DSET_COUNT(dset); ii++) {
            lcb_DURITEM *ent = DSET_ENTRIES(dset) + ii;
            if (ent->done) {
                continue;
            }
            /* Don't overwrite an error already recorded on the item */
            if (RESFLD(ent, rc) == LCB_SUCCESS) {
                RESFLD(ent, rc) = err;
            }
            lcbdur_ent_finish(ent);
        }

        lcbdur_unref(dset);
        break;
    }

    case LCBDUR_STATE_IGNORE:
        break;

    default:
        lcb_assert("unexpected state" && 0);
        break;
    }

    (void)sock;
    (void)which;
}