gboolean
stg_pol_rainx_get_param(namespace_info_t *ni, const gchar *stgpol,
		const gchar *param, gint64 *p_val)
{
	const char *val_str = NULL;
	struct storage_policy_s *sp = storage_policy_init(ni, stgpol);
	const struct data_security_s *datasec = storage_policy_get_data_security(sp);
	gboolean ret;

	if (!datasec) {
		GRID_INFO("Cannot find datasecurity values for policy [%s]", stgpol);
		ret = FALSE;
	} else {
		if (NULL == (val_str = data_security_get_param(datasec, param))) {
			GRID_INFO("Cannot get parameter '%s' from data security [%s]",
					param, data_security_get_name(datasec));
			ret = FALSE;
		} else {
			*p_val = g_ascii_strtoll(val_str, NULL, 10);
			ret = TRUE;
		}
	}

	storage_policy_clean(sp);
	return ret;
}
void
meta0_backend_migrate(struct meta0_backend_s *m0)
{
	GError *err = NULL;
	struct sqlx_sqlite3_s *handle = NULL;
	struct sqlx_sqlite3_s *oldhandle = NULL;
	struct sqlx_name_s n;

	n.base = m0->ns;
	n.type = NAME_SRVTYPE_META0;
	n.ns = m0->ns;

	err = sqlx_repository_open_and_lock(m0->repository, &n,
			SQLX_OPEN_LOCAL, &handle, NULL);
	if (err) {
		// Check whether the error is ENOENT, i.e. the DB does not exist yet
		if (strstr(err->message, strerror(ENOENT)) != NULL) {
			g_clear_error(&err);
			err = sqlx_repository_open_and_lock(m0->repository, &n,
					SQLX_OPEN_LOCAL|SQLX_OPEN_CREATE, &handle, NULL);
			if (!err) {
				// Migration (1.7 -> 1.8) from the old meta0 database
				n.base = m0->id;
				err = sqlx_repository_open_and_lock(m0->repository, &n,
						SQLX_OPEN_LOCAL, &oldhandle, NULL);
				if (!err) {
					GRID_INFO("Start migrating the meta0 database");
					err = sqlx_repository_backup_base(oldhandle, handle);
					sqlx_repository_unlock_and_close_noerror(oldhandle);
					if (err) {
						GRID_ERROR("Failed to migrate meta0 database: (%d) %s",
								err->code, err->message);
					} else {
						// Apply the schema
						gint rc;
						char *errmsg = NULL;
						rc = sqlite3_exec(handle->db, META0_SCHEMA,
								NULL, NULL, &errmsg);
						if (rc != SQLITE_OK && rc != SQLITE_DONE) {
							GRID_WARN("Failed to apply schema (%d) %s %s",
									rc, sqlite3_errmsg(handle->db), errmsg);
						}
						if (errmsg != NULL)
							sqlite3_free(errmsg);  // allocated by sqlite3_exec()
						// Bump the table versions
						GRID_INFO("Update version in database");
						sqlx_admin_inc_all_versions(handle, 2);
					}
				} else {
					// The 1.7 database doesn't exist: this is a new grid
					g_clear_error(&err);
				}
			} else {
				GRID_ERROR("Failed to create meta0 database: (%d) %s",
						err->code, err->message);
			}
		}
	}

	if (err)
		g_clear_error(&err);
	if (handle)
		sqlx_repository_unlock_and_close_noerror(handle);
}
GError*
meta0_assign_prefix_to_meta1(struct meta0_backend_s *m0, gchar *ns_name,
		gboolean nocheck)
{
	// Get the meta1 list from the conscience
	GList *working_m1list = NULL;
	GSList *unref_m1list = NULL;
	GError *error = NULL;
	GPtrArray *new_meta1ref = NULL;

	GRID_INFO("START Assign prefix");

	error = _initContext(m0);
	if (error)
		goto errorLabel;

	// Build the working list, sorted by score
	error = _init_assign(ns_name, &working_m1list, &unref_m1list);
	if (error)
		goto errorLabel;

	if (!nocheck) {
		error = _check(working_m1list);
		if (error)
			goto errorLabel;
	}

	error = _assign(working_m1list, unref_m1list);
	if (error)
		goto errorLabel;

	new_meta1ref = _updated_meta1ref();
	error = meta0_backend_assign(m0, context->array_meta1_by_prefix,
			new_meta1ref, FALSE);
	if (error) {
		GRID_ERROR("Failed to update the database: (%d) %s",
				error->code, error->message);
		goto errorLabel;
	}

	context->lastAssignTime = g_date_time_new_now_local();

errorLabel:
	_resetContext();
	if (new_meta1ref)
		meta0_utils_array_meta1ref_clean(new_meta1ref);
	if (working_m1list) {
		g_list_free(working_m1list);
		working_m1list = NULL;
	}
	if (unref_m1list) {
		g_slist_free(unref_m1list);
		unref_m1list = NULL;
	}
	GRID_INFO("END ASSIGN");
	return error;
}
static GError*
_load_meta1ref_from_base(struct sqlx_sqlite3_s *sq3, GPtrArray **result)
{
	GError *err = NULL;
	GPtrArray *array;
	sqlite3_stmt *stmt;
	int rc;
	guint count = 0;

	array = g_ptr_array_new();
	sqlite3_prepare_debug(rc, sq3->db,
			"SELECT addr,state,prefixes FROM meta1_ref", -1, &stmt, NULL);
	if (rc != SQLITE_OK && rc != SQLITE_DONE) {
		if (rc == SQLITE_ERROR) {
			GRID_DEBUG("Missing table meta1_ref in DB");
			*result = array;
			return NULL;
		}
		g_ptr_array_free(array, TRUE);
		return SQLITE_GERROR(sq3->db, rc);
	}

	for (;;) {
		rc = sqlite3_step(stmt);
		if (rc == SQLITE_ROW) {
			const unsigned char *url, *prefix_nb, *ref;
			url = sqlite3_column_text(stmt, 0);
			ref = sqlite3_column_text(stmt, 1);
			prefix_nb = sqlite3_column_text(stmt, 2);
			GRID_INFO("url %s, ref %s, prefix_nb %s", url, ref, prefix_nb);
			g_ptr_array_add(array, meta0_utils_pack_meta1ref(
					(gchar*)url, (gchar*)ref, (gchar*)prefix_nb));
			count++;
		} else if (rc == SQLITE_DONE || rc == SQLITE_OK) {
			break;
		} else if (rc == SQLITE_BUSY) {
			sleep(1);
		} else {
			err = SQLITE_GERROR(sq3->db, rc);
			break;
		}
	}
	sqlite3_finalize_debug(rc, stmt);

	if (!err) {
		*result = array;
		GRID_INFO("Reloaded %u meta1 references in %p (%u)",
				count, array, array->len);
	} else {
		// Drop the partially loaded references
		meta0_utils_array_meta1ref_clean(array);
	}
	return err;
}
static gboolean
_configure_limits(struct sqlx_service_s *ss)
{
#define CONFIGURE_LIMIT(cfg,real) do { \
	real = (cfg > 0 && cfg < real) ? cfg : (limit.rlim_cur - 20) / 3; \
} while (0)
	struct rlimit limit = {0, 0};

	if (0 != getrlimit(RLIMIT_NOFILE, &limit)) {
		GRID_ERROR("Max file descriptor unknown: getrlimit error "
				"(errno=%d) %s", errno, strerror(errno));
		return FALSE;
	}
	if (limit.rlim_cur < 64) {
		GRID_ERROR("Not enough file descriptors allowed [%lu], "
				"minimum 64 required", (unsigned long) limit.rlim_cur);
		return FALSE;
	}

	CONFIGURE_LIMIT(ss->cfg_max_passive, ss->max_passive);
	CONFIGURE_LIMIT(ss->cfg_max_active, ss->max_active);
	CONFIGURE_LIMIT(ss->cfg_max_bases, ss->max_bases);

	// Log the values actually applied, once they have been computed
	GRID_INFO("Limits set to ACTIVES[%u] PASSIVES[%u] BASES[%u]",
			ss->max_active, ss->max_passive, ss->max_bases);

	return TRUE;
#undef CONFIGURE_LIMIT
}
static gpointer
_thread_cb_events(gpointer d)
{
	struct network_server_s *srv = d;
	time_t now, last;

	metautils_ignore_signals();
	GRID_INFO("EVENTS thread starting pfd=%d", srv->epollfd);

	now = last = network_server_bogonow(srv);
	while (srv->flag_continue) {
		_manage_events(srv);
		now = network_server_bogonow(srv);
		if (now > last + 30 || now < last) {
			_server_shutdown_inactive_connections(srv);
			last = now;
		}
	}

	/* XXX the server connections are being closed in the main thread that
	 * received the exit signal. They will be removed automatically from
	 * the epoll pool. */
	while (0 < srv->cnx_clients) {
		_manage_events(srv);
		_server_shutdown_inactive_connections(srv);
	}

	return d;
}
static void
_manage_timeouts(struct gridd_client_pool_s *pool)
{
	GTimeVal now;
	struct event_client_s *ec;

	if (pool->active_count <= 0)
		return;

	g_get_current_time(&now);
	// Check expirations at most once per second
	if (pool->last_timeout_check == now.tv_sec)
		return;
	pool->last_timeout_check = now.tv_sec;

	for (int i = 0; i < pool->active_clients_size; i++) {
		if (!(ec = pool->active_clients[i]))
			continue;
		EXTRA_ASSERT(ec->client != NULL);
		EXTRA_ASSERT(i == gridd_client_fd(ec->client));
		if (gridd_client_expired(ec->client, &now)) {
			GRID_INFO("EXPIRED Client fd=%d [%s]", i,
					gridd_client_url(ec->client));
			_pool_unmonitor(pool, i);
			event_client_free(ec);
		}
	}
}
static void
_task_expire_services_down(gpointer p UNUSED)
{
	guint count = _lru_tree_expire(&srv_rwlock, srv_down, ttl_down_services);
	if (count)
		GRID_INFO("Re-enabled %u services", count);
}
static void
test_content_dedup(gconstpointer test_data)
{
	guint num_duplicates = *(guint*)test_data;

	void change_chunk_hash(GSList *beans, guint start) {
		guint8 counter = start;
		for (GSList *cursor = beans; cursor; cursor = cursor->next) {
			if (DESCR(cursor->data) == &descr_struct_CHUNKS) {
				GByteArray *hash = CHUNKS_get_hash(cursor->data);
				hash->data[0] = counter;
				CHUNKS_set_hash(cursor->data, hash); // no-op because same pointer
				counter++;
			} else if (DESCR(cursor->data) == &descr_struct_CONTENTS_HEADERS) {
				GByteArray *hash = g_byte_array_sized_new(16);
				GRID_INFO("---- forging content hash ----");
				g_byte_array_set_size(hash, 16); // expose the 16 bytes, not just reserve them
				for (guint8 i = 0; i < 16; i++) {
					hash->data[i] = i + 1;
				}
				CONTENTS_HEADERS_set_hash(cursor->data, hash);
			}
		}
	}
	/* ... remainder of the test body not included in this excerpt ... */
static GError *
_m1_action(struct req_args_s *args, gchar **m1v,
		GError * (*hook) (const gchar *m1))
{
	for (gchar **pm1 = m1v; *pm1; ++pm1) {
		struct meta1_service_url_s *m1 = meta1_unpack_url(*pm1);
		if (!m1)
			continue;

		if (0 != g_ascii_strcasecmp(m1->srvtype, "meta1")) {
			meta1_service_url_clean(m1);
			continue;
		}

		struct addr_info_s m1a;
		if (!grid_string_to_addrinfo(m1->host, NULL, &m1a)) {
			GRID_INFO("Invalid META1 [%s] for [%s]",
					m1->host, hc_url_get(args->url, HCURL_WHOLE));
			meta1_service_url_clean(m1);
			continue;
		}

		GError *err = hook(m1->host);
		meta1_service_url_clean(m1);
		if (!err)
			return NULL;
		else if (err->code == CODE_REDIRECT)
			g_clear_error(&err);
		else {
			g_prefix_error(&err, "META1 error: ");
			return err;
		}
	}
	return NEWERROR(CODE_UNAVAILABLE, "No meta1 answered");
}
static gboolean
_zmq2agent_send_event(time_t now, struct _zmq2agent_ctx_s *ctx,
		struct event_s *evt, const char *dbg)
{
	int rc;

	if (ctx->last_error == now) {
		GRID_DEBUG("ZMQ2AGENT event delayed, stream paused");
		return FALSE;
	}

	evt->last_sent = now;
retry:
	// Multipart send: empty delimiter, then the header, then the payload
	rc = zmq_send(ctx->zagent, "", 0, ZMQ_SNDMORE|ZMQ_DONTWAIT);
	if (rc == 0) {
		rc = zmq_send(ctx->zagent, evt, HEADER_SIZE, ZMQ_SNDMORE|ZMQ_DONTWAIT);
		if (rc == HEADER_SIZE)
			rc = zmq_send(ctx->zagent, evt->message, evt->size, ZMQ_DONTWAIT);
	}

	if (rc < 0) {
		if (EINTR == (rc = zmq_errno()))
			goto retry;
		ctx->last_error = evt->last_sent;
		GRID_INFO("EVT:ERR %s (%d) %s", dbg, rc, zmq_strerror(rc));
		return FALSE;
	} else {
		++ctx->q->counter_sent;
		ctx->last_error = 0;
		GRID_DEBUG("EVT:SNT %s", dbg);
		return TRUE;
	}
}
static void
_task_expire_services_known(gpointer p UNUSED)
{
	guint count = _lru_tree_expire(&srv_rwlock, srv_known, ttl_known_services);
	if (count)
		GRID_INFO("Forgot %u services", count);
}
static gboolean
_configure_events_queue(struct sqlx_service_s *ss)
{
	if (ss->flag_no_event) {
		GRID_DEBUG("Events queue disabled, the service disabled it");
		return TRUE;
	}

	gchar *url = gridcluster_get_eventagent(SRV.ns_name);
	STRING_STACKIFY(url);
	if (!url) {
		GRID_DEBUG("Events queue disabled, no URL configured");
		return TRUE;
	}

	GError *err = oio_events_queue_factory__create(url, &ss->events_queue);
	if (!ss->events_queue) {
		GRID_WARN("Events queue creation failure: (%d) %s",
				err->code, err->message);
		g_clear_error(&err);
		return FALSE;
	}

	GRID_INFO("Event queue ready, connected to [%s]", url);
	return TRUE;
}
static gs_error_t *
hc_upload_content(gs_grid_storage_t *hc, struct hc_url_s *url,
		const char *local_path, const char *stgpol, const char *sys_metadata,
		int ac, gboolean is_append)
{
	int in = -1;
	struct stat64 s;
	gs_container_t *c = NULL;
	gs_error_t *e = NULL;

	/* Check and open the local file */
	if (-1 == stat64(local_path, &s)) {
		e = g_malloc0(sizeof(gs_error_t));
		e->code = errno;
		e->msg = g_strdup_printf("Cannot stat the local file (%s)\n",
				strerror(errno));
		return e;
	}
	GRID_DEBUG("Local path %s found\n", local_path);

	if (-1 == (in = open(local_path, O_RDONLY|O_LARGEFILE))) {
		e = g_malloc0(sizeof(gs_error_t));
		e->code = errno;
		e->msg = g_strdup_printf("Cannot open the local file (%s)\n",
				strerror(errno));
		goto end_put;
	}
	GRID_DEBUG("Local path %s found and opened\n", local_path);

	if (!(c = gs_get_storage_container(hc, hc_url_get(url, HCURL_REFERENCE),
			NULL, ac, &e))) {
		g_printerr("Failed to resolve and/or create meta2 entry "
				"for reference %s\n", hc_url_get(url, HCURL_REFERENCE));
		goto end_put;
	}

	/* Upload the content */
	if (is_append) {
		if (!gs_append_content(c, hc_url_get(url, HCURL_PATH), s.st_size,
				_feed_from_fd, &in, &e))
			goto end_put;
	} else {
		if (!gs_upload(c, hc_url_get(url, HCURL_PATH), s.st_size,
				_feed_from_fd, &in, NULL, sys_metadata, stgpol, &e))
			goto end_put;
	}

	GRID_INFO("Uploaded a new version of content [%s] in container [%s]\n\n",
			hc_url_get(url, HCURL_PATH), hc_url_get(url, HCURL_REFERENCE));
	GRID_DEBUG("Content successfully uploaded!\n");

end_put:
	if (in >= 0)  // 0 is a valid file descriptor
		metautils_pclose(&in);
	if (NULL != c) {
		gs_container_free(c);
		c = NULL;
	}
	return e;
}
static gboolean
_configure_limits(struct sqlx_service_s *ss)
{
	guint newval = 0, max = 0, total = 0, available = 0, min = 0;
	struct rlimit limit = {0, 0};

#define CONFIGURE_LIMIT(cfg,real) do { \
	max = MIN(max, available); \
	newval = (cfg > 0 && cfg < max) ? cfg : max; \
	newval = newval > min ? newval : min; \
	real = newval; \
	available -= real; \
} while (0)

	if (0 != getrlimit(RLIMIT_NOFILE, &limit)) {
		GRID_ERROR("Max file descriptor unknown: getrlimit error "
				"(errno=%d) %s", errno, strerror(errno));
		return FALSE;
	}
	if (limit.rlim_cur < 64) {
		GRID_ERROR("Not enough file descriptors allowed [%lu], "
				"minimum 64 required", (unsigned long) limit.rlim_cur);
		return FALSE;
	}

	// We keep 20 FDs for unexpected cases (sqlite sometimes uses
	// temporary files, even when we ask for memory journals).
	total = (limit.rlim_cur - 20);
	// If the user sets outstanding values for the first 2 parameters,
	// there is still 2% available for the 3rd.
	max = total * 49 / 100;
	// This is totally arbitrary.
	min = total / 100;
	available = total;

	// max_bases cannot be changed at runtime, so we set it first and
	// clamp the other limits accordingly.
	CONFIGURE_LIMIT(
			(ss->cfg_max_bases > 0 ? ss->cfg_max_bases : SQLX_MAX_BASES_PERCENT(total)),
			ss->max_bases);
	// max_passive > max_active permits answering to clients while
	// managing internal procedures (elections, replications...).
	CONFIGURE_LIMIT(
			(ss->cfg_max_passive > 0 ? ss->cfg_max_passive : SQLX_MAX_PASSIVE_PERCENT(total)),
			ss->max_passive);
	CONFIGURE_LIMIT(
			(ss->cfg_max_active > 0 ? ss->cfg_max_active : SQLX_MAX_ACTIVE_PERCENT(total)),
			ss->max_active);

	GRID_INFO("Limits set to ACTIVES[%u] PASSIVES[%u] BASES[%u] "
			"(%u/%u available file descriptors)",
			ss->max_active, ss->max_passive, ss->max_bases,
			ss->max_active + ss->max_passive + ss->max_bases,
			(guint)limit.rlim_cur);

	return TRUE;
#undef CONFIGURE_LIMIT
}
static void*
thread_timeout(void)
{
	time_t beginning_time_stamp;
	time_t current_time_stamp;

	time(&beginning_time_stamp);
	time(&current_time_stamp);

	if (MAX_ACTION_TIMEOUT > difftime(current_time_stamp, beginning_time_stamp)) {
		while (!g_stop_thread && g_pending_command) {
			time(&current_time_stamp);
			if (MAX_ACTION_TIMEOUT < difftime(current_time_stamp, beginning_time_stamp))
				break;
			g_usleep(100000);  // 100 ms; sleep() only takes whole seconds
		}
	}

	if (g_stop_thread)
		GRID_INFO("Stop forced!");
	else if (g_pending_command == TRUE)
		GRID_ERROR("No response from last sent command: timeout");

	g_main_loop_quit(g_main_loop);
	return NULL;
}
static void
_task_expire_local(gpointer p UNUSED)
{
	guint count = _lru_tree_expire(&reg_rwlock, srv_registered,
			ttl_expire_local_services);
	if (count)
		GRID_INFO("Expired %u local services", count);
}
static void
_zmq2agent_worker(struct _zmq2agent_ctx_s *ctx)
{
	/* XXX(jfs): a dedicated PRNG avoids locking the GLib's global PRNG on
	 * each call (such global locks are present in the GLib), and seeding it
	 * from the GLib's PRNG avoids syscalls to the special file /dev/urandom */
	GRand *r = g_rand_new_with_seed(g_random_int());

	gint64 last_debug = oio_ext_monotonic_time();
	zmq_pollitem_t pi[2] = {
		{ctx->zpull, -1, ZMQ_POLLIN, 0},
		{ctx->zagent, -1, ZMQ_POLLIN, 0},
	};

	for (gboolean run = TRUE; run;) {
		int rc = zmq_poll(pi, 2, 1000);
		if (rc < 0) {
			int err = zmq_errno();
			if (err != ETERM && err != EINTR)
				GRID_WARN("ZMQ poll error: (%d) %s", err, zmq_strerror(err));
			if (err != EINTR)
				break;
		}
		if (pi[1].revents)
			_zmq2agent_receive_acks(ctx);
		_retry_events(ctx);
		if (pi[0].revents)
			run = _zmq2agent_receive_events(r, ctx);

		/* Periodically write stats in the log */
		gint64 now = oio_ext_monotonic_time();
		if ((now - last_debug) > 2 * G_TIME_SPAN_MINUTE) {
			GRID_INFO("ZMQ2AGENT recv=%"G_GINT64_FORMAT" sent=%"G_GINT64_FORMAT
					" ack=%"G_GINT64_FORMAT"+%"G_GINT64_FORMAT" queue=%u",
					ctx->q->counter_received, ctx->q->counter_sent,
					ctx->q->counter_ack, ctx->q->counter_ack_notfound,
					ctx->q->gauge_pending);
			last_debug = now;
		}
	}

	g_rand_free(r);
	GRID_INFO("Thread stopping [NOTIFY-ZMQ2AGENT]");
}
static GError*
_load_from_base(struct sqlx_sqlite3_s *sq3, GPtrArray **result)
{
	GError *err = NULL;
	GPtrArray *array;
	sqlite3_stmt *stmt;
	int rc;
	guint count = 0;

	sqlite3_prepare_debug(rc, sq3->db,
			"SELECT prefix,addr,ROWID FROM meta1", -1, &stmt, NULL);
	if (rc != SQLITE_OK && rc != SQLITE_DONE)
		return SQLITE_GERROR(sq3->db, rc);

	array = meta0_utils_array_create();
	for (;;) {
		rc = sqlite3_step(stmt);
		if (rc == SQLITE_ROW) {
			gint64 rowid;
			const guint8 *prefix, *url;
			gsize prefix_len;

			prefix_len = sqlite3_column_bytes(stmt, 0);
			prefix = sqlite3_column_blob(stmt, 0);
			url = sqlite3_column_text(stmt, 1);
			rowid = sqlite3_column_int64(stmt, 2);

			if (prefix_len != 2) {
				GRID_WARN("Invalid prefix for URL [%s] ROWID %"G_GINT64_FORMAT,
						url, rowid);
			} else {
				meta0_utils_check_url_from_base((gchar**)&url);
				meta0_utils_array_add(array, prefix, (gchar*)url);
				count++;
			}
		} else if (rc == SQLITE_DONE || rc == SQLITE_OK) {
			break;
		} else if (rc == SQLITE_BUSY) {
			sleep(1);
		} else {
			err = SQLITE_GERROR(sq3->db, rc);
			break;
		}
	}
	sqlite3_finalize_debug(rc, stmt);

	if (!err) {
		*result = array;
		GRID_INFO("Reloaded %u prefixes in %p (%u)", count, array, array->len);
	}
	return err;
}
static void
m2b_destroy(struct sqlx_sqlite3_s *sq3)
{
	if (sq3) {
		GRID_INFO("Closing and destroying [%s][%s]",
				sq3->name.base, sq3->name.type);
		sq3->deleted = TRUE;
		sqlx_repository_unlock_and_close_noerror(sq3);
	}
}
static void
_q_set_max_pending(struct oio_events_queue_s *self, guint v)
{
	struct _queue_BEANSTALKD_s *q = (struct _queue_BEANSTALKD_s *)self;
	EXTRA_ASSERT(q != NULL && q->vtable == &vtable_BEANSTALKD);
	if (q->max_events_in_queue != v) {
		GRID_INFO("max events in queue set to [%u]", v);
		q->max_events_in_queue = v;
	}
}
static void
_q_set_buffering(struct oio_events_queue_s *self, gint64 v)
{
	struct _queue_BEANSTALKD_s *q = (struct _queue_BEANSTALKD_s *)self;
	if (q->buffer.delay != v) {
		GRID_INFO("events buffering delay set to %"G_GINT64_FORMAT"s",
				v / G_TIME_SPAN_SECOND);
		oio_events_queue_buffer_set_delay(&(q->buffer), v);
	}
}
// TODO: factorize with the similar function in sqliterepo/synchro.h
static void
zk_main_watch(zhandle_t *zh, int type, int state, const char *path,
		void *watcherCtx)
{
	metautils_ignore_signals();

	GRID_DEBUG("%s(%p,%d,%d,%s,%p)", __FUNCTION__,
			zh, type, state, path, watcherCtx);

	struct zk_manager_s *manager = watcherCtx;
	const char *zkurl = manager->zk_url;

	if (type != ZOO_SESSION_EVENT)
		return;

	if (state == ZOO_EXPIRED_SESSION_STATE) {
		GRID_WARN("Zookeeper: (re)connecting to [%s]", zkurl);
		if (manager->zh)
			zookeeper_close(manager->zh);

		/* XXX(jfs): forget the previous ID and reconnect */
		manager->zh = zookeeper_init(manager->zk_url, zk_main_watch,
				SQLX_SYNC_DEFAULT_ZK_TIMEOUT, NULL, manager, 0);
		if (!manager->zh) {
			GRID_ERROR("ZooKeeper init failure: (%d) %s",
					errno, strerror(errno));
			abort();
		}
	} else if (state == ZOO_AUTH_FAILED_STATE) {
		GRID_WARN("Zookeeper: auth problem to [%s]", zkurl);
	} else if (state == ZOO_ASSOCIATING_STATE) {
		GRID_DEBUG("Zookeeper: associating to [%s]", zkurl);
	} else if (state == ZOO_CONNECTED_STATE) {
		GRID_INFO("Zookeeper: connected to [%s]", zkurl);
	} else {
		GRID_INFO("Zookeeper: unmanaged event [%s]", zkurl);
	}
}
struct network_server_s *
network_server_init(void)
{
	int wakeup[2];
	guint count, maxfd;
	struct network_server_s *result;

	wakeup[0] = wakeup[1] = -1;
	if (0 > pipe(wakeup)) {
		GRID_ERROR("PIPE creation failure: (%d) %s", errno, strerror(errno));
		return NULL;
	}
	shutdown(wakeup[0], SHUT_WR);
	shutdown(wakeup[1], SHUT_RD);
	fcntl(wakeup[0], F_SETFL, O_NONBLOCK|fcntl(wakeup[0], F_GETFL));

	count = _server_count_procs();
	maxfd = _server_get_maxfd();

	result = g_malloc0(sizeof(struct network_server_s));
	result->flag_continue = ~0;
	result->stats = grid_stats_holder_init();
	clock_gettime(CLOCK_MONOTONIC_COARSE, &result->now);
	result->queue_events = g_async_queue_new();
	result->queue_monitor = g_async_queue_new();
	result->endpointv = g_malloc0(sizeof(struct endpoint_s*));
	g_mutex_init(&result->lock_threads);
	result->workers_max_idle_delay = SERVER_DEFAULT_MAX_IDLEDELAY;
	result->workers_minimum = count;
	result->workers_minimum_spare = count;
	result->workers_maximum = SERVER_DEFAULT_MAX_WORKERS;
	result->cnx_max_sys = maxfd;
	result->cnx_max = (result->cnx_max_sys * 99) / 100;
	result->cnx_backlog = 50;
	result->wakeup[0] = wakeup[0];
	result->wakeup[1] = wakeup[1];
	result->epollfd = epoll_create(4096);

	// XXX JFS : #slots as a power of 2 ... for efficient modulos
	result->workers_active_1 = grid_single_rrd_create(result->now.tv_sec, 32);
	result->workers_active_60 = grid_single_rrd_create(result->now.tv_sec / 60, 64);

	result->atexit_max_open_never_input = 3;
	result->atexit_max_idle = 2;
	result->atexit_max_open_persist = 10;

	GRID_INFO("SERVER ready with epollfd[%d] pipe[%d,%d]",
			result->epollfd, result->wakeup[0], result->wakeup[1]);

	return result;
}
void
network_server_set_max_workers(struct network_server_s *srv, guint max)
{
	EXTRA_ASSERT(srv != NULL);
	guint emax = CLAMP(max, 1, G_MAXUINT32);
	if (emax != max)
		GRID_WARN("Max workers [%u] clamped to [%u]", max, emax);
	if (srv->workers_maximum != emax) {
		GRID_INFO("Max workers [%u] changed to [%u]",
				srv->workers_maximum, emax);
		srv->workers_maximum = emax;
	}
}
void
network_server_set_maxcnx(struct network_server_s *srv, guint max)
{
	EXTRA_ASSERT(srv != NULL);
	guint emax = CLAMP(max, 2, srv->cnx_max_sys);
	if (emax != max)
		GRID_WARN("MAXCNX [%u] clamped to [%u]", max, emax);
	if (srv->cnx_max != emax) {
		GRID_INFO("MAXCNX [%u] changed to [%u]", srv->cnx_max, emax);
		srv->cnx_max = emax;
	}
}
void
network_server_set_cnx_backlog(struct network_server_s *srv, guint cnx_bl)
{
	EXTRA_ASSERT(srv != NULL);
	// Avoid an unsigned underflow when workers_maximum exceeds cnx_max
	guint max_bl = (srv->cnx_max > srv->workers_maximum)
			? srv->cnx_max - srv->workers_maximum : 0;
	guint bl = MIN(cnx_bl, max_bl);
	if (bl != cnx_bl)
		GRID_WARN("CNX BACKLOG clamped to [%u]", bl);
	if (bl != srv->cnx_backlog) {
		GRID_INFO("CNX BACKLOG changed to [%u]", bl);
		srv->cnx_backlog = bl;
	}
}
gs_error_t *
hc_create_container(gs_grid_storage_t *hc, struct hc_url_s *url,
		const char *stgpol, const char *versioning)
{
	GError *err = NULL;
	gs_error_t *e = NULL;
	gs_container_t *c = NULL;
	struct m2v2_create_params_s params = {stgpol, versioning, FALSE};

	// First pass without autocreation, to detect an already existing container
	c = gs_get_storage_container2(hc, hc_url_get(url, HCURL_REFERENCE),
			&params, 0, &e);
	if (c != NULL) {
		gchar m2[STRLEN_ADDRINFO] = {0};
		addr_info_to_string(&(c->meta2_addr), m2, STRLEN_ADDRINFO);
		err = m2v2_remote_execute_HAS(m2, NULL, url);
		if (err == NULL) {
			e = gs_error_new(CODE_CONTAINER_EXISTS,
					"Failed to create container [%s]: "
					"container already exists in namespace [%s]\n",
					hc_url_get(url, HCURL_REFERENCE),
					hc_url_get(url, HCURL_NS));
			goto end_label;
		} else if (err->code != CODE_CONTAINER_NOTFOUND) {
			GSERRORCAUSE(&e, err,
					"Failed to check container existence in meta2: ");
			goto end_label;
		} else {
			GRID_WARN("Container exists in meta1 but not in meta2");
		}
	}

	if (c) {
		gs_container_free(c);
		c = NULL;
	}
	gs_error_free(e);
	e = NULL;

	// Second pass with autocreation enabled
	c = gs_get_storage_container2(hc, hc_url_get(url, HCURL_REFERENCE),
			&params, 1, &e);
	if (c)
		GRID_INFO("Container [%s] created in namespace [%s].\n\n",
				hc_url_get(url, HCURL_REFERENCE), hc_url_get(url, HCURL_NS));

end_label:
	if (c)
		gs_container_free(c);
	g_clear_error(&err);
	return e;
}
static void
_defer(struct gridd_client_pool_s *pool, struct event_client_s *ev)
{
	EXTRA_ASSERT(pool != NULL);
	EXTRA_ASSERT(pool->vtable == &VTABLE);
	EXTRA_ASSERT(ev != NULL);
	EXTRA_ASSERT(pool->pending_clients != NULL);
	EXTRA_ASSERT(pool->fd_out >= 0);

	if (pool->closed) {
		GRID_INFO("Request dropped");
		event_client_free(ev);
	} else {
		guint8 c = 0;
		g_async_queue_push(pool->pending_clients, ev);
		(void) metautils_syscall_write(pool->fd_out, &c, 1);
	}
}
static gpointer
_gq2zmq_worker(struct _gq2zmq_ctx_s *ctx)
{
	while (ctx->running(_gq2zmq_has_pending(ctx))) {
		gchar *tmp = (gchar*) g_async_queue_timeout_pop(ctx->queue,
				G_TIME_SPAN_SECOND);
		if (tmp && !_forward_event(ctx->zpush, tmp))
			break;
	}

	for (;;) { /* flush what remains in the queue */
		gchar *tmp = g_async_queue_try_pop(ctx->queue);
		if (!tmp || !_forward_event(ctx->zpush, tmp))
			break;
	}

	zmq_send(ctx->zpush, "EOF", 0, 0);  // zero-length frame: the "EOF" bytes are not actually transmitted
	GRID_INFO("Thread stopping [NOTIFY-GQ2ZMQ]");
	return ctx;
}