static gboolean
_configure_limits(struct sqlx_service_s *ss)
{
	guint newval = 0, max = 0, total = 0, available = 0, min = 0;
	struct rlimit limit = {0, 0};

#define CONFIGURE_LIMIT(cfg,real) do { \
	max = MIN(max, available); \
	newval = (cfg > 0 && cfg < max) ? cfg : max; \
	newval = newval > min ? newval : min; \
	real = newval; \
	available -= real; \
} while (0)

	if (0 != getrlimit(RLIMIT_NOFILE, &limit)) {
		GRID_ERROR("Max file descriptor unknown: getrlimit error "
				"(errno=%d) %s", errno, strerror(errno));
		return FALSE;
	}
	if (limit.rlim_cur < 64) {
		GRID_ERROR("Not enough file descriptors allowed [%lu], "
				"minimum 64 required", (unsigned long) limit.rlim_cur);
		return FALSE;
	}

	// We keep 20 FDs for unexpected cases (sqlite sometimes uses
	// temporary files, even when we ask for memory journals).
	total = (limit.rlim_cur - 20);
	// If the user sets outstanding values for the first 2 parameters,
	// there is still 2% available for the 3rd.
	max = total * 49 / 100;
	// This is totally arbitrary.
	min = total / 100;
	available = total;

	// max_bases cannot be changed at runtime, so we set it first and
	// clamp the other limits accordingly.
	CONFIGURE_LIMIT(
			(ss->cfg_max_bases > 0 ? ss->cfg_max_bases
				: SQLX_MAX_BASES_PERCENT(total)),
			ss->max_bases);
	// max_passive > max_active permits answering to clients while
	// managing internal procedures (elections, replications...).
	CONFIGURE_LIMIT(
			(ss->cfg_max_passive > 0 ? ss->cfg_max_passive
				: SQLX_MAX_PASSIVE_PERCENT(total)),
			ss->max_passive);
	CONFIGURE_LIMIT(
			(ss->cfg_max_active > 0 ? ss->cfg_max_active
				: SQLX_MAX_ACTIVE_PERCENT(total)),
			ss->max_active);

	GRID_INFO("Limits set to ACTIVES[%u] PASSIVES[%u] BASES[%u] "
			"(%u/%u available file descriptors)",
			ss->max_active, ss->max_passive, ss->max_bases,
			ss->max_active + ss->max_passive + ss->max_bases,
			(guint)limit.rlim_cur);
	return TRUE;
#undef CONFIGURE_LIMIT
}
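/* Worked example (illustrative figures only, not part of the original code):
 * if getrlimit() reports limit.rlim_cur = 1024, then total = 1004,
 * max = 1004 * 49 / 100 = 491 and min = 1004 / 100 = 10. Each CONFIGURE_LIMIT()
 * invocation clamps its value into [min, MIN(max, available)] and subtracts the
 * result from 'available', so the sum of max_bases, max_passive and max_active
 * never exceeds the 1004 descriptors kept for the service. */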
static gboolean
_configure_limits(struct sqlx_service_s *ss)
{
#define CONFIGURE_LIMIT(cfg,real) do { \
	real = (cfg > 0 && cfg < real) ? cfg : (limit.rlim_cur - 20) / 3; \
} while (0)
	struct rlimit limit = {0, 0};

	if (0 != getrlimit(RLIMIT_NOFILE, &limit)) {
		GRID_ERROR("Max file descriptor unknown: getrlimit error "
				"(errno=%d) %s", errno, strerror(errno));
		return FALSE;
	}
	if (limit.rlim_cur < 64) {
		GRID_ERROR("Not enough file descriptors allowed [%lu], "
				"minimum 64 required", (unsigned long) limit.rlim_cur);
		return FALSE;
	}

	CONFIGURE_LIMIT(ss->cfg_max_passive, ss->max_passive);
	CONFIGURE_LIMIT(ss->cfg_max_active, ss->max_active);
	CONFIGURE_LIMIT(ss->cfg_max_bases, ss->max_bases);

	// Log the limits once they have actually been set
	GRID_INFO("Limits set to ACTIVES[%u] PASSIVES[%u] BASES[%u]",
			SRV.max_active, SRV.max_passive, SRV.max_bases);
	return TRUE;
#undef CONFIGURE_LIMIT
}
void
meta0_backend_migrate(struct meta0_backend_s *m0)
{
	GError *err = NULL;
	struct sqlx_sqlite3_s *handle = NULL;
	struct sqlx_sqlite3_s *oldhandle = NULL;
	struct sqlx_name_s n;

	n.base = m0->ns;
	n.type = NAME_SRVTYPE_META0;
	n.ns = m0->ns;
	err = sqlx_repository_open_and_lock(m0->repository, &n,
			SQLX_OPEN_LOCAL, &handle, NULL);
	if (err) {
		// Check whether the error is the ENOENT message: the DB does not exist yet
		if (strstr(err->message, strerror(ENOENT)) != NULL) {
			g_clear_error(&err);
			err = sqlx_repository_open_and_lock(m0->repository, &n,
					SQLX_OPEN_LOCAL|SQLX_OPEN_CREATE, &handle, NULL);
			if (!err) {
				// Migration (1.7 -> 1.8) from the old meta0 database
				n.base = m0->id;
				err = sqlx_repository_open_and_lock(m0->repository, &n,
						SQLX_OPEN_LOCAL, &oldhandle, NULL);
				if (!err) {
					GRID_INFO("Starting meta0 database migration");
					err = sqlx_repository_backup_base(oldhandle, handle);
					if (err) {
						GRID_ERROR("Failed to migrate meta0 database: (%d) %s",
								err->code, err->message);
					} else {
						sqlx_repository_unlock_and_close_noerror(oldhandle);

						// Apply the schema
						gint rc;
						char *errmsg = NULL;
						rc = sqlite3_exec(handle->db, META0_SCHEMA, NULL, NULL, &errmsg);
						if (rc != SQLITE_OK && rc != SQLITE_DONE) {
							GRID_WARN("Failed to apply schema (%d) %s %s",
									rc, sqlite3_errmsg(handle->db), errmsg);
						}
						if (errmsg != NULL)
							sqlite3_free(errmsg);

						// Bump the table versions
						GRID_INFO("Updating versions in the database");
						sqlx_admin_inc_all_versions(handle, 2);
					}
				} else {
					// The 1.7 database does not exist: this is a new grid
					g_clear_error(&err);
				}
			} else {
				GRID_ERROR("Failed to create meta0 database: (%d) %s",
						err->code, err->message);
			}
		}
	}

	if (handle)
		sqlx_repository_unlock_and_close_noerror(handle);
}
static gboolean
_configure_with_arguments(struct sqlx_service_s *ss, int argc, char **argv)
{
	// Sanity checks
	if (ss->sync_mode_solo > SQLX_SYNC_FULL) {
		GRID_WARN("Invalid SYNC mode for not-replicated bases");
		return FALSE;
	}
	if (ss->sync_mode_repli > SQLX_SYNC_FULL) {
		GRID_WARN("Invalid SYNC mode for replicated bases");
		return FALSE;
	}
	if (!ss->url) {
		GRID_WARN("No URL!");
		return FALSE;
	}
	if (!ss->announce) {
		ss->announce = g_string_new(ss->url->str);
		GRID_DEBUG("No announce set, using endpoint [%s]", ss->announce->str);
	}
	if (!metautils_url_valid_for_bind(ss->url->str)) {
		GRID_ERROR("Invalid URL as an endpoint [%s]", ss->url->str);
		return FALSE;
	}
	if (!metautils_url_valid_for_connect(ss->announce->str)) {
		GRID_ERROR("Invalid URL to be announced [%s]", ss->announce->str);
		return FALSE;
	}
	if (argc < 2) {
		GRID_ERROR("Not enough options, see usage.");
		return FALSE;
	}

	// Positional arguments
	gsize s = g_strlcpy(ss->ns_name, argv[0], sizeof(ss->ns_name));
	if (s >= sizeof(ss->ns_name)) {
		GRID_WARN("Namespace name too long (given=%"G_GSIZE_FORMAT" max=%u)",
				s, (unsigned int)sizeof(ss->ns_name));
		return FALSE;
	}
	GRID_DEBUG("NS configured to [%s]", ss->ns_name);

	ss->lb_world = oio_lb_local__create_world();
	ss->lb = oio_lb__create();

	s = g_strlcpy(ss->volume, argv[1], sizeof(ss->volume));
	if (s >= sizeof(ss->volume)) {
		GRID_WARN("Volume name too long (given=%"G_GSIZE_FORMAT" max=%u)",
				s, (unsigned int) sizeof(ss->volume));
		return FALSE;
	}
	GRID_DEBUG("Volume configured to [%s]", ss->volume);

	ss->zk_url = gridcluster_get_zookeeper(ss->ns_name);
	return TRUE;
}
static TCrawlerReq*
tl_send_command(TCrawlerBus* conn, GError** error, gchar* crawlerName,
		gchar* cmd, gchar* alldata)
{
	GError* err = *error;
	TCrawlerReq* req = NULL;

	// Init the request
	err = tl_init_command(conn, &req, crawlerName);
	if (err) {
		GRID_ERROR("Failed to init request, command=%s: %s", cmd, err->message);
		if (req)
			crawler_bus_req_clear(&req);
		g_clear_error(&err);
		return NULL;
	}

	err = tlc_Send_CmdProcEx(req, -1 /*MAX_ACTION_TIMEOUT*1000*/, NULL, NULL,
			cmd, g_service_name, alldata);
	if (err) {
		crawler_bus_req_clear(&req);
		// do not leak the error when the send fails
		g_clear_error(&err);
		return NULL;
	}

	return req;
}
static void*
thread_timeout(void)
{
	time_t beginning_time_stamp;
	time_t current_time_stamp;

	time(&beginning_time_stamp);
	time(&current_time_stamp);

	if (MAX_ACTION_TIMEOUT > difftime(current_time_stamp, beginning_time_stamp)) {
		while (!g_stop_thread && g_pending_command) {
			time(&current_time_stamp);
			if (MAX_ACTION_TIMEOUT < difftime(current_time_stamp, beginning_time_stamp))
				break;
			// sleep() only takes whole seconds, so sleep(0.1) was a busy loop;
			// wait 100 ms between polls instead
			g_usleep(100000);
		}
	}

	if (g_stop_thread)
		GRID_INFO("Stop forced!");
	else if (g_pending_command == TRUE)
		GRID_ERROR("No response to the last command sent: timeout");

	g_main_loop_quit(g_main_loop);
	return NULL;
}
GError *
peer_dump(const gchar *target, struct sqlx_name_s *name, gboolean chunked,
		peer_dump_cb callback, gpointer cb_arg)
{
	struct gridd_client_s *client;
	GByteArray *encoded;
	GError *err = NULL;

	gboolean on_reply(gpointer ctx, MESSAGE reply) {
		GError *err2 = NULL;
		gsize bsize = 0;
		gint64 remaining = -1;
		(void) ctx;

		err2 = metautils_message_extract_strint64(reply, "remaining", &remaining);
		g_clear_error(&err2);

		void *b = metautils_message_get_BODY(reply, &bsize);
		if (b && bsize) {
			GByteArray *dump = g_byte_array_new();
			g_byte_array_append(dump, b, bsize);
			err2 = callback(dump, remaining, cb_arg);
		}
		if (err2 != NULL) {
			GRID_ERROR("Failed to use result of dump: (%d) %s",
					err2->code, err2->message);
			g_clear_error(&err2);
			return FALSE;
		}
		return TRUE;
	}
static GError*
_initContext(struct meta0_backend_s *m0)
{
	GError *error = NULL;

	if (!context) {
		context = g_malloc0(sizeof(struct meta0_assign_context_s));
	} else {
		_resetContext();
	}

	error = meta0_backend_get_all(m0, &(context->array_meta1_by_prefix));
	if (error) {
		GRID_ERROR("Failed to duplicate the current prefix distribution: (%d) %s",
				error->code, error->message);
		return error;
	}

	GPtrArray *meta1_ref = NULL;
	error = meta0_backend_get_all_meta1_ref(m0, &meta1_ref);
	if (error) {
		meta0_utils_array_meta1ref_clean(meta1_ref);
		GRID_ERROR("Failed to duplicate the current meta1 references: (%d) %s",
				error->code, error->message);
		return error;
	}
	context->map_meta1_ref = _meta1ref_array_to_map(meta1_ref);
	meta0_utils_array_meta1ref_clean(meta1_ref);

	context->working_map_meta1_ref = g_hash_table_new_full(g_str_hash,
			g_str_equal, g_free, _gfree_map_meta0_assign_meta1);
	context->treat_prefixes = g_malloc0(8192);
	context->replica = 0;
	context->avgscore = 0;

	if (context->array_meta1_by_prefix->len > 0) {
		gchar **v = context->array_meta1_by_prefix->pdata[0];
		if (v != NULL) {
			for (; *v; v++)
				context->replica++;
			if (context->replica > 65536) {
				return NEWERROR(EINVAL, "Invalid number of replicas [%d]",
						context->replica);
			}
		}
		GRID_DEBUG("replica %d", context->replica);
	}
	return NULL;
}
struct gridd_client_pool_s *
gridd_client_pool_create(void)
{
	int fdmon, fd[2];
	struct gridd_client_pool_s *pool;

	if (0 != pipe(fd)) {
		// nothing to close here: pipe() failed, so fd[] was never filled
		GRID_WARN("pipe() error: (%d) %s", errno, strerror(errno));
		return NULL;
	}

	if (0 > (fdmon = epoll_create(64))) {
		GRID_WARN("epoll_create error: (%d) %s", errno, strerror(errno));
		metautils_pclose(&(fd[0]));
		metautils_pclose(&(fd[1]));
		return NULL;
	}

	// TODO FIXME factorize this in metautils
	struct rlimit limit;
	memset(&limit, 0, sizeof(limit));
	if (0 != getrlimit(RLIMIT_NOFILE, &limit))
		limit.rlim_cur = limit.rlim_max = 32768;

	pool = g_malloc0(sizeof(*pool));
	pool->pending_clients = g_async_queue_new();

	pool->fdmon = fdmon;
	pool->active_max = limit.rlim_cur;
	pool->active_clients_size = limit.rlim_cur;
	pool->active_clients = g_malloc0(pool->active_clients_size
			* sizeof(struct event_client_s*));

	pool->fd_in = fd[0];
	fd[0] = -1;
	metautils_syscall_shutdown(pool->fd_in, SHUT_WR);
	sock_set_non_blocking(pool->fd_in, TRUE);

	pool->fd_out = fd[1];
	fd[1] = -1;
	metautils_syscall_shutdown(pool->fd_out, SHUT_RD);
	sock_set_non_blocking(pool->fd_out, TRUE);

	/* then monitor at least the notification pipe's output */
	struct epoll_event ev;
	memset(&ev, 0, sizeof(ev));
	ev.events = EPOLLIN;
	ev.data.fd = pool->fd_in;
	if (0 > epoll_ctl(pool->fdmon, EPOLL_CTL_ADD, pool->fd_in, &ev)) {
		GRID_ERROR("epoll error: (%d) %s", errno, strerror(errno));
		gridd_client_pool_destroy(pool);
		return NULL;
	}

	pool->vtable = &VTABLE;
	return pool;
}
GError*
meta0_assign_prefix_to_meta1(struct meta0_backend_s *m0, gchar *ns_name,
		gboolean nocheck)
{
	// GET the meta1 list from the conscience
	GList *working_m1list = NULL;
	GSList *unref_m1list = NULL;
	GError *error = NULL;
	GPtrArray *new_meta1ref = NULL;

	GRID_INFO("START Assign prefix");

	error = _initContext(m0);
	if (error) {
		goto errorLabel;
	}

	// Build the working list, sorted by score
	error = _init_assign(ns_name, &working_m1list, &unref_m1list);
	if (error) {
		goto errorLabel;
	}

	if (nocheck) {
		error = _check(working_m1list);
		if (error) {
			goto errorLabel;
		}
	}

	error = _assign(working_m1list, unref_m1list);
	if (error) {
		goto errorLabel;
	}

	new_meta1ref = _updated_meta1ref();
	error = meta0_backend_assign(m0, context->array_meta1_by_prefix,
			new_meta1ref, FALSE);
	if (error) {
		GRID_ERROR("Failed to update the database: (%d) %s",
				error->code, error->message);
		goto errorLabel;
	}

	context->lastAssignTime = g_date_time_new_now_local();

errorLabel:
	_resetContext();
	if (new_meta1ref) {
		meta0_utils_array_meta1ref_clean(new_meta1ref);
	}
	if (working_m1list) {
		g_list_free(working_m1list);
		working_m1list = NULL;
	}
	if (unref_m1list) {
		g_slist_free(unref_m1list);
		unref_m1list = NULL;
	}
	GRID_INFO("END ASSIGN");
	return error;
}
static void
_action_report_error(GError *err, const gchar *msg)
{
	GRID_ERROR("%s: (%d) %s", msg, !err ? 0 : err->code, !err ? "" : err->message);
	if (err)
		g_clear_error(&err);
	grid_main_stop();
}
static gboolean
_configure_backend(struct sqlx_service_s *ss)
{
	struct sqlx_repo_config_s repository_config = {0};
	repository_config.flags = 0;
	repository_config.flags |= ss->flag_delete_on ? SQLX_REPO_DELETEON : 0;
	repository_config.flags |= ss->flag_cached_bases ? 0 : SQLX_REPO_NOCACHE;
	repository_config.flags |= ss->flag_autocreate ? SQLX_REPO_AUTOCREATE : 0;

	repository_config.sync_solo = ss->sync_mode_solo;
	repository_config.sync_repli = ss->sync_mode_repli;
	repository_config.page_size = SQLX_DEFAULT_PAGE_SIZE;
	if (ss->cfg_page_size >= 512)
		repository_config.page_size = ss->cfg_page_size;

	GError *err = sqlx_repository_init(ss->volume, &repository_config,
			&ss->repository);
	if (err) {
		GRID_ERROR("SQLX repository init failure: (%d) %s",
				err->code, err->message);
		g_clear_error(&err);
		return FALSE;
	}

	err = sqlx_repository_configure_type(ss->repository,
			ss->service_config->srvtype, ss->service_config->schema);
	if (err) {
		GRID_ERROR("SQLX schema init failure: (%d) %s", err->code, err->message);
		g_clear_error(&err);
		return FALSE;
	}

	sqlx_repository_configure_open_timeout(ss->repository,
			ss->open_timeout * G_TIME_SPAN_MILLISECOND);
	sqlx_repository_configure_hash(ss->repository,
			ss->service_config->repo_hash_width,
			ss->service_config->repo_hash_depth);

	GRID_TRACE("SQLX repository initialized");
	return TRUE;
}
struct network_server_s *
network_server_init(void)
{
	int wakeup[2];
	guint count, maxfd;
	struct network_server_s *result;

	wakeup[0] = wakeup[1] = -1;
	if (0 > pipe(wakeup)) {
		GRID_ERROR("PIPE creation failure: (%d) %s", errno, strerror(errno));
		return NULL;
	}
	shutdown(wakeup[0], SHUT_WR);
	shutdown(wakeup[1], SHUT_RD);
	fcntl(wakeup[0], F_SETFL, O_NONBLOCK|fcntl(wakeup[0], F_GETFL));

	count = _server_count_procs();
	maxfd = _server_get_maxfd();

	result = g_malloc0(sizeof(struct network_server_s));
	result->flag_continue = ~0;
	result->stats = grid_stats_holder_init();
	clock_gettime(CLOCK_MONOTONIC_COARSE, &result->now);
	result->queue_events = g_async_queue_new();
	result->queue_monitor = g_async_queue_new();
	result->endpointv = g_malloc0(sizeof(struct endpoint_s*));
	g_mutex_init(&result->lock_threads);
	result->workers_max_idle_delay = SERVER_DEFAULT_MAX_IDLEDELAY;
	result->workers_minimum = count;
	result->workers_minimum_spare = count;
	result->workers_maximum = SERVER_DEFAULT_MAX_WORKERS;
	result->cnx_max_sys = maxfd;
	result->cnx_max = (result->cnx_max_sys * 99) / 100;
	result->cnx_backlog = 50;
	result->wakeup[0] = wakeup[0];
	result->wakeup[1] = wakeup[1];
	result->epollfd = epoll_create(4096);

	// XXX JFS : #slots as a power of 2 ... for efficient modulos
	result->workers_active_1 = grid_single_rrd_create(result->now.tv_sec, 32);
	result->workers_active_60 = grid_single_rrd_create(result->now.tv_sec / 60, 64);

	result->atexit_max_open_never_input = 3;
	result->atexit_max_idle = 2;
	result->atexit_max_open_persist = 10;

	GRID_INFO("SERVER ready with epollfd[%d] pipe[%d,%d]",
			result->epollfd, result->wakeup[0], result->wakeup[1]);

	return result;
}
static void
cli_action(void)
{
	/* Use the client to get a sqlx service */
	GRID_DEBUG("Locating [%s] CID[%s]", oio_url_get(url, OIOURL_WHOLE),
			oio_url_get(url, OIOURL_HEXID));

	gchar **srvurlv = NULL;
	GError *err = hc_resolve_reference_service(resolver, url, type, &srvurlv);

	if (err != NULL) {
		GRID_ERROR("Services resolution error: (%d) %s", err->code, err->message);
		g_clear_error(&err);
		grid_main_set_status(1);
		return;
	}

	if (!srvurlv || !*srvurlv) {
		GRID_ERROR("Services resolution error: (%d) %s", 0, "No service found");
		grid_main_set_status(1);
		return;
	}

	for (gchar **s = srvurlv; *s; s++)
		GRID_DEBUG("Located [%s]", *s);

	gint rc = 0;
	for (gchar **s = srvurlv; !rc && *s; s++) {
		struct meta1_service_url_s *surl;
		if (!(surl = meta1_unpack_url(*s)))
			g_printerr("Invalid service URL from meta1 [%s]\n", *s);
		else {
			if (!g_ascii_strcasecmp("destroy", query[0])) {
				rc = do_destroy2(surl);
			} else {
				rc = do_queryv(surl);
			}
			g_free(surl);
		}
	}

	g_strfreev(srvurlv);
}
static void*
thread_command(void)
{
	GError* error = NULL;
	char* cn = NULL;

	/* D-Bus connection */
	error = tlc_init_connection(&g_tl_conn, g_service_name, SERVICE_PATH,
			"" /* for the system bus: "" */,
			(TCrawlerBusObjectInfo*) &dbus_glib_crawlerCmd_object_info);
	if (error) {
		GRID_ERROR("System D-Bus connection failed: %s",
				/*g_cfg_action_name, g_service_pid,*/ error->message);
		exit(EXIT_FAILURE);
	}

	if (g_crawler_name)
		cn = g_string_free(g_crawler_name, FALSE);
	g_crawler_name = NULL;

	if (cn == NULL) {
		if (!g_strcmp0(CTRL_LIST, g_ctrl_command)) {
			//-------------------------
			// this command receives a list of char*
			char** listname = tl_send_command_strv(g_tl_conn, cn,
					g_ctrl_command, "");
			if (listname) {
				// dump the whole list to stdout
				for (char** ptr = listname; *ptr; ptr++)
					g_printf(" %s\n", *ptr);
				g_strfreev(listname);
			}
		}
		g_pending_command = FALSE;
		sleep(1);
		g_main_loop_quit(g_main_loop);
	} else {
		g_req = tl_send_command(g_tl_conn, &error, cn, g_ctrl_command, "");
		if (error)
			g_clear_error(&error);
	}

	return NULL;
}
static gpointer
_worker_clients(gpointer p)
{
	metautils_ignore_signals();
	while (grid_main_is_running()) {
		GError *err = gridd_client_pool_round(PSRV(p)->clients_pool, 1);
		if (err != NULL) {
			GRID_ERROR("Clients error: (%d) %s", err->code, err->message);
			g_clear_error(&err);
			grid_main_stop();
		}
	}
	return p;
}
static int
tcpip_open(const gchar *h, const gchar *p)
{
	GRID_DEBUG("opening tcp/ip connection");
	struct evutil_addrinfo ai_hint, *ai_res = NULL;
	int rc;
	socklen_t ss_len;
	struct sockaddr_storage ss;
	int fd;

	fd = socket(PF_INET, SOCK_STREAM, 0);
	if (fd < 0) {
		GRID_ERROR("socket error: %s", strerror(errno));
		return -1;
	}

	sock_set_linger_default(fd);
	sock_set_reuseaddr(fd, TRUE);

	bzero(&ai_hint, sizeof(ai_hint));
	ai_hint.ai_flags = AI_NUMERICHOST;
	ai_hint.ai_family = PF_INET;
	ai_hint.ai_socktype = SOCK_STREAM;
	rc = evutil_getaddrinfo(h, p, &ai_hint, &ai_res);
	if (rc != 0) {
		// do not leak the socket when the address cannot be resolved
		close(fd);
		errno = rc;
		return -1;
	}

	bzero(&ss, sizeof(ss));
	ss_len = ai_res->ai_addrlen;
	g_memmove(&ss, (ai_res->ai_addr), ss_len);
	evutil_freeaddrinfo(ai_res);

	switch (connect(fd, (struct sockaddr *) &ss, ss_len)) {
		case 0:
			return fd;
		case -1:
			if (errno == EALREADY || errno == EAGAIN || errno == EINPROGRESS
					|| errno == EWOULDBLOCK) {
				errno = 0;
				return fd;
			}
			close(fd);
			return -1;
		default: /* unexpected */
			close(fd);
			return -1;
	}
}
gboolean
stg_pol_is_rainx(namespace_info_t *ni, const gchar *stgpol)
{
	struct storage_policy_s *sp = storage_policy_init(ni, stgpol);
	const struct data_security_s *datasec = storage_policy_get_data_security(sp);
	gboolean ret;

	if (!datasec) {
		GRID_ERROR("Cannot find datasecurity values for policy [%s]", stgpol);
		ret = FALSE;
	} else {
		ret = (data_security_get_type(datasec) == RAIN);
	}

	storage_policy_clean(sp);
	return ret;
}
void
sqlx_cache_clean(sqlx_cache_t *cache)
{
	GRID_DEBUG("%s(%p) *** CLEANUP ***", __FUNCTION__, (void*)cache);
	if (!cache)
		return;

	if (cache->bases) {
		for (guint bd = 0; bd < cache->bases_count; bd++) {
			sqlx_base_t *base = cache->bases + bd;

			switch (base->status) {
				case SQLX_BASE_FREE:
					EXTRA_ASSERT(base->name == NULL);
					break;
				case SQLX_BASE_IDLE:
				case SQLX_BASE_IDLE_HOT:
				case SQLX_BASE_USED:
					sqlx_base_debug(__FUNCTION__, base);
					break;
				case SQLX_BASE_CLOSING:
					GRID_ERROR("Base being closed while the cache is being cleaned");
					break;
			}

			g_free0(base->name);
			base->name = NULL;
		}
		g_free(cache->bases);
	}

	g_mutex_clear(&cache->lock);
	if (cache->cond_array) {
		for (guint i = 0; i < cache->cond_count; i++)
			g_cond_clear(cache->cond_array + i);
		g_free(cache->cond_array);
	}
	if (cache->bases_by_name)
		g_tree_destroy(cache->bases_by_name);

	g_free(cache);
}
// TODO: factorize with the similar function in sqliterepo/synchro.h
static void
zk_main_watch(zhandle_t *zh, int type, int state, const char *path,
		void *watcherCtx)
{
	metautils_ignore_signals();

	GRID_DEBUG("%s(%p,%d,%d,%s,%p)", __FUNCTION__,
			zh, type, state, path, watcherCtx);

	struct zk_manager_s *manager = watcherCtx;
	const char *zkurl = manager->zk_url;

	if (type != ZOO_SESSION_EVENT)
		return;

	if (state == ZOO_EXPIRED_SESSION_STATE) {
		GRID_WARN("Zookeeper: (re)connecting to [%s]", zkurl);
		if (manager->zh)
			zookeeper_close(manager->zh);

		/* XXX(jfs): forget the previous ID and reconnect */
		manager->zh = zookeeper_init(manager->zk_url, zk_main_watch,
				SQLX_SYNC_DEFAULT_ZK_TIMEOUT, NULL, manager, 0);
		if (!manager->zh) {
			GRID_ERROR("ZooKeeper init failure: (%d) %s",
					errno, strerror(errno));
			abort();
		}
	} else if (state == ZOO_AUTH_FAILED_STATE) {
		GRID_WARN("Zookeeper: auth problem to [%s]", zkurl);
	} else if (state == ZOO_ASSOCIATING_STATE) {
		GRID_DEBUG("Zookeeper: associating to [%s]", zkurl);
	} else if (state == ZOO_CONNECTED_STATE) {
		GRID_INFO("Zookeeper: connected to [%s]", zkurl);
	} else {
		GRID_INFO("Zookeeper: unmanaged event [%s]", zkurl);
	}
}
static struct storage_policy_s *
_init_storage_policy(const char *ns, const char *polname)
{
	namespace_info_t *ni = NULL;
	GError *e = NULL;
	struct storage_policy_s *sp = NULL;

	ni = get_namespace_info(ns, &e);
	if (NULL != e) {
		GRID_ERROR("Failed to get namespace info: %s", e->message);
		g_clear_error(&e);
		return NULL;
	}

	sp = storage_policy_init(ni, polname);
	namespace_info_clear(ni);
	g_free(ni);
	return sp;
}
GError *
sqlx_cache_open_and_lock_base(sqlx_cache_t *cache, const hashstr_t *hname,
		gint *result)
{
	gint bd;
	GError *err = NULL;
	sqlx_base_t *base = NULL;

	EXTRA_ASSERT(cache != NULL);
	EXTRA_ASSERT(hname != NULL);
	EXTRA_ASSERT(result != NULL);

	gint64 start = oio_ext_monotonic_time();
	gint64 deadline = DEFAULT_CACHE_OPEN_TIMEOUT;
	if (cache->open_timeout > 0)
		deadline = cache->open_timeout;
	GRID_TRACE2("%s(%p,%s,%p) delay = %"G_GINT64_FORMAT, __FUNCTION__,
			(void*)cache, hname ? hashstr_str(hname) : "NULL",
			(void*)result, deadline);
	deadline += start;

	g_mutex_lock(&cache->lock);
	cache->used = TRUE;
retry:
	bd = sqlx_lookup_id(cache, hname);
	if (bd < 0) {
		if (!(err = sqlx_base_reserve(cache, hname, &base))) {
			bd = base->index;
			*result = base->index;
			sqlx_base_debug("OPEN", base);
		} else {
			GRID_DEBUG("No base available for [%s] (%d %s)",
					hashstr_str(hname), err->code, err->message);
			if (sqlx_expire_first_idle_base(cache, 0) >= 0) {
				g_clear_error(&err);
				goto retry;
			}
		}
	} else {
		base = GET(cache, bd);
		gint64 now = oio_ext_monotonic_time();
		if (now > deadline) {
			err = NEWERROR(CODE_UNAVAILABLE,
					"DB busy (after %"G_GINT64_FORMAT" ms)",
					(now - start) / G_TIME_SPAN_MILLISECOND);
		} else switch (base->status) {

			case SQLX_BASE_FREE:
				EXTRA_ASSERT(base->count_open == 0);
				EXTRA_ASSERT(base->owner == NULL);
				GRID_ERROR("free base referenced");
				g_assert_not_reached();
				break;

			case SQLX_BASE_IDLE:
			case SQLX_BASE_IDLE_HOT:
				EXTRA_ASSERT(base->count_open == 0);
				EXTRA_ASSERT(base->owner == NULL);
				sqlx_base_move_to_list(cache, base, SQLX_BASE_USED);
				base->count_open++;
				base->owner = g_thread_self();
				*result = base->index;
				break;

			case SQLX_BASE_USED:
				EXTRA_ASSERT(base->count_open > 0);
				EXTRA_ASSERT(base->owner != NULL);
				if (base->owner != g_thread_self()) {
					GRID_DEBUG("Base [%s] in use by another thread (%X), waiting...",
							hashstr_str(hname), oio_log_thread_id(base->owner));
					/* The lock is held by another thread/request.
					   XXX(jfs): do not use 'now' because it can be a fake clock */
					g_cond_wait_until(base->cond, &cache->lock,
							g_get_monotonic_time() + oio_cache_period_cond_wait);
					goto retry;
				}
				base->owner = g_thread_self();
				base->count_open++;
				*result = base->index;
				break;

			case SQLX_BASE_CLOSING:
				EXTRA_ASSERT(base->owner != NULL);
				/* Just wait for a notification then retry.
				   XXX(jfs): do not use 'now' because it can be a fake clock */
				g_cond_wait_until(base->cond, &cache->lock,
						g_get_monotonic_time() + oio_cache_period_cond_wait);
				goto retry;
		}
	}

	if (base) {
		if (!err) {
			sqlx_base_debug(__FUNCTION__, base);
			EXTRA_ASSERT(base->owner == g_thread_self());
			EXTRA_ASSERT(base->count_open > 0);
		}
		g_cond_signal(base->cond);
	}
	g_mutex_unlock(&cache->lock);
	return err;
}
static void
main_action(void)
{
	g_type_init();

	g_main_loop = g_main_loop_new(NULL, FALSE);

	if (!g_console_command)
		return;

	//--------------
	// check command
	gchar* command = g_string_free(g_console_command, FALSE);
	g_ctrl_command = g_malloc0(sizeof(char) * 30);
	g_ctrl_command[0] = '\0';

	if (g_crawler_name != NULL) {
		if (!g_strcmp0("list", command))
			g_strlcpy(g_ctrl_command, CTRL_LIST, 30);
		else if (!g_strcmp0("stop", command))
			g_strlcpy(g_ctrl_command, CTRL_STOP, 30);
		else if (!g_strcmp0("pause", command))
			g_strlcpy(g_ctrl_command, CTRL_PAUSE, 30);
		else if (!g_strcmp0("slow", command))
			g_strlcpy(g_ctrl_command, CTRL_SLOW, 30);
		else if (!g_strcmp0("resume", command))
			g_strlcpy(g_ctrl_command, CTRL_RESUME, 30);
		else if (!g_strcmp0("progress", command))
			g_strlcpy(g_ctrl_command, CTRL_PROGRESS, 30);
	} else {
		if (!g_strcmp0("list", command))
			g_strlcpy(g_ctrl_command, CTRL_LIST, 30);
		else if (!g_strcmp0("help", command))
			g_strlcpy(g_ctrl_command, "help", 30);
	}

	//----------------
	// execute command
	if (strlen(g_ctrl_command) == 0) {
		GRID_INFO("%s (%d): Unknown command '%s'\n",
				g_service_name, g_service_pid, command);
	} else if (!g_strcmp0("help", g_ctrl_command)) {
		usage();
	} else {
		g_pending_command = TRUE;
		gboolean bresult = TRUE;

		g_progress_thread = g_thread_create(thread_command, NULL, TRUE, NULL);
		if (!g_progress_thread) {
			GRID_ERROR("%s (%d): progress thread failed to start...",
					g_service_name, g_service_pid);
			bresult = FALSE;
		}

		g_timeout_thread = g_thread_create(thread_timeout, NULL, TRUE, NULL);
		if (!g_timeout_thread) {
			GRID_ERROR("%s (%d): timeout thread failed to start...",
					g_service_name, g_service_pid);
			bresult = FALSE;
		}

		if (bresult) {
			g_main_loop_run(g_main_loop);
		}
	}

	if (g_req)
		crawler_bus_req_clear(&g_req);
	if (g_tl_conn)
		tl_close(&g_tl_conn);

	g_free(command);
	g_console_command = NULL;
}
static GError *
_init_assign(gchar *ns_name, GList **working_m1list, GSList **unref_m1list)
{
	GError *error = NULL;
	GSList *m1_list = NULL;

	m1_list = list_namespace_services(ns_name, "meta1", &error);
	if (!m1_list) {
		if (error) {
			GRID_ERROR("Failed to init the meta1 service list: (%d) %s",
					error->code, error->message);
			goto errorLabel;
		}
	}
	GRID_INFO("%d meta1 services known by the conscience", g_slist_length(m1_list));

	if (context->replica > g_slist_length(m1_list)) {
		GRID_ERROR("Number of meta1 services [%d] less than the number of replicas [%d]",
				g_slist_length(m1_list), context->replica);
		error = NEWERROR(EINVAL,
				"Number of meta1 services [%d] less than the number of replicas [%d]",
				g_slist_length(m1_list), context->replica);
		goto errorLabel;
	}
	if (context->replica <= 0) {
		GRID_ERROR("Invalid replica number [%d]", context->replica);
		error = NEWERROR(EINVAL, "Invalid replica number [%d]", context->replica);
		goto errorLabel;
	}

	// Duplicate the current prefix distribution and build a list
	GSList *prefixByMeta1 = meta0_utils_array_to_list(context->array_meta1_by_prefix);

	// Iterate with a cursor so the head of m1_list is preserved for the final cleanup
	for (GSList *m1 = m1_list; m1; m1 = m1->next) {
		struct meta0_assign_meta1_s *aM1;
		struct service_info_s *sInfo;
		gchar url[128];
		url[0] = '\0';

		aM1 = g_malloc0(sizeof(struct meta0_assign_meta1_s));
		sInfo = m1->data;

		grid_addrinfo_to_string(&(sInfo->addr), url, sizeof(url));
		aM1->addr = g_strdup(url);
		aM1->score = 0;
		aM1->available = FALSE;
		aM1->used = TRUE;

		for (GSList *l = prefixByMeta1; l; l = l->next) {
			struct meta0_info_s *m0info;
			if (!(m0info = l->data))
				continue;
			if (addr_info_equal(&(m0info->addr), &(sInfo->addr))) {
				guint16 *p, *max;
				guint i = 0;
				GArray *pfx = g_array_new(FALSE, FALSE, 2);
				p = (guint16*) m0info->prefixes;
				max = (guint16*) (m0info->prefixes + m0info->prefixes_size);
				for (; p < max; p++) {
					i++;
					pfx = g_array_append_vals(pfx, (guint8*)p, 1);
				}
				aM1->assignPrefixes = pfx;
				aM1->score = i;
				GRID_DEBUG("aM1 %s, score %d", aM1->addr, aM1->score);
				prefixByMeta1 = g_slist_remove(prefixByMeta1, m0info);
				meta0_info_clean(m0info);
				break;
			}
		}

		struct meta0_assign_meta1_s *m1ref =
				g_hash_table_lookup(context->map_meta1_ref, aM1->addr);
		if (m1ref && !m1ref->used) {
			// unreferenced meta1
			aM1->used = FALSE;
			if (aM1->score != 0) {
				// this meta1 still owns prefixes
				*unref_m1list = g_slist_prepend(*unref_m1list, aM1);
			}
		} else {
			*working_m1list = g_list_prepend(*working_m1list, aM1);
		}
		g_hash_table_insert(context->working_map_meta1_ref, g_strdup(aM1->addr), aM1);
	}

	GRID_TRACE("working list length %d, remaining prefixes %d",
			g_list_length(*working_m1list), g_slist_length(prefixByMeta1));

	guint nb_M1 = g_list_length(*working_m1list) + g_slist_length(prefixByMeta1);

	// Define the average assignment score
	if (nb_M1 == 0) {
		GRID_ERROR("No meta1 available");
		error = NEWERROR(0, "No meta1 service available");
		goto errorLabel;
	}

	context->avgscore = (65536 * context->replica) / nb_M1;
	GRID_DEBUG("average meta1 score %d", context->avgscore);

	for (GList *work = g_list_first(*working_m1list); work; work = work->next) {
		struct meta0_assign_meta1_s *aM1 = work->data;
		if (aM1->score > context->avgscore) {
			aM1->available = TRUE;
		}
	}

	GRID_DEBUG("init meta1 list, found %d meta1", g_list_length(*working_m1list));
	GRID_DEBUG("init unref meta1 list, found %d meta1", g_slist_length(*unref_m1list));

	meta0_utils_list_clean(prefixByMeta1);

errorLabel:
	if (m1_list) {
		g_slist_foreach(m1_list, service_info_gclean, NULL);
		g_slist_free(m1_list);
	}
	return error;
}
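/* Worked example (illustrative figures only): with 65536 prefixes,
 * context->replica = 3 and nb_M1 = 12 meta1 services, the target is
 * avgscore = (65536 * 3) / 12 = 16384 prefixes per meta1; services whose
 * current score is above that average are flagged 'available' by the loop
 * above. */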
GError *
sqlx_cache_open_and_lock_base(sqlx_cache_t *cache, const hashstr_t *hname,
		gint *result)
{
	gint bd;
	GError *err = NULL;
	sqlx_base_t *base = NULL;

	GRID_TRACE2("%s(%p,%s,%p)", __FUNCTION__, (void*)cache,
			hname ? hashstr_str(hname) : "NULL", (void*)result);
	EXTRA_ASSERT(cache != NULL);
	EXTRA_ASSERT(hname != NULL);
	EXTRA_ASSERT(result != NULL);

	gint64 deadline = g_get_monotonic_time();
	if (cache->open_timeout >= 0) {
		deadline += cache->open_timeout * G_TIME_SPAN_MILLISECOND;
	} else {
		deadline += 5 * G_TIME_SPAN_MINUTE;
	}

	g_mutex_lock(&cache->lock);
	cache->used = TRUE;
retry:
	bd = sqlx_lookup_id(cache, hname);
	if (bd < 0) {
		if (!(err = sqlx_base_reserve(cache, hname, &base))) {
			bd = base->index;
			*result = base->index;
			sqlx_base_debug("OPEN", base);
		} else {
			GRID_DEBUG("No base available for [%s] (%d %s)",
					hashstr_str(hname), err->code, err->message);
			if (sqlx_expire_first_idle_base(cache, NULL) >= 0) {
				g_clear_error(&err);
				goto retry;
			}
		}
	} else {
		base = GET(cache, bd);
		switch (base->status) {

			case SQLX_BASE_FREE:
				EXTRA_ASSERT(base->count_open == 0);
				EXTRA_ASSERT(base->owner == NULL);
				GRID_ERROR("free base referenced");
				g_assert_not_reached();
				break;

			case SQLX_BASE_IDLE:
			case SQLX_BASE_IDLE_HOT:
				EXTRA_ASSERT(base->count_open == 0);
				EXTRA_ASSERT(base->owner == NULL);
				sqlx_base_move_to_list(cache, base, SQLX_BASE_USED);
				base->count_open++;
				base->owner = g_thread_self();
				*result = base->index;
				break;

			case SQLX_BASE_USED:
				EXTRA_ASSERT(base->count_open > 0);
				EXTRA_ASSERT(base->owner != NULL);
				if (base->owner != g_thread_self()) {
					GRID_DEBUG("Base [%s] in use by another thread (%X), waiting...",
							hashstr_str(hname), oio_log_thread_id(base->owner));
					// The lock is held by another thread/request
					if (g_cond_wait_until(base->cond, &cache->lock, deadline)) {
						GRID_DEBUG("Retrying to open [%s]", hashstr_str(hname));
						goto retry;
					} else {
						if (cache->open_timeout > 0) {
							err = NEWERROR(CODE_UNAVAILABLE,
									"database currently in use by another request"
									" (we waited %ldms)", cache->open_timeout);
						} else {
							err = NEWERROR(CODE_UNAVAILABLE,
									"database currently in use by another request");
						}
						GRID_DEBUG("failed to open base: "
								"in use by another request (thread %X)",
								oio_log_thread_id(base->owner));
						break;
					}
				}
				base->owner = g_thread_self();
				base->count_open++;
				*result = base->index;
				break;

			case SQLX_BASE_CLOSING:
				EXTRA_ASSERT(base->owner != NULL);
				// Just wait for a notification then retry
				if (g_cond_wait_until(base->cond, &cache->lock, deadline)) {
					goto retry;
				} else {
					err = NEWERROR(CODE_UNAVAILABLE,
							"Database stuck in closing state");
					break;
				}
		}
	}

	if (base) {
		if (!err) {
			sqlx_base_debug(__FUNCTION__, base);
			EXTRA_ASSERT(base->owner == g_thread_self());
			EXTRA_ASSERT(base->count_open > 0);
		}
		g_cond_signal(base->cond);
	}
	g_mutex_unlock(&cache->lock);
	return err;
}
GError *
sqlx_cache_unlock_and_close_base(sqlx_cache_t *cache, gint bd, gboolean force)
{
	GError *err = NULL;
	sqlx_base_t *base;

	GRID_TRACE2("%s(%p,%d,%d)", __FUNCTION__, (void*)cache, bd, force);
	EXTRA_ASSERT(cache != NULL);

	if (base_id_out(cache, bd))
		return NEWERROR(CODE_INTERNAL_ERROR, "invalid base id=%d", bd);

	g_mutex_lock(&cache->lock);
	cache->used = TRUE;

	base = GET(cache, bd);
	switch (base->status) {

		case SQLX_BASE_FREE:
			EXTRA_ASSERT(base->count_open == 0);
			EXTRA_ASSERT(base->owner == NULL);
			GRID_ERROR("Trying to close a free base");
			g_assert_not_reached();
			break;

		case SQLX_BASE_IDLE:
		case SQLX_BASE_IDLE_HOT:
			EXTRA_ASSERT(base->count_open == 0);
			EXTRA_ASSERT(base->owner == NULL);
			GRID_ERROR("Trying to close a closed base");
			g_assert_not_reached();
			break;

		case SQLX_BASE_USED:
			EXTRA_ASSERT(base->count_open > 0);
			// held by the current thread
			if (!(--base->count_open)) {  // to be closed
				if (force) {
					_expire_base(cache, base);
				} else {
					sqlx_base_debug("CLOSING", base);
					base->owner = NULL;
					if (base->heat >= cache->heat_threshold)
						sqlx_base_move_to_list(cache, base, SQLX_BASE_IDLE_HOT);
					else
						sqlx_base_move_to_list(cache, base, SQLX_BASE_IDLE);
				}
			}
			break;

		case SQLX_BASE_CLOSING:
			EXTRA_ASSERT(base->owner != NULL);
			EXTRA_ASSERT(base->owner != g_thread_self());
			GRID_ERROR("Trying to close a base being closed");
			g_assert_not_reached();
			break;
	}

	if (base && !err)
		sqlx_base_debug(__FUNCTION__, base);
	g_cond_signal(base->cond);
	g_mutex_unlock(&cache->lock);
	return err;
}
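/* Usage sketch (hypothetical caller, not taken from the original sources):
 * sqlx_cache_open_and_lock_base() and sqlx_cache_unlock_and_close_base()
 * are meant to be paired, e.g.:
 *
 *   gint bd = -1;
 *   GError *err = sqlx_cache_open_and_lock_base(cache, hname, &bd);
 *   if (!err) {
 *       // ... work on the base designated by 'bd' ...
 *       sqlx_cache_unlock_and_close_base(cache, bd, FALSE);
 *   } else {
 *       g_clear_error(&err);
 *   }
 *
 * Passing force=TRUE on close expires the base immediately instead of moving
 * it back to one of the IDLE lists. */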
static GError*
_assign(GList *working_m1list, GSList *unref_m1list)
{
	GError *error = NULL;
	guint nb_treat_prefixes = 0;
	struct meta0_assign_meta1_s *s_aM1, *d_aM1;

	// Move prefixes away from the unreferenced meta1 services
	if (unref_m1list) {
		for (; unref_m1list; unref_m1list = unref_m1list->next) {
			s_aM1 = unref_m1list->data;
			guint8 *prefix = _get_first_prefix_to_assign_meta1(s_aM1);
			if (!s_aM1->assignPrefixes)
				continue;
			do {
				if (_is_treat_prefix(context->treat_prefixes, prefix)) {
					GRID_ERROR("prefix [%02X%02X] already treated",
							prefix[0], prefix[1]);
					error = NEWERROR(0, "Failed to remove meta1 service");
					return error;
				}
				d_aM1 = _select_dest_assign_m1(working_m1list, s_aM1, NULL, TRUE, FALSE);
				if (!d_aM1) {
					d_aM1 = _select_dest_assign_m1(working_m1list, s_aM1, NULL, TRUE, TRUE);
					if (!d_aM1) {
						error = NEWERROR(0,
								"Failed to assign prefix from meta1 %s: "
								"not enough META1 to meet the requirements (distance, number)",
								s_aM1->addr);
						return error;
					}
				}
				_replace(s_aM1, d_aM1);
				nb_treat_prefixes++;
			} while (s_aM1->assignPrefixes);
		}
	}

	gboolean loop = TRUE;
	do {
		s_aM1 = NULL;
		d_aM1 = NULL;

		// Sort the meta1 list by score
		working_m1list = g_list_sort(working_m1list, meta0_assign_sort_by_score);
		// Select a high-score meta1 and a prefix to move
		s_aM1 = _select_source_assign_m1(working_m1list,
				context->treat_prefixes, context->avgscore);
		if (s_aM1) {
			d_aM1 = _select_dest_assign_m1(working_m1list, s_aM1, NULL, FALSE, FALSE);
			if (d_aM1) {
				_replace(s_aM1, d_aM1);
				nb_treat_prefixes++;
			} else {
				_remove_first_prefix_to_assign_meta1(s_aM1);
			}
		} else {
			loop = FALSE;
		}
		if (nb_treat_prefixes == 65536)
			loop = FALSE;
	} while (loop == TRUE);

	GRID_TRACE("END Assign prefix, nb treated=%d", nb_treat_prefixes);
	return NULL;
}
static void
__out_start(struct chunk_transfer_s *ct)
{
	gint64 header_size = 0;
	int dst_fd;
	struct evkeyval *kv;
	struct evkeyvalq *src;
	GSList *dsts_out = NULL; /* struct evbuffer */
	GSList *lc = NULL;
	GSList *lb = NULL;
	struct timeval tv_read, tv_write;
	GSList *l = NULL;

	if (chunk_transfer_get_dst_status(ct) != CNX_NONE)
		return;

	GRID_DEBUG("Starting the output...");

	const char *content_length = chunk_transfer_find_req_header(ct, "Content-Length");
	ct->dst_size = ct->dst_size_remaining = g_ascii_strtoll(content_length, NULL, 10);

	if (chunk_transfer_get_target_rawx_count(ct) < 1) {
		GRID_ERROR("ERROR, no destination rawx...");
		return;
	}

	GRID_DEBUG("ok, we have targets, prepare to send data");

	for (l = ct->dst_rawx; l && l->data; l = l->next) {
		gchar dst[128];
		gchar port_str[16];
		guint16 port = 0;
		struct bufferevent* bevent = NULL;

		addr_info_get_addr(&(((service_info_t*)l->data)->addr),
				dst, sizeof(dst), &port);
		bzero(port_str, sizeof(port_str));
		g_snprintf(port_str, sizeof(port_str), "%d", port);
		GRID_DEBUG("addr extracted: %s %s", dst, port_str);
		dst_fd = tcpip_open(dst, port_str);
		GRID_DEBUG("Destination opened");

		bevent = bufferevent_socket_new(ct->evt_base, dst_fd, 0);
		GRID_DEBUG("buffer event created");
		tv_write.tv_sec = 3;
		tv_write.tv_usec = 0;
		tv_read.tv_sec = 3;
		tv_read.tv_usec = 0;
		bufferevent_set_timeouts(bevent, &tv_read, &tv_write);
		bufferevent_setcb(bevent, __dst_cb_in, __dst_cb_out, __dst_cb_error, ct);
		bufferevent_disable(bevent, EV_READ|EV_WRITE);

		/* Write the HTTP request and the grid headers.
		 * WARN: don't prepend here, the order must be preserved! */
		dsts_out = g_slist_append(dsts_out, bufferevent_get_output(bevent));
		ct->dst_bevents = g_slist_append(ct->dst_bevents, bevent);
	}

	if (ct->dst_chunks) {
		GRID_DEBUG("dst_chunks filled, it's ok");
	} else {
		GRID_DEBUG("no dst_chunks defined");
	}

	for (lc = ct->dst_chunks, lb = dsts_out;
			lc && lc->data && lb && lb->data;
			lc = lc->next, lb = lb->next) {
		gchar idstr[65];
		bzero(idstr, sizeof(idstr));
		container_id_to_string(lc->data, idstr, sizeof(idstr));
		evbuffer_add_printf(lb->data, "PUT /%s HTTP/1.0\r\n", idstr);
		GRID_DEBUG("Sending put order");

		/* Add missing headers (xattr not returned by rawx) */
		evbuffer_add_printf(lb->data, "chunkid: %s\n", idstr);
		evbuffer_add_printf(lb->data, "chunkpos: %"G_GUINT32_FORMAT"\n",
				ct->source_chunk->position);
		evbuffer_add_printf(lb->data, "chunksize: %"G_GINT64_FORMAT"\n",
				ct->source_chunk->size);

		/* container id str */
		gchar cidstr[65];
		bzero(cidstr, sizeof(cidstr));
		container_id_t cid;
		chunk_transfer_get_container_id(ct, cid);
		container_id_to_string(cid, cidstr, sizeof(cidstr));
		evbuffer_add_printf(lb->data, "containerid: %s\n", cidstr);
		evbuffer_add_printf(lb->data, "contentpath: %s\n",
				chunk_transfer_get_content_path(ct));
		evbuffer_add_printf(lb->data, "chunknb: %"G_GUINT32_FORMAT"\n",
				chunk_transfer_get_content_nb_chunks(ct));
		evbuffer_add_printf(lb->data, "contentsize: %"G_GINT64_FORMAT"\n",
				chunk_transfer_get_content_size(ct));
		if (ct->attrs->usr_metadata)
			evbuffer_add_printf(lb->data, "contentmetadata: %s\n",
					ct->attrs->usr_metadata);
		if (ct->attrs->sys_metadata)
			evbuffer_add_printf(lb->data, "contentmetadata-sys: %s\n",
					ct->attrs->sys_metadata);

		evbuffer_add_printf(lb->data, "Connection: close\r\n");
		evbuffer_add_printf(lb->data, "Content-Type: application/octet-stream\r\n");
		evbuffer_add_printf(lb->data, "Content-Length: %"G_GINT64_FORMAT"\r\n",
				ct->dst_size);
	}

	src = ct->src_req->input_headers;
	TAILQ_FOREACH(kv, src, next) {
		GRID_DEBUG("headers found: %s | %s", kv->key, kv->value);
		if (g_str_has_prefix(kv->key, "X-Grid-")
				|| g_str_has_prefix(kv->key, "container")
				|| g_str_has_prefix(kv->key, "content")
				|| g_str_has_prefix(kv->key, "chunk")) {
			for (lb = dsts_out; lb && lb->data; lb = lb->next) {
				evbuffer_add_printf(lb->data, "%s: %s\r\n", kv->key, kv->value);
			}
		}
	}
GError*
meta0_assign_disable_meta1(struct meta0_backend_s *m0, gchar *ns_name,
		char **m1urls, gboolean nocheck)
{
	GList *working_m1list = NULL;
	GSList *unref_m1list = NULL;
	GPtrArray *new_meta1ref = NULL;
	GError *error = NULL;

	gchar *urls = g_strjoinv(" ", m1urls);
	GRID_INFO("START disable meta1 %s", urls);
	g_free(urls);

	error = _initContext(m0);
	if (error)
		goto errorLabel;

	if (nocheck) {
		error = _check(NULL);
		if (error)
			goto errorLabel;
	}

	error = _unref_meta1(m1urls);
	if (error)
		goto errorLabel;

	error = _init_assign(ns_name, &working_m1list, &unref_m1list);
	if (error)
		goto errorLabel;

	error = _assign(working_m1list, unref_m1list);
	if (error)
		goto errorLabel;

	new_meta1ref = _updated_meta1ref();
	error = meta0_backend_assign(m0, context->array_meta1_by_prefix,
			new_meta1ref, FALSE);
	if (error) {
		GRID_ERROR("Failed to update the database: (%d) %s",
				error->code, error->message);
		goto errorLabel;
	}

	context->lastAssignTime = g_date_time_new_now_local();

errorLabel:
	_resetContext();
	if (new_meta1ref) {
		meta0_utils_array_meta1ref_clean(new_meta1ref);
	}
	if (working_m1list) {
		g_list_free(working_m1list);
		working_m1list = NULL;
	}
	if (unref_m1list) {
		g_slist_free(unref_m1list);
		unref_m1list = NULL;
	}
	GRID_INFO("END DISABLE META1");
	return error;
}
GError*
meta0_assign_fill(struct meta0_backend_s *m0, gchar *ns_name, guint replicas,
		gboolean nodist)
{
	GError *error = NULL;
	GList *working_m1list = NULL;
	GSList *unref_m1list = NULL;
	GPtrArray *new_meta1ref = NULL;
	guint idx;
	struct meta0_assign_meta1_s *d_aM1;

	GRID_INFO("START fill meta0 db, replicas %d", replicas);

	error = _initContext(m0);
	if (error)
		goto errorLabel;

	context->replica = replicas;

	error = _init_assign(ns_name, &working_m1list, &unref_m1list);
	if (error)
		goto errorLabel;

	error = _check(NULL);
	if (error)
		goto errorLabel;

	while (replicas--) {
		for (idx = 0; idx < 65536; idx++) {
			working_m1list = g_list_sort(working_m1list, meta0_assign_sort_by_score);
			d_aM1 = _select_dest_assign_m1(working_m1list, NULL, (guint8*)(&idx),
					TRUE, nodist);
			if (!d_aM1) {
				error = NEWERROR(0,
						"Not enough META1 to meet the requirements "
						"(distance, number) (happened at prefix %u)", idx);
				goto errorLabel;
			}
			meta0_utils_array_add(context->array_meta1_by_prefix,
					(guint8*)(&idx), d_aM1->addr);
			_increase_score(d_aM1);
		}
	}

	new_meta1ref = _updated_meta1ref();
	error = meta0_backend_assign(m0, context->array_meta1_by_prefix,
			new_meta1ref, TRUE);
	if (error) {
		GRID_ERROR("Failed to update the database: (%d) %s",
				error->code, error->message);
		goto errorLabel;
	}

	context->lastAssignTime = g_date_time_new_now_local();

errorLabel:
	_resetContext();
	if (new_meta1ref) {
		meta0_utils_array_meta1ref_clean(new_meta1ref);
	}
	if (working_m1list) {
		g_list_free(working_m1list);
		working_m1list = NULL;
	}
	if (unref_m1list) {
		g_slist_free(unref_m1list);
		unref_m1list = NULL;
	}
	GRID_INFO("END FILL");
	return error;
}