/* Resolve the services of type `s` linked to the reference `u`.
 * `hk` is the precomputed cache key for this (srvtype, namespace, id) triple.
 * On success, *result receives a NULL-terminated array of service URLs
 * (possibly straight from the services cache) and NULL is returned;
 * on failure the GError is returned. */
static GError*
_resolve_reference_service(struct hc_resolver_s *r, struct hashstr_s *hk,
		struct hc_url_s *u, const gchar *s, gchar ***result)
{
	GError *err;
	gchar **m1urlv = NULL;

	GRID_TRACE2("%s(%s,%s,%s)", __FUNCTION__, hashstr_str(hk),
			hc_url_get(u, HCURL_WHOLE), s);

	/* Try to hit the cache for the service itself */
	*result = hc_resolver_get_cached(r, r->services.cache, hk);
	if (NULL != *result) {
		return NULL;
	}

	/* now attempt a real resolution */
	err = _resolve_meta1(r, u, &m1urlv);
	/* invariant: exactly one of (error, meta1 urls) is set */
	g_assert((err!=NULL) ^ (m1urlv!=NULL));
	if (NULL != err)
		return err;

	err = _resolve_service_through_many_meta1(m1urlv, u, s, result);
	/* invariant: exactly one of (error, result) is set */
	g_assert((err!=NULL) ^ (*result!=NULL));
	if (!err) {
		/* fill the cache */
		if (!(r->flags & HC_RESOLVER_NOCACHE))
			hc_resolver_store(r, r->services.cache, hk, *result);
	}

	g_strfreev(m1urlv);
	return err;
}
/* Try each META0 of `urlv` in random order until one yields the META1
 * addresses handling the 2-byte `prefix`.  Network-level errors (code < 100
 * in this code base) are discarded and another META0 is tried; any other
 * error is final and returned to the caller.  `urlv` is shuffled in place
 * so already-tried URLs sink to the tail.
 * Returns NULL on success (*result set), a GError otherwise. */
static GError *
_resolve_m1_through_many_m0(gchar **urlv, const guint8 *prefix, gchar ***result)
{
	GError *err;
	guint i, last;
	gchar *url;

	GRID_TRACE2("%s(%02X%02X)", __FUNCTION__, prefix[0], prefix[1]);
	for (last=g_strv_length(urlv); last ;last--) {
		/* pick a random URL */
		i = rand() % last;
		url = urlv[i];
		if (!(err = _resolve_m1_through_one_m0(url, prefix, result)))
			return NULL;
		/* A logical error (code >= 100) will not be solved by retrying
		 * on another META0: return it instead of leaking it and
		 * looping.  Only network errors are worth a retry. */
		if (err->code >= 100)
			return err;
		g_error_free(err);
		/* swap 'i' and 'last' */
		urlv[i] = urlv[last-1];
		urlv[last-1] = url;
	}
	return NEWERROR(500, "No META0 answered");
}
/* Drop one open-count on the base `bd` and, when the count reaches zero,
 * close it: with `force` the base is expired immediately, otherwise it is
 * parked on an idle list (hot or cold depending on its heat).
 * Returns NULL on success, or a CODE_INTERNAL_ERROR GError when `bd` is
 * out of range or the base is not in a closable state. */
GError *
sqlx_cache_unlock_and_close_base(sqlx_cache_t *cache, gint bd, gboolean force)
{
	GError *err = NULL;

	GRID_TRACE2("%s(%p,%d,%d)", __FUNCTION__, (void*)cache, bd, force);
	EXTRA_ASSERT(cache != NULL);

	if (base_id_out(cache, bd))
		return NEWERROR(CODE_INTERNAL_ERROR, "invalid base id=%d", bd);

	g_mutex_lock(&cache->lock);
	cache->used = TRUE;

	sqlx_base_t *base;
	base = GET(cache,bd);
	switch (base->status) {

		case SQLX_BASE_FREE:
			EXTRA_ASSERT(base->count_open == 0);
			EXTRA_ASSERT(base->owner == NULL);
			err = NEWERROR(CODE_INTERNAL_ERROR, "base not used");
			break;

		case SQLX_BASE_IDLE:
		case SQLX_BASE_IDLE_HOT:
			EXTRA_ASSERT(base->count_open == 0);
			EXTRA_ASSERT(base->owner == NULL);
			err = NEWERROR(CODE_INTERNAL_ERROR, "base closed");
			break;

		case SQLX_BASE_USED:
			EXTRA_ASSERT(base->count_open > 0); // held by the current thread
			if (!(-- base->count_open)) { // to be closed
				if (force) {
					_expire_base(cache, base);
				} else {
					sqlx_base_debug("CLOSING", base);
					base->owner = NULL;
					/* hot bases go to a separate idle list so they are
					 * expired after the cold ones */
					if (base->heat >= cache->heat_threshold)
						sqlx_base_move_to_list(cache, base, SQLX_BASE_IDLE_HOT);
					else
						sqlx_base_move_to_list(cache, base, SQLX_BASE_IDLE);
				}
			}
			break;

		case SQLX_BASE_CLOSING:
			EXTRA_ASSERT(base->owner != NULL);
			EXTRA_ASSERT(base->owner != g_thread_self());
			err = NEWERROR(CODE_INTERNAL_ERROR, "base being closed");
			break;
	}

	if (base && !err)
		sqlx_base_debug(__FUNCTION__, base);
	/* wake up one thread possibly waiting for this base */
	g_cond_signal(base->cond);
	g_mutex_unlock(&cache->lock);
	return err;
}
/* Resolve the META1 addresses handling the reference `u`: first through
 * the shared csm0 cache, then through a real META0 lookup whose outcome is
 * stored back in the cache.  On success *result receives a NULL-terminated
 * URL array and NULL is returned. */
static GError *
_resolve_meta1(struct hc_resolver_s *r, struct hc_url_s *u, gchar ***result)
{
	struct hashstr_s *hk;
	GError *err = NULL;

	GRID_TRACE2("%s(%s)", __FUNCTION__, hc_url_get(u, HCURL_WHOLE));
	/* cache key: physical namespace + first 4 hex digits of the
	 * container id (the 2-byte META1 prefix) */
	hk = hashstr_printf("meta1|%s|%.4s",
			hc_url_get(u, HCURL_NSPHYS),
			hc_url_get(u, HCURL_HEXID));

	/* Try to hit the cache */
	if (!(*result = hc_resolver_get_cached(r, r->csm0.cache, hk))) {
		/* get a meta0, then store it in the cache */
		gchar **m0urlv = NULL;

		err = _resolve_meta0(r, hc_url_get(u, HCURL_NSPHYS), &m0urlv);
		if (err != NULL)
			g_prefix_error(&err, "M0 resolution error: ");
		else {
			err = _resolve_m1_through_many_m0(m0urlv, hc_url_get_id(u), result);
			if (!err)
				hc_resolver_store(r, r->csm0.cache, hk, *result);
			g_strfreev(m0urlv);
		}
	}

	g_free(hk);
	return err;
}
/* Resolve the META0 addresses of namespace `ns`: first through the csm0
 * cache, then through a conscience listing whose outcome is stored back in
 * the cache.  On success *result receives a NULL-terminated URL array and
 * NULL is returned. */
static GError*
_resolve_meta0(struct hc_resolver_s *r, const gchar *ns, gchar ***result)
{
	struct hashstr_s *hk;
	GError *err = NULL;

	GRID_TRACE2("%s(%s)", __FUNCTION__, ns);
	hk = hashstr_printf("meta0|%s", ns);

	/* Try to hit the cache */
	if (!(*result = hc_resolver_get_cached(r, r->csm0.cache, hk))) {
		GSList *allm0;

		/* Now attempt a real resolution */
		if (!(allm0 = list_namespace_services2(ns, "meta0", &err))) {
			/* an empty listing without error still counts as a failure */
			if (!err)
				err = NEWERROR(500, "No meta0 available");
			*result = NULL;
		}
		else {
			*result = _srvlit_to_urlv(allm0);
			g_slist_foreach(allm0, service_info_gclean, NULL);
			g_slist_free(allm0);
			allm0 = NULL;

			/* then fill the cache */
			hc_resolver_store(r, r->csm0.cache, hk, *result);
			err = NULL;
		}
	}

	g_free(hk);
	return err;
}
/* Resolve the whole directory chain (META0 then META1 URLs) of `url` and
 * return both sets concatenated in a single NULL-terminated array stored
 * in *result.  The element strings of the intermediate arrays are moved
 * (not copied) into the merged array, so only the containers are freed. */
GError*
hc_resolve_reference_directory(struct hc_resolver_s *r, struct hc_url_s *url,
		gchar ***result)
{
	GRID_TRACE2("%s(%s)", __FUNCTION__, hc_url_get(url, HCURL_WHOLE));
	EXTRA_ASSERT(r != NULL);
	EXTRA_ASSERT(url != NULL);
	EXTRA_ASSERT(result != NULL);
	if (!hc_url_get_id(url) || !hc_url_has(url, HCURL_NS))
		return NEWERROR(CODE_BAD_REQUEST, "Incomplete URL [%s]",
				hc_url_get(url, HCURL_WHOLE));

	GError *err = NULL;
	gchar **m1v = NULL, **m0v = NULL;
	if (!(err = _resolve_meta0(r, hc_url_get(url, HCURL_NS), &m0v)))
		err = _resolve_meta1(r, url, &m1v);
	if (err) {
		if (m0v) g_strfreev (m0v);
		if (m1v) g_strfreev (m1v);
		return err;
	}

	/* merge both URL sets, stealing the string pointers */
	*result = g_malloc0(sizeof(gchar*) *
			(g_strv_length(m0v) + g_strv_length(m1v) + 1));
	gchar **d = *result;
	for (gchar **p=m0v; *p ;++p) { *(d++) = *p; }
	g_free (m0v); // pointers reused
	for (gchar **p=m1v; *p ;++p) { *(d++) = *p; }
	g_free (m1v); // pointers reused
	return NULL;
}
/* Try each META0 of `urlv` in random order until one yields the META1
 * addresses handling the 2-byte `prefix`.  Network errors trigger a retry
 * on another META0; any other error is final.  `urlv` is shuffled in place
 * so already-tried URLs sink to the tail. */
static GError *
_resolve_m1_through_many_m0(gchar **urlv, const guint8 *prefix, gchar ***result)
{
	guint i, last;
	gchar *url;

	GRID_TRACE2("%s(%02X%02X)", __FUNCTION__, prefix[0], prefix[1]);
	for (last=g_strv_length(urlv); last ;last--) {
		/* pick a random URL */
		i = rand() % last;
		url = urlv[i];

		GError *err = _resolve_m1_through_one_m0(url, prefix, result);
		/* invariant: exactly one of (error, result) is set */
		EXTRA_ASSERT((err!=NULL) ^ (*result!=NULL));
		if (!err)
			return NULL;
		/* only network errors are worth a retry on another META0 */
		if (!CODE_IS_NETWORK_ERROR(err->code))
			return err;
		g_error_free(err);

		/* swap 'i' and 'last' */
		urlv[i] = urlv[last-1];
		urlv[last-1] = url;
	}
	return NEWERROR(CODE_INTERNAL_ERROR, "No META0 answered");
}
/* Public entry point: resolve the services of type `srvtype` linked to
 * `url`.  Validates the URL, builds the cache key and delegates to
 * _resolve_reference_service(). */
GError*
hc_resolve_reference_service(struct hc_resolver_s *r, struct hc_url_s *url,
		const gchar *srvtype, gchar ***result)
{
	GRID_TRACE2("%s(%s,%s)", __FUNCTION__,
			hc_url_get(url, HCURL_WHOLE), srvtype);
	g_assert(r != NULL);
	g_assert(url != NULL);
	g_assert(srvtype != NULL);
	g_assert(result != NULL);
	g_assert(*result == NULL);

	if (!hc_url_get_id(url) || !hc_url_has(url, HCURL_NS))
		return NEWERROR(400, "Incomplete URL [%s]",
				hc_url_get(url, HCURL_WHOLE));

	struct hashstr_s *key = hashstr_printf("%s|%s|%s", srvtype,
			hc_url_get(url, HCURL_NSPHYS),
			hc_url_get(url, HCURL_HEXID));
	GError *err = _resolve_reference_service(r, key, url, srvtype, result);
	g_free(key);
	return err;
}
/* Try each META1 of `urlv` in random order until one lists the services
 * of type `s` linked to `u`.  Network-level errors (code < 100 in this
 * code base) trigger a retry on another META1; any other error is final.
 * `urlv` is shuffled in place so already-tried URLs sink to the tail. */
static GError *
_resolve_service_through_many_meta1(gchar **urlv, struct hc_url_s *u,
		const gchar *s, gchar ***result)
{
	guint i, last;
	gchar *url;

	GRID_TRACE2("%s(%s,%s)", __FUNCTION__, hc_url_get(u, HCURL_WHOLE), s);
	for (last=g_strv_length(urlv); last ;last--) {
		/* pick a random URL */
		i = rand() % last;
		url = urlv[i];

		GError *err = _resolve_service_through_one_m1(url, u, s, result);
		/* invariant: exactly one of (error, result) is set */
		g_assert((err!=NULL) ^ (*result!=NULL));
		if (!err)
			return NULL;
		/* logical errors (code >= 100) are final */
		if (err->code >= 100)
			return err;
		g_clear_error(&err);

		/* swap 'i' and 'last' */
		urlv[i] = urlv[last-1];
		urlv[last-1] = url;
	}
	/* this loop exhausted the META1 services, not the META0 ones */
	return NEWERROR(500, "No META1 answered");
}
/* Ask one META0 at `m0` for the META1 set handling the 2-byte `prefix`.
 * On success *result receives a NULL-terminated URL array and NULL is
 * returned; otherwise a GError is returned (a missing answer without an
 * explicit error becomes a 500). */
static GError *
_resolve_m1_through_one_m0(const gchar *m0, const guint8 *prefix,
		gchar ***result)
{
	struct addr_info_s ai;
	GError *err = NULL;

	GRID_TRACE2("%s(%s,%02X%02X)", __FUNCTION__, m0, prefix[0], prefix[1]);
	meta1_strurl_get_address(m0, &ai);

	GSList *lmap = meta0_remote_get_meta1_one(&ai,
			_timeout(&rc_resolver_timeout_m0, 30.0), prefix, &err);
	if (!lmap) {
		if (err)
			return err;
		return NEWERROR(500, "No meta1 found");
	}

	*result = _m0list_to_urlv(lmap);
	g_slist_foreach(lmap, meta0_info_gclean, NULL);
	g_slist_free(lmap);
	return NULL;
}
/* Main HTTP dispatch: attach a request id, parse the URI, find the
 * matching route handler and run it, then push timing/count statistics
 * and release every per-request resource. */
static enum http_rc_e
handler_action (struct http_request_s *rq, struct http_reply_ctx_s *rp)
{
	// Get a request id for the current request
	const gchar *reqid = g_tree_lookup (rq->tree_headers, PROXYD_HEADER_REQID);
	if (reqid)
		oio_ext_set_reqid(reqid);
	else
		oio_ext_set_random_reqid();

	// Then parse the request to find a handler
	struct oio_url_s *url = NULL;
	struct oio_requri_s ruri = {NULL, NULL, NULL, NULL};

	oio_requri_parse (rq->req_uri, &ruri);

	struct path_matching_s **matchings = _metacd_match (rq->cmd, ruri.path);

	GRID_TRACE2("URI path[%s] query[%s] fragment[%s] matches[%u]",
			ruri.path, ruri.query, ruri.fragment,
			g_strv_length((gchar**)matchings));

	/* default stat keys, overridden when a handler matches */
	GQuark gq_count = gq_count_unexpected;
	GQuark gq_time = gq_time_unexpected;

	enum http_rc_e rc;
	if (!*matchings) {
		/* no route: answer a JSON 404 directly */
		rp->set_content_type ("application/json");
		rp->set_body_gstr (g_string_new(
				"{\"status\":404,\"message\":\"No handler found\"}"));
		rp->set_status (HTTP_CODE_NOT_FOUND, "No handler found");
		rp->finalize ();
		rc = HTTPRC_DONE;
	} else {
		struct req_args_s args = {0};
		args.req_uri = &ruri;
		args.matchings = matchings;
		args.rq = rq;
		args.rp = rp;

		args.url = url = _metacd_load_url (&args);
		rp->subject(oio_url_get(url, OIOURL_HEXID));
		/* per-route statistics keys */
		gq_count = (*matchings)->last->gq_count;
		gq_time = (*matchings)->last->gq_time;

		GRID_TRACE("%s %s URL %s", __FUNCTION__,
				ruri.path, oio_url_get(args.url, OIOURL_WHOLE));

		req_handler_f handler = (*matchings)->last->u;
		rc = (*handler) (&args);
	}

	/* account the request both per-route and globally */
	gint64 spent = oio_ext_monotonic_time () - rq->client->time.evt_in;
	network_server_stat_push4 (rq->client->server, TRUE,
			gq_count, 1, gq_count_all, 1,
			gq_time, spent, gq_time_all, spent);

	path_matching_cleanv (matchings);
	oio_requri_clear (&ruri);
	oio_url_pclean (&url);
	oio_ext_set_reqid (NULL);
	return rc;
}
/* Reconfigure the cache so it manages at most `max` bases
 * (delegates the actual resizing to sqlx_cache_reset_bases). */
void
sqlx_cache_set_max_bases(sqlx_cache_t *cache, guint max)
{
	GRID_TRACE2("%s(%p,%u)", __FUNCTION__, cache, max);

	EXTRA_ASSERT(cache != NULL);
	EXTRA_ASSERT(max < 65536);

	sqlx_cache_reset_bases(cache, max);
}
/* Build a bean described by `descr` out of the current row of `stmt`.
 * Each described field is read from its SQL column: NULL columns clear
 * the field's presence flag, other columns are copied into the bean at
 * descr->offset_fields + fd->offset according to the field type.
 * Returns the newly allocated bean. */
static gpointer
_row_to_bean(const struct bean_descriptor_s *descr, sqlite3_stmt *stmt)
{
	const struct field_descriptor_s *fd;
	gpointer res;
	int col, s;

	res = _bean_create(descr);

	for (fd=descr->fields; fd->name ;fd++) {
		col = fd->position;
#if 0
		GRID_TRACE2("SQL column[%d,%d/%s,%s] field[%d,%ld,%d/%s,%s]",
				col,
				sqlite3_column_type(stmt, col),
				sqlite_strtype(sqlite3_column_type(stmt, col)),
				sqlite3_column_name(stmt, col),
				fd->position, fd->offset, fd->type,
				bean_strtype(fd->type), fd->name);
#endif
		if (sqlite3_column_type(stmt, col) == SQLITE_NULL) {
			/* NULL column: mark the field as absent */
			_bean_del_field(res, fd->position);
			continue;
		}

		_bean_set_field(res, fd->position);
		/* address of the field inside the bean's payload */
		gpointer pf = ((guint8*)res) + descr->offset_fields + fd->offset;
		switch (fd->type) {
			case FT_BOOL:
				*((gboolean*)pf) = sqlite3_column_int(stmt, col);
				continue;
			case FT_INT:
				*((gint64*)pf) = sqlite3_column_int64(stmt, col);
				continue;
			case FT_REAL:
				*((gdouble*)pf) = sqlite3_column_double(stmt, col);
				continue;
			case FT_TEXT:
				s = sqlite3_column_bytes(stmt, col);
				g_string_append_len(GSTR(pf),
						(const gchar*)sqlite3_column_text(stmt, col), s);
				continue;
			case FT_BLOB:
				s = sqlite3_column_bytes(stmt, col);
				g_byte_array_append(GBA(pf),
						(guint8*)sqlite3_column_blob(stmt, col), s);
				continue;
			default:
				g_assert_not_reached();
				continue;
		}
	}

	return res;
}
/* Public entry point: resolve the META1 directory services linked to
 * `url`.  Validates the URL then delegates to _resolve_meta1(). */
GError*
hc_resolve_reference_directory(struct hc_resolver_s *r, struct hc_url_s *url,
		gchar ***result)
{
	GRID_TRACE2("%s(%s)", __FUNCTION__, hc_url_get(url, HCURL_WHOLE));
	g_assert(r != NULL);
	g_assert(url != NULL);
	g_assert(result != NULL);

	const gboolean complete =
			hc_url_get_id(url) && hc_url_has(url, HCURL_NS);
	if (!complete)
		return NEWERROR(400, "Incomplete URL [%s]",
				hc_url_get(url, HCURL_WHOLE));

	return _resolve_meta1(r, url, result);
}
/* Ask one META1 at `m1` for the services of type `s` linked to `u`.
 * Exactly one of (*result, returned error) is set on exit. */
static GError *
_resolve_service_through_one_m1(const gchar *m1, struct hc_url_s *u,
		const gchar *s, gchar ***result)
{
	GRID_TRACE2("%s(%s,%s,%s)", __FUNCTION__, m1,
			hc_url_get(u, HCURL_WHOLE), s);

	struct addr_info_s ai;
	meta1_strurl_get_address(m1, &ai);

	GError *err = NULL;
	*result = meta1v2_remote_list_reference_services(&ai, &err, u, s);
	EXTRA_ASSERT((err!=NULL) ^ (*result!=NULL));
	return err;
}
/* Relink `base` onto the list matching `status` (no-op when it is
 * already there), then trace the transition. */
static void
sqlx_base_move_to_list(sqlx_cache_t *cache, sqlx_base_t *base,
		enum sqlx_base_status_e status)
{
	const enum sqlx_base_status_e previous = base->status;

	if (previous != status) {
		sqlx_base_remove_from_list(cache, base);
		sqlx_base_add_to_list(cache, base, status);
	}

	GRID_TRACE2("BASE [%d/%s] moved from %s to %s",
			base->index, hashstr_str(base->name),
			sqlx_status_to_str(previous),
			sqlx_status_to_str(status));
}
/* Trace the current state of `base` (index, name, open count, list,
 * neighbour links), tagged with the caller's name `from`.  Both
 * parameters are voided so the function compiles away cleanly when
 * tracing is disabled. */
static void
sqlx_base_debug_func(const gchar *from, sqlx_base_t *base)
{
	(void) from;
	(void) base;
	EXTRA_ASSERT(base);
	GRID_TRACE2("BASE [%d/%s] %"G_GUINT32_FORMAT" LIST=%s [%d,%d] (%s)",
			base->index,
			(base->name ? hashstr_str(base->name) : ""),
			base->count_open,
			sqlx_status_to_str(base->status),
			base->link.prev, base->link.next,
			from);
}
void hc_decache_reference_service(struct hc_resolver_s *r, struct hc_url_s *url, const gchar *srvtype) { struct hashstr_s *hk; GRID_TRACE2("%s(%s,%s)", __FUNCTION__, hc_url_get(url, HCURL_WHOLE), srvtype); EXTRA_ASSERT(r != NULL); EXTRA_ASSERT(url != NULL); EXTRA_ASSERT(srvtype != NULL); if (r->flags & HC_RESOLVER_NOCACHE) return; hk = hashstr_printf("%s|%s|%s", srvtype, hc_url_get(url, HCURL_NS), hc_url_get(url, HCURL_HEXID)); hc_resolver_forget(r, r->services.cache, hk); g_free(hk); }
/* Load the single row `rowid` of table `name` from `db` into the ASN
 * structures `row`/`table`.  Each SQL row returned by the statement is
 * passed to load_statement(). */
static void
load_table_row(sqlite3 *db, const hashstr_t *name, gint64 rowid, Row_t *row,
		Table_t *table)
{
	int rc;
	sqlite3_stmt *stmt = NULL;
	gchar *sql;

	GRID_TRACE2("%s(%p,%s,%"G_GINT64_FORMAT",%p,%p)", __FUNCTION__,
			db, hashstr_str(name), rowid, row, table);

	/* NOTE(review): the table name is interpolated into the SQL text
	 * (sqlite cannot bind identifiers) — assumes `name` comes from a
	 * trusted schema, not from user input; TODO confirm. */
	sql = g_strdup_printf("SELECT * FROM %s WHERE ROWID = ?", hashstr_str(name));
	sqlite3_prepare_debug(rc, db, sql, -1, &stmt, NULL);
	g_free(sql);

	/* NOTE(review): `rc` from the prepare step is not checked before
	 * binding/stepping — presumably sqlite3_prepare_debug aborts or
	 * leaves stmt NULL on failure; verify. */
	sqlite3_bind_int64(stmt, 1, rowid);
	while (SQLITE_ROW == (rc = sqlite3_step(stmt)))
		load_statement(stmt, row, table);

	sqlite3_finalize_debug(rc, stmt);
}
void hc_decache_reference(struct hc_resolver_s *r, struct hc_url_s *url) { struct hashstr_s *hk; GRID_TRACE2("%s(%s)", __FUNCTION__, hc_url_get(url, HCURL_WHOLE)); g_assert(r != NULL); g_assert(url != NULL); if (r->flags & HC_RESOLVER_NOCACHE) return; hk = hashstr_printf("meta0|%s", hc_url_get(url, HCURL_NSPHYS)); hc_resolver_forget(r, r->csm0.cache, hk); g_free(hk); hk = hashstr_printf("meta1|%s|%.4s", hc_url_get(url, HCURL_NSPHYS), hc_url_get(url, HCURL_HEXID)); hc_resolver_forget(r, r->csm0.cache, hk); g_free(hk); }
static GError* _load(struct meta0_backend_s *m0) { GError *err = NULL; struct sqlx_sqlite3_s *sq3 = NULL; GRID_TRACE2("%s(%p)", __FUNCTION__, m0); err = _open_and_lock(m0,M0V2_OPENBASE_MASTERSLAVE, &sq3); if (err != NULL) { return err; } err = _load_from_base(sq3, &(m0->array_by_prefix)); if (err != NULL) g_prefix_error(&err, "Query error: "); err = _load_meta1ref_from_base(sq3, &(m0->array_meta1_ref)); if (err != NULL) g_prefix_error(&err, "Query error: "); _unlock_and_close(sq3); return err; }
/* Look up (or reserve) the base named `hname` in the cache and lock it
 * for the current thread, waiting up to cache->open_timeout milliseconds
 * (forever when the timeout is negative) if another thread holds it.
 * On success *result receives the base descriptor index and NULL is
 * returned; on timeout/overload a CODE_UNAVAILABLE GError is returned. */
GError *
sqlx_cache_open_and_lock_base(sqlx_cache_t *cache, const hashstr_t *hname,
		gint *result)
{
	gint bd;
	GError *err = NULL;
	sqlx_base_t *base = NULL;
	GTimeVal *deadline = g_alloca(sizeof(GTimeVal));

	GRID_TRACE2("%s(%p,%s,%p)", __FUNCTION__, (void*)cache,
			hname ? hashstr_str(hname) : "NULL", (void*)result);
	EXTRA_ASSERT(cache != NULL);
	EXTRA_ASSERT(hname != NULL);
	EXTRA_ASSERT(result != NULL);

	if (cache->open_timeout >= 0) {
		/* open_timeout is in milliseconds, g_time_val_add wants µs */
		g_get_current_time(deadline);
		g_time_val_add(deadline, cache->open_timeout * 1000);
	} else {
		// wait forever
		deadline = NULL;
	}

	g_mutex_lock(cache->lock);
	cache->used = TRUE;
retry:
	bd = sqlx_lookup_id(cache, hname);
	if (bd < 0) {
		/* base not cached yet: reserve a fresh slot, evicting an idle
		 * base and retrying when the cache is full */
		if (!(err = sqlx_base_reserve(cache, hname, &base))) {
			bd = base->index;
			*result = base->index;
			sqlx_base_debug("OPEN", base);
		} else {
			GRID_DEBUG("No base available for [%s] (%d %s)",
					hashstr_str(hname), err->code, err->message);
			if (sqlx_expire_first_idle_base(cache, NULL) >= 0) {
				g_clear_error(&err);
				goto retry;
			}
		}
	} else {
		base = GET(cache, bd);
		switch (base->status) {
			case SQLX_BASE_FREE:
				EXTRA_ASSERT(base->count_open == 0);
				EXTRA_ASSERT(base->owner == NULL);
				GRID_ERROR("free base referenced");
				g_assert_not_reached();
				break;
			case SQLX_BASE_IDLE:
			case SQLX_BASE_IDLE_HOT:
				/* base cached but closed: reopen it for this thread */
				EXTRA_ASSERT(base->count_open == 0);
				EXTRA_ASSERT(base->owner == NULL);
				sqlx_base_move_to_list(cache, base, SQLX_BASE_USED);
				base->count_open ++;
				base->owner = g_thread_self();
				*result = base->index;
				break;
			case SQLX_BASE_USED:
				EXTRA_ASSERT(base->count_open > 0);
				EXTRA_ASSERT(base->owner != NULL);
				if (base->owner != g_thread_self()) {
					// The lock is held by another thread/request
					GRID_DEBUG("Base [%s] in use by another thread (%X), waiting...",
							hashstr_str(hname), compute_thread_id(base->owner));
					/* This is to avoid server thread starvation,
					 * due to all threads waiting on the same base. */
					if (cache->max_waiting > 0 &&
							base->count_waiting >= cache->max_waiting) {
						err = NEWERROR(CODE_UNAVAILABLE,
								"database currently in use by another request, "
								"and %d others threads already waiting",
								base->count_waiting);
						break;
					}
					base->count_waiting++;
					if (g_cond_timed_wait(base->cond, cache->lock, deadline)) {
						// Thread was woken up before deadline
						GRID_DEBUG("Retrying to open [%s]", hashstr_str(hname));
						base->count_waiting--;
						goto retry;
					} else {
						// Deadline has been reached
						base->count_waiting--;
						if (cache->open_timeout > 0) {
							err = NEWERROR(CODE_UNAVAILABLE,
									"database currently in use by another request"
									" (we waited %ldms)", cache->open_timeout);
						} else {
							err = NEWERROR(CODE_UNAVAILABLE,
									"database currently in use by another request");
						}
						GRID_DEBUG("failed to open base: "
								"in use by another request (thread %X)",
								compute_thread_id(base->owner));
						break;
					}
				}
				/* recursive open by the owning thread */
				base->owner = g_thread_self();
				base->count_open ++;
				*result = base->index;
				break;
			case SQLX_BASE_CLOSING:
				EXTRA_ASSERT(base->owner != NULL);
				// Just wait for a notification then retry
				if (g_cond_timed_wait(base->cond, cache->lock, deadline))
					goto retry;
				else {
					err = NEWERROR(CODE_UNAVAILABLE,
							"Database stuck in closing state");
					break;
				}
		}
	}

	if (base) {
		if (!err) {
			sqlx_base_debug(__FUNCTION__, base);
			EXTRA_ASSERT(base->owner == g_thread_self());
			EXTRA_ASSERT(base->count_open > 0);
		}
		/* wake up another thread possibly waiting on this base */
		g_cond_signal(base->cond);
	}
	g_mutex_unlock(cache->lock);
	return err;
}
/* Pump input from the client socket into its data-slab sequence, up to
 * ROUND_MAXSIZE bytes per round, then hand the buffered data to the
 * transport layer.  Returns one of the RC_* codes from _ds_feed() or
 * from the transport notification. */
static int
_client_manage_input(struct network_client_s *client)
{
	guint total, size;

	/* GNU nested function: notify the transport layer of any pending
	 * buffered input; drains the buffers when no notifier is set */
	int _notify(void) {
		if (!client->transport.notify_input)
			return RC_PROCESSED;
		if (!data_slab_sequence_has_data(&(client->input))) {
			/* drain the data */
			data_slab_sequence_clean_data(&(client->input));
			return RC_PROCESSED;
		}
		GRID_TRACE2("fd=%d passing %u/%"G_GSIZE_FORMAT" to transport %p",
				client->fd, total,
				data_slab_sequence_size(&(client->input)),
				client->transport.notify_input);
		return client->transport.notify_input(client);
	}

	EXTRA_ASSERT(client != NULL);
	EXTRA_ASSERT(client->fd >= 0);

	for (size=SLAB_STARTSIZE, total=0; total < ROUND_MAXSIZE ;) {

		int rc;
		struct data_slab_s *in = data_slab_make_empty(size);

		switch (rc = _ds_feed(client->fd, in)) {
			case RC_ERROR:
				data_slab_free(in);
				return RC_ERROR;
			case RC_NODATA: /* no more data to expect */
			case RC_NOTREADY:
				/* empty slabs are discarded, filled ones are queued */
				if (!in->data.buffer.end)
					data_slab_free(in);
				else {
					data_slab_sequence_append(&(client->input), in);
					total += in->data.buffer.end;
				}
				in = NULL;
				if (RC_NODATA == _notify())
					rc = RC_NODATA;
				return rc;
			case RC_PROCESSED:
				if (!in->data.buffer.end)
					data_slab_free(in);
				else {
					data_slab_sequence_append(&(client->input), in);
					total += in->data.buffer.end;
				}
				/* first read succeeded: grow the slab size for the rest
				 * of the round */
				size = SLAB_MAXSIZE;
				in = NULL;
				break;
			default:
				g_assert_not_reached();
		}
	}

	return _notify();
}
/* Register or refresh the service described by `si` in `srvtype`:
 * create the entry when unknown, merge the tags, refresh the timestamp,
 * then apply the score policy (first-registration zeroing, explicit
 * lock/unlock, or regular recomputation).  Returns the (possibly new)
 * conscience entry. */
struct conscience_srv_s *
conscience_srvtype_refresh(struct conscience_srvtype_s *srvtype,
		struct service_info_s *si)
{
	g_assert_nonnull (srvtype);
	g_assert_nonnull (si);
	struct conscience_srvid_s srvid;
	memcpy(&(srvid.addr), &(si->addr), sizeof(addr_info_t));

	struct service_tag_s *tag_first =
		service_info_get_tag(si->tags, NAME_TAGNAME_RAWX_FIRST);
	gboolean really_first = FALSE;

	/*register the service if necessary */
	struct conscience_srv_s *p_srv = conscience_srvtype_get_srv(srvtype, &srvid);
	if (!p_srv) {
		p_srv = conscience_srvtype_register_srv(srvtype, NULL, &srvid);
		g_assert_nonnull (p_srv);
		/* the "first" tag only matters on the very first registration */
		really_first = tag_first && tag_first->type == STVT_BOOL
			&& tag_first->value.b;
	}

	/* refresh the tags: create missing, replace existing
	 * (but the tags are not flushed before) */
	if (si->tags) {
		TRACE("Refreshing tags for srv [%.*s]",
				(int)(LIMIT_LENGTH_SRVDESCR), p_srv->description);
		const guint max = si->tags->len;
		for (guint i = 0; i < max; i++) {
			struct service_tag_s *tag = g_ptr_array_index(si->tags, i);
			/* the "first" marker is consumed above, never stored */
			if (tag == tag_first)
				continue;
			struct service_tag_s *orig =
				conscience_srv_ensure_tag(p_srv, tag->name);
			service_tag_copy(orig, tag);
		}
	}

	p_srv->score.timestamp = oio_ext_monotonic_seconds ();
	if (si->score.value == SCORE_UNSET || si->score.value == SCORE_UNLOCK) {
		if (really_first) {
			/* brand new service: start locked at zero */
			GRID_TRACE2("SRV first [%s]", p_srv->description);
			p_srv->score.value = 0;
			p_srv->locked = TRUE;
		} else {
			if (si->score.value == SCORE_UNLOCK) {
				if (p_srv->locked) {
					GRID_TRACE2("SRV unlocked [%s]", p_srv->description);
					p_srv->locked = FALSE;
					p_srv->score.value =
						CLAMP (p_srv->score.value, SCORE_DOWN, SCORE_MAX);
				} else {
					GRID_TRACE2("SRV already unlocked [%s]",
							p_srv->description);
				}
			} else { /* UNSET, a.k.a. regular computation */
				if (p_srv->locked) {
					GRID_TRACE2("SRV untouched [%s]", p_srv->description);
				} else {
					GError *err = NULL;
					if (!conscience_srv_compute_score(p_srv, &err)) {
						GRID_TRACE2("SRV error [%s]: (%d) %s",
								p_srv->description, err->code, err->message);
						g_clear_error (&err);
					} else {
						GRID_TRACE2("SRV refreshed [%s]",
								p_srv->description);
					}
				}
			}
		}
	} else { /* LOCK */
		p_srv->score.value = CLAMP(si->score.value, SCORE_DOWN, SCORE_MAX);
		if (p_srv->locked) {
			GRID_TRACE2("SRV already locked [%s]", p_srv->description);
		} else {
			p_srv->locked = TRUE;
			GRID_TRACE2("SRV locked [%s]", p_srv->description);
		}
	}

	return p_srv;
}
/* Open and lock the META1 base hosting `url` (two-byte prefix of the
 * container id), after checking the URL is complete, the namespace is
 * managed, and the prefix belongs to this META1.  On success *handle
 * receives the open base. */
GError*
_open_and_lock(struct meta1_backend_s *m1, struct oio_url_s *url,
		enum m1v2_open_type_e how, struct sqlx_sqlite3_s **handle)
{
	EXTRA_ASSERT(m1 != NULL);
	EXTRA_ASSERT(url != NULL);
	EXTRA_ASSERT(handle != NULL);

	GRID_TRACE2("%s(%p,%p,%d,%p)", __FUNCTION__, (void*)m1,
			oio_url_get (url, OIOURL_HEXID), how, (void*)handle);

	if (!oio_url_has (url, OIOURL_HEXID))
		return NEWERROR (CODE_BAD_REQUEST, "Partial URL (missing HEXID)");
	if (!m1b_check_ns_url (m1, url))
		return NEWERROR(CODE_NAMESPACE_NOTMANAGED, "Invalid NS");

	/* base name = first two bytes of the container id, in hexa */
	gchar base[5];
	const guint8 *cid = oio_url_get_id(url);
	g_snprintf(base, sizeof(base), "%02X%02X", cid[0], cid[1]);

	if (!meta1_prefixes_is_managed(m1->prefixes, cid))
		return NEWERROR(CODE_RANGE_NOTFOUND, "prefix [%s] not managed", base);

	/* Now open/lock the base in a way suitable for our op */
	struct sqlx_name_s n = {.base=base, .type=NAME_SRVTYPE_META1, .ns=m1->ns_name};
	GError *err = sqlx_repository_open_and_lock(m1->repo, &n, m1_to_sqlx(how),
			handle, NULL);
	if (err != NULL) {
		/* redirections are part of the protocol, not plain failures */
		if (!CODE_IS_REDIRECT(err->code))
			g_prefix_error(&err, "Open/Lock error: ");
		return err;
	}

	EXTRA_ASSERT(*handle != NULL);
	GRID_TRACE("Opened and locked [%s][%s] -> [%s][%s]",
			base, NAME_SRVTYPE_META1,
			(*handle)->name.base, (*handle)->name.type);
	return NULL;
}

/* Insert the user row (cid, account, user) of `url` into the `users`
 * table of the open base `sq3`.  A constraint violation is mapped to
 * CODE_CONTAINER_EXISTS. */
GError*
__create_user(struct sqlx_sqlite3_s *sq3, struct oio_url_s *url)
{
	if (!oio_url_has_fq_container (url))
		return NEWERROR(CODE_BAD_REQUEST, "Partial URL");

	static const gchar *sql = "INSERT INTO users ('cid','account','user') VALUES (?,?,?)";

	GError *err = NULL;
	sqlite3_stmt *stmt = NULL;
	int rc;

	EXTRA_ASSERT(sq3 != NULL);
	EXTRA_ASSERT(sq3->db != NULL);

	/* Prepare the statement */
	sqlite3_prepare_debug(rc, sq3->db, sql, -1, &stmt, NULL);
	if (rc != SQLITE_OK)
		err = M1_SQLITE_GERROR(sq3->db, rc);
	else {
		sqlite3_bind_blob(stmt, 1, oio_url_get_id(url),
				oio_url_get_id_size(url), NULL);
		sqlite3_bind_text(stmt, 2, oio_url_get(url, OIOURL_ACCOUNT), -1, NULL);
		sqlite3_bind_text(stmt, 3, oio_url_get(url, OIOURL_USER), -1, NULL);

		/* Run the results */
		do { rc = sqlite3_step(stmt); } while (rc == SQLITE_ROW);

		if (rc != SQLITE_OK && rc != SQLITE_DONE) {
			err = M1_SQLITE_GERROR(sq3->db, rc);
			if (rc == SQLITE_CONSTRAINT) {
				/* unique-key violation: the user already exists */
				g_prefix_error(&err, "Already created? ");
				err->code = CODE_CONTAINER_EXISTS;
			}
		}
		sqlite3_finalize_debug(rc, stmt);
	}

	if (err)
		GRID_DEBUG("User creation failed : (%d) %s", err->code, err->message);
	return err;
}

/* Look up the user of `url` in the open base `sq3`.  When `result` is
 * set, each matching (account,user) pair is materialized as an URL in a
 * NULL-terminated array.  With autocreate (`ac`), a missing user is
 * created once and the lookup retried.  Returns CODE_USER_NOTFOUND when
 * the user does not exist and cannot/should not be created. */
GError*
__info_user(struct sqlx_sqlite3_s *sq3, struct oio_url_s *url, gboolean ac,
		struct oio_url_s ***result)
{
	GError *err = NULL;
	sqlite3_stmt *stmt = NULL;
	GPtrArray *gpa;
	int rc;
	gboolean found;

	EXTRA_ASSERT(sq3 != NULL);
	EXTRA_ASSERT(sq3->db != NULL);
	EXTRA_ASSERT(url != NULL);

retry:
	/* Prepare the statement */
	sqlite3_prepare_debug(rc, sq3->db,
			"SELECT account,user FROM users WHERE cid = ?", -1, &stmt, NULL);
	if (rc != SQLITE_OK)
		return M1_SQLITE_GERROR(sq3->db, rc);
	(void) sqlite3_bind_blob(stmt, 1, oio_url_get_id (url),
			oio_url_get_id_size (url), NULL);

	/* Run the results */
	found = FALSE;
	/* only collect rows when the caller asked for them */
	gpa = result ? g_ptr_array_new() : NULL;
	do {
		if (SQLITE_ROW == (rc = sqlite3_step(stmt))) {
			found = TRUE;
			if (!gpa) continue;
			struct oio_url_s *u = oio_url_empty ();
			oio_url_set (u, OIOURL_NS, oio_url_get (url, OIOURL_NS));
			oio_url_set (u, OIOURL_ACCOUNT,
					(char*)sqlite3_column_text(stmt, 0));
			oio_url_set (u, OIOURL_USER,
					(char*)sqlite3_column_text(stmt, 1));
			oio_url_set (u, OIOURL_HEXID, oio_url_get (url, OIOURL_HEXID));
			g_ptr_array_add(gpa, u);
		}
	} while (rc == SQLITE_ROW);

	if (rc != SQLITE_DONE && rc != SQLITE_OK) {
		err = M1_SQLITE_GERROR(sq3->db, rc);
		g_prefix_error(&err, "DB error: ");
	}

	sqlite3_finalize_debug(rc,stmt);
	stmt = NULL;

	if (err) {
		if (gpa) {
			g_ptr_array_set_free_func (gpa, (GDestroyNotify)oio_url_clean);
			g_ptr_array_free (gpa, TRUE);
		}
		return err;
	}

	if (!found) {
		if (gpa) g_ptr_array_free (gpa, TRUE);
		if (ac) {
			ac = FALSE; /* do not retry */
			err = __create_user (sq3, url);
			if (!err) goto retry;
		}
		return NEWERROR(CODE_USER_NOTFOUND, "no such container");
	}

	if (gpa)
		*result = (struct oio_url_s**) metautils_gpa_to_array(gpa, TRUE);
	return NULL;
}
/* Look up (or reserve) the base named `hname` and lock it for the
 * current thread.  Contended bases are retried in short g_cond waits
 * until an absolute deadline (cache->open_timeout when positive,
 * DEFAULT_CACHE_OPEN_TIMEOUT otherwise) measured on the monotonic
 * clock via oio_ext_monotonic_time().  On success *result receives the
 * base descriptor index; on timeout a CODE_UNAVAILABLE GError. */
GError *
sqlx_cache_open_and_lock_base(sqlx_cache_t *cache, const hashstr_t *hname,
		gint *result)
{
	gint bd;
	GError *err = NULL;
	sqlx_base_t *base = NULL;

	EXTRA_ASSERT(cache != NULL);
	EXTRA_ASSERT(hname != NULL);
	EXTRA_ASSERT(result != NULL);

	gint64 start = oio_ext_monotonic_time();
	/* NOTE(review): open_timeout is added directly to a monotonic
	 * timestamp, so it is presumably expressed in the same unit
	 * (microseconds) — TODO confirm against its declaration. */
	gint64 deadline = DEFAULT_CACHE_OPEN_TIMEOUT;
	if (cache->open_timeout > 0)
		deadline = cache->open_timeout;
	GRID_TRACE2("%s(%p,%s,%p) delay = %"G_GINT64_FORMAT, __FUNCTION__,
			(void*)cache, hname ? hashstr_str(hname) : "NULL",
			(void*)result, deadline);
	deadline += start;

	g_mutex_lock(&cache->lock);
	cache->used = TRUE;
retry:
	bd = sqlx_lookup_id(cache, hname);
	if (bd < 0) {
		/* base not cached yet: reserve a fresh slot, evicting an idle
		 * base and retrying when the cache is full */
		if (!(err = sqlx_base_reserve(cache, hname, &base))) {
			bd = base->index;
			*result = base->index;
			sqlx_base_debug("OPEN", base);
		} else {
			GRID_DEBUG("No base available for [%s] (%d %s)",
					hashstr_str(hname), err->code, err->message);
			if (sqlx_expire_first_idle_base(cache, 0) >= 0) {
				g_clear_error(&err);
				goto retry;
			}
		}
	} else {
		base = GET(cache, bd);

		gint64 now = oio_ext_monotonic_time ();
		if (now > deadline) {
			err = NEWERROR (CODE_UNAVAILABLE,
					"DB busy (after %"G_GINT64_FORMAT" ms)",
					(now - start) / G_TIME_SPAN_MILLISECOND);
		} else switch (base->status) {

			case SQLX_BASE_FREE:
				EXTRA_ASSERT(base->count_open == 0);
				EXTRA_ASSERT(base->owner == NULL);
				GRID_ERROR("free base referenced");
				g_assert_not_reached();
				break;

			case SQLX_BASE_IDLE:
			case SQLX_BASE_IDLE_HOT:
				/* base cached but closed: reopen it for this thread */
				EXTRA_ASSERT(base->count_open == 0);
				EXTRA_ASSERT(base->owner == NULL);
				sqlx_base_move_to_list(cache, base, SQLX_BASE_USED);
				base->count_open ++;
				base->owner = g_thread_self();
				*result = base->index;
				break;

			case SQLX_BASE_USED:
				EXTRA_ASSERT(base->count_open > 0);
				EXTRA_ASSERT(base->owner != NULL);
				if (base->owner != g_thread_self()) {
					GRID_DEBUG("Base [%s] in use by another thread (%X), waiting...",
							hashstr_str(hname), oio_log_thread_id(base->owner));
					/* The lock is held by another thread/request.
					 * XXX(jfs): do not use 'now' because it can be a fake clock */
					g_cond_wait_until(base->cond, &cache->lock,
							g_get_monotonic_time() + oio_cache_period_cond_wait);
					goto retry;
				}
				/* recursive open by the owning thread */
				base->owner = g_thread_self();
				base->count_open ++;
				*result = base->index;
				break;

			case SQLX_BASE_CLOSING:
				EXTRA_ASSERT(base->owner != NULL);
				/* Just wait for a notification then retry
				   XXX(jfs): do not use 'now' because it can be a fake clock */
				g_cond_wait_until(base->cond, &cache->lock,
						g_get_monotonic_time() + oio_cache_period_cond_wait);
				goto retry;
		}
	}

	if (base) {
		if (!err) {
			sqlx_base_debug(__FUNCTION__, base);
			EXTRA_ASSERT(base->owner == g_thread_self());
			EXTRA_ASSERT(base->count_open > 0);
		}
		/* wake up another thread possibly waiting on this base */
		g_cond_signal(base->cond);
	}
	g_mutex_unlock(&cache->lock);
	return err;
}
static enum http_rc_e _registration (struct req_args_s *args, enum reg_op_e op, struct json_object *jsrv) { GError *err; if (!jsrv || !json_object_is_type (jsrv, json_type_object)) return _reply_common_error (args, BADREQ("Expected: json object")); if (!push_queue) return _reply_bad_gateway(args, SYSERR("Service upstream disabled")); if (NULL != (err = _cs_check_tokens(args))) return _reply_notfound_error (args, err); struct service_info_s *si = NULL; err = service_info_load_json_object (jsrv, &si, TRUE); if (err) { g_prefix_error (&err, "JSON error: "); if (err->code == CODE_BAD_REQUEST) return _reply_format_error (args, err); else return _reply_system_error (args, err); } if (!si->type[0]) { service_info_clean (si); return _reply_format_error (args, BADREQ("Service type not specified")); } if (!si->ns_name[0]) { GRID_TRACE2("%s NS forced to %s", __FUNCTION__, si->ns_name); g_strlcpy (si->ns_name, nsname, sizeof(si->ns_name)); } else if (!validate_namespace (si->ns_name)) { service_info_clean (si); return _reply_format_error (args, BADNS()); } gchar *k = service_info_key (si); STRING_STACKIFY(k); GRID_TRACE2("%s op=%s score=%d key=[%s]", __FUNCTION__, _regop_2str(op), si->score.value, k); switch (op) { case REGOP_PUSH: si->score.value = SCORE_UNSET; if (!service_is_known (k)) { service_learn (k); service_tag_set_value_boolean (service_info_ensure_tag ( si->tags, NAME_TAGNAME_RAWX_FIRST), TRUE); } break; case REGOP_LOCK: si->score.value = CLAMP(si->score.value, SCORE_DOWN, SCORE_MAX); break; case REGOP_UNLOCK: si->score.value = SCORE_UNLOCK; break; default: g_assert_not_reached(); } if (cs_expire_local_services > 0) { struct service_info_s *v = service_info_dup (si); v->score.timestamp = oio_ext_monotonic_seconds (); PUSH_DO( const struct service_info_s *si0 = lru_tree_get(srv_registered, k); if (si0) v->score.value = si0->score.value; lru_tree_insert (srv_registered, g_strdup(k), v); ); }
/* Reload the prefix->META1 mapping of `m1ps` from the META0 at `m0_addr`.
 * On success the internal by-prefix array and managed-prefix bitmap are
 * swapped in under the lock, *meta0_ok is raised, and *updated_prefixes
 * (when a previous mapping existed) receives the list of prefixes whose
 * "managed by local_addr" status changed. */
static GError*
_cache_load_from_m0(struct meta1_prefixes_set_s *m1ps,
		const gchar *ns_name,
		const struct addr_info_s *local_addr,
		struct addr_info_s *m0_addr,
		GArray **updated_prefixes,
		gboolean *meta0_ok)
{
	GError *err = NULL;
	GSList *m0info_list = NULL;

	EXTRA_ASSERT(m1ps != NULL);
	GRID_TRACE2("%s(%p,%s,%p,%p)", __FUNCTION__, m1ps, ns_name,
			local_addr, m0_addr);
	(void)ns_name;

	gchar m0[STRLEN_ADDRINFO];
	grid_addrinfo_to_string (m0_addr, m0, sizeof(m0));
	err = meta0_remote_get_meta1_all(m0, &m0info_list);
	if (err) {
		g_prefix_error(&err, "Remote error: ");
		return err;
	}
	if (!m0info_list) {
		/* an empty mapping is not an error, just nothing to install */
		GRID_DEBUG("META0 has no prefix configured!");
		return NULL;
	}

	*meta0_ok = TRUE;
	guint8 *cache = _cache_from_m0l(m0info_list, local_addr);
	GPtrArray *by_prefix = meta0_utils_list_to_array(m0info_list);

	g_mutex_lock(&m1ps->lock);
	GRID_DEBUG("Got %u prefixes from M0, %u in place",
			by_prefix->len, m1ps->by_prefix ? m1ps->by_prefix->len : 0);

	if ( m1ps->by_prefix ) {
		/* diff the old and new bitmaps to report changed prefixes.
		 * NOTE(review): the prefix is read through (guint8*)&prefix, i.e.
		 * the first two bytes of a host-endian guint — assumes the same
		 * layout as the stored 2-byte prefixes (little-endian hosts);
		 * TODO confirm. */
		guint prefix;
		*updated_prefixes = g_array_new(FALSE, FALSE, sizeof(guint16));
		for( prefix=0 ; prefix <65536 ;prefix++) {
			if ( _cache_is_managed(m1ps->cache,(guint8 *)&prefix)
					!= _cache_is_managed( cache,(guint8 *)&prefix)) {
				g_array_append_vals(*updated_prefixes, &prefix, 1);
			}
		}
	}

	/* install the new structures, keep the old ones for cleanup */
	SWAP_PTR(m1ps->by_prefix, by_prefix);
	SWAP_PTR(m1ps->cache, cache);
	g_mutex_unlock(&m1ps->lock);

	if (by_prefix)
		meta0_utils_array_clean(by_prefix);
	by_prefix = NULL;

	if (cache)
		g_free(cache);
	cache = NULL;

	g_slist_foreach(m0info_list, meta0_info_gclean, NULL);
	g_slist_free(m0info_list);
	return NULL;
}
/* Look up (or reserve) the base named `hname` and lock it for the
 * current thread, waiting on the base's condition up to an absolute
 * monotonic deadline (open_timeout milliseconds when >= 0, otherwise
 * 5 minutes).  On success *result receives the base descriptor index;
 * on timeout a CODE_UNAVAILABLE GError is returned. */
GError *
sqlx_cache_open_and_lock_base(sqlx_cache_t *cache, const hashstr_t *hname,
		gint *result)
{
	gint bd;
	GError *err = NULL;
	sqlx_base_t *base = NULL;

	GRID_TRACE2("%s(%p,%s,%p)", __FUNCTION__, (void*)cache,
			hname ? hashstr_str(hname) : "NULL", (void*)result);
	EXTRA_ASSERT(cache != NULL);
	EXTRA_ASSERT(hname != NULL);
	EXTRA_ASSERT(result != NULL);

	gint64 deadline = g_get_monotonic_time();
	if (cache->open_timeout >= 0) {
		deadline += cache->open_timeout * G_TIME_SPAN_MILLISECOND;
	} else {
		/* no explicit timeout: cap the wait at 5 minutes anyway */
		deadline += 5 * G_TIME_SPAN_MINUTE;
	}

	g_mutex_lock(&cache->lock);
	cache->used = TRUE;
retry:
	bd = sqlx_lookup_id(cache, hname);
	if (bd < 0) {
		/* base not cached yet: reserve a fresh slot, evicting an idle
		 * base and retrying when the cache is full */
		if (!(err = sqlx_base_reserve(cache, hname, &base))) {
			bd = base->index;
			*result = base->index;
			sqlx_base_debug("OPEN", base);
		} else {
			GRID_DEBUG("No base available for [%s] (%d %s)",
					hashstr_str(hname), err->code, err->message);
			if (sqlx_expire_first_idle_base(cache, NULL) >= 0) {
				g_clear_error(&err);
				goto retry;
			}
		}
	} else {
		base = GET(cache, bd);
		switch (base->status) {

			case SQLX_BASE_FREE:
				EXTRA_ASSERT(base->count_open == 0);
				EXTRA_ASSERT(base->owner == NULL);
				GRID_ERROR("free base referenced");
				g_assert_not_reached();
				break;

			case SQLX_BASE_IDLE:
			case SQLX_BASE_IDLE_HOT:
				/* base cached but closed: reopen it for this thread */
				EXTRA_ASSERT(base->count_open == 0);
				EXTRA_ASSERT(base->owner == NULL);
				sqlx_base_move_to_list(cache, base, SQLX_BASE_USED);
				base->count_open ++;
				base->owner = g_thread_self();
				*result = base->index;
				break;

			case SQLX_BASE_USED:
				EXTRA_ASSERT(base->count_open > 0);
				EXTRA_ASSERT(base->owner != NULL);
				if (base->owner != g_thread_self()) {
					GRID_DEBUG("Base [%s] in use by another thread (%X), waiting...",
							hashstr_str(hname), oio_log_thread_id(base->owner));
					// The lock is held by another thread/request
					if (g_cond_wait_until(base->cond, &cache->lock, deadline)) {
						GRID_DEBUG("Retrying to open [%s]", hashstr_str(hname));
						goto retry;
					} else {
						/* NOTE(review): "%ldms" formats open_timeout with
						 * %ld — assumes it is a long, not a gint64; verify
						 * against its declaration. */
						if (cache->open_timeout > 0) {
							err = NEWERROR(CODE_UNAVAILABLE,
									"database currently in use by another request"
									" (we waited %ldms)", cache->open_timeout);
						} else {
							err = NEWERROR(CODE_UNAVAILABLE,
									"database currently in use by another request");
						}
						GRID_DEBUG("failed to open base: "
								"in use by another request (thread %X)",
								oio_log_thread_id(base->owner));
						break;
					}
				}
				/* recursive open by the owning thread */
				base->owner = g_thread_self();
				base->count_open ++;
				*result = base->index;
				break;

			case SQLX_BASE_CLOSING:
				EXTRA_ASSERT(base->owner != NULL);
				// Just wait for a notification then retry
				if (g_cond_wait_until(base->cond, &cache->lock, deadline))
					goto retry;
				else {
					err = NEWERROR(CODE_UNAVAILABLE,
							"Database stuck in closing state");
					break;
				}
		}
	}

	if (base) {
		if (!err) {
			sqlx_base_debug(__FUNCTION__, base);
			EXTRA_ASSERT(base->owner == g_thread_self());
			EXTRA_ASSERT(base->count_open > 0);
		}
		/* wake up another thread possibly waiting on this base */
		g_cond_signal(base->cond);
	}
	g_mutex_unlock(&cache->lock);
	return err;
}