/* Send a local DESTROY request to every meta2 in `targets` at once.
 * Returns NULL on success. Per-target CONTAINER_NOTFOUND / NOT_FOUND
 * errors are ignored (the base is already gone there); the first other
 * error stops the scan and is returned to the caller. */
GError* m2v2_remote_execute_DESTROY_many(gchar **targets, struct oio_url_s *url, guint32 flags) {
	if (!targets)
		return NEWERROR(CODE_INTERNAL_ERROR, "invalid target array (NULL)");

	// TODO: factorize with sqlx_remote_execute_DESTROY_many
	GByteArray *req = m2v2_remote_pack_DESTROY(url, flags | M2V2_DESTROY_LOCAL);
	struct gridd_client_s **clients = gridd_client_create_many(targets, req, NULL, NULL);
	/* the request payload has been copied by the clients */
	metautils_gba_unref(req);
	req = NULL;

	if (clients == NULL)
		return NEWERROR(0, "Failed to create gridd clients");

	gridd_clients_start(clients);
	GError *err = gridd_clients_loop(clients);
	/* loop stops at the first error that is not cleared below */
	for (struct gridd_client_s **p = clients; !err && p && *p ;p++) {
		if (!(err = gridd_client_error(*p)))
			continue;
		GRID_DEBUG("Database destruction attempts failed: (%d) %s",
				err->code, err->message);
		if (err->code == CODE_CONTAINER_NOTFOUND || err->code == CODE_NOT_FOUND) {
			/* destroying an absent base is a success for our purpose */
			g_clear_error(&err);
			continue;
		}
	}
	gridd_clients_free(clients);
	return err;
}
GError* meta2_backend_poll_service(struct meta2_backend_s *m2, const gchar *type, struct service_info_s **si) { struct grid_lb_iterator_s *iter; EXTRA_ASSERT(m2 != NULL); EXTRA_ASSERT(type != NULL); EXTRA_ASSERT(si != NULL); if (!(iter = grid_lbpool_get_iterator(m2->backend.lb, type))) return NEWERROR(CODE_SRVTYPE_NOTMANAGED, "no such service"); struct lb_next_opt_ext_s opt_ext; memset(&opt_ext, 0, sizeof(opt_ext)); opt_ext.req.distance = 0; opt_ext.req.max = 1; opt_ext.req.duplicates = TRUE; opt_ext.req.stgclass = NULL; opt_ext.req.strict_stgclass = TRUE; struct service_info_s **siv = NULL; if (!grid_lb_iterator_next_set2(iter, &siv, &opt_ext)) return NEWERROR(CODE_SRVTYPE_NOTMANAGED, "no service available"); *si = service_info_dup(siv[0]); service_info_cleanv(siv, FALSE); return NULL; }
/* Check whether a new meta1 assignment is allowed: the score spread must
 * exceed the configured trigger percentage of the average score, and the
 * minimum delay since the previous assignment must have elapsed.
 * Returns NULL when a new assignment may run, an error otherwise.
 * Fix: the strings returned by g_date_time_format() and the GDateTime
 * objects (currentTime, ltime) were leaked on every call. */
static GError* _check(GList *working_m1list) {
	GError *error = NULL;

	if (working_m1list) {
		/* Sort by score so head/tail give the extreme values. */
		working_m1list = g_list_sort(working_m1list, meta0_assign_sort_by_score);
		struct meta0_assign_meta1_s *hM1 = working_m1list->data;
		struct meta0_assign_meta1_s *lM1 = (g_list_last(working_m1list))->data;
		guint highscore = hM1->score;
		guint lowscore = lM1->score;
		GRID_TRACE("check delta highscore %d ,lowscore %d", highscore, lowscore);
		if ((highscore - lowscore) < (context->avgscore * trigger_assignment) / 100) {
			GRID_WARN("New assign not necessary, high score %d , low score %d, average %d",
					highscore, lowscore, context->avgscore);
			error = NEWERROR(0, "New assign not necessary");
			return error;
		}
	}

	if (context->lastAssignTime) {
		gchar *last_str = g_date_time_format(context->lastAssignTime, "%Y-%m-%d %H:%M");
		GRID_TRACE("last time %s", last_str);

		GDateTime *currentTime = g_date_time_new_now_local();
		GDateTime *ltime = g_date_time_add_minutes(context->lastAssignTime,
				period_between_two_assign);

		gchar *cur_str = g_date_time_format(currentTime, "%Y-%m-%d %H:%M");
		gchar *ltime_str = g_date_time_format(ltime, "%Y-%m-%d %H:%M");
		GRID_TRACE("currentTime :%s , last time + %d min :%s, comp :%d",
				cur_str, period_between_two_assign, ltime_str,
				g_date_time_compare(ltime, currentTime));
		g_free(cur_str);
		g_free(ltime_str);

		if (g_date_time_compare(ltime, currentTime) > 0) {
			GRID_WARN("delay between two meta1 assign not respected. Try later. last date [%s]",
					last_str);
			error = NEWERROR(0, "delay between two meta1 assign not respected. Try later.");
		}

		/* release everything before returning, whatever the outcome */
		g_free(last_str);
		g_date_time_unref(currentTime);
		g_date_time_unref(ltime);
		if (error)
			return error;
	}
	return NULL;
}
/* ORG directive: set the assembler's location counter to the evaluated
 * operand. No size qualifier is allowed, the operand must be a backward
 * reference, and an odd address is flagged then rounded up to even.
 * Errors are accumulated into *errorPtr via NEWERROR. */
void org(int size, char *label, char *op, int *errorPtr) {
	int newLoc;
	char backRef;
	char *eval();  /* old-style forward declaration of the expression evaluator */

	if (size)
		NEWERROR(*errorPtr, INV_SIZE_CODE);
	if (!*op) {
		NEWERROR(*errorPtr, SYNTAX);
		return;
	}
	op = eval(op, &newLoc, &backRef, errorPtr);
	if (*errorPtr < SEVERE && !backRef) {
		/* forward references are not allowed in an ORG operand */
		NEWERROR(*errorPtr, INV_FORWARD_REF);
	} else if (*errorPtr < ERROR) {
		if (isspace(*op) || !*op) {
			/* Check for an odd value, adjust to one higher */
			if (newLoc & 1) {
				NEWERROR(*errorPtr, ODD_ADDRESS);
				newLoc++;
			}
			loc = newLoc;
			/* Define the label attached to this directive, if any */
			if (*label)
				define(label, loc, pass2, errorPtr);
			/* Show new location counter on listing */
			listLoc();
		} else
			NEWERROR(*errorPtr, SYNTAX);
	}
}
static int _meta2_filter_check_ns_name(struct gridd_filter_ctx_s *ctx, struct gridd_reply_ctx_s *reply, int optional) { (void) reply; TRACE_FILTER(); const struct meta2_backend_s *backend = meta2_filter_ctx_get_backend(ctx); const char *req_ns = oio_url_get(meta2_filter_ctx_get_url(ctx), OIOURL_NS); if (!backend || !backend->ns_name[0]) { GRID_DEBUG("Missing information for namespace checking"); meta2_filter_ctx_set_error(ctx, NEWERROR(CODE_INTERNAL_ERROR, "Missing backend information, cannot check namespace")); return FILTER_KO; } if (!req_ns) { if (optional) return FILTER_OK; GRID_DEBUG("Missing namespace name in request"); meta2_filter_ctx_set_error(ctx, NEWERROR(CODE_BAD_REQUEST, "Bad Request: Missing namespace name information")); return FILTER_KO; } if (0 != g_ascii_strcasecmp(backend->ns_name, req_ns)) { meta2_filter_ctx_set_error(ctx, NEWERROR(CODE_BAD_REQUEST, "Request namespace [%s] does not match server namespace [%s]", req_ns, backend->ns_name)); return FILTER_KO; } return FILTER_OK; }
/**********************************************************************
 *
 *	Function branch builds the following instructions:
 *	 BCC (BHS)   BGT   BLT   BRA
 *	 BCS (BLO)   BHI   BMI   BSR
 *	 BEQ         BLE   BNE   BVC
 *	 BGE         BLS   BPL   BVS
 *
 ***********************************************************************/
/* mask holds the opcode template; size is the explicit size suffix (SHORT
 * or BYTE forces the 8-bit form); source holds the branch target; dest is
 * unused. The short (one-word) form is used when forced by the suffix, or
 * when the target is a backward reference whose displacement fits in a
 * non-zero signed byte. Displacement-range errors are reported on pass 2
 * only; on pass 1 just the location counter is advanced. */
int branch(int mask, int size, opDescriptor *source, opDescriptor *dest, int *errorPtr) {
	bool shortDisp;
	int disp;

	/* displacement is relative to the address just past the opcode word */
	disp = source->data - loc - 2;
	shortDisp = false;
	if ( ((size == SHORT_SIZE) || (size == BYTE_SIZE_M))
			|| (size != LONG_SIZE && size != WORD_SIZE && source->backRef
				&& disp >= -128 && disp <= 127 && disp))
		shortDisp = true;
	if (pass2) {
		if (shortDisp) {
			/* short form: displacement packed into the opcode's low byte */
			output((int) (mask | (disp & 0xFF)), WORD_SIZE);
			loc += 2;
			/* a zero displacement is illegal in the short form */
			if (disp < -128 || disp > 127 || !disp)
				NEWERROR(*errorPtr, INV_BRANCH_DISP);
		} else {
			/* word form: opcode word followed by a 16-bit displacement */
			output((int) (mask), WORD_SIZE);
			loc += 2;
			output((int) (disp), WORD_SIZE);
			loc += 2;
			if (disp < -32768 || disp > 32767)
				NEWERROR(*errorPtr, INV_BRANCH_DISP);
		}
	} else
		loc += (shortDisp) ? 2 : 4;

	return NORMAL;
}
/* Execute `req` against target `to`, optionally concatenating all reply
 * bodies. Ownership: `req` is always consumed (unreffed), even on error.
 * When `out` is non-NULL, on success *out receives a GByteArray the caller
 * owns; on error *out is left untouched. `seconds` > 0 sets a client
 * timeout. Returns NULL on success. */
GError * gridd_client_exec_and_concat (const gchar *to, gdouble seconds, GByteArray *req, GByteArray **out) {
	if (!to) {
		g_byte_array_unref (req);
		return NEWERROR(CODE_INTERNAL_ERROR, "No target");
	}

	/* accumulator for the reply bodies, only when the caller wants them */
	GByteArray *tmp = NULL;
	if (out)
		tmp = g_byte_array_new();

	struct gridd_client_s *client = gridd_client_create(to, req,
			out ? tmp : NULL, out ? (client_on_reply)_cb_exec_and_concat : NULL);
	g_byte_array_unref (req);
	if (!client) {
		if (tmp)
			g_byte_array_free (tmp, TRUE);
		return NEWERROR(CODE_INTERNAL_ERROR, "client creation");
	}

	if (seconds > 0.0)
		gridd_client_set_timeout (client, seconds);
	GError *err = gridd_client_run (client);
	gridd_client_free (client);

	/* hand the accumulated body to the caller only on success */
	if (!err && out) {
		*out = tmp;
		tmp = NULL;
	}
	if (tmp)
		metautils_gba_unref (tmp);
	return err;
}
/* Poll a set of services of type `srvtype` from the load-balancer pool
 * according to `opt_ext`, and prepend one chunk bean (or chunk_info,
 * depending on `use_beans`) per service to *result. */
static GError* _poll_services(struct grid_lbpool_s *lbp, const gchar *srvtype, struct lb_next_opt_ext_s *opt_ext, GSList **result, gboolean use_beans) {
	if (lbp == NULL || srvtype == NULL)
		return NEWERROR(CODE_INTERNAL_ERROR, "Invalid parameter");

	struct grid_lb_iterator_s *iter = grid_lbpool_get_iterator(lbp, srvtype);
	if (iter == NULL)
		return NEWERROR(CODE_POLICY_NOT_SATISFIABLE, "No RAWX available");

	struct service_info_s **siv = NULL;
	if (!grid_lb_iterator_next_set2(iter, &siv, opt_ext))
		return NEWERROR(CODE_PLATFORM_ERROR, "Cannot get services"
				" list for the specified storage policy");

	if (use_beans) {
		for (struct service_info_s **psi = siv; *psi; ++psi)
			*result = g_slist_prepend(*result, _gen_chunk_bean(*psi));
	} else {
		for (struct service_info_s **psi = siv; *psi; ++psi)
			*result = g_slist_prepend(*result, _gen_chunk_info(*psi));
	}

	service_info_cleanv(siv, FALSE);
	return NULL;
}
/* Resolve the 64-bit location of the RAWX service hosting `chunk_id`.
 * When `pool` is provided, the load-balancer's own view is consulted
 * first; otherwise (or on a miss) the location is derived from the
 * service address embedded in the chunk URL.
 * Fixes: "emtpy" typo in the error message, strlen()-based emptiness
 * test, and a guard against an unparsable chunk URL (NULL netloc would
 * previously flow into oio_make_service_key/grid_string_to_addrinfo). */
static GError* location_from_chunk_id(const gchar *chunk_id, const gchar *ns_name, struct oio_lb_pool_s *pool, oio_location_t *location) {
	g_assert_nonnull(location);
	GError *err = NULL;
	if (chunk_id == NULL || *chunk_id == '\0')
		return NEWERROR(CODE_INTERNAL_ERROR, "empty chunk id");

	gchar *netloc = NULL;
	oio_parse_chunk_url(chunk_id, NULL, &netloc, NULL);
	if (netloc == NULL)
		return NEWERROR(CODE_INTERNAL_ERROR,
				"could not parse [%s] to a service address", chunk_id);

	if (pool) {
		gchar *key = oio_make_service_key(ns_name, NAME_SRVTYPE_RAWX, netloc);
		struct oio_lb_item_s *item = oio_lb_pool__get_item(pool, key);
		g_free(key);
		if (item) {
			*location = item->location;
			g_free(item);
			goto out;
		}
	}

	/* fallback: compute the location from the service address itself */
	addr_info_t ai = {{0}};
	if (!grid_string_to_addrinfo(netloc, &ai))
		err = NEWERROR(CODE_INTERNAL_ERROR,
				"could not parse [%s] to addrinfo", netloc);
	else
		*location = location_from_addr_info(&ai);
out:
	g_free(netloc);
	return err;
}
GError* zsocket_resolve(const gchar *zname, int *ztype) { static struct named_type_s { const gchar *zname; int ztype; } defs[] = { {"PUB", ZMQ_PUB}, {"SUB", ZMQ_SUB}, {"PUSH", ZMQ_PUSH}, {"PULL", ZMQ_PULL}, {NULL,0} }; g_assert(zname != NULL); g_assert(ztype != NULL); if (!g_str_has_prefix(zname, "zmq:")) return NEWERROR(EINVAL, "Invalid ZMQ socket type [%s]", zname); zname += sizeof("zmq:")-1; for (struct named_type_s *nt = defs; nt->zname ;++nt) { if (!g_ascii_strcasecmp(zname, nt->zname)) { *ztype = nt->ztype; return NULL; } } return NEWERROR(EINVAL, "Invalid ZMQ socket type [%s]", zname); }
/* Convert a JSON array into a list of beans, mapping each element with
 * `map`. On success *out receives the list in array order; on error the
 * partially-built list is cleaned and an error is returned.
 * Fixes: the beans mapped so far leaked when an array element was not a
 * JSON object; "exepecting" typo in the error message. */
static GError * _jarray_to_beans (GSList **out, struct json_object *jv, jbean_mapper map) {
	if (!json_object_is_type(jv, json_type_array))
		return NEWERROR(CODE_BAD_REQUEST, "Invalid JSON, expecting array of beans");

	GSList *l = NULL;
	int vlen = json_object_array_length (jv);
	for (int i = 0; i < vlen; ++i) {
		struct json_object *j = json_object_array_get_idx (jv, i);
		if (!json_object_is_type (j, json_type_object)) {
			_bean_cleanl2 (l);  /* do not leak the beans mapped so far */
			return NEWERROR(CODE_BAD_REQUEST, "Invalid JSON for a bean");
		}
		gpointer bean = NULL;
		GError *err = map(j, &bean);
		EXTRA_ASSERT((bean != NULL) ^ (err != NULL));
		if (err) {
			_bean_cleanl2 (l);
			return err;
		}
		l = g_slist_prepend(l, bean);
	}

	/* prepending reversed the order; restore the array order */
	*out = g_slist_reverse (l);
	return NULL;
}
/* HTTP action: unlink a service of the requested type from a reference.
 * Delegates to a per-meta1 hook (nested function, GCC extension) via
 * _m1_locate_and_action, then invalidates the local resolver cache. */
enum http_rc_e action_dir_srv_unlink (struct req_args_s *args) {
	const gchar *type = TYPE();
	if (!type)
		return _reply_format_error (args, NEWERROR( CODE_BAD_REQUEST,
				"No service type provided"));

	/* hook invoked for each candidate meta1 address */
	GError *hook (const gchar * m1) {
		struct addr_info_s m1a;
		if (!grid_string_to_addrinfo (m1, NULL, &m1a))
			return NEWERROR (CODE_NETWORK_ERROR, "Invalid M1 address");
		GError *err = NULL;
		meta1v2_remote_unlink_service (&m1a, &err, args->url, type);
		return err;
	}

	GError *err = _m1_locate_and_action (args, hook);
	if (!err || CODE_IS_NETWORK_ERROR(err->code)) {
		/* Also decache on timeout, a majority of request succeed,
		 * and it will probably silently succeed */
		hc_decache_reference_service (resolver, args->url, type);
	}

	if (!err)
		return _reply_success_json (args, NULL);
	return _reply_common_error (args, err);
}
/* Parse a JSON error body of the form {"status":..., "message":...} into
 * a GError. Falls back to generic errors when the body is not parsable
 * JSON or the expected fields are absent. The caller owns the result. */
static GError * _body_parse_error (GString *b) {
	g_assert (b != NULL);

	struct json_tokener *tok = json_tokener_new ();
	struct json_object *jbody = json_tokener_parse_ex (tok, b->str, b->len);
	json_tokener_free (tok);
	tok = NULL;

	if (jbody == NULL)
		return NEWERROR(0, "No error explained");

	struct json_object *jcode, *jmsg;
	struct oio_ext_json_mapping_s map[] = {
		{"status",  &jcode, json_type_int,    0},
		{"message", &jmsg,  json_type_string, 0},
		{NULL, NULL, 0, 0}
	};
	GError *err = oio_ext_extract_json(jbody, map);
	if (err == NULL) {
		int code = 0;
		const char *msg = "Unknown error";
		if (jcode != NULL)
			code = json_object_get_int64 (jcode);
		if (jmsg != NULL)
			msg = json_object_get_string (jmsg);
		err = NEWERROR(code, "(code=%d) %s", code, msg);
	}
	json_object_put (jbody);
	return err;
}
/* Open a TCP connection to `url` ("host:port"). Returns the connected fd
 * on success, or -1 with *err set. The socket is configured with the
 * default linger, TCP_NODELAY and TCP quick-ack options. */
static int _connect(const gchar *url, GError **err) {
	struct addr_info_s ai;
	memset(&ai, 0, sizeof(ai));

	if (!grid_string_to_addrinfo(url, NULL, &ai)) {
		*err = NEWERROR(EINVAL, "invalid URL");
		return -1;
	}
	if (ai.port == 0) {
		*err = NEWERROR(EINVAL, "no port");
		return -1;
	}

	int fd = addrinfo_connect_nopoll(&ai, 1000, err);
	if (fd < 0)
		return -1;

	sock_set_linger_default(fd);
	sock_set_nodelay(fd, TRUE);
	sock_set_tcpquickack(fd, TRUE);
	*err = NULL;
	return fd;
}
/* Release one open handle on base `bd` and, when the open count drops to
 * zero, either expire the base immediately (`force`) or park it on an
 * IDLE list (HOT when its heat reached the cache threshold). Returns an
 * error when the base is not currently open. Runs under the cache lock
 * and signals the base's condition before returning. */
GError * sqlx_cache_unlock_and_close_base(sqlx_cache_t *cache, gint bd, gboolean force) {
	GError *err = NULL;
	GRID_TRACE2("%s(%p,%d,%d)", __FUNCTION__, (void*)cache, bd, force);

	EXTRA_ASSERT(cache != NULL);
	if (base_id_out(cache, bd))
		return NEWERROR(CODE_INTERNAL_ERROR, "invalid base id=%d", bd);

	g_mutex_lock(&cache->lock);
	cache->used = TRUE;

	sqlx_base_t *base;
	base = GET(cache,bd);
	switch (base->status) {

		case SQLX_BASE_FREE:
			EXTRA_ASSERT(base->count_open == 0);
			EXTRA_ASSERT(base->owner == NULL);
			err = NEWERROR(CODE_INTERNAL_ERROR, "base not used");
			break;

		case SQLX_BASE_IDLE:
		case SQLX_BASE_IDLE_HOT:
			EXTRA_ASSERT(base->count_open == 0);
			EXTRA_ASSERT(base->owner == NULL);
			err = NEWERROR(CODE_INTERNAL_ERROR, "base closed");
			break;

		case SQLX_BASE_USED:
			EXTRA_ASSERT(base->count_open > 0);
			// held by the current thread
			if (!(-- base->count_open)) { // to be closed
				if (force) {
					_expire_base(cache, base);
				} else {
					sqlx_base_debug("CLOSING", base);
					base->owner = NULL;
					/* hot bases go to a separate list so they are
					 * reused before colder ones */
					if (base->heat >= cache->heat_threshold)
						sqlx_base_move_to_list(cache, base, SQLX_BASE_IDLE_HOT);
					else
						sqlx_base_move_to_list(cache, base, SQLX_BASE_IDLE);
				}
			}
			break;

		case SQLX_BASE_CLOSING:
			/* someone else is closing it; we must not touch it */
			EXTRA_ASSERT(base->owner != NULL);
			EXTRA_ASSERT(base->owner != g_thread_self());
			err = NEWERROR(CODE_INTERNAL_ERROR, "base being closed");
			break;
	}

	if (base && !err)
		sqlx_base_debug(__FUNCTION__, base);
	/* wake one thread waiting on this base's status change */
	g_cond_signal(base->cond);
	g_mutex_unlock(&cache->lock);
	return err;
}
static GError * _local_factory_open (struct oio_sqlx_client_factory_s *self, const struct oio_url_s *u, struct oio_sqlx_client_s **out) { g_assert (self != NULL); struct oio_sqlx_client_factory_LOCAL_s *factory = (struct oio_sqlx_client_factory_LOCAL_s*) self; g_assert (factory->vtable == &vtable_factory_LOCAL); g_assert (out != NULL); g_assert (u != NULL); sqlite3 *db = NULL; int flags = SQLITE_OPEN_NOMUTEX|SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE; int rc = sqlite3_open_v2(":memory:", &db, flags, NULL); if (rc != SQLITE_OK) { sqlite3_close (db); return NEWERROR(CODE_INTERNAL_ERROR, "DB ERROR (open): (%d) %s", rc, sqlite3_errmsg(db)); } /* apply the schema */ rc = _sqlx_exec (db, factory->schema); if (SQLITE_OK != rc && SQLITE_DONE != rc) { sqlite3_close (db); return NEWERROR(CODE_INTERNAL_ERROR, "DB ERROR (schema): (%d) %s", rc, sqlite3_errmsg(db)); } struct oio_sqlx_client_LOCAL_s *s = SLICE_NEW0 (struct oio_sqlx_client_LOCAL_s); s->vtable = &vtable_LOCAL; s->db = db; *out = (struct oio_sqlx_client_s*) s; return NULL; }
/***********************************************************************
 *	OFFSET directive.  (ck)
 ***********************************************************************/
/* Switch the assembler into "offset mode": on first entry save the
 * current location counter in locOffset, then set loc to the evaluated
 * operand. A label is an error; the operand must be a backward reference. */
int offset(int size, char *label, char *op, int *errorPtr) {
	int newLoc;
	bool backRef;

	if (size)
		NEWERROR(*errorPtr, INV_SIZE_CODE);
	if (!*op) {
		NEWERROR(*errorPtr, SYNTAX);
		return NORMAL;
	}
	if (*label) // if label
		NEWERROR(*errorPtr, LABEL_ERROR);
	op = eval(op, &newLoc, &backRef, errorPtr);
	if (*errorPtr < SEVERE && !backRef) {
		/* forward references are not allowed in an OFFSET operand */
		NEWERROR(*errorPtr, INV_FORWARD_REF);
	} else if (*errorPtr < ERRORN) {
		if (isspace(*op) || !*op) {
			if (!offsetMode) {
				// if not currently processing an Offset directive
				locOffset = loc; // save current location
				offsetMode = true;
			}
			loc = newLoc;
			// Show new location counter on listing
			listLoc();
		} else
			NEWERROR(*errorPtr, SYNTAX);
	}
	return NORMAL;
}
/* Split a chunk URL of the form "http://host:port/..." into host, port
 * and the hex chunk id. The returned chunk_hexid is the LAST path
 * segment INCLUDING its leading '/'. Output strings are newly allocated.
 * NOTE(review): assumes `url` starts with "http://" — 7 characters are
 * skipped unconditionally; confirm callers guarantee this prefix.
 * NOTE(review): the port is parsed with atoi(), so a malformed port
 * silently yields 0 with no error reported. */
static void split_chunk_url(const gchar *url, gchar **host, gint *port, gchar **chunk_hexid, GError **err) {
	gchar **toks = NULL;
	gchar **hp = NULL;

	toks = g_strsplit(url + 7, "/", 2); // skip "http://" and get "host:port"
	if (!toks || g_strv_length(toks) != 2) {
		if (err)
			*err = NEWERROR(0, "Unparsable chunk URL format: '%s'", url);
		goto end;
	}
	hp = g_strsplit(toks[0], ":", 2); // split host and port
	if (!hp || g_strv_length(hp) != 2) {
		if (err)
			*err = NEWERROR(0, "Could not extract host and port: '%s'", toks[0]);
		goto end;
	}
	*host = g_strdup(hp[0]);
	*port = atoi(hp[1]);
	*chunk_hexid = g_strdup(strrchr(url, '/'));
end:
	g_strfreev(hp);
	g_strfreev(toks);
}
/* EQU directive: define `label` as the evaluated operand value. A label
 * is required and the operand must be a backward reference. On pass 2
 * the value is echoed in the listing as "=XXXXXXXX". */
void equ(int size, char *label, char *op, int *errorPtr) {
	int value;
	char backRef;
	char *eval();  /* old-style forward declaration of the expression evaluator */

	if (size)
		NEWERROR(*errorPtr, INV_SIZE_CODE);
	if (!*op) {
		NEWERROR(*errorPtr, SYNTAX);
		return;
	}
	op = eval(op, &value, &backRef, errorPtr);
	if (*errorPtr < SEVERE && !backRef) {
		/* forward references are not allowed in an EQU operand */
		NEWERROR(*errorPtr, INV_FORWARD_REF);
	} else if (*errorPtr < ERROR) {
		/* Note the if/else pairing below: the final `else` belongs to the
		 * isspace() test and reports trailing junk as a SYNTAX error. */
		if (isspace(*op) || !*op)
			if (!*label) {
				NEWERROR(*errorPtr, LABEL_REQUIRED);
			} else {
				define(label, value, pass2, errorPtr);
				if (pass2 && listFlag && *errorPtr < MINOR) {
					sprintf(listPtr, "=%08X ", value);
					listPtr += 10;
				}
			}
		else
			NEWERROR(*errorPtr, SYNTAX);
	}
}
/* Resolve the META0 peers via ZooKeeper. The base must be of type META0
 * and named after the service's namespace. On success *result receives a
 * newly allocated peer string-vector.
 * Fix: `peers` was declared uninitialized, and on a ZooKeeper error it
 * was passed to g_slist_free_full() — freeing an indeterminate pointer
 * (undefined behavior). It is now initialized to NULL. */
static GError * _get_peers(struct sqlx_service_s *ss, struct sqlx_name_s *n, gboolean nocache, gchar ***result) {
	(void) nocache;
	GSList *peers = NULL;
	GError *err;

	if (!n || !result)
		return NEWERROR(CODE_INTERNAL_ERROR, "BUG [%s:%s:%d]",
				__FUNCTION__, __FILE__, __LINE__);
	if (g_ascii_strcasecmp(n->type, NAME_SRVTYPE_META0))
		return NEWERROR(CODE_BAD_REQUEST, "Invalid type name");
	if (g_ascii_strcasecmp(n->base, ss->ns_name))
		return NEWERROR(CODE_BAD_REQUEST, "Invalid base name, expected [%s]",
				ss->ns_name);

	err = list_zk_children_node(m0zkmanager, NULL, &peers);
	if (err) {
		g_slist_free_full(peers, g_free);
		*result = NULL;
		g_prefix_error(&err, "ZooKeeper error: ");
		return err;
	}

	if (!(*result = strv_filter(ss, peers)))
		return NEWERROR(CODE_CONTAINER_NOTFOUND, "Base not managed");
	return NULL;
}
/* Extract the string field `n` from `msg` and convert it to a gint64
 * stored in *i64 (zeroed first). Returns NULL on success.
 * Fix: errno was not reset before g_ascii_strtoll(), which only SETS
 * errno on overflow and never clears it — a stale ERANGE could mis-flag
 * a legitimate G_MININT64/G_MAXINT64 input as invalid. */
GError * metautils_message_extract_strint64(MESSAGE msg, const gchar *n, gint64 *i64) {
	gchar *end, dst[24];

	EXTRA_ASSERT (i64 != NULL);
	*i64 = 0;
	memset(dst, 0, sizeof(dst));
	GError *err = metautils_message_extract_string(msg, n, dst, sizeof(dst));
	if (err != NULL) {
		g_prefix_error(&err, "field: ");
		return err;
	}

	/* clear errno so a later ERANGE check is meaningful */
	errno = 0;
	end = NULL;
	*i64 = g_ascii_strtoll(dst, &end, 10);
	switch (*i64) {
		case G_MININT64:
		case G_MAXINT64:
			/* boundary value: a genuine overflow iff errno is ERANGE */
			return (errno == ERANGE)
				? NEWERROR(CODE_BAD_REQUEST, "Invalid number") : NULL;
		case 0:
			/* zero: invalid unless at least one digit was consumed */
			return (end == dst)
				? NEWERROR(CODE_BAD_REQUEST, "Invalid number") : NULL;
		default:
			return NULL;
	}
}
/* HTTP action: poll (renew) a service of the requested type for a
 * reference. Honors the "autocreate" and "dryrun" mode header flags.
 * Delegates to a per-meta1 hook (nested function, GCC extension), then
 * invalidates the local resolver cache. */
enum http_rc_e action_dir_srv_renew (struct req_args_s *args, struct json_object *jargs) {
	(void) jargs;
	const gchar *type = TYPE();
	if (!type)
		return _reply_format_error (args, NEWERROR(CODE_BAD_REQUEST,
				"No service type provided"));

	gboolean autocreate = _request_has_flag (args, PROXYD_HEADER_MODE, "autocreate");
	gboolean dryrun = _request_has_flag (args, PROXYD_HEADER_MODE, "dryrun");

	gchar **urlv = NULL;
	/* hook invoked for each candidate meta1 address; fills urlv on success */
	GError *hook (const gchar * m1) {
		struct addr_info_s m1a;
		if (!grid_string_to_addrinfo (m1, NULL, &m1a))
			return NEWERROR (CODE_NETWORK_ERROR, "Invalid M1 address");
		GError *err = NULL;
		urlv = meta1v2_remote_poll_reference_service (&m1a, &err, args->url,
				type, dryrun, autocreate);
		return err;
	}

	GError *err = _m1_locate_and_action (args, hook);
	if (!err || CODE_IS_NETWORK_ERROR(err->code)) {
		/* Also decache on timeout, a majority of request succeed,
		 * and it will probably silently succeed */
		hc_decache_reference_service (resolver, args->url, type);
	}

	if (err)
		return _reply_common_error (args, err);
	EXTRA_ASSERT (urlv != NULL);
	return _reply_success_json (args, _pack_and_freev_m1url_list (NULL, urlv));
}
/* Open a non-blocking TCP connection to `url`. Returns the fd on success
 * or -1 with the error transmitted through `err`. For a non-blocking
 * connect, EINPROGRESS (or errno 0) is expected and not treated as a
 * failure; the caller is responsible for polling writability.
 * NOTE(review): the final `*err = NULL` dereferences err unconditionally
 * while the failure paths go through g_error_transmit() — confirm that
 * callers always pass a non-NULL err. */
int sock_connect (const char *url, GError **err) {
	struct sockaddr_storage sas;
	gsize sas_len = sizeof(sas);

	if (!grid_string_to_sockaddr (url, (struct sockaddr*) &sas, &sas_len)) {
		g_error_transmit(err, NEWERROR(EINVAL, "invalid URL"));
		return -1;
	}

	int fd = socket_nonblock(sas.ss_family, SOCK_STREAM, 0);
	if (0 > fd) {
		g_error_transmit(err, NEWERROR(EINVAL, "socket error: (%d) %s",
				errno, strerror(errno)));
		return -1;
	}

	sock_set_reuseaddr(fd, TRUE);

	if (0 != metautils_syscall_connect (fd, (struct sockaddr*)&sas, sas_len)) {
		/* non-blocking connect: EINPROGRESS means "started", not "failed" */
		if (errno != EINPROGRESS && errno != 0) {
			g_error_transmit(err, NEWERROR(EINVAL, "connect error: (%d) %s",
					errno, strerror(errno)));
			metautils_pclose (&fd);
			return -1;
		}
	}

	sock_set_linger_default(fd);
	sock_set_nodelay(fd, TRUE);
	sock_set_tcpquickack(fd, TRUE);
	*err = NULL;
	return fd;
}
static GError * _send_and_read_reply (int fd, struct iovec *iov, unsigned int iovcount) { if (!_send(fd, iov, iovcount)) return NEWERROR(CODE_NETWORK_ERROR, "send error: (%d) %s", errno, strerror(errno)); GError *err = NULL; guint8 buf[256]; int r = sock_to_read (fd, 1000, buf, sizeof(buf)-1, &err); if (r < 0) return NEWERROR(CODE_NETWORK_ERROR, "read error: (%d) %s", err->code, err->message); if (r == 0) return NEWERROR(CODE_NETWORK_ERROR, "read error: closed by peer: (%d) %s", errno, strerror(errno)); buf[r+1] = 0; if (!_is_success((gchar*) buf)) return NEWERROR(CODE_BAD_REQUEST, "reply error: unexpected"); return NULL; }
/* Look up the rawx service hosting `chunk_id` by extracting the IP:PORT
 * part of the chunk URL with a regex and querying the LB pool. On success
 * *srvinfo receives the service (ownership per the pool's contract).
 * Fixes: "emtpy" typo in the error message and the needless strlen()
 * call for the emptiness test. */
GError* service_info_from_chunk_id(struct grid_lbpool_s *glp, const gchar *chunk_id, service_info_t **srvinfo) {
	GError *err = NULL;
	struct service_info_s *si = NULL;

	if (chunk_id == NULL || *chunk_id == '\0')
		return NEWERROR(CODE_INTERNAL_ERROR, "empty chunk id");

	// TODO FIXME Factorizes this with client/c/lib/loc_context.c and
	// TODO FIXME meta2v2/meta2_utils_lb.c, rawx-mover/src/main.c
	char **tok = g_regex_split_simple(
			"(([[:digit:]]{1,3}\\.){3}[[:digit:]]{1,3}:[[:digit:]]{1,5})",
			chunk_id, 0, 0);
	/* tok[1] holds the captured "ip:port" when the split succeeded */
	if (!tok || g_strv_length(tok) < 3)
		err = NEWERROR(CODE_INTERNAL_ERROR, "could not parse chunk id");

	if (err == NULL) {
		si = grid_lbpool_get_service_from_url(glp, "rawx", tok[1]);
		if (si == NULL)
			err = NEWERROR(CODE_INTERNAL_ERROR,
					"unable to find service info from %s", tok[1]);
		else
			*srvinfo = si;
	}

	g_strfreev(tok);
	return err;
}
/* Look up the mnemonic at `p` in the sorted instTable (binary search) and
 * parse an optional ".B/.W/.L/.S" size suffix into *sizePtr (0 when no
 * suffix). On success, stores the table entry through instPtrPtr and
 * returns the pointer just past the mnemonic (and suffix); on error,
 * records the error in *errorPtr and returns NULL. */
char * instLookup(char *p, instruction * (*instPtrPtr), char *sizePtr, int *errorPtr) {
	char opcode[8];
	int i, hi, lo, mid, cmp;

	/* copy the mnemonic (silently truncated to 7 chars) into a
	 * NUL-terminated buffer */
	i = 0;
	do {
		if (i < 7)
			opcode[i++] = *p;
		p++;
	} while (isalpha(*p));
	opcode[i] = '\0';

	/* optional one-letter size suffix introduced by '.' — it must be
	 * followed by whitespace or end of token */
	if (*p == '.')
		if (isspace(p[2]) || !p[2]) {
			if (p[1] == 'B')
				*sizePtr = BYTE;
			else if (p[1] == 'W')
				*sizePtr = WORD;
			else if (p[1] == 'L')
				*sizePtr = LONG;
			else if (p[1] == 'S')
				*sizePtr = SHORT;
			else {
				*sizePtr = 0;
				NEWERROR(*errorPtr, INV_SIZE_CODE);
			}
			p += 2;
		} else {
			NEWERROR(*errorPtr, SYNTAX);
			return NULL;
		}
	else if (!isspace(*p) && *p) {
		NEWERROR(*errorPtr, SYNTAX);
		return NULL;
	} else
		*sizePtr = 0;

	/* binary search of the sorted instruction table */
	lo = 0;
	hi = tableSize - 1;
	do {
		mid = (hi + lo) / 2;
		cmp = strcmp(opcode, instTable[mid].mnemonic);
		if (cmp > 0)
			lo = mid + 1;
		else if (cmp < 0)
			hi = mid - 1;
	} while (cmp && (hi >= lo));
	if (!cmp) {
		*instPtrPtr = &instTable[mid];
		return p;
	} else {
		NEWERROR(*errorPtr, INV_OPCODE);
		return NULL;
	}
}
/* DCB directive: define a constant block of `blockSize` elements, each
 * initialized to `blockVal`, with the given element size (defaults to
 * WORD; SHORT is reported as invalid then demoted to WORD). On pass 2
 * the values are written straight to the object file. */
void dcb(int size, char *label, char *op, int *errorPtr) {
	int blockSize, blockVal, i;
	char *eval();  /* old-style forward declaration of the expression evaluator */
	char backRef;

	if (size == SHORT) {
		NEWERROR(*errorPtr, INV_SIZE_CODE);
		size = WORD;
	} else if (!size)
		size = WORD;
	/* Move location counter to a word boundary and fix the listing if
	   doing DCB.W or DCB.L (but not if doing DCB.B, so DCB.B's can be
	   contiguous) */
	if ((size & (WORD | LONG)) && (loc & 1)) {
		loc++;
		listLoc();
	}
	/* Define the label attached to this directive, if any */
	if (*label)
		define(label, loc, pass2, errorPtr);
	/* Evaluate the size of the block (in bytes, words, or longwords) */
	op = eval(op, &blockSize, &backRef, errorPtr);
	if (*errorPtr < SEVERE && !backRef) {
		/* the block size may not be a forward reference */
		NEWERROR(*errorPtr, INV_FORWARD_REF);
		return;
	}
	if (*errorPtr > SEVERE)
		return;
	if (*op != ',') {
		NEWERROR(*errorPtr, SYNTAX);
		return;
	}
	if (blockSize < 0) {
		NEWERROR(*errorPtr, INV_LENGTH);
		return;
	}
	/* Evaluate the data to put in block */
	op = eval(++op, &blockVal, &backRef, errorPtr);
	if (*errorPtr < SEVERE) {
		if (!isspace(*op) && *op) {
			NEWERROR(*errorPtr, SYNTAX);
			return;
		}
		/* On pass 2, output the block of values directly to the object
		   file (without putting them in the listing) */
		if (pass2)
			for (i = 0; i < blockSize; i++) {
				outputObj(loc, blockVal, size);
				loc += size;
			}
		else
			loc += blockSize * size;
	}
}
/* Deserialize a CONTENTS_HEADERS bean from its JSON representation.
 * Required fields: id (hex string), hash (hex string decoding to exactly
 * 16 bytes), size, chunk-method, mime-type; ctime/mtime are optional.
 * On success *pbean receives the new bean (caller owns it); on error
 * *pbean stays NULL and everything allocated here is released. */
GError* m2v2_json_load_single_header (struct json_object *j, gpointer *pbean) {
	GError *err = NULL;
	GByteArray *id = NULL, *hash = NULL;
	struct bean_CONTENTS_HEADERS_s *header = NULL;
	struct json_object *jid, *jhash, *jsize, *jctime, *jmtime, *jmethod, *jtype;
	struct oio_ext_json_mapping_s mapping[] = {
		{"id",           &jid,     json_type_string, 1},
		{"hash",         &jhash,   json_type_string, 1},
		{"size",         &jsize,   json_type_int,    1},
		{"ctime",        &jctime,  json_type_int,    0},
		{"mtime",        &jmtime,  json_type_int,    0},
		{"chunk-method", &jmethod, json_type_string, 1},
		{"mime-type",    &jtype,   json_type_string, 1},
		{NULL, NULL, 0, 0}
	};

	*pbean = NULL;
	if (NULL != (err = oio_ext_extract_json (j, mapping)))
		return err;

	id = metautils_gba_from_hexstring(json_object_get_string(jid));
	if (!id) {
		err = NEWERROR(CODE_BAD_REQUEST, "Invalid header, not hexa id");
		goto exit;
	}
	/* the hash must decode to exactly 16 bytes */
	hash = metautils_gba_from_hexstring(json_object_get_string(jhash));
	if (!hash || hash->len != 16) {
		err = NEWERROR(CODE_BAD_REQUEST, "Invalid header, not hexa16 hash");
		goto exit;
	}

	header = _bean_create (&descr_struct_CONTENTS_HEADERS);
	CONTENTS_HEADERS_set2_id (header, id->data, id->len);
	CONTENTS_HEADERS_set2_hash (header, hash->data, hash->len);
	CONTENTS_HEADERS_set_size (header, json_object_get_int64(jsize));
	if (jctime)
		CONTENTS_HEADERS_set_ctime (header, json_object_get_int64(jctime));
	if (jmtime)
		CONTENTS_HEADERS_set_mtime (header, json_object_get_int64(jmtime));
	CONTENTS_HEADERS_set2_chunk_method (header, json_object_get_string(jmethod));
	CONTENTS_HEADERS_set2_mime_type (header, json_object_get_string(jtype));

	/* transfer ownership to the caller; the cleanup below then skips it */
	*pbean = header;
	header = NULL;

exit:
	metautils_gba_unref (id);
	metautils_gba_unref (hash);
	_bean_clean (header);
	return err;
}
/* Extract the raw container-ID field named `n` from `msg` into `cid`.
 * The field must be present, non-empty and exactly sizeof(container_id_t)
 * bytes long; otherwise a CODE_BAD_REQUEST error is returned. */
GError * metautils_message_extract_cid(MESSAGE msg, const gchar *n, container_id_t *cid) {
	gsize fsize = 0;
	void *field = metautils_message_get_field(msg, n, &fsize);

	if (field == NULL || fsize == 0)
		return NEWERROR(CODE_BAD_REQUEST, "Missing container ID at '%s'", n);
	if (fsize != sizeof(container_id_t))
		return NEWERROR(CODE_BAD_REQUEST, "Invalid container ID at '%s'", n);

	memcpy(cid, field, sizeof(container_id_t));
	return NULL;
}
/* Resolve the peers of a meta2 base through the reference resolver.
 * When the filtered result is empty, the cache entry is invalidated once
 * and the resolution retried before giving up with CONTAINER_NOTFOUND.
 * On success *result receives a newly allocated string-vector. */
static GError * _get_peers(struct sqlx_service_s *ss, struct sqlx_name_s *n, gboolean nocache, gchar ***result) {
	EXTRA_ASSERT(ss != NULL);
	EXTRA_ASSERT(result != NULL);

	gint retries = 1;  // one forced-decache retry allowed
	gchar **peers = NULL;
	GError *err = NULL;
	gint64 seq = 1;

	struct oio_url_s *u = oio_url_empty ();
	oio_url_set(u, OIOURL_NS, ss->ns_name);
	if (!sqlx_name_extract (n, u, NAME_SRVTYPE_META2, &seq)) {
		oio_url_pclean (&u);
		return NEWERROR(CODE_BAD_REQUEST, "Invalid type name: '%s'", n->type);
	}

retry:
	if (nocache)
		hc_decache_reference_service(ss->resolver, u, n->type);
	peers = NULL;
	err = hc_resolve_reference_service(ss->resolver, u, n->type, &peers);
	if (NULL != err) {
		g_prefix_error(&err, "Peer resolution error: ");
		oio_url_clean(u);
		return err;
	}

	gchar **out = filter_services_and_clean(ss, peers, seq, n->type);
	if (!out) {
		/* possibly a stale cache entry: force a decache and retry once */
		if (retries-- > 0) {
			nocache = TRUE;
			goto retry;
		}
		err = NEWERROR(CODE_CONTAINER_NOTFOUND, "Base not managed");
	}

	if (err) {
		if (out)
			g_strfreev (out);
		*result = NULL;
	} else {
		*result = out;
	}

	oio_url_clean(u);
	return err;
}