//** Create a "simple" resource service instance.
//**   arg     - service manager (service_manager_t *) used to look up the DS/DA services
//**   kf      - ini file holding the configuration
//**   section - ini section with the RS options (fname, dynamic_mapping, check_*, min_free)
//** Returns the fully wired resource_service_fn_t. Aborts (via assert_result) if the
//** APR pool can't be made or the initial RID table load fails.
resource_service_fn_t *rs_simple_create(void *arg, tbx_inip_file_t *kf, char *section)
{
    service_manager_t *ess = (service_manager_t *)arg;
    rs_simple_priv_t *rss;
    resource_service_fn_t *rs;

    //** Create the new RS private state: pool, locks, cond, and RID-mapping hashes
    tbx_type_malloc_clear(rss, rs_simple_priv_t, 1);
    assert_result(apr_pool_create(&(rss->mpool), NULL), APR_SUCCESS);
    apr_thread_mutex_create(&(rss->lock), APR_THREAD_MUTEX_DEFAULT, rss->mpool);
    apr_thread_mutex_create(&(rss->update_lock), APR_THREAD_MUTEX_DEFAULT, rss->mpool);
    apr_thread_cond_create(&(rss->cond), rss->mpool);
    rss->rid_mapping = apr_hash_make(rss->mpool);
    rss->mapping_updates = apr_hash_make(rss->mpool);

    //** Grab the running data service and attributes from the service manager
    rss->ds = lookup_service(ess, ESS_RUNNING, ESS_DS);
    rss->da = lookup_service(ess, ESS_RUNNING, ESS_DA);

    //** Set the resource service fn ptrs
    tbx_type_malloc_clear(rs, resource_service_fn_t, 1);
    rs->priv = rss;
    rs->get_rid_config = rss_get_rid_config;
    rs->register_mapping_updates = rss_mapping_register;
    rs->unregister_mapping_updates = rss_mapping_unregister;
    rs->translate_cap_set = rss_translate_cap_set;
    rs->query_new = rs_query_base_new;
    rs->query_dup = rs_query_base_dup;
    rs->query_add = rs_query_base_add;
    rs->query_append = rs_query_base_append;
    rs->query_destroy = rs_query_base_destroy;
    rs->query_print = rs_query_base_print;
    rs->query_parse = rs_query_base_parse;
    rs->get_rid_value = rs_simple_get_rid_value;
    rs->data_request = rs_simple_request;
    rs->destroy_service = rs_simple_destroy;
    rs->type = RS_TYPE_SIMPLE;

    //** This is the file to use for loading the RID table
    rss->fname = tbx_inip_get_string(kf, section, "fname", NULL);
    rss->dynamic_mapping = tbx_inip_get_integer(kf, section, "dynamic_mapping", 0);
    rss->check_interval = tbx_inip_get_integer(kf, section, "check_interval", 300);   //** seconds between RID checks
    rss->check_timeout = tbx_inip_get_integer(kf, section, "check_timeout", 60);
    rss->min_free = tbx_inip_get_integer(kf, section, "min_free", 100*1024*1024);     //** bytes

    //** Set the modify time to force a change
    rss->modify_time = 0;

    //** Load the RID table now so the service is usable immediately
    assert_result(_rs_simple_refresh(rs), 0);

    //** Launch the check thread
    tbx_thread_create_assert(&(rss->check_thread), NULL, rss_check_thread, (void *)rs, rss->mpool);

    return(rs);
}
//** Increment the ongoing-heartbeat tracking for (remote_host, my_id).
//** Creates the per-host table and the per-id heartbeat record on first use.
//**   on          - ongoing tracker (on->lock is held for the whole update)
//**   remote_host - address message identifying the remote host
//**   my_id/id_len - caller's opaque id (raw bytes, copied)
//**   heartbeat   - requested heartbeat interval; divided by on->send_divisor, min 1 sec
void mq_ongoing_host_inc(mq_ongoing_t *on, mq_msg_t *remote_host, char *my_id, int id_len, int heartbeat)
{
    ongoing_hb_t *oh;
    ongoing_table_t *table;
    mq_msg_hash_t hash;
    char *remote_host_string;

    apr_thread_mutex_lock(on->lock);

    char *str = mq_address_to_string(remote_host);
    log_printf(5, "remote_host=%s\n", str);
    free(str);

    hash = mq_msg_hash(remote_host);
    table = apr_hash_get(on->table, &hash, sizeof(mq_msg_hash_t));  //** Look up the remote host

    if (tbx_log_level() > 5) {
        remote_host_string = mq_address_to_string(remote_host);
        log_printf(5, "remote_host=%s hb=%d table=%p\n", remote_host_string, heartbeat, table);
        free(remote_host_string);
    }

    if (table == NULL) { //** New host so add it
        tbx_type_malloc_clear(table, ongoing_table_t, 1);
        table->table = apr_hash_make(on->mpool);
        assert(table->table != NULL);
        table->remote_host = mq_msg_new();
        //** Keep our own copy of the address; the table owns it (AUTO_FREE)
        mq_msg_append_msg(table->remote_host, remote_host, MQF_MSG_AUTO_FREE);
        table->remote_host_hash = hash;
        //** Key by the hash stored INSIDE the table so the key stays valid
        apr_hash_set(on->table, &(table->remote_host_hash), sizeof(mq_msg_hash_t), table);
    }

    table->count++;

    oh = apr_hash_get(table->table, my_id, id_len);  //** Look up the id
    if (oh == NULL) { //** New id so add it
        tbx_type_malloc_clear(oh, ongoing_hb_t, 1);
        tbx_type_malloc(oh->id, char, id_len);
        memcpy(oh->id, my_id, id_len);   //** raw copy -- id need not be NUL-terminated
        oh->id_len = id_len;

        //** Scale down the heartbeat for sending; never below 1 second
        oh->heartbeat = heartbeat / on->send_divisor;
        if (oh->heartbeat < 1) oh->heartbeat = 1;
        if (tbx_log_level() > 5) {
            remote_host_string = mq_address_to_string(remote_host);
            log_printf(5, "remote_host=%s final hb=%d \n", remote_host_string, oh->heartbeat);
            free(remote_host_string);
        }
        oh->next_check = apr_time_now() + apr_time_from_sec(oh->heartbeat);
        apr_hash_set(table->table, oh->id, id_len, oh);
    }
//** Allocate a zero-initialized network-stream checksum handle.
//** Caller owns the returned pointer.
tbx_ns_chksum_t *tbx_ns_chksum_new()
{
    tbx_ns_chksum_t *ncs;

    tbx_type_malloc_clear(ncs, tbx_ns_chksum_t, 1);

    return(ncs);
}
//** Create a thread-pool context.
//**   tp_name             - optional label (duplicated; NULL allowed)
//**   min_threads/max_threads - pool bounds; values <= 0 keep the defaults from
//**                         default_thread_pool_config(). max gets +1 because the
//**                         recursion depth offset starts at 1.
//**   max_recursion_depth - max nested-submit depth; +1 applied for the same reason
//** First caller also bootstraps the process-wide pool/lock/stats (guarded by the
//** atomic counter). Returns the new context; aborts if the APR pool create fails.
gop_thread_pool_context_t *gop_tp_context_create(char *tp_name, int min_threads, int max_threads, int max_recursion_depth)
{
    // char buffer[1024];
    gop_thread_pool_context_t *tpc;
    apr_interval_time_t dt;
    int i;

    log_printf(15, "count=%d\n", _tp_context_count);

    tbx_type_malloc_clear(tpc, gop_thread_pool_context_t, 1);

    //** One-time global init: only the FIRST context (counter was 0) makes the pool/lock/stats.
    //** NOTE(review): the thread_local_depth_key check below is outside any lock -- assumed
    //** contexts are created from a single thread at startup; confirm.
    if (tbx_atomic_inc(_tp_context_count) == 0) {
        apr_pool_create(&_tp_pool, NULL);
        apr_thread_mutex_create(&_tp_lock, APR_THREAD_MUTEX_DEFAULT, _tp_pool);
        thread_pool_stats_init();
    }

    if (thread_local_depth_key == NULL) apr_threadkey_private_create(&thread_local_depth_key,_thread_pool_destructor, _tp_pool);

    tpc->pc = gop_hp_context_create(&_tp_base_portal);  //** Really just used for the submit

    default_thread_pool_config(tpc);
    if (min_threads > 0) tpc->min_threads = min_threads;
    if (max_threads > 0) tpc->max_threads = max_threads + 1;  //** Add one for the recursion depth starting offset being 1
    tpc->recursion_depth = max_recursion_depth + 1;  //** The min recursion normally starts at 1 so just slap an extra level and we don't care about 0|1 starting location

    //** Concurrency is what's left of the pool after reserving one thread per recursion level
    tpc->max_concurrency = tpc->max_threads - tpc->recursion_depth;
    if (tpc->max_concurrency <= 0) {
        tpc->max_threads += 5 - tpc->max_concurrency;  //** Make sure we have at least 5 threads for work
        tpc->max_concurrency = tpc->max_threads - tpc->recursion_depth;
        log_printf(0, "Specified max threads and recursion depth don't work. Adjusting max_threads=%d\n", tpc->max_threads);
    }

    dt = tpc->min_idle * 1000000;   //** seconds -> microseconds for APR
    assert_result(apr_thread_pool_create(&(tpc->tp), tpc->min_threads, tpc->max_threads, _tp_pool), APR_SUCCESS);
    apr_thread_pool_idle_wait_set(tpc->tp, dt);
    apr_thread_pool_threshold_set(tpc->tp, 0);

    tpc->name = (tp_name == NULL) ? NULL : strdup(tp_name);

    //** Zero the op counters
    tbx_atomic_set(tpc->n_ops, 0);
    tbx_atomic_set(tpc->n_completed, 0);
    tbx_atomic_set(tpc->n_started, 0);
    tbx_atomic_set(tpc->n_submitted, 0);
    tbx_atomic_set(tpc->n_running, 0);

    //** Per-recursion-level overflow bookkeeping and reserve stacks
    tbx_type_malloc(tpc->overflow_running_depth, int, tpc->recursion_depth);
    tbx_type_malloc(tpc->reserve_stack, tbx_stack_t *, tpc->recursion_depth);
    for (i=0; i<tpc->recursion_depth; i++) {
        tpc->overflow_running_depth[i] = -1;   //** -1 == no overflow task running at this depth
        tpc->reserve_stack[i] = tbx_stack_new();
    }

    return(tpc);
}
//** Parse a single [rid] ini group into a freshly allocated rss_rid_entry_t.
//** Well-known keys (rid_key, ds_key, status, space_*) land in dedicated fields;
//** everything else becomes an entry in rse->attr. Returns NULL (after freeing
//** the partial entry) when rid_key or ds_key is absent.
rss_rid_entry_t *rss_load_entry(tbx_inip_group_t *grp)
{
    rss_rid_entry_t *rse;
    tbx_inip_element_t *ep;
    char *k, *v;

    tbx_type_malloc_clear(rse, rss_rid_entry_t, 1);
    rse->status = RS_STATUS_UP;
    rse->attr = tbx_list_create(1, &tbx_list_string_compare, tbx_list_string_dup, tbx_list_simple_free, tbx_list_simple_free);

    //** Walk every key=value pair in the group
    for (ep = tbx_inip_ele_first(grp); ep != NULL; ep = tbx_inip_ele_next(ep)) {
        k = tbx_inip_ele_get_key(ep);
        v = tbx_inip_ele_get_value(ep);

        if (strcmp(k, "rid_key") == 0) {  //** This is the RID so store it separately
            rse->rid_key = strdup(v);
            //** NOTE(review): the same pointer is kept in rse->rid_key AND inserted as a
            //** list value whose free fn is tbx_list_simple_free -- confirm the destroy
            //** path frees it exactly once.
            tbx_list_insert(rse->attr, k, rse->rid_key);
        } else if (strcmp(k, "ds_key") == 0) {      //** What gets passed to the data service
            rse->ds_key = strdup(v);
        } else if (strcmp(k, "status") == 0) {      //** Current status
            rse->status = atoi(v);
        } else if (strcmp(k, "space_free") == 0) {  //** Free bytes
            rse->space_free = tbx_stk_string_get_integer(v);
        } else if (strcmp(k, "space_used") == 0) {  //** Used bytes
            rse->space_used = tbx_stk_string_get_integer(v);
        } else if (strcmp(k, "space_total") == 0) { //** Total bytes
            rse->space_total = tbx_stk_string_get_integer(v);
        } else if ((k != NULL) && (v != NULL)) {    //** Anything else is an attribute
            tbx_list_insert(rse->attr, k, strdup(v));
        }

        log_printf(15, "rss_load_entry: key=%s value=%s\n", k, v);
    }

    //** Make sure we have an RID and DS link
    if ((rse->rid_key == NULL) || (rse->ds_key == NULL)) {
        log_printf(1, "rss_load_entry: missing RID or ds_key! rid=%s ds_key=%s\n", rse->rid_key, rse->ds_key);
        rs_simple_rid_free(rse);
        rse = NULL;
    }

    return(rse);
}
//** Allocate a data block bound to the given data service with a preassigned id.
//** A fresh capability set is created from the data service; caller owns the block.
lio_data_block_t *data_block_create_with_id(lio_data_service_fn_t *ds, ex_id_t id)
{
    lio_data_block_t *db;

    tbx_type_malloc_clear(db, lio_data_block_t, 1);

    db->ds = ds;
    db->id = id;
    db->cap = ds_cap_set_create(db->ds);

    log_printf(15, "b->id=" XIDT " ref_count=%d b=%p\n", db->id, db->ref_count, db);

    return(db);
}
thread_local_stats_t *_thread_local_stats_ptr() { thread_local_stats_t *my = NULL; apr_threadkey_private_get((void *)&my, thread_local_stats_key); if (my == NULL ) { tbx_type_malloc_clear(my, thread_local_stats_t, 1); apr_thread_mutex_lock(_tp_lock); my->concurrent_max = _tp_concurrent_max; memcpy(my->depth_concurrent, _tp_depth_concurrent_max, sizeof(_tp_depth_concurrent_max)); //** Set to the current global apr_thread_mutex_unlock(_tp_lock); apr_threadkey_private_set(my, thread_local_stats_key); } return(my); }
mq_socket_context_t *zero_socket_context_new() { mq_socket_context_t *ctx; tbx_type_malloc_clear(ctx, mq_socket_context_t, 1); ctx->arg = zctx_new(); assert(ctx->arg != NULL); zctx_set_linger(ctx->arg, 0); ctx->create_socket = zero_create_socket; ctx->destroy = zero_socket_context_destroy; //** Disable the CZMQ SIGINT/SIGTERM signale handler apr_signal(SIGINT, NULL); apr_signal(SIGTERM, NULL); return(ctx); }
//** Set (or replace) attribute "key" on the data block.
//**   b   - data block (its attr_stack is created lazily)
//**   key - attribute name (duplicated for new entries)
//**   val - new value (duplicated); NULL stores NULL
//** Returns 0.
int data_block_set_attr(lio_data_block_t *b, char *key, char *val)
{
    lio_data_block_attr_t *attr;
    int is_new = 0;

    //** See if the key already exists
    attr = db_find_key(b->attr_stack, key);
    if (attr == NULL) { //** Nope -- make a new attribute record
        tbx_type_malloc_clear(attr, lio_data_block_attr_t, 1);
        attr->key = strdup(key);
        is_new = 1;
    }

    //** Swap in the new value
    if (attr->value != NULL) free(attr->value);  //** Free the old value
    attr->value = (val != NULL) ? strdup(val) : NULL;  //** Store the new one

    //** BUG FIX: only push newly created attributes. The original pushed
    //** unconditionally, so updating an existing key inserted a duplicate
    //** pointer on the stack (duplicate listing and potential double free).
    if (is_new) {
        if (b->attr_stack == NULL) b->attr_stack = tbx_stack_new();
        tbx_stack_push(b->attr_stack, attr);
    }

    return(0);
}
//** Create a "simple router" MQ socket backed by a ZMQ_ROUTER socket and
//** wire up its method table. Uses the native handlers except for recv.
mq_socket_t *zero_create_simple_router_socket(mq_socket_context_t *ctx)
{
    mq_socket_t *sock;

    tbx_type_malloc_clear(sock, mq_socket_t, 1);
    sock->type = MQ_SIMPLE_ROUTER;

    //** Underlying 0MQ socket: no linger, deep send/recv high-water marks
    sock->arg = zsocket_new((zctx_t *)ctx->arg, ZMQ_ROUTER);
    zsocket_set_linger(sock->arg, 0);
    zsocket_set_sndhwm(sock->arg, 100000);
    zsocket_set_rcvhwm(sock->arg, 100000);

    //** Method table
    sock->destroy = zero_native_destroy;
    sock->bind = zero_native_bind;
    sock->connect = zero_native_connect;
    sock->disconnect = zero_native_disconnect;
    sock->poll_handle = zero_native_poll_handle;
    sock->monitor = zero_native_monitor;
    sock->send = zero_native_send;
    sock->recv = zero_simple_router_recv;

    return(sock);
}
//** Create the remote-server resource service: wraps a local child RS and exports
//** its RID configuration/state over an MQ server portal.
//**   arg     - service manager (service_manager_t *)
//**   fd      - ini config
//**   section - ini section (defaults to "rs_remote_server" when NULL)
//** Aborts on a missing/failed child RS or pool-creation failure.
resource_service_fn_t *rs_remote_server_create(void *arg, tbx_inip_file_t *fd, char *section)
{
    service_manager_t *ess = (service_manager_t *)arg;
    resource_service_fn_t *rs;
    rs_remote_server_priv_t *rsrs;
    rs_create_t *rs_create;
    mq_command_table_t *ctable;
    char *stype, *ctype;

    if (section == NULL) section = "rs_remote_server";

    tbx_type_malloc_clear(rs, resource_service_fn_t, 1);
    tbx_type_malloc_clear(rsrs, rs_remote_server_priv_t, 1);
    rs->priv = (void *)rsrs;

    //** Make the locks and cond variables
    assert_result(apr_pool_create(&(rsrs->mpool), NULL), APR_SUCCESS);
    apr_thread_mutex_create(&(rsrs->lock), APR_THREAD_MUTEX_DEFAULT, rsrs->mpool);
    apr_thread_cond_create(&(rsrs->cond), rsrs->mpool);

    rsrs->pending = tbx_stack_new();
    memset(&(rsrs->my_map_version), 0, sizeof(rsrs->my_map_version));
    memset(&(rsrs->notify_map_version), 0, sizeof(rsrs->notify_map_version));
    rsrs->notify_map_version.lock = rsrs->lock;
    rsrs->notify_map_version.cond = rsrs->cond;

    //** Get the host name we bind to
    rsrs->hostname= tbx_inip_get_string(fd, section, "address", NULL);

    //** Start the child RS. The update above should have dumped a RID config for it to load
    stype = tbx_inip_get_string(fd, section, "rs_local", NULL);
    if (stype == NULL) {  //** Oops missing child RS
        //** FIX: corrected "Mising" typo; dropped the original free(stype) here,
        //** which was a no-op free(NULL).
        log_printf(0, "ERROR: Missing child RS section=%s key=rs_local!\n", section);
        tbx_log_flush();
        abort();
    }

    //** ...and load it
    ctype = tbx_inip_get_string(fd, stype, "type", RS_TYPE_SIMPLE);
    rs_create = lookup_service(ess, RS_SM_AVAILABLE, ctype);
    rsrs->rs_child = (*rs_create)(ess, fd, stype);
    if (rsrs->rs_child == NULL) {
        log_printf(1, "ERROR loading child RS! type=%s section=%s\n", ctype, stype);
        tbx_log_flush();
        abort();
    }
    free(ctype);
    free(stype);

    //** Get the MQC
    rsrs->mqc = lookup_service(ess, ESS_RUNNING, ESS_MQ);
    assert(rsrs->mqc != NULL);

    //** Make the server portal and register the command handlers
    rsrs->server_portal = mq_portal_create(rsrs->mqc, rsrs->hostname, MQ_CMODE_SERVER);
    ctable = mq_portal_command_table(rsrs->server_portal);
    mq_command_set(ctable, RSR_GET_RID_CONFIG_KEY, RSR_GET_RID_CONFIG_SIZE, rs, rsrs_rid_config_cb);
    mq_command_set(ctable, RSR_GET_UPDATE_CONFIG_KEY, RSR_GET_UPDATE_CONFIG_SIZE, rs, rsrs_rid_config_cb);
    mq_command_set(ctable, RSR_ABORT_KEY, RSR_ABORT_SIZE, rs, rsrs_abort_cb);
    mq_portal_install(rsrs->mqc, rsrs->server_portal);

    //** Launch the config changes thread
    tbx_thread_create_assert(&(rsrs->monitor_thread), NULL, rsrs_monitor_thread, (void *)rs, rsrs->mpool);

    //** Set up the fn ptrs. This is just for syncing the rid configuration and state
    //** so very little is implemented
    rs->destroy_service = rs_remote_server_destroy;
    rs->type = RS_TYPE_REMOTE_SERVER;

    return(rs);
}
//** Parse an RID configuration string and print a per-RID usage table plus
//** usable/total summary rows.
//**   config - ini-format RID configuration text
//**   base   - scale base (e.g. 1000 or 1024) passed to the pretty-printer
//** Note: rsum->rid/ds_key/host point into the parsed ini (kf), so the table is
//** destroyed before the ini file is.
void print_rid_summary(char *config, int base)
{
    tbx_list_t *table;
    tbx_list_iter_t it;
    tbx_inip_group_t *ig;
    tbx_inip_file_t *kf;
    tbx_inip_element_t *ele;
    char *key, *value;
    char fbuf[20], ubuf[20], tbuf[20];
    //** Index 4 is the catch-all for out-of-range status values
    char *state[5] = { "UP ", "IGNORE ", "NO_SPACE", "DOWN ", "INVALID " };
    int n, n_usable;
    rid_summary_t *rsum;
    ex_off_t space_total, space_free, space_used;
    ex_off_t up_total, up_free, up_used;

    space_total = space_free = space_used = 0;
    up_total = up_free = up_used = n_usable = 0;

    //** Create the table where we hold the info (values freed on destroy; keys not)
    table = tbx_list_create(0, &tbx_list_string_compare, NULL, NULL, free);

    //** Open the file
    kf = tbx_inip_string_read(config);
    assert(kf);

    //** And load it: one rid_summary_t per [rid] group
    ig = tbx_inip_group_first(kf);
    while (ig != NULL) {
        key = tbx_inip_group_get(ig);
        if (strcmp("rid", key) == 0) {  //** Found a resource
            tbx_type_malloc_clear(rsum, rid_summary_t, 1);

            //** Now cycle through the attributes
            ele = tbx_inip_ele_first(ig);
            while (ele != NULL) {
                key = tbx_inip_ele_get_key(ele);
                value = tbx_inip_ele_get_value(ele);
                if (strcmp(key, "rid_key") == 0) {  //** This is the RID so store it separate
                    rsum->rid = value;
                } else if (strcmp(key, "ds_key") == 0) {  //** Data service key
                    rsum->ds_key = value;
                } else if (strcmp(key, "host") == 0) {  //** Host
                    rsum->host = value;
                } else if (strcmp(key, "status") == 0) {  //** Status (clamped to 0..3; else 4=INVALID)
                    sscanf(value, "%d", &n);
                    rsum->status = ((n>=0) && (n<=3)) ? n : 4;
                } else if (strcmp(key, "space_free") == 0) {  //** Free space
                    sscanf(value, XOT, &(rsum->free));
                    space_free += rsum->free;
                } else if (strcmp(key, "space_used") == 0) {  //** Used space
                    sscanf(value, XOT, &(rsum->used));
                    space_used += rsum->used;
                } else if (strcmp(key, "space_total") == 0) {  //** Total space
                    sscanf(value, XOT, &(rsum->total));
                    space_total += rsum->total;
                }
                ele = tbx_inip_ele_next(ele);
            }

            if (rsum->status == 0) {  //** Only UP resources count toward "usable"
                n_usable++;
                up_free += rsum->free;
                up_used += rsum->used;
                up_total += rsum->total;
            }
            tbx_list_insert(table, rsum->rid, rsum);
        }
        ig = tbx_inip_group_next(ig);
    }

    //** Now print the summary, sorted by RID key
    printf(" RID State Host Used Free Total\n");
    printf("-------------------- -------- ------------------------------ --------- --------- ---------\n");
    it = tbx_list_iter_search(table, NULL, 0);
    while (tbx_list_next(&it, (tbx_list_key_t **)&key, (tbx_list_data_t **)&rsum) == 0) {
        printf("%-20s %8s %-30s %8s %8s %8s\n", rsum->rid, state[rsum->status], rsum->host,
               tbx_stk_pretty_print_double_with_scale(base, (double)rsum->used, ubuf),
               tbx_stk_pretty_print_double_with_scale(base, (double)rsum->free, fbuf),
               tbx_stk_pretty_print_double_with_scale(base, (double)rsum->total, tbuf));
    }
    printf("-------------------------------------------------------------- --------- --------- ---------\n");
    printf("Usable Resources:%4d %8s %8s %8s\n", n_usable,
           tbx_stk_pretty_print_double_with_scale(base, (double)up_used, ubuf),
           tbx_stk_pretty_print_double_with_scale(base, (double)up_free, fbuf),
           tbx_stk_pretty_print_double_with_scale(base, (double)up_total, tbuf));
    printf("Total Resources: %4d %8s %8s %8s\n", tbx_list_key_count(table),
           tbx_stk_pretty_print_double_with_scale(base, (double)space_used, ubuf),
           tbx_stk_pretty_print_double_with_scale(base, (double)space_free, fbuf),
           tbx_stk_pretty_print_double_with_scale(base, (double)space_total, tbuf));

    tbx_list_destroy(table);

    //** Close the file
    tbx_inip_destroy(kf);
}
op_generic_t *rs_simple_request(resource_service_fn_t *arg, data_attr_t *da, rs_query_t *rsq, data_cap_set_t **caps, rs_request_t *req, int req_size, rs_hints_t *hints_list, int fixed_size, int n_rid, int ignore_fixed_err, int timeout) { rs_simple_priv_t *rss = (rs_simple_priv_t *)arg->priv; rsq_base_t *query_global = (rsq_base_t *)rsq; rsq_base_t *query_local; kvq_table_t kvq_global, kvq_local, *kvq; apr_hash_t *pick_from; rid_change_entry_t *rid_change; ex_off_t change; op_status_t status; opque_t *que; rss_rid_entry_t *rse; rsq_base_ele_t *q; int slot, rnd_off, i, j, k, i_unique, i_pickone, found, err_cnt, loop, loop_end; int state, *a, *b, *op_state, unique_size; tbx_stack_t *stack; log_printf(15, "rs_simple_request: START rss->n_rids=%d n_rid=%d req_size=%d fixed_size=%d\n", rss->n_rids, n_rid, req_size, fixed_size); for (i=0; i<req_size; i++) req[i].rid_key = NULL; //** Clear the result in case of an error apr_thread_mutex_lock(rss->lock); i = _rs_simple_refresh(arg); //** Check if we need to refresh the data if (i != 0) { apr_thread_mutex_unlock(rss->lock); return(gop_dummy(op_failure_status)); } //** Determine the query sizes and make the processing arrays memset(&kvq, 0, sizeof(kvq)); rs_query_count(arg, rsq, &i, &(kvq_global.n_unique), &(kvq_global.n_pickone)); log_printf(15, "rs_simple_request: n_unique=%d n_pickone=%d\n", kvq_global.n_unique, kvq_global.n_pickone); tbx_log_flush(); //** Make space the for the uniq and pickone fields. //** Make sure we have space for at least 1 more than we need of each to pass to the routines even though they aren't used j = (kvq_global.n_pickone == 0) ? 
1 : kvq_global.n_pickone + 1; tbx_type_malloc_clear(kvq_global.pickone, kvq_ele_t, j); unique_size = kvq_global.n_unique + 1; tbx_type_malloc_clear(kvq_global.unique, kvq_ele_t *, unique_size); log_printf(15, "MALLOC j=%d\n", unique_size); for (i=0; i<unique_size; i++) { tbx_type_malloc_clear(kvq_global.unique[i], kvq_ele_t, n_rid); } //** We don't allow these on the local but make a temp space anyway kvq_local.n_pickone = 0; tbx_type_malloc_clear(kvq_local.pickone, kvq_ele_t, 1); kvq_global.n_unique = 0; tbx_type_malloc_clear(kvq_local.unique, kvq_ele_t *, 1); tbx_type_malloc_clear(kvq_local.unique[0], kvq_ele_t, n_rid); status = op_success_status; que = new_opque(); stack = tbx_stack_new(); err_cnt = 0; found = 0; // max_size = (req_size > fixed_size) ? req_size : fixed_size; for (i=0; i < n_rid; i++) { found = 0; loop_end = 1; query_local = NULL; rnd_off = tbx_random_get_int64(0, rss->n_rids-1); //rnd_off = 0; //FIXME if (hints_list != NULL) { query_local = (rsq_base_t *)hints_list[i].local_rsq; if (query_local != NULL) { loop_end = 2; rs_query_count(arg, query_local, &j, &(kvq_local.n_unique), &(kvq_local.n_pickone)); if ((kvq_local.n_unique != 0) && (kvq_local.n_pickone != 0)) { log_printf(0, "Unsupported use of pickone/unique in local RSQ hints_list[%d]=%s!\n", i, hints_list[i].fixed_rid_key); status.op_status = OP_STATE_FAILURE; status.error_code = RS_ERROR_FIXED_NOT_FOUND; hints_list[i].status = RS_ERROR_HINTS_INVALID_LOCAL; err_cnt++; continue; } } if (i<fixed_size) { //** Use the fixed list for assignment rse = tbx_list_search(rss->rid_table, hints_list[i].fixed_rid_key); if (rse == NULL) { log_printf(0, "Missing element in hints list[%d]=%s! Ignoring check.\n", i, hints_list[i].fixed_rid_key); hints_list[i].status = RS_ERROR_FIXED_NOT_FOUND; continue; //** Skip the check } rnd_off = rse->slot; } } //** See if we use a restrictive list. Ususally used when rebalancing space pick_from = (hints_list != NULL) ? 
hints_list[i].pick_from : NULL; rid_change = NULL; change = 0; for (k=0; k<req_size; k++) { if (req[k].rid_index == i) { change += req[k].size; } } for (j=0; j<rss->n_rids; j++) { slot = (rnd_off+j) % rss->n_rids; rse = rss->random_array[slot]; if (pick_from != NULL) { rid_change = apr_hash_get(pick_from, rse->rid_key, APR_HASH_KEY_STRING); log_printf(15, "PICK_FROM != NULL i=%d j=%d slot=%d rse->rid_key=%s rse->status=%d rid_change=%p\n", i, j, slot, rse->rid_key, rse->status, rid_change); if (rid_change == NULL) continue; //** Not in our list so skip to the next ex_off_t delta = rid_change->delta - change; log_printf(15, "PICK_FROM != NULL i=%d j=%d slot=%d rse->rid_key=%s rse->status=%d rc->state=%d (" XOT ") > " XOT "????\n", i, j, slot, rse->rid_key, rse->status, rid_change->state, delta, rid_change->tolerance); //** Make sure we don't overshoot the target if (rid_change->state == 1) continue; //** Already converged RID if (rid_change->delta <= 0) continue; //** Need to move data OFF this RID if ((change - rid_change->delta) > rid_change->tolerance) continue; //**delta>0 if we made it here } log_printf(15, "i=%d j=%d slot=%d rse->rid_key=%s rse->status=%d\n", i, j, slot, rse->rid_key, rse->status); if ((rse->status != RS_STATUS_UP) && (i>=fixed_size)) continue; //** Skip this if disabled and not in the fixed list tbx_stack_empty(stack, 1); q = query_global->head; kvq = &kvq_global; for (loop=0; loop<loop_end; loop++) { i_unique = 0; i_pickone = 0; while (q != NULL) { state = -1; switch (q->op) { case RSQ_BASE_OP_KV: state = rss_test(q, rse, i, kvq->unique[i_unique], &(kvq->pickone[i_pickone])); log_printf(0, "KV: key=%s val=%s i_unique=%d i_pickone=%d loop=%d rss_test=%d rse->rid_key=%s\n", q->key, q->val, i_unique, i_pickone, loop, state, rse->rid_key); tbx_log_flush(); if ((q->key_op & RSQ_BASE_KV_UNIQUE) || (q->val_op & RSQ_BASE_KV_UNIQUE)) i_unique++; if ((q->key_op & RSQ_BASE_KV_PICKONE) || (q->val_op & RSQ_BASE_KV_PICKONE)) i_pickone++; break; case 
RSQ_BASE_OP_NOT: a = (int *)tbx_stack_pop(stack); state = (*a == 0) ? 1 : 0; //log_printf(0, "NOT(%d)=%d\n", *a, state); free(a); break; case RSQ_BASE_OP_AND: a = (int *)tbx_stack_pop(stack); b = (int *)tbx_stack_pop(stack); state = (*a) && (*b); //log_printf(0, "%d AND %d = %d\n", *a, *b, state); free(a); free(b); break; case RSQ_BASE_OP_OR: a = (int *)tbx_stack_pop(stack); b = (int *)tbx_stack_pop(stack); state = a || b; //log_printf(0, "%d OR %d = %d\n", *a, *b, state); free(a); free(b); break; } tbx_type_malloc(op_state, int, 1); *op_state = state; tbx_stack_push(stack, (void *)op_state); log_printf(15, " stack_size=%d loop=%d push state=%d\n",tbx_stack_count(stack), loop, state); tbx_log_flush(); q = q->next; } if (query_local != NULL) { q = query_local->head; kvq = &kvq_local; } } op_state = (int *)tbx_stack_pop(stack); state = -1; if (op_state != NULL) { state = *op_state; free(op_state); } if (op_state == NULL) { log_printf(1, "rs_simple_request: ERROR processing i=%d EMPTY STACK\n", i); found = 0; status.op_status = OP_STATE_FAILURE; status.error_code = RS_ERROR_EMPTY_STACK; } else if (state == 1) { //** Got one log_printf(15, "rs_simple_request: processing i=%d ds_key=%s\n", i, rse->ds_key); found = 1; if ((i<fixed_size) && hints_list) hints_list[i].status = RS_ERROR_OK; for (k=0; k<req_size; k++) { if (req[k].rid_index == i) { log_printf(15, "rs_simple_request: i=%d ds_key=%s, rid_key=%s size=" XOT "\n", i, rse->ds_key, rse->rid_key, req[k].size); req[k].rid_key = strdup(rse->rid_key); req[k].gop = ds_allocate(rss->ds, rse->ds_key, da, req[k].size, caps[k], timeout); opque_add(que, req[k].gop); } } if (rid_change != NULL) { //** Flag that I'm tweaking things. The caller does the source pending/delta half rid_change->delta -= change; rid_change->state = ((llabs(rid_change->delta) <= rid_change->tolerance) || (rid_change->tolerance == 0)) ? 
1 : 0; } break; //** Got one so exit the RID scan and start the next one } else if (i<fixed_size) { //** This should have worked so flag an error if (hints_list) { log_printf(1, "Match fail in fixed list[%d]=%s!\n", i, hints_list[i].fixed_rid_key); hints_list[i].status = RS_ERROR_FIXED_MATCH_FAIL; } else { log_printf(1, "Match fail in fixed list and no hints are provided!\n"); } status.op_status = OP_STATE_FAILURE; status.error_code = RS_ERROR_FIXED_MATCH_FAIL; if (ignore_fixed_err == 0) err_cnt++; break; //** Skip to the next in the list } else { found = 0; } } if ((found == 0) && (i>=fixed_size)) break; } //** Clean up log_printf(15, "FREE j=%d\n", unique_size); for (i=0; i<unique_size; i++) { free(kvq_global.unique[i]); } free(kvq_global.unique); free(kvq_global.pickone); free(kvq_local.unique[0]); free(kvq_local.unique); free(kvq_local.pickone); tbx_stack_free(stack, 1); log_printf(15, "rs_simple_request: END n_rid=%d\n", n_rid); //callback_t *cb = (callback_t *)que->qd.list->top->data; //op_generic_t *gop = (op_generic_t *)cb->priv; //log_printf(15, "top gid=%d reg=%d\n", gop_id(gop), gop_id(req[0].gop)); apr_thread_mutex_unlock(rss->lock); if ((found == 0) || (err_cnt>0)) { opque_free(que, OP_DESTROY); if (status.error_code == 0) { log_printf(1, "rs_simple_request: Can't find enough RIDs! requested=%d found=%d err_cnt=%d\n", n_rid, found, err_cnt); status.op_status = OP_STATE_FAILURE; status.error_code = RS_ERROR_NOT_ENOUGH_RIDS; } return(gop_dummy(status)); } return(opque_get_gop(que)); }
//** Allocate and initialize a host portal for the "host:port" string hostport.
//**   hpc             - parent portal context (supplies dup_connect_context and abort limits)
//**   connect_context - duplicated into the new portal
//**   min_conn/max_conn - connection bounds; stable_conn starts at max_conn
//**   dt_connect      - connect timeout
//** Returns the new portal. Aborts (assert_result) if the APR pool can't be made.
host_portal_t *create_hportal(portal_context_t *hpc, void *connect_context, char *hostport, int min_conn, int max_conn, apr_time_t dt_connect)
{
    host_portal_t *hp;

    log_printf(15, "create_hportal: hpc=%p\n", hpc);
    tbx_type_malloc_clear(hp, host_portal_t, 1);
    assert_result(apr_pool_create(&(hp->mpool), NULL), APR_SUCCESS);

    //** Split hostport into host + port
    char host[sizeof(hp->host)];
    int port;
    char *hp2 = strdup(hostport);
    char *bstate;
    int fin;

    host[0] = '\0';
    //** FIX: snprintf always NUL-terminates (the strncpy it replaces needed a
    //** manual terminator and zero-padded the rest)
    snprintf(host, sizeof(host), "%s", tbx_stk_string_token(hp2, HP_HOSTPORT_SEPARATOR, &bstate, &fin));
    port = atoi(bstate);  //** bstate points at the text after the separator
    free(hp2);

    log_printf(15, "create_hportal: hostport: %s host=%s port=%d min=%d max=%d dt=" TT "\n", hostport, host, port, min_conn, max_conn, dt_connect);

    snprintf(hp->host, sizeof(hp->host), "%s", host);

    //** Check if we can resolve the host's IP address
    char in_addr[6];
    if (tbx_dnsc_lookup(host, in_addr, NULL) != 0) {
        log_printf(1, "create_hportal: Can't resolve host address: %s:%d\n", host, port);
        //** NOTE(review): lookup failures are deliberately NOT flagged (the
        //** invalid_host = 1 line was commented out upstream) -- confirm intent
        hp->invalid_host = 0;
    } else {
        hp->invalid_host = 0;
    }

    hp->port = port;
    snprintf(hp->skey, sizeof(hp->skey), "%s", hostport);
    hp->connect_context = hpc->fn->dup_connect_context(connect_context);
    hp->context = hpc;
    hp->min_conn = min_conn;
    hp->max_conn = max_conn;
    hp->dt_connect = dt_connect;
    hp->sleeping_conn = 0;
    hp->workload = 0;
    hp->executing_workload = 0;
    hp->cmds_processed = 0;
    hp->n_conn = 0;
    hp->conn_list = tbx_stack_new();
    hp->closed_que = tbx_stack_new();
    hp->que = tbx_stack_new();
    hp->direct_list = tbx_stack_new();
    hp->pause_until = 0;
    hp->stable_conn = max_conn;
    hp->closing_conn = 0;
    hp->failed_conn_attempts = 0;
    hp->successful_conn_attempts = 0;
    hp->abort_conn_attempts = hpc->abort_conn_attempts;

    apr_thread_mutex_create(&(hp->lock), APR_THREAD_MUTEX_DEFAULT, hp->mpool);
    apr_thread_cond_create(&(hp->cond), hp->mpool);

    return(hp);
}
//** lio_ls: list LIO objects matching the given path(s)/globs, optionally
//** recursing, resolving symlink targets asynchronously, and sorting the output.
//** Exit codes: 0 ok, 1 usage, 2 missing directory or no matches, EIO on iter/readlink failure.
int main(int argc, char **argv)
{
    int i, j, ftype, rg_mode, start_option, start_index, prefix_len, nosort, err;
    ex_off_t fcount;
    char *fname;
    ls_entry_t *lse;
    tbx_list_t *table;
    // lio_path_tuple_t tuple;
    //** NOTE(review): tuple's local declaration is commented out above but tuple is
    //** used throughout -- presumably declared at file scope; confirm.
    os_regex_table_t *rp_single, *ro_single;
    os_object_iter_t *it;
    tbx_list_iter_t lit;
    opque_t *q;
    op_generic_t *gop;
    //** Attributes fetched for every listed object (indices match vals/v_size)
    char *keys[] = { "system.owner", "system.exnode.size", "system.modify_data", "os.create", "os.link_count" };
    char *vals[5];
    int v_size[5];
    int n_keys = 5;
    int recurse_depth = 0;
    int obj_types = OS_OBJECT_ANY;
    int return_code = 0;

    //printf("argc=%d\n", argc);
    if (argc < 2) {  //** No args: print usage and quit
        printf("\n");
        printf("lio_ls LIO_COMMON_OPTIONS [-rd recurse_depth] [-ns] LIO_PATH_OPTIONS\n");
        lio_print_options(stdout);
        lio_print_path_options(stdout);
        printf("\n");
        printf("    -rd recurse_depth - Max recursion depth on directories. Defaults to %d\n", recurse_depth);
        printf("    -t  object_types  - Types of objects to list bitwise OR of 1=Files, 2=Directories, 4=symlink, 8=hardlink.  Default is %d.\n", obj_types);
        printf("    -ns               - Don't sort the output\n");
        return(1);
    }

    lio_init(&argc, &argv);

    //*** Parse the args
    rp_single = ro_single = NULL;
    nosort = 0;
    rg_mode = lio_parse_path_options(&argc, argv, lio_gc->auto_translate, &tuple, &rp_single, &ro_single);

    //** Option loop: repeats until a pass consumes nothing (start_option == i)
    i=1;
    do {
        start_option = i;

        if (strcmp(argv[i], "-rd") == 0) { //** Recurse depth
            i++;
            recurse_depth = atoi(argv[i]);
            i++;
        } else if (strcmp(argv[i], "-t") == 0) {  //** Object types
            i++;
            obj_types = atoi(argv[i]);
            i++;
        } else if (strcmp(argv[i], "-ns") == 0) {  //** Don't sort the output
            i++;
            nosort = 1;
        }
    } while ((start_option < i) && (i<argc));
    start_index = i;

    if (rg_mode == 0) {
        if (i>=argc) {
            //** NOTE(review): returns without lio_shutdown()/opque cleanup -- confirm acceptable
            info_printf(lio_ifd, 0, "Missing directory!\n");
            return(2);
        }
    } else {
        start_index--;  //** The 1st entry will be the rp created in lio_parse_path_options
    }

    fcount = 0;
    q = new_opque();
    //** Keys/values are owned by the ls_entry_t's, so the table frees neither
    table = tbx_list_create(0, &tbx_list_string_compare, NULL, tbx_list_no_key_free, tbx_list_no_data_free);

    for (j=start_index; j<argc; j++) {  //** One pass per path argument
        log_printf(5, "path_index=%d argc=%d rg_mode=%d\n", j, argc, rg_mode);
        if (rg_mode == 0) {
            //** Create the simple path iterator
            tuple = lio_path_resolve(lio_gc->auto_translate, argv[j]);
            lio_path_wildcard_auto_append(&tuple);
            rp_single = os_path_glob2regex(tuple.path);
        } else {
            rg_mode = 0;  //** Use the initial rp (only for the first iteration)
        }

        //** Negative v_size => iterator allocates up to max_attr bytes per value
        for (i=0; i<n_keys; i++) v_size[i] = -tuple.lc->max_attr;
        memset(vals, 0, sizeof(vals));
        it = lio_create_object_iter_alist(tuple.lc, tuple.creds, rp_single, ro_single, obj_types, recurse_depth, keys, (void **)vals, v_size, n_keys);
        if (it == NULL) {
            info_printf(lio_ifd, 0, "ERROR: Failed with object_iter creation\n");
            return_code = EIO;
            goto finished;
        }

        while ((ftype = lio_next_object(tuple.lc, it, &fname, &prefix_len)) > 0) {
            //** Each hit gets its own ls_entry_t which takes ownership of fname and the attr values
            tbx_type_malloc_clear(lse, ls_entry_t, 1);
            lse->fname = fname;
            lse->ftype = ftype;
            lse->prefix_len = prefix_len;
            memcpy(lse->v_size, v_size, sizeof(v_size));
            memcpy(lse->vals, vals, sizeof(vals));

            //** Reset the scratch arrays for the next object
            for (i=0; i<n_keys; i++) v_size[i] = -tuple.lc->max_attr;
            memset(vals, 0, sizeof(vals));

            //** Check if we have a link.  If so we need to resolve the link path
            if ((ftype & OS_OBJECT_SYMLINK) > 0) {
                lse->link_size = -64*1024;
                gop = gop_lio_get_attr(tuple.lc, tuple.creds, lse->fname, NULL, "os.link", (void **)&(lse->link), &(lse->link_size));
                gop_set_private(gop, lse);
                opque_add(q, gop);
                //** Unsorted output prints immediately, so the link must resolve now
                if (nosort == 1) opque_waitall(q);
            }

            if (fcount == 0) {  //** Print the header before the first entry
                info_printf(lio_ifd, 0, " Perms Ref Owner Size Creation date Modify date Filename [-> link]\n");
                info_printf(lio_ifd, 0, "---------- --- ---------- ---------- ------------------------ ------------------------ ------------------------------\n");
            }
            fcount++;

            if (nosort == 1) {
                ls_format_entry(lio_ifd, lse);
            } else {
                tbx_list_insert(table, lse->fname, lse);  //** Sorted mode: print later
            }
        }

        lio_destroy_object_iter(tuple.lc, it);

        lio_path_release(&tuple);
        if (rp_single != NULL) {
            os_regex_table_destroy(rp_single);
            rp_single = NULL;
        }
        if (ro_single != NULL) {
            os_regex_table_destroy(ro_single);
            ro_single = NULL;
        }
    }

    //** Wait for any readlinks to complete
    err = (opque_task_count(q) > 0) ? opque_waitall(q) : OP_STATE_SUCCESS;
    if (err != OP_STATE_SUCCESS) {
        info_printf(lio_ifd, 0, "ERROR: Failed with readlink operation!\n");
        return_code = EIO;
    }

    //** Now sort and print things if needed
    if (nosort == 0) {
        lit = tbx_list_iter_search(table, NULL, 0);
        while ((tbx_list_next(&lit, (tbx_list_key_t **)&fname, (tbx_list_data_t **)&lse)) == 0) {
            ls_format_entry(lio_ifd, lse);
        }
    }

    tbx_list_destroy(table);

    if (fcount == 0) return_code = 2;  //** Nothing matched
finished:
    opque_free(q, OP_DESTROY);

    lio_shutdown();

    return(return_code);
}