Example No. 1
gop_thread_pool_context_t *gop_tp_context_create(char *tp_name, int min_threads, int max_threads, int max_recursion_depth)
{
//  char buffer[1024];
    gop_thread_pool_context_t *tpc;
    apr_interval_time_t dt;
    int i;

    log_printf(15, "count=%d\n", _tp_context_count);

    tbx_type_malloc_clear(tpc, gop_thread_pool_context_t, 1);

    if (tbx_atomic_inc(_tp_context_count) == 0) {
        apr_pool_create(&_tp_pool, NULL);
        apr_thread_mutex_create(&_tp_lock, APR_THREAD_MUTEX_DEFAULT, _tp_pool);
        thread_pool_stats_init();
    }

    if (thread_local_depth_key == NULL) apr_threadkey_private_create(&thread_local_depth_key,_thread_pool_destructor, _tp_pool);
    tpc->pc = gop_hp_context_create(&_tp_base_portal);  //** Really just used for the submit

    default_thread_pool_config(tpc);
    if (min_threads > 0) tpc->min_threads = min_threads;
    if (max_threads > 0) tpc->max_threads = max_threads + 1;  //** Add one for the recursion depth starting offset being 1
    tpc->recursion_depth = max_recursion_depth + 1;  //** The min recursion normally starts at 1 so add an extra level so we don't care about the 0|1 starting offset
    tpc->max_concurrency = tpc->max_threads - tpc->recursion_depth;
    if (tpc->max_concurrency <= 0) {
        tpc->max_threads += 5 - tpc->max_concurrency;  //** Make sure we have at least 5 threads for work
        tpc->max_concurrency = tpc->max_threads - tpc->recursion_depth;
        log_printf(0, "Specified max threads and recursion depth don't work. Adjusting max_threads=%d\n", tpc->max_threads);
    }

    dt = tpc->min_idle * 1000000;
    assert_result(apr_thread_pool_create(&(tpc->tp), tpc->min_threads, tpc->max_threads, _tp_pool), APR_SUCCESS);
    apr_thread_pool_idle_wait_set(tpc->tp, dt);
    apr_thread_pool_threshold_set(tpc->tp, 0);

    tpc->name = (tp_name == NULL) ? NULL : strdup(tp_name);
    tbx_atomic_set(tpc->n_ops, 0);
    tbx_atomic_set(tpc->n_completed, 0);
    tbx_atomic_set(tpc->n_started, 0);
    tbx_atomic_set(tpc->n_submitted, 0);
    tbx_atomic_set(tpc->n_running, 0);

    tbx_type_malloc(tpc->overflow_running_depth, int, tpc->recursion_depth);
    tbx_type_malloc(tpc->reserve_stack, tbx_stack_t *, tpc->recursion_depth);
    for (i=0; i<tpc->recursion_depth; i++) {
        tpc->overflow_running_depth[i] = -1;
        tpc->reserve_stack[i] = tbx_stack_new();
    }

    return(tpc);
}
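
Below is a minimal usage sketch for the constructor above. It assumes the appropriate GOP thread-pool header is included; the gop_tp_context_destroy() teardown call is an assumed counterpart and may be named differently in the actual API.

// Hypothetical usage sketch: create a pool with 2..20 threads and a max
// recursion depth of 5, then tear it down (the destroy name is an assumption).
gop_thread_pool_context_t *tpc = gop_tp_context_create("example_pool", 2, 20, 5);

// ... submit thread-pool operations against tpc here ...

gop_tp_context_destroy(tpc);  //** Assumed teardown counterpart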
Example No. 2
int data_block_set_attr(lio_data_block_t *b, char *key, char *val)
{
    lio_data_block_attr_t *attr;

    //** See if the key exists
    attr = db_find_key(b->attr_stack, key);

    if (attr == NULL) {  //** See if we need to add the attribute
        tbx_type_malloc_clear(attr, lio_data_block_attr_t, 1);
        attr->key = strdup(key);
    }

    if (attr->value != NULL) free(attr->value);  //** Free the old value
    attr->value = (val != NULL) ? strdup(val) : NULL;  //** Store the new one

    if (b->attr_stack == NULL) b->attr_stack = tbx_stack_new();
    tbx_stack_push(b->attr_stack, attr);

    return(0);
}
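
A short usage sketch for the setter above, assuming a lio_data_block_t obtained from the module's block constructor (the data_block_create() name and the 'ds' data service are assumptions):

// Hypothetical usage sketch: 'ds' is an already-initialized data service and
// data_block_create() is an assumed constructor name.
lio_data_block_t *b = data_block_create(ds);

data_block_set_attr(b, "owner", "alice");   //** Adds the key with value "alice"
data_block_set_attr(b, "owner", "bob");     //** Frees "alice" and stores "bob"
data_block_set_attr(b, "owner", NULL);      //** Clears the value but keeps the key
//** Note: as written, the function pushes the attr onto attr_stack on every call.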
Example No. 3
void shutdown_direct(host_portal_t *hp)
{
    host_portal_t *shp;
    host_connection_t *hc;

    if (tbx_stack_count(hp->direct_list) == 0) return;

    tbx_stack_move_to_top(hp->direct_list);
    while ((shp = (host_portal_t *)tbx_stack_pop(hp->direct_list)) != NULL) {
        hportal_lock(shp);
        _reap_hportal(shp, 0);  //** Clean up any closed connections

        if ((shp->n_conn == 0) && (tbx_stack_count(shp->que) == 0)) { //** Not in use so remove it
            tbx_stack_delete_current(hp->direct_list, 0, 0);  //** Already closed
        } else {     //** Force it to close
            tbx_stack_free(shp->que, 1);  //** Empty the que so we don't respawn connections
            shp->que = tbx_stack_new();

            tbx_stack_move_to_top(shp->conn_list);
            hc = (host_connection_t *)tbx_stack_get_current_data(shp->conn_list);

            hportal_unlock(shp);
            apr_thread_mutex_unlock(hp->context->lock);

            close_hc(hc, 0);

            apr_thread_mutex_lock(hp->context->lock);
            hportal_lock(shp);
        }

        hportal_unlock(shp);
        destroy_hportal(shp);

//     tbx_stack_move_to_top(hp->direct_list);
    }
}
Example No. 4
resource_service_fn_t *rs_remote_server_create(void *arg, tbx_inip_file_t *fd, char *section)
{
    service_manager_t *ess = (service_manager_t *)arg;
    resource_service_fn_t *rs;
    rs_remote_server_priv_t *rsrs;
    rs_create_t *rs_create;
    mq_command_table_t *ctable;
    char *stype, *ctype;

    if (section == NULL) section = "rs_remote_server";

    tbx_type_malloc_clear(rs, resource_service_fn_t, 1);
    tbx_type_malloc_clear(rsrs, rs_remote_server_priv_t, 1);
    rs->priv = (void *)rsrs;

    //** Make the locks and cond variables
    assert_result(apr_pool_create(&(rsrs->mpool), NULL), APR_SUCCESS);
    apr_thread_mutex_create(&(rsrs->lock), APR_THREAD_MUTEX_DEFAULT, rsrs->mpool);
    apr_thread_cond_create(&(rsrs->cond), rsrs->mpool);

    rsrs->pending = tbx_stack_new();
    memset(&(rsrs->my_map_version), 0, sizeof(rsrs->my_map_version));
    memset(&(rsrs->notify_map_version), 0, sizeof(rsrs->notify_map_version));
    rsrs->notify_map_version.lock = rsrs->lock;
    rsrs->notify_map_version.cond = rsrs->cond;

    //** Get the host name we bind to
    rsrs->hostname= tbx_inip_get_string(fd, section, "address", NULL);

    //** Start the child RS.   The update above should have dumped a RID config for it to load
    stype = tbx_inip_get_string(fd, section, "rs_local", NULL);
    if (stype == NULL) {  //** Oops missing child RS
        log_printf(0, "ERROR: Mising child RS  section=%s key=rs_local!\n", section);
        tbx_log_flush();
        free(stype);
        abort();
    }

    //** and load it
    ctype = tbx_inip_get_string(fd, stype, "type", RS_TYPE_SIMPLE);
    rs_create = lookup_service(ess, RS_SM_AVAILABLE, ctype);
    rsrs->rs_child = (*rs_create)(ess, fd, stype);
    if (rsrs->rs_child == NULL) {
        log_printf(1, "ERROR loading child RS!  type=%s section=%s\n", ctype, stype);
        tbx_log_flush();
        abort();
    }
    free(ctype);
    free(stype);

    //** Get the MQC
    rsrs->mqc = lookup_service(ess, ESS_RUNNING, ESS_MQ); assert(rsrs->mqc != NULL);

    //** Make the server portal
    rsrs->server_portal = mq_portal_create(rsrs->mqc, rsrs->hostname, MQ_CMODE_SERVER);
    ctable = mq_portal_command_table(rsrs->server_portal);
    mq_command_set(ctable, RSR_GET_RID_CONFIG_KEY, RSR_GET_RID_CONFIG_SIZE, rs, rsrs_rid_config_cb);
    mq_command_set(ctable, RSR_GET_UPDATE_CONFIG_KEY, RSR_GET_UPDATE_CONFIG_SIZE, rs, rsrs_rid_config_cb);
    mq_command_set(ctable, RSR_ABORT_KEY, RSR_ABORT_SIZE, rs, rsrs_abort_cb);
    mq_portal_install(rsrs->mqc, rsrs->server_portal);

    //** Launch the config changes thread
    tbx_thread_create_assert(&(rsrs->monitor_thread), NULL, rsrs_monitor_thread, (void *)rs, rsrs->mpool);

    //** Set up the fn ptrs.  This is just for syncing the rid configuration and state
    //** so very little is implemented
    rs->destroy_service = rs_remote_server_destroy;

    rs->type = RS_TYPE_REMOTE_SERVER;

    return(rs);
}
Example No. 5
op_generic_t *rs_simple_request(resource_service_fn_t *arg, data_attr_t *da, rs_query_t *rsq, data_cap_set_t **caps, rs_request_t *req, int req_size, rs_hints_t *hints_list, int fixed_size, int n_rid, int ignore_fixed_err, int timeout)
{
    rs_simple_priv_t *rss = (rs_simple_priv_t *)arg->priv;
    rsq_base_t *query_global = (rsq_base_t *)rsq;
    rsq_base_t *query_local;
    kvq_table_t kvq_global, kvq_local, *kvq;
    apr_hash_t *pick_from;
    rid_change_entry_t *rid_change;
    ex_off_t change;
    op_status_t status;
    opque_t *que;
    rss_rid_entry_t *rse;
    rsq_base_ele_t *q;
    int slot, rnd_off, i, j, k, i_unique, i_pickone, found, err_cnt, loop, loop_end;
    int state, *a, *b, *op_state, unique_size;
    tbx_stack_t *stack;

    log_printf(15, "rs_simple_request: START rss->n_rids=%d n_rid=%d req_size=%d fixed_size=%d\n", rss->n_rids, n_rid, req_size, fixed_size);

    for (i=0; i<req_size; i++) req[i].rid_key = NULL;  //** Clear the result in case of an error

    apr_thread_mutex_lock(rss->lock);
    i = _rs_simple_refresh(arg);  //** Check if we need to refresh the data
    if (i != 0) {
        apr_thread_mutex_unlock(rss->lock);
        return(gop_dummy(op_failure_status));
    }

    //** Determine the query sizes and make the processing arrays
    memset(&kvq, 0, sizeof(kvq));
    rs_query_count(arg, rsq, &i, &(kvq_global.n_unique), &(kvq_global.n_pickone));

    log_printf(15, "rs_simple_request: n_unique=%d n_pickone=%d\n", kvq_global.n_unique, kvq_global.n_pickone);
    tbx_log_flush();

    //** Make space for the unique and pickone fields.
    //** Make sure we have space for at least 1 more of each than we need to pass to the routines, even though the extras aren't used
    j = (kvq_global.n_pickone == 0) ? 1 : kvq_global.n_pickone + 1;
    tbx_type_malloc_clear(kvq_global.pickone, kvq_ele_t, j);

    unique_size = kvq_global.n_unique + 1;
    tbx_type_malloc_clear(kvq_global.unique, kvq_ele_t *, unique_size);
    log_printf(15, "MALLOC j=%d\n", unique_size);
    for (i=0; i<unique_size; i++) {
        tbx_type_malloc_clear(kvq_global.unique[i], kvq_ele_t, n_rid);
    }

    //** We don't allow these on the local but make a temp space anyway
    kvq_local.n_pickone = 0;
    tbx_type_malloc_clear(kvq_local.pickone, kvq_ele_t, 1);
    kvq_local.n_unique = 0;
    tbx_type_malloc_clear(kvq_local.unique, kvq_ele_t *, 1);
    tbx_type_malloc_clear(kvq_local.unique[0], kvq_ele_t, n_rid);

    status = op_success_status;

    que = new_opque();
    stack = tbx_stack_new();

    err_cnt = 0;
    found = 0;
//  max_size = (req_size > fixed_size) ? req_size : fixed_size;

    for (i=0; i < n_rid; i++) {
        found = 0;
        loop_end = 1;
        query_local = NULL;
        rnd_off = tbx_random_get_int64(0, rss->n_rids-1);
//rnd_off = 0;  //FIXME

        if (hints_list != NULL) {
            query_local = (rsq_base_t *)hints_list[i].local_rsq;
            if (query_local != NULL) {
                loop_end = 2;
                rs_query_count(arg, query_local, &j, &(kvq_local.n_unique), &(kvq_local.n_pickone));
                if ((kvq_local.n_unique != 0) && (kvq_local.n_pickone != 0)) {
                    log_printf(0, "Unsupported use of pickone/unique in local RSQ hints_list[%d]=%s!\n", i, hints_list[i].fixed_rid_key);
                    status.op_status = OP_STATE_FAILURE;
                    status.error_code = RS_ERROR_FIXED_NOT_FOUND;
                    hints_list[i].status = RS_ERROR_HINTS_INVALID_LOCAL;
                    err_cnt++;
                    continue;
                }
            }

            if (i<fixed_size) {  //** Use the fixed list for assignment
                rse = tbx_list_search(rss->rid_table, hints_list[i].fixed_rid_key);
                if (rse == NULL) {
                    log_printf(0, "Missing element in hints list[%d]=%s! Ignoring check.\n", i, hints_list[i].fixed_rid_key);
                    hints_list[i].status = RS_ERROR_FIXED_NOT_FOUND;
                    continue;   //** Skip the check
                }
                rnd_off = rse->slot;
            }
        }

        //** See if we use a restrictive list.  Usually used when rebalancing space
        pick_from = (hints_list != NULL) ? hints_list[i].pick_from : NULL;
        rid_change = NULL;
        change = 0;
        for (k=0; k<req_size; k++) {
            if (req[k].rid_index == i) {
                change += req[k].size;
            }
        }

        for (j=0; j<rss->n_rids; j++) {
            slot = (rnd_off+j) % rss->n_rids;
            rse = rss->random_array[slot];
            if (pick_from != NULL) {
                rid_change = apr_hash_get(pick_from, rse->rid_key, APR_HASH_KEY_STRING);
                log_printf(15, "PICK_FROM != NULL i=%d j=%d slot=%d rse->rid_key=%s rse->status=%d rid_change=%p\n", i, j, slot, rse->rid_key, rse->status, rid_change);

                if (rid_change == NULL) continue;  //** Not in our list so skip to the next
                ex_off_t delta = rid_change->delta - change;
                log_printf(15, "PICK_FROM != NULL i=%d j=%d slot=%d rse->rid_key=%s rse->status=%d rc->state=%d (" XOT ") > " XOT "????\n", i, j, slot, rse->rid_key, rse->status, rid_change->state, delta, rid_change->tolerance);

                //** Make sure we don't overshoot the target
                if (rid_change->state == 1) continue;   //** Already converged RID
                if (rid_change->delta <= 0) continue;   //** Need to move data OFF this RID
                if ((change - rid_change->delta) > rid_change->tolerance) continue;  //**delta>0 if we made it here
            }

            log_printf(15, "i=%d j=%d slot=%d rse->rid_key=%s rse->status=%d\n", i, j, slot, rse->rid_key, rse->status);
            if ((rse->status != RS_STATUS_UP) && (i>=fixed_size)) continue;  //** Skip this if disabled and not in the fixed list

            tbx_stack_empty(stack, 1);
            q = query_global->head;
            kvq = &kvq_global;
            for (loop=0; loop<loop_end; loop++) {
                i_unique = 0;
                i_pickone = 0;
                while (q != NULL) {
                    state = -1;
                    switch (q->op) {
                    case RSQ_BASE_OP_KV:
                        state = rss_test(q, rse, i, kvq->unique[i_unique], &(kvq->pickone[i_pickone]));
                        log_printf(0, "KV: key=%s val=%s i_unique=%d i_pickone=%d loop=%d rss_test=%d rse->rid_key=%s\n", q->key, q->val, i_unique, i_pickone, loop, state, rse->rid_key);
                        tbx_log_flush();
                        if ((q->key_op & RSQ_BASE_KV_UNIQUE) || (q->val_op & RSQ_BASE_KV_UNIQUE)) i_unique++;
                        if ((q->key_op & RSQ_BASE_KV_PICKONE) || (q->val_op & RSQ_BASE_KV_PICKONE)) i_pickone++;
                        break;
                    case RSQ_BASE_OP_NOT:
                        a = (int *)tbx_stack_pop(stack);
                        state = (*a == 0) ? 1 : 0;
                        //log_printf(0, "NOT(%d)=%d\n", *a, state);
                        free(a);
                        break;
                    case RSQ_BASE_OP_AND:
                        a = (int *)tbx_stack_pop(stack);
                        b = (int *)tbx_stack_pop(stack);
                        state = (*a) && (*b);
                        //log_printf(0, "%d AND %d = %d\n", *a, *b, state);
                        free(a);
                        free(b);
                        break;
                    case RSQ_BASE_OP_OR:
                        a = (int *)tbx_stack_pop(stack);
                        b = (int *)tbx_stack_pop(stack);
                        state = (*a) || (*b);
                        //log_printf(0, "%d OR %d = %d\n", *a, *b, state);
                        free(a);
                        free(b);
                        break;
                    }

                    tbx_type_malloc(op_state, int, 1);
                    *op_state = state;
                    tbx_stack_push(stack, (void *)op_state);
                    log_printf(15, " stack_size=%d loop=%d push state=%d\n",tbx_stack_count(stack), loop, state);
                    tbx_log_flush();
                    q = q->next;
                }

                if (query_local != NULL) {
                    q = query_local->head;
                    kvq = &kvq_local;
                }
            }

            op_state = (int *)tbx_stack_pop(stack);
            state = -1;
            if (op_state != NULL) {
                state = *op_state;
                free(op_state);
            }

            if (op_state == NULL) {
                log_printf(1, "rs_simple_request: ERROR processing i=%d EMPTY STACK\n", i);
                found = 0;
                status.op_status = OP_STATE_FAILURE;
                status.error_code = RS_ERROR_EMPTY_STACK;
            } else if  (state == 1) { //** Got one
                log_printf(15, "rs_simple_request: processing i=%d ds_key=%s\n", i, rse->ds_key);
                found = 1;
                if ((i<fixed_size) && hints_list) hints_list[i].status = RS_ERROR_OK;

                for (k=0; k<req_size; k++) {
                    if (req[k].rid_index == i) {
                        log_printf(15, "rs_simple_request: i=%d ds_key=%s, rid_key=%s size=" XOT "\n", i, rse->ds_key, rse->rid_key, req[k].size);
                        req[k].rid_key = strdup(rse->rid_key);
                        req[k].gop = ds_allocate(rss->ds, rse->ds_key, da, req[k].size, caps[k], timeout);
                        opque_add(que, req[k].gop);
                    }
                }

                if (rid_change != NULL) { //** Flag that I'm tweaking things.  The caller does the source pending/delta half
                    rid_change->delta -= change;
                    rid_change->state = ((llabs(rid_change->delta) <= rid_change->tolerance) || (rid_change->tolerance == 0)) ? 1 : 0;
                }
                break;  //** Got one so exit the RID scan and start the next one
            } else if (i<fixed_size) {  //** This should have worked so flag an error
                if (hints_list) {
                   log_printf(1, "Match fail in fixed list[%d]=%s!\n", i, hints_list[i].fixed_rid_key);
                   hints_list[i].status = RS_ERROR_FIXED_MATCH_FAIL;
                } else {
                   log_printf(1, "Match fail in fixed list and no hints are provided!\n");
                }
                status.op_status = OP_STATE_FAILURE;
                status.error_code = RS_ERROR_FIXED_MATCH_FAIL;
                if (ignore_fixed_err == 0) err_cnt++;
                break;  //** Skip to the next in the list
            } else {
                found = 0;
            }
        }

        if ((found == 0) && (i>=fixed_size)) break;

    }


    //** Clean up
    log_printf(15, "FREE j=%d\n", unique_size);
    for (i=0; i<unique_size; i++) {
        free(kvq_global.unique[i]);
    }
    free(kvq_global.unique);
    free(kvq_global.pickone);

    free(kvq_local.unique[0]);
    free(kvq_local.unique);
    free(kvq_local.pickone);

    tbx_stack_free(stack, 1);

    log_printf(15, "rs_simple_request: END n_rid=%d\n", n_rid);

//callback_t *cb = (callback_t *)que->qd.list->top->data;
//op_generic_t *gop = (op_generic_t *)cb->priv;
//log_printf(15, "top gid=%d reg=%d\n", gop_id(gop), gop_id(req[0].gop));

    apr_thread_mutex_unlock(rss->lock);

    if ((found == 0) || (err_cnt>0)) {
        opque_free(que, OP_DESTROY);

        if (status.error_code == 0) {
            log_printf(1, "rs_simple_request: Can't find enough RIDs! requested=%d found=%d err_cnt=%d\n", n_rid, found, err_cnt);
            status.op_status = OP_STATE_FAILURE;
            status.error_code = RS_ERROR_NOT_ENOUGH_RIDS;
        }
        return(gop_dummy(status));
    }

    return(opque_get_gop(que));
}
Example No. 6
lio_data_block_t *data_block_deserialize_text(lio_service_manager_t *sm, ex_id_t id, lio_exnode_exchange_t *exp)
{
    int bufsize=1024;
    char capgrp[bufsize];
    char *text, *etext;
    int i;
    lio_data_block_t *b;
    lio_data_service_fn_t *ds;
    tbx_inip_file_t *cfd;
    tbx_inip_group_t *cg;
    tbx_inip_element_t *ele;
    char *key;
    lio_data_block_attr_t *attr;

    //** Parse the ini text
    cfd = exp->text.fd;

    //** Find the corresponding cap
    snprintf(capgrp, bufsize, "block-" XIDT, id);
    cg = tbx_inip_group_find(cfd, capgrp);
    if (cg == NULL) {
        log_printf(0, "data_block_deserialize_text: id=" XIDT " not found!\n", id);
        return(NULL);
    }

    //** Determine the type and make a blank block
    text = tbx_inip_get_string(cfd, capgrp, "type", "");
    ds = lio_lookup_service(sm, DS_SM_RUNNING, text);
    if (ds == NULL) {
        log_printf(0, "data_block_deserialize_text: b->id=" XIDT " Unknown data service tpye=%s!\n", id, text);
        return(NULL);;
    }
    free(text);

    //** Make the space
    b = data_block_create_with_id(ds, id);

    //** and parse the fields
    b->rid_key = tbx_inip_get_string(cfd, capgrp, "rid_key", "");
    b->size = tbx_inip_get_integer(cfd, capgrp, "size", b->size);
    b->max_size = tbx_inip_get_integer(cfd, capgrp, "max_size", b->max_size);
    i = tbx_inip_get_integer(cfd, capgrp, "ref_count", b->ref_count);
    tbx_atomic_set(b->ref_count, 0);
    tbx_atomic_set(b->initial_ref_count, i);
    etext = tbx_inip_get_string(cfd, capgrp, "read_cap", "");
    ds_set_cap(b->ds, b->cap, DS_CAP_READ, tbx_stk_unescape_text('\\', etext));
    free(etext);
    etext = tbx_inip_get_string(cfd, capgrp, "write_cap", "");
    ds_set_cap(b->ds, b->cap, DS_CAP_WRITE, tbx_stk_unescape_text('\\', etext));
    free(etext);
    etext = tbx_inip_get_string(cfd, capgrp, "manage_cap", "");
    ds_set_cap(b->ds, b->cap, DS_CAP_MANAGE, tbx_stk_unescape_text('\\', etext));
    free(etext);

    //** Now cycle through any misc attributes set
    ele = tbx_inip_ele_first(tbx_inip_group_find(cfd, capgrp));
    while (ele != NULL) {
        key = tbx_inip_ele_get_key(ele);

        //** Ignore the built-in keys
        if ((strcmp("rid_key", key) != 0) && (strcmp("size", key) != 0) && (strcmp("max_size", key) != 0) && (strcmp("type", key) != 0) &&
                (strcmp("ref_count", key) != 0) && (strcmp("read_cap", key) != 0) && (strcmp("write_cap", key) != 0) && (strcmp("manage_cap", key) != 0)) {
            tbx_type_malloc(attr, lio_data_block_attr_t, 1);
            attr->key = tbx_stk_unescape_text('\\', tbx_inip_ele_get_key(ele));
            attr->value = tbx_stk_unescape_text('\\', tbx_inip_ele_get_value(ele));
            if (b->attr_stack == NULL) b->attr_stack = tbx_stack_new();
            tbx_stack_push(b->attr_stack, attr);
        }

        ele = tbx_inip_ele_next(ele);
    }

    return(b);
}
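
A hedged sketch of driving the deserializer above. It assumes the exnode exchange already carries the parsed INI file in exp->text.fd; lio_exnode_exchange_text_parse() is an assumed helper name for building that exchange from serialized text, and 'sm' stands for the running service manager.

// Hypothetical usage sketch: rebuild a data block from serialized exnode text.
// lio_exnode_exchange_text_parse() is an assumed name; 12345 stands in for an
// id that was serialized under the [block-12345] group.
lio_exnode_exchange_t *exp = lio_exnode_exchange_text_parse(serialized_text);
ex_id_t id = 12345;

lio_data_block_t *b = data_block_deserialize_text(sm, id, exp);
if (b == NULL) {
    log_printf(0, "Failed to deserialize block id=" XIDT "\n", id);
}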
Example No. 7
host_portal_t *create_hportal(portal_context_t *hpc, void *connect_context, char *hostport, int min_conn, int max_conn, apr_time_t dt_connect)
{
    host_portal_t *hp;

    log_printf(15, "create_hportal: hpc=%p\n", hpc);
    tbx_type_malloc_clear(hp, host_portal_t, 1);
    assert_result(apr_pool_create(&(hp->mpool), NULL), APR_SUCCESS);

    char host[sizeof(hp->host)];
    int port;
    char *hp2 = strdup(hostport);
    char *bstate;
    int fin;

    host[0] = '\0';

    strncpy(host, tbx_stk_string_token(hp2, HP_HOSTPORT_SEPARATOR, &bstate, &fin), sizeof(host)-1);
    host[sizeof(host)-1] = '\0';
    port = atoi(bstate);
    free(hp2);
    log_printf(15, "create_hportal: hostport: %s host=%s port=%d min=%d max=%d dt=" TT "\n", hostport, host, port, min_conn, max_conn, dt_connect);

    strncpy(hp->host, host, sizeof(hp->host)-1);
    hp->host[sizeof(hp->host)-1] = '\0';

    //** Check if we can resolve the host's IP address
    char in_addr[6];
    if (tbx_dnsc_lookup(host, in_addr, NULL) != 0) {
        log_printf(1, "create_hportal: Can\'t resolve host address: %s:%d\n", host, port);
        hp->invalid_host = 0;
//     hp->invalid_host = 1;
    } else {
        hp->invalid_host = 0;
    }

    hp->port = port;
    snprintf(hp->skey, sizeof(hp->skey), "%s", hostport);
    hp->connect_context = hpc->fn->dup_connect_context(connect_context);

    hp->context = hpc;
    hp->min_conn = min_conn;
    hp->max_conn = max_conn;
    hp->dt_connect = dt_connect;
    hp->sleeping_conn = 0;
    hp->workload = 0;
    hp->executing_workload = 0;
    hp->cmds_processed = 0;
    hp->n_conn = 0;
    hp->conn_list = tbx_stack_new();
    hp->closed_que = tbx_stack_new();
    hp->que = tbx_stack_new();
    hp->direct_list = tbx_stack_new();
    hp->pause_until = 0;
    hp->stable_conn = max_conn;
    hp->closing_conn = 0;
    hp->failed_conn_attempts = 0;
    hp->successful_conn_attempts = 0;
    hp->abort_conn_attempts = hpc->abort_conn_attempts;

    apr_thread_mutex_create(&(hp->lock), APR_THREAD_MUTEX_DEFAULT, hp->mpool);
    apr_thread_cond_create(&(hp->cond), hp->mpool);

    return(hp);
}
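
A usage sketch for the constructor above. It assumes the portal_context_t was built elsewhere with its fn->dup_connect_context callback populated (otherwise the call dereferences a NULL function pointer), and that the callback tolerates a NULL connect context.

// Hypothetical usage sketch: 'hpc' is assumed to be a fully initialized
// portal_context_t from elsewhere in this library; the host:port string,
// connection limits, and 10s connect timeout are illustrative values.
host_portal_t *hp = create_hportal(hpc, NULL, "depot1.example.com:6714",
                                   1, 4, apr_time_from_sec(10));
// hp->skey keeps the full "host:port" string; hp->host and hp->port are split out.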
Example No. 8
void shutdown_hportal(portal_context_t *hpc)
{
    host_portal_t *hp;
    host_connection_t *hc;
    apr_hash_index_t *hi;
    void *val;

    log_printf(15, "shutdown_hportal: Shutting down the whole system\n");

//IFFY  apr_thread_mutex_lock(hpc->lock);

    //** First tell everyone to shutdown
    for (hi=apr_hash_first(hpc->pool, hpc->table); hi != NULL; hi = apr_hash_next(hi)) {
        apr_hash_this(hi, NULL, NULL, &val);
        hp = (host_portal_t *)val;
        hportal_lock(hp);

        log_printf(5, "before wait n_conn=%d tbx_stack_count(conn_list)=%d host=%s\n", hp->n_conn, tbx_stack_count(hp->conn_list), hp->skey);
        while (tbx_stack_count(hp->conn_list) != hp->n_conn) {
            hportal_unlock(hp);
            log_printf(5, "waiting for connections to finish starting.  host=%s closing_conn=%d n_conn=%d tbx_stack_count(conn_list)=%d\n", hp->skey, hp->closing_conn, hp->n_conn, tbx_stack_count(hp->conn_list));
            usleep(10000);
            hportal_lock(hp);
        }
        log_printf(5, "after wait n_conn=%d tbx_stack_count(conn_list)=%d\n", hp->n_conn, tbx_stack_count(hp->conn_list));

        tbx_stack_move_to_top(hp->conn_list);
        while ((hc = (host_connection_t *)tbx_stack_get_current_data(hp->conn_list)) != NULL) {
            tbx_stack_free(hp->que, 1);  //** Empty the que so we don't respawn connections
            hp->que = tbx_stack_new();
//        hportal_unlock(hp);

            lock_hc(hc);
            hc->shutdown_request = 1;
            apr_thread_cond_signal(hc->recv_cond);
            unlock_hc(hc);

//        hportal_lock(hp);
            tbx_stack_move_down(hp->conn_list);
        }

        hportal_unlock(hp);
    }


    //** Now go and clean up
    for (hi=apr_hash_first(hpc->pool, hpc->table); hi != NULL; hi = apr_hash_next(hi)) {
        apr_hash_this(hi, NULL, NULL, &val);
        hp = (host_portal_t *)val;
        apr_hash_set(hpc->table, hp->skey, APR_HASH_KEY_STRING, NULL);  //** This removes the key

        log_printf(15, "shutdown_hportal: Shutting down host=%s\n", hp->skey);

        hportal_lock(hp);

        log_printf(5, "closing_conn=%d n_conn=%d host=%s\n", hp->closing_conn, hp->n_conn, hp->host);
        _reap_hportal(hp, 0);  //** clean up any closed connections

        log_printf(5, "closing_conn=%d n_conn=%d\n", hp->closing_conn, hp->n_conn);
        while ((hp->closing_conn > 0) || (hp->n_conn > 0)) {
            log_printf(5, "waiting for connections to close.  host=%s closing_conn=%d n_conn=%d tbx_stack_count(conn_list)=%d\n", hp->skey, hp->closing_conn, hp->n_conn, tbx_stack_count(hp->conn_list));
            hportal_unlock(hp);
            usleep(10000);
            hportal_lock(hp);
        }

        shutdown_direct(hp);  //** Shutdown any direct connections

        tbx_stack_move_to_top(hp->conn_list);
        while ((hc = (host_connection_t *)tbx_stack_get_current_data(hp->conn_list)) != NULL) {
            tbx_stack_free(hp->que, 1);  //** Empty the que so we don't respawn connections
            hp->que = tbx_stack_new();
            hportal_unlock(hp);
            apr_thread_mutex_unlock(hpc->lock);

            close_hc(hc, 0);

            apr_thread_mutex_lock(hpc->lock);
            hportal_lock(hp);

            tbx_stack_move_to_top(hp->conn_list);
        }

        hportal_unlock(hp);

        destroy_hportal(hp);
    }

//IFFY  apr_thread_mutex_unlock(hpc->lock);

    return;
}