Example #1
void thread_pool_stats_make()
{
    int i;

    _tp_concurrent_max = 0;
    for (i=0; i<TP_MAX_DEPTH; i++) {
        _tp_depth_concurrent_max[i] = 0;
        tbx_atomic_set(_tp_depth_total[i], 0);
        tbx_atomic_set(_tp_depth_concurrent[i], 0);
    }
}
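The increment side of these counters is not shown on this page. Below is a minimal sketch of how they might be bumped when an op starts at a given recursion depth; the helper name and the depth argument are assumptions for illustration, and only tbx_atomic_inc() itself also appears in Example #2.

/* Hedged sketch, not code from the project: bump the per-depth counters that
 * thread_pool_stats_make() clears above. */
static void thread_pool_stats_op_start(int depth)
{
    if ((depth < 0) || (depth >= TP_MAX_DEPTH)) return;  //** Stay inside the stats arrays

    tbx_atomic_inc(_tp_depth_total[depth]);       //** Total ops ever seen at this depth
    tbx_atomic_inc(_tp_depth_concurrent[depth]);  //** Ops currently running at this depth
}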
Example #2
gop_thread_pool_context_t *gop_tp_context_create(char *tp_name, int min_threads, int max_threads, int max_recursion_depth)
{
//  char buffer[1024];
    gop_thread_pool_context_t *tpc;
    apr_interval_time_t dt;
    int i;

    log_printf(15, "count=%d\n", _tp_context_count);

    tbx_type_malloc_clear(tpc, gop_thread_pool_context_t, 1);

    if (tbx_atomic_inc(_tp_context_count) == 0) {
        apr_pool_create(&_tp_pool, NULL);
        apr_thread_mutex_create(&_tp_lock, APR_THREAD_MUTEX_DEFAULT, _tp_pool);
        thread_pool_stats_init();
    }

    if (thread_local_depth_key == NULL) apr_threadkey_private_create(&thread_local_depth_key, _thread_pool_destructor, _tp_pool);
    tpc->pc = gop_hp_context_create(&_tp_base_portal);  //** Really just used for the submit

    default_thread_pool_config(tpc);
    if (min_threads > 0) tpc->min_threads = min_threads;
    if (max_threads > 0) tpc->max_threads = max_threads + 1;  //** Add one for the recursion depth starting offset being 1
    tpc->recursion_depth = max_recursion_depth + 1;  //** The min recursion depth normally starts at 1, so add an extra level so the 0|1 starting offset doesn't matter
    tpc->max_concurrency = tpc->max_threads - tpc->recursion_depth;
    if (tpc->max_concurrency <= 0) {
        tpc->max_threads += 5 - tpc->max_concurrency;  //** Make sure we have at least 5 threads for work
        tpc->max_concurrency = tpc->max_threads - tpc->recursion_depth;
        log_printf(0, "Specified max threads and recursion depth don't work. Adjusting max_threads=%d\n", tpc->max_threads);
    }

    dt = tpc->min_idle * 1000000;
    assert_result(apr_thread_pool_create(&(tpc->tp), tpc->min_threads, tpc->max_threads, _tp_pool), APR_SUCCESS);
    apr_thread_pool_idle_wait_set(tpc->tp, dt);
    apr_thread_pool_threshold_set(tpc->tp, 0);

    tpc->name = (tp_name == NULL) ? NULL : strdup(tp_name);
    tbx_atomic_set(tpc->n_ops, 0);
    tbx_atomic_set(tpc->n_completed, 0);
    tbx_atomic_set(tpc->n_started, 0);
    tbx_atomic_set(tpc->n_submitted, 0);
    tbx_atomic_set(tpc->n_running, 0);

    tbx_type_malloc(tpc->overflow_running_depth, int, tpc->recursion_depth);
    tbx_type_malloc(tpc->reserve_stack, tbx_stack_t *, tpc->recursion_depth);
    for (i=0; i<tpc->recursion_depth; i++) {
        tpc->overflow_running_depth[i] = -1;
        tpc->reserve_stack[i] = tbx_stack_new();
    }

    return(tpc);
}
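A minimal usage sketch for the function above. The pool name and sizes are arbitrary, and gop_tp_context_destroy() is assumed to exist as the matching teardown call; neither appears on this page.

/* Hedged usage sketch: create a context with explicit sizing, submit work, then tear it down. */
static void tp_context_example(void)
{
    gop_thread_pool_context_t *tpc;

    tpc = gop_tp_context_create("example_tp", 2, 20, 5);  //** 2-20 threads, max recursion depth 5

    //** ... build thread pool ops and submit them against tpc ...

    gop_tp_context_destroy(tpc);  //** Assumed counterpart of gop_tp_context_create()
}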
Example #3
File: data_block.c  Project: accre/lstore
lio_data_block_t *data_block_deserialize_text(lio_service_manager_t *sm, ex_id_t id, lio_exnode_exchange_t *exp)
{
    int bufsize=1024;
    char capgrp[bufsize];
    char *text, *etext;
    int i;
    lio_data_block_t *b;
    lio_data_service_fn_t *ds;
    tbx_inip_file_t *cfd;
    tbx_inip_group_t *cg;
    tbx_inip_element_t *ele;
    char *key;
    lio_data_block_attr_t *attr;

    //** Parse the ini text
    cfd = exp->text.fd;

    //** Find the corresponding cap
    snprintf(capgrp, bufsize, "block-" XIDT, id);
    cg = tbx_inip_group_find(cfd, capgrp);
    if (cg == NULL) {
        log_printf(0, "data_block_deserialize_text: id=" XIDT " not found!\n", id);
        return(NULL);
    }

    //** Determine the type and make a blank block
    text = tbx_inip_get_string(cfd, capgrp, "type", "");
    ds = lio_lookup_service(sm, DS_SM_RUNNING, text);
    if (ds == NULL) {
        log_printf(0, "data_block_deserialize_text: b->id=" XIDT " Unknown data service type=%s!\n", id, text);
        free(text);
        return(NULL);
    }
    free(text);

    //** Make the space
    b = data_block_create_with_id(ds, id);

    //** and parse the fields
    b->rid_key = tbx_inip_get_string(cfd, capgrp, "rid_key", "");
    b->size = tbx_inip_get_integer(cfd, capgrp, "size", b->size);
    b->max_size = tbx_inip_get_integer(cfd, capgrp, "max_size", b->max_size);
    i = tbx_inip_get_integer(cfd, capgrp, "ref_count", b->ref_count);
    tbx_atomic_set(b->ref_count, 0);
    tbx_atomic_set(b->initial_ref_count, i);
    etext = tbx_inip_get_string(cfd, capgrp, "read_cap", "");
    ds_set_cap(b->ds, b->cap, DS_CAP_READ, tbx_stk_unescape_text('\\', etext));
    free(etext);
    etext = tbx_inip_get_string(cfd, capgrp, "write_cap", "");
    ds_set_cap(b->ds, b->cap, DS_CAP_WRITE, tbx_stk_unescape_text('\\', etext));
    free(etext);
    etext = tbx_inip_get_string(cfd, capgrp, "manage_cap", "");
    ds_set_cap(b->ds, b->cap, DS_CAP_MANAGE, tbx_stk_unescape_text('\\', etext));
    free(etext);

    //** Now cycle through any misc attributes set
    ele = tbx_inip_ele_first(tbx_inip_group_find(cfd, capgrp));
    while (ele != NULL) {
        key = tbx_inip_ele_get_key(ele);

        //** Ignore the builtin commands
        if ((strcmp("rid_key", key) != 0) && (strcmp("size", key) != 0) && (strcmp("max_size", key) != 0) && (strcmp("type", key) != 0) &&
                (strcmp("ref_count", key) != 0) && (strcmp("read_cap", key) != 0) && (strcmp("write_cap", key) != 0) && (strcmp("manage_cap", key) != 0)) {
            tbx_type_malloc(attr, lio_data_block_attr_t, 1);
            attr->key = tbx_stk_unescape_text('\\', tbx_inip_ele_get_key(ele));
            attr->value = tbx_stk_unescape_text('\\', tbx_inip_ele_get_value(ele));
            if (b->attr_stack == NULL) b->attr_stack = tbx_stack_new();
            tbx_stack_push(b->attr_stack, attr);
        }

        ele = tbx_inip_ele_next(ele);
    }

    return(b);
}
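A hedged caller sketch for the deserializer above. The service manager, block id, and exnode exchange are assumed to be supplied by the surrounding exnode deserialization code; only the NULL check and the fields read come from the function itself.

static lio_data_block_t *load_block(lio_service_manager_t *sm, ex_id_t id, lio_exnode_exchange_t *exp)
{
    lio_data_block_t *b;

    b = data_block_deserialize_text(sm, id, exp);
    if (b == NULL) return(NULL);  //** Missing [block-<id>] group or unknown data service type

    log_printf(15, "Loaded block-" XIDT " rid_key=%s\n", id, b->rid_key);
    return(b);
}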