Example no. 1
0
void gop_tp_context_destroy(gop_thread_pool_context_t *tpc)
{
    int i;
    log_printf(15, "gop_tp_context_destroy: Shutting down! count=%d\n", _tp_context_count);

    log_printf(15, "tpc->name=%s  high=%zu idle=%zu\n", tpc->name, apr_thread_pool_threads_high_count(tpc->tp), apr_thread_pool_threads_idle_timeout_count(tpc->tp));

    //** Tear down the portal context and the APR thread pool backing this context
    gop_hp_context_destroy(tpc->pc);
    apr_thread_pool_destroy(tpc->tp);

    //** The last context out prints the optional stats and frees the shared lock and pool
    if (tbx_atomic_dec(_tp_context_count) == 0) {
        if (_tp_stats > 0) thread_pool_stats_print();
        apr_thread_mutex_destroy(_tp_lock);
        apr_pool_destroy(_tp_pool);
    }

    if (tpc->name != NULL) free(tpc->name);

    //** Release the per-recursion-depth reserve stacks and the overflow tracking array
    for (i=0; i<tpc->recursion_depth; i++) {
        tbx_stack_free(tpc->reserve_stack[i], 0);
    }
    free(tpc->reserve_stack);
    free(tpc->overflow_running_depth);

    free(tpc);
}
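
Below is a minimal, self-contained sketch of the shutdown pattern above, using plain C11 atomics instead of the tbx_atomic_*/APR wrappers. The names (my_context_t, my_context_create, my_context_destroy, context_count) are hypothetical and only illustrate the idea that the last context to be destroyed also tears down the shared global state:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static atomic_int context_count;          //** Number of live contexts (hypothetical global)

typedef struct { char *name; } my_context_t;

my_context_t *my_context_create(const char *name)
{
    my_context_t *ctx = malloc(sizeof(*ctx));
    ctx->name = strdup(name);
    atomic_fetch_add(&context_count, 1);  //** Same bookkeeping role as _tp_context_count
    return ctx;
}

void my_context_destroy(my_context_t *ctx)
{
    //** Per-context resources are always released
    free(ctx->name);
    free(ctx);

    //** Whoever drops the count to zero also cleans up the shared state
    //** (stats, lock, and pool in the real code)
    if (atomic_fetch_sub(&context_count, 1) == 1) {
        printf("last context destroyed -- shared cleanup runs here\n");
    }
}

int main(void)
{
    my_context_t *a = my_context_create("a");
    my_context_t *b = my_context_create("b");
    my_context_destroy(a);
    my_context_destroy(b);   //** Only this call performs the shared cleanup
    return 0;
}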
Example no. 2
0
void modify_hpc_thread_count(portal_context_t *hpc, int n)
{
//** Lock-based version of the same update, left commented out:
//  apr_thread_mutex_lock(hpc->lock);
//  hpc->running_threads = hpc->running_threads + n;
//  apr_thread_mutex_unlock(hpc->lock);

    if (n == -1) {
        tbx_atomic_dec(hpc->running_threads);
    } else if (n == 1) {
        tbx_atomic_inc(hpc->running_threads);
    } else {
        assert((n == 1) || (n == -1));  //** Only +1/-1 adjustments are supported
    }
}
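
For reference, the same +/-1 counter update written with standard C11 atomics; this is a sketch of the pattern the function implements, not the tbx API, and running_threads/adjust_running_threads are made-up names:

#include <assert.h>
#include <stdatomic.h>

static atomic_long running_threads;       //** Hypothetical stand-in for hpc->running_threads

//** Adjust the running-thread count by +1 or -1 without taking a lock,
//** matching the spirit of the commented-out mutex version above.
void adjust_running_threads(int n)
{
    assert(n == 1 || n == -1);            //** Reject any other delta up front
    if (n == 1) {
        atomic_fetch_add(&running_threads, 1);
    } else {
        atomic_fetch_sub(&running_threads, 1);
    }
}

int main(void)
{
    adjust_running_threads(1);
    adjust_running_threads(-1);
    return 0;
}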
Example no. 3
0
void _tp_submit_op(void *arg, gop_op_generic_t *gop)
{
    gop_thread_pool_op_t *op = gop_get_tp(gop);
    apr_status_t aerr;
    int running;

    log_printf(15, "_tp_submit_op: gid=%d\n", gop_id(gop));

    tbx_atomic_inc(op->tpc->n_submitted);   //** Bookkeeping: total ops submitted through this context
    op->via_submit = 1;
    running = tbx_atomic_inc(op->tpc->n_running) + 1;  //** Claim a running slot for the concurrency check below

    if (running > op->tpc->max_concurrency) {
        apr_thread_mutex_lock(_tp_lock);
        tbx_atomic_inc(op->tpc->n_overflow);
        if (op->depth >= op->tpc->recursion_depth) {  //** Check if we hit the max recursion
            log_printf(0, "GOP has a recursion depth >= max specified in the TP!!!! gop depth=%d  TPC max=%d\n", op->depth, op->tpc->recursion_depth);
            tbx_stack_push(op->tpc->reserve_stack[op->tpc->recursion_depth-1], gop);  //** Need to do the push and overflow check
        } else {
            tbx_stack_push(op->tpc->reserve_stack[op->depth], gop);  //** Need to do the push and overflow check
        }
        gop = _tpc_overflow_next(op->tpc);             //** along with the submit or rollback atomically

        if (gop) {
            op = gop_get_tp(gop);
            aerr = apr_thread_pool_push(op->tpc->tp,(void *(*)(apr_thread_t *, void *))thread_pool_exec_fn, gop, APR_THREAD_TASK_PRIORITY_NORMAL, NULL);
        } else {
            tbx_atomic_dec(op->tpc->n_running);  //** We didn't actually submit anything
            if (op->overflow_slot != -1) {   //** Check if we need to undo our overflow slot
                op->tpc->overflow_running_depth[op->overflow_slot] = -1;
            }

            aerr = APR_SUCCESS;
        }
        apr_thread_mutex_unlock(_tp_lock);
    } else {
        aerr = apr_thread_pool_push(op->tpc->tp, (void *(*)(apr_thread_t *, void *))thread_pool_exec_fn, gop, APR_THREAD_TASK_PRIORITY_NORMAL, NULL);
    }

    if (aerr != APR_SUCCESS) {
        log_printf(0, "ERROR submiting task!  aerr=%d gid=%d\n", aerr, gop_id(gop));
    }
}
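
A stripped-down sketch of the admission logic in _tp_submit_op, assuming C11 atomics and toy park_task()/submit_task() stubs in place of tbx_stack_push()/apr_thread_pool_push(); it shows only the claim-a-slot, park-and-roll-back structure, not the per-depth reserve stacks or the _tpc_overflow_next() retry:

#include <stdatomic.h>
#include <stdio.h>

#define MAX_CONCURRENCY 2                 //** Plays the role of tpc->max_concurrency

static atomic_int n_running;

static void park_task(int id)   { printf("task %d parked for later\n", id); }
static void submit_task(int id) { printf("task %d submitted to the pool\n", id); }

//** Claim a running slot; if that pushes us over the limit, park the task and
//** give the slot back.  The real code additionally tries to pull a parked op
//** it can legally run (via _tpc_overflow_next) before rolling the count back.
static void submit_with_backpressure(int id)
{
    int running = atomic_fetch_add(&n_running, 1) + 1;  //** fetch_add returns the old value

    if (running > MAX_CONCURRENCY) {
        park_task(id);                    //** Real code: tbx_stack_push(reserve_stack[depth], gop)
        atomic_fetch_sub(&n_running, 1);  //** We didn't actually submit anything
        return;
    }
    submit_task(id);                      //** Real code: apr_thread_pool_push(...)
}

int main(void)
{
    for (int i = 0; i < 4; i++) submit_with_backpressure(i);
    return 0;
}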