//** Submit a thread-pool op for execution, honoring the pool's max
//** concurrency and recursion-depth reservation scheme.
//**   arg - unused (signature dictated by the submit callback interface)
//**   gop - generic op wrapping the thread-pool op to run
//** If the pool is saturated the op is parked on the reserve stack for its
//** recursion depth and a runnable op (possibly a different one) is pulled
//** back out via _tpc_overflow_next() under _tp_lock, so the push and the
//** submit-or-rollback happen atomically with respect to other submitters.
//** Fix vs original: corrected "submiting" typo in the error log message.
void _tp_submit_op(void *arg, gop_op_generic_t *gop)
{
    gop_thread_pool_op_t *op = gop_get_tp(gop);
    apr_status_t aerr;
    int running;

    log_printf(15, "_tp_submit_op: gid=%d\n", gop_id(gop));

    tbx_atomic_inc(op->tpc->n_submitted);
    op->via_submit = 1;
    running = tbx_atomic_inc(op->tpc->n_running) + 1;

    if (running > op->tpc->max_concurrency) {
        apr_thread_mutex_lock(_tp_lock);
        tbx_atomic_inc(op->tpc->n_overflow);
        if (op->depth >= op->tpc->recursion_depth) { //** Check if we hit the max recursion
            log_printf(0, "GOP has a recursion depth >= max specified in the TP!!!! gop depth=%d TPC max=%d\n", op->depth, op->tpc->recursion_depth);
            //** Clamp to the deepest reserve stack so we don't index past the array
            tbx_stack_push(op->tpc->reserve_stack[op->tpc->recursion_depth-1], gop);  //** Need to do the push and overflow check
        } else {
            tbx_stack_push(op->tpc->reserve_stack[op->depth], gop);  //** Need to do the push and overflow check
        }
        gop = _tpc_overflow_next(op->tpc);  //** along with the submit or rollback atomically

        if (gop) {  //** Got a runnable op (may differ from the one we pushed)
            op = gop_get_tp(gop);
            aerr = apr_thread_pool_push(op->tpc->tp,(void *(*)(apr_thread_t *, void *))thread_pool_exec_fn, gop, APR_THREAD_TASK_PRIORITY_NORMAL, NULL);
        } else {
            tbx_atomic_dec(op->tpc->n_running);  //** We didn't actually submit anything
            if (op->overflow_slot != -1) {   //** Check if we need to undo our overflow slot
                op->tpc->overflow_running_depth[op->overflow_slot] = -1;
            }
            aerr = APR_SUCCESS;
        }
        apr_thread_mutex_unlock(_tp_lock);
    } else {  //** Plenty of headroom so submit directly
        aerr = apr_thread_pool_push(op->tpc->tp, (void *(*)(apr_thread_t *, void *))thread_pool_exec_fn, gop, APR_THREAD_TASK_PRIORITY_NORMAL, NULL);
    }

    if (aerr != APR_SUCCESS) {
        log_printf(0, "ERROR submitting task! aerr=%d gid=%d\n", aerr, gop_id(gop));
    }
}
//** Allocate, count, and initialize a new IBP operation for the context ic.
//** Caller owns the returned op.
ibp_op_t *new_ibp_op(ibp_context_t *ic)
{
    ibp_op_t *iop;

    tbx_type_malloc(iop, ibp_op_t, 1);  //** Grab the storage
    tbx_atomic_inc(ic->n_ops);          //** Bump the context's op counter
    ibp_op_init(ic, iop);               //** and set it to a clean state

    return(iop);
}
//** Create a new thread-pool context.
//**   tp_name             - Optional pool name (may be NULL); strdup'd if given.
//**   min_threads         - Minimum pool threads; values <= 0 keep the value
//**                         installed by default_thread_pool_config().
//**   max_threads         - Maximum pool threads; values <= 0 keep the default.
//**   max_recursion_depth - Maximum GOP recursion depth the pool supports.
//** Returns the newly allocated context.
gop_thread_pool_context_t *gop_tp_context_create(char *tp_name, int min_threads, int max_threads, int max_recursion_depth)
{
//    char buffer[1024];
    gop_thread_pool_context_t *tpc;
    apr_interval_time_t dt;
    int i;

    log_printf(15, "count=%d\n", _tp_context_count);

    tbx_type_malloc_clear(tpc, gop_thread_pool_context_t, 1);

    //** The first context created sets up the globals shared by all pools
    if (tbx_atomic_inc(_tp_context_count) == 0) {
        apr_pool_create(&_tp_pool, NULL);
        apr_thread_mutex_create(&_tp_lock, APR_THREAD_MUTEX_DEFAULT, _tp_pool);
        thread_pool_stats_init();
    }

    //** Per-thread key used to track the current recursion depth.
    //** NOTE(review): this check-then-create is not atomic; presumably
    //** context creation is serialized by the caller -- confirm.
    if (thread_local_depth_key == NULL) apr_threadkey_private_create(&thread_local_depth_key,_thread_pool_destructor, _tp_pool);

    tpc->pc = gop_hp_context_create(&_tp_base_portal);  //** Really just used for the submit

    default_thread_pool_config(tpc);
    if (min_threads > 0) tpc->min_threads = min_threads;
    if (max_threads > 0) tpc->max_threads = max_threads + 1;  //** Add one for the recursion depth starting offset being 1
    tpc->recursion_depth = max_recursion_depth + 1;  //** The min recusion normally starts at 1 so just slap an extra level and we don't care about 0|1 starting location
    tpc->max_concurrency = tpc->max_threads - tpc->recursion_depth;
    if (tpc->max_concurrency <= 0) {
        tpc->max_threads += 5 - tpc->max_concurrency;  //** MAke sure we have at least 5 threads for work
        tpc->max_concurrency = tpc->max_threads - tpc->recursion_depth;
        log_printf(0, "Specified max threads and recursion depth don't work. Adjusting max_threads=%d\n", tpc->max_threads);
    }

    dt = tpc->min_idle * 1000000;  //** Convert min_idle to the microseconds APR expects
    assert_result(apr_thread_pool_create(&(tpc->tp), tpc->min_threads, tpc->max_threads, _tp_pool), APR_SUCCESS);
    apr_thread_pool_idle_wait_set(tpc->tp, dt);
    apr_thread_pool_threshold_set(tpc->tp, 0);

    tpc->name = (tp_name == NULL) ? NULL : strdup(tp_name);
    tbx_atomic_set(tpc->n_ops, 0);
    tbx_atomic_set(tpc->n_completed, 0);
    tbx_atomic_set(tpc->n_started, 0);
    tbx_atomic_set(tpc->n_submitted, 0);
    tbx_atomic_set(tpc->n_running, 0);

    //** One overflow slot and one reserve stack per recursion depth level;
    //** -1 marks an unused overflow slot (see _tp_submit_op)
    tbx_type_malloc(tpc->overflow_running_depth, int, tpc->recursion_depth);
    tbx_type_malloc(tpc->reserve_stack, tbx_stack_t *, tpc->recursion_depth);
    for (i=0; i<tpc->recursion_depth; i++) {
        tpc->overflow_running_depth[i] = -1;
        tpc->reserve_stack[i] = tbx_stack_new();
    }

    return(tpc);
}
//** Push fn/arg directly onto the APR thread pool, bypassing the GOP
//** submit machinery (no concurrency/recursion accounting beyond n_direct).
//**   tpc - thread pool context
//**   fn  - APR thread start function to run
//**   arg - argument passed to fn
//** Returns 0 on success, 1 on failure.
//** Fix vs original: corrected "submiting" typo in the error log message.
int thread_pool_direct(gop_thread_pool_context_t *tpc, apr_thread_start_t fn, void *arg)
{
    int err = apr_thread_pool_push(tpc->tp, fn, arg, APR_THREAD_TASK_PRIORITY_NORMAL, NULL);

    tbx_atomic_inc(tpc->n_direct);
    log_printf(10, "tpd=%d\n", tbx_atomic_get(tpc->n_direct));
    if (err != APR_SUCCESS) {
        log_printf(0, "ERROR submitting task! err=%d\n", err);
    }

    return((err == APR_SUCCESS) ? 0 : 1);
}
//** Atomically adjust the portal context's running-thread count by n.
//** Only n == +1 or n == -1 is legal; any other value trips the assert.
void modify_hpc_thread_count(portal_context_t *hpc, int n)
{
    switch (n) {
    case -1:
        tbx_atomic_dec(hpc->running_threads);
        break;
    case 1:
        tbx_atomic_inc(hpc->running_threads);
        break;
    default:
        //** Unreachable for valid input: the expression is false here by construction
        assert((n == 1) || (n== -1));
        break;
    }
}
//** Build the host string used to identify an IBP depot connection.
//**   ic      - IBP context (supplies connection_mode and the RR counter)
//**   hoststr - Destination buffer, always NUL terminated on return
//**   n_host  - Size of hoststr in bytes
//**   host    - Source host name to copy
//**   rid     - Resource ID; its name is appended in IBP_CMODE_RID mode
//** Depending on the connection mode the host is suffixed with "#<RID>" or
//** a round-robin counter "#<n>" so connections spread across sockets.
void ibppc_form_host(ibp_context_t *ic, char *hoststr, int n_host, char *host, rid_t rid)
{
    int i, j, n;
    char rr[16];

//    log_printf(15, "HOST host=%s rid=%s cmode=%d\n", host, rid.name, ic->connection_mode);

    //** Everybody gets the host copied
    for (i=0 ; (i<n_host) && (host[i] != '\0') ; i++) hoststr[i] = host[i];
    if (i>=n_host-2) {  //** Space isn't big enough so truncate and return
        if (i<n_host) hoststr[i] = '\0';
        return;
    }

    //** If we make it here we have enough space for at least "# ? NULL"
    switch (ic->connection_mode) {
    case IBP_CMODE_RID:   //** Add the "#RID"
        hoststr[i] = '#';
        i++;
        for (j=0; (i<n_host) && (rid.name[j] != '\0'); i++, j++) hoststr[i] = rid.name[j];
        break;
    case IBP_CMODE_ROUND_ROBIN:  //** Add "#<counter mod rr_size>"
        hoststr[i] = '#';
        i++;
        n = tbx_atomic_inc(ic->rr_count);  //** Next round-robin slot
        n = n % ic->rr_size;
        snprintf(rr, sizeof(rr), "%d", n);
        //log_printf(0, "HOST rr=%s i=%d\n", rr, i);
//        for (j=0; (i<n_host) && (rr[j] != '\0'); i++, j++) { printf("[%c,%d]", rr[j], j); hoststr[i] = rr[j]; }
        for (j=0; (i<n_host) && (rr[j] != '\0'); i++, j++) { hoststr[i] = rr[j]; }
        //printf("\n i=%d\n", i);
        break;
    }

    //** Always NUL terminate, truncating the suffix if the buffer filled up
    if (i<n_host) {
        hoststr[i] = '\0';
    } else {
        hoststr[n_host-1] = '\0';
    }

//    log_printf(15, "HOST hoststr=%s host=%s rid=%s cmode=%d\n", hoststr, host, rid.name, ic->connection_mode);

    return;
}
//** Parse an IBP capability string "ibp://host:port/key/typekey" into its
//** components.
//**   ic      - IBP context (supplies connection_mode and the RR counter)
//**   cap     - Capability string to parse (NULL returns failure)
//**   host    - Buffer of at least MAX_HOST_SIZE bytes for the host
//**   port    - Parsed port number (-1 if unparsed)
//**   key     - Buffer of at least MAX_KEY_SIZE bytes for the key
//**   typekey - Buffer of at least MAX_KEY_SIZE bytes for the type key
//** Depending on the connection mode the host is decorated with "#<rid>"
//** or a round-robin counter to select the connection.
//** Returns 0 on success, 1 on parse failure.
//** Fixes vs original: the key/typekey copies were bounded by a hardcoded
//** 255 instead of MAX_KEY_SIZE-1 (potential overflow), and the round-robin
//** termination check used a magic "i<1024" which could write host[MAX_HOST_SIZE]
//** one byte past the buffer when i == MAX_HOST_SIZE.
int parse_cap(ibp_context_t *ic, ibp_cap_t *cap, char *host, int *port, char *key, char *typekey)
{
    char *bstate;
    int finished = 0;
    int i, j, n, m;
    char rr[16];

    //** Pre-terminate all output buffers so callers always get valid strings
    host[MAX_HOST_SIZE-1] = '\0';
    host[0] = '\0';
    key[MAX_KEY_SIZE-1] = '\0';
    key[0] = '\0';
    typekey[MAX_KEY_SIZE-1] = '\0';
    typekey[0] = '\0';
    *port = -1;

    if (cap == NULL) return(1);

    char *temp = strdup(cap);
    char *ptr;
    tbx_stk_string_token(temp, "/", &bstate, &finished); //** gets the ibp:/
    ptr = tbx_stk_string_token(NULL, ":", &bstate, &finished); //** This should be the hostname
    ptr = &(ptr[1]);  //** Skip the extra "/"
    sscanf(tbx_stk_string_token(NULL, "/", &bstate, &finished), "%d", port);
    strncpy(host, ptr, MAX_HOST_SIZE-1);  //** This should be the host name

    //** Bound the copies by the actual buffer size (was a hardcoded 255)
    strncpy(key, tbx_stk_string_token(NULL, "/", &bstate, &finished), MAX_KEY_SIZE-1);
    strncpy(typekey, tbx_stk_string_token(NULL, "/", &bstate, &finished), MAX_KEY_SIZE-1);

    switch (ic->connection_mode) {
    case IBP_CMODE_RID:  //** Append "#<rid>" -- the key up to its 1st "#"
        n = strlen(host);
        host[n] = '#';
        n++;
        m = strlen(key);
        m = ((m+n) > MAX_HOST_SIZE-1) ? MAX_HOST_SIZE-1 - n : m;  //** Clamp to buffer
        for (i=0; i<m; i++) {
            if (key[i] == '#') {
                host[i+n] = 0;
                break;
            }
            host[i+n] = key[i];
        }
        break;
    case IBP_CMODE_ROUND_ROBIN:  //** Append "#<counter mod rr_size>"
        n = tbx_atomic_inc(ic->rr_count);
        n = n % ic->rr_size;
        snprintf(rr, sizeof(rr), "%d", n);
        n = strlen(host);
        host[n] = '#';
        n++;
        for (j=0, i=n; (i<MAX_HOST_SIZE) && (rr[j] != '\0'); i++, j++) host[i] = rr[j];
        if (i < MAX_HOST_SIZE) {  //** Was "i<1024": could write 1 byte past host
            host[i] = '\0';
        } else {
            host[MAX_HOST_SIZE-1] = '\0';
        }
        break;
    }

    free(temp);

    log_printf(14, "parse_cap: CAP=%s * parsed=[%s]:%d/%s/%s\n", cap, host, *port, key, typekey);

    if (finished == 1) log_printf(0, "parse_cap: Error parsing cap %s\n", cap);

    return(finished);
}