/*
 *  Assign a new request to a free thread.
 *
 *  If there isn't a free thread, then try to create a new one,
 *  up to the configured limits.
 *
 *  Returns 1 on success (request handled or queued), 0 if the
 *  request could not be enqueued (queue full / throttled).
 */
int thread_pool_addrequest(REQUEST *request, RAD_REQUEST_FUNP fun)
{
    time_t now = request->timestamp;

    request->process = fun;

    /*
     *  We've been told not to spawn threads, so handle the
     *  request synchronously in this thread.
     */
    if (!thread_pool.spawn_flag) {
        radius_handle_request(request, fun);

#ifdef WNOHANG
        /*
         *  Requests that care about child process exit
         *  codes have already either called
         *  rad_waitpid(), or they've given up.
         *
         *  FIX: this used to be a bare wait(NULL), which
         *  blocks until some child exits — stalling the main
         *  handler thread if a child is still running.  The
         *  #ifdef WNOHANG guard shows the intent was a
         *  non-blocking reap of any leftover zombies.
         */
        waitpid(-1, NULL, WNOHANG);
#endif
        return 1;
    }

    /*
     *  Add the new request to the queue.
     */
    if (!request_enqueue(request, fun)) return 0;

    /*
     *  If we haven't checked the number of child threads
     *  in a while, OR if the thread pool appears to be full,
     *  go manage it.
     */
    if ((last_cleaned < now) ||
        (thread_pool.active_threads == thread_pool.total_threads)) {
        thread_pool_manage(now);
    }

    return 1;
}
/*
 *  Add a request to the list of waiting requests.
 *  This function gets called ONLY from the main handler thread.
 *
 *  Returns 1 when the request was queued, 0 when it was dropped
 *  (queue full, or accounting traffic throttled).
 */
int request_enqueue(REQUEST *request)
{
    /*
     *  Periodically (at most once per second, or whenever the
     *  pool looks saturated) re-balance the thread pool before
     *  queueing more work.
     */
    if ((last_cleaned < request->timestamp) ||
        (thread_pool.active_threads == thread_pool.total_threads)) {
        thread_pool_manage(request->timestamp);
    }

    pthread_mutex_lock(&thread_pool.queue_mutex);

#ifdef WITH_STATS
#ifdef WITH_ACCOUNTING
    if (thread_pool.auto_limit_acct) {
        struct timeval tv_now;

        /*
         *  Throw away accounting requests if we're too busy:
         *  accounting packets are already backed up, the queue
         *  is more than half full, and packets are arriving
         *  faster than they are being processed.
         */
        if ((request->packet->code == PW_ACCOUNTING_REQUEST) &&
            (fr_fifo_num_elements(thread_pool.fifo[RAD_LISTEN_ACCT]) > 0) &&
            (thread_pool.num_queued > (thread_pool.max_queue_size / 2)) &&
            (thread_pool.pps_in.pps_now > thread_pool.pps_out.pps_now)) {
            pthread_mutex_unlock(&thread_pool.queue_mutex);
            return 0;
        }

        /*
         *  Track the instantaneous packets-per-second input rate.
         */
        gettimeofday(&tv_now, NULL);
        thread_pool.pps_in.pps = rad_pps(&thread_pool.pps_in.pps_old,
                                         &thread_pool.pps_in.pps_now,
                                         &thread_pool.pps_in.time_old,
                                         &tv_now);
        thread_pool.pps_in.pps_now++;
    }
#endif  /* WITH_ACCOUNTING */
#endif  /* WITH_STATS */

    thread_pool.request_count++;

    if (thread_pool.num_queued >= thread_pool.max_queue_size) {
        static time_t last_complained = 0;
        time_t current;
        int should_log;

        /*
         *  Complain at most once per second, but ALWAYS drop
         *  the request when the queue is full.
         */
        current = time(NULL);
        should_log = (last_complained != current);
        if (should_log) last_complained = current;

        pthread_mutex_unlock(&thread_pool.queue_mutex);

        if (should_log) {
            radlog(L_ERR, "Something is blocking the server. There are %d packets in the queue, waiting to be processed. Ignoring the new request.", thread_pool.max_queue_size);
        }

        return 0;
    }

    request->component = "<core>";
    request->module = "<queue>";

    /*
     *  Push the request onto the fifo matching its priority.
     */
    if (!fr_fifo_push(thread_pool.fifo[request->priority], request)) {
        pthread_mutex_unlock(&thread_pool.queue_mutex);
        radlog(L_ERR, "!!! ERROR !!! Failed inserting request %d into the queue", request->number);
        return 0;
    }

    thread_pool.num_queued++;

    pthread_mutex_unlock(&thread_pool.queue_mutex);

    /*
     *  There's one more request in the queue.
     *
     *  Note that we're not touching the queue any more, so the
     *  semaphore post is outside of the mutex.  This also means
     *  that when a worker wakes up and tries to lock the mutex,
     *  it will be unlocked, and there won't be contention.
     */
    sem_post(&thread_pool.semaphore);

    return 1;
}
/*
 *  Add a request to the list of waiting requests.
 *  This function gets called ONLY from the main handler thread.
 *
 *  Returns 1 when the request was queued, 0 when it was dropped
 *  (queue full, or the fifo push failed).  On failure the request
 *  is marked REQUEST_DONE so the caller can clean it up.
 */
static int request_enqueue(REQUEST *request, RAD_REQUEST_FUNP fun)
{
    request_queue_t *entry;

    /*
     *  If we haven't checked the number of child threads
     *  in a while, OR if the thread pool appears to be full,
     *  OR if some threads have exited and need reaping,
     *  go manage it.
     */
    if ((last_cleaned < request->timestamp) ||
        (thread_pool.active_threads == thread_pool.total_threads) ||
        (thread_pool.exited_threads > 0)) {
        thread_pool_manage(request->timestamp);
    }

    pthread_mutex_lock(&thread_pool.queue_mutex);

    thread_pool.request_count++;

    if (thread_pool.num_queued >= thread_pool.max_queue_size) {
        int complain = FALSE;
        time_t now;
        static time_t last_complained = 0;

        /*
         *  Rate-limit the complaint to once per second; the
         *  request itself is always rejected.
         */
        now = time(NULL);
        if (last_complained != now) {
            last_complained = now;
            complain = TRUE;
        }

        pthread_mutex_unlock(&thread_pool.queue_mutex);

        /*
         *  Mark the request as done.
         */
        if (complain) {
            radlog(L_ERR, "Something is blocking the server. There are %d packets in the queue, waiting to be processed. Ignoring the new request.", thread_pool.max_queue_size);
        }
        request->child_state = REQUEST_DONE;
        return 0;
    }

    request->child_state = REQUEST_QUEUED;
    request->component = "<core>";
    request->module = "<queue>";

    entry = rad_malloc(sizeof(*entry));
    entry->request = request;
    entry->fun = fun;

    /*
     *  Push the request onto the fifo matching its priority.
     */
    if (!fr_fifo_push(thread_pool.fifo[request->priority], entry)) {
        pthread_mutex_unlock(&thread_pool.queue_mutex);
        radlog(L_ERR, "!!! ERROR !!! Failed inserting request %d into the queue", request->number);

        /*
         *  FIX: "entry" was previously leaked on this error
         *  path — it was allocated above but never freed when
         *  the push failed.
         */
        free(entry);

        request->child_state = REQUEST_DONE;
        return 0;
    }

    thread_pool.num_queued++;

    pthread_mutex_unlock(&thread_pool.queue_mutex);

    /*
     *  There's one more request in the queue.
     *
     *  Note that we're not touching the queue any more, so the
     *  semaphore post is outside of the mutex.  This also means
     *  that when a worker wakes up and tries to lock the mutex,
     *  it will be unlocked, and there won't be contention.
     */
    sem_post(&thread_pool.semaphore);

    return 1;
}
/*
 *  Add a request to the list of waiting requests.
 *  This function gets called ONLY from the main handler thread...
 *
 *  This function should never fail.
 *
 *  NOTE(review): this definition appears TRUNCATED in this chunk —
 *  the matching #endif directives, the queue-full check, the fifo
 *  push, and the final return are missing after the accounting
 *  throttle block below.  Confirm against the full file before
 *  modifying; code is left byte-identical here.
 */
int request_enqueue(REQUEST *request)
{
    /*
     *  If we haven't checked the number of child threads
     *  in a while, OR if the thread pool appears to be full,
     *  OR if threads have exited and need reaping,
     *  go manage it.
     */
    if ((last_cleaned < request->timestamp) ||
        (thread_pool.active_threads == thread_pool.total_threads) ||
        (thread_pool.exited_threads > 0)) {
        thread_pool_manage(request->timestamp);
    }

    pthread_mutex_lock(&thread_pool.queue_mutex);

#ifdef WITH_STATS
#ifdef WITH_ACCOUNTING
    if (thread_pool.auto_limit_acct) {
        struct timeval now;

        /*
         *  Throw away accounting requests if we're too
         *  busy.  The NAS should retransmit these, and no
         *  one should notice.
         *
         *  In contrast, we always try to process
         *  authentication requests.  Those are more time
         *  critical, and it's harder to determine which
         *  we can throw away, and which we can keep.
         *
         *  We allow the queue to get half full before we
         *  start worrying.  Even then, we still require
         *  that the rate of input packets is higher than
         *  the rate of outgoing packets.  i.e. the queue
         *  is growing.
         *
         *  Once that happens, we roll a dice to see where
         *  the barrier is for "keep" versus "toss".  If
         *  the queue is smaller than the barrier, we
         *  allow it.  If the queue is larger than the
         *  barrier, we throw the packet away.  Otherwise,
         *  we keep it.
         *
         *  i.e. the probability of throwing the packet
         *  away increases from 0 (queue is half full), to
         *  100 percent (queue is completely full).
         *
         *  A probabilistic approach allows us to process
         *  SOME of the new accounting packets.
         */
        if ((request->packet->code == PW_CODE_ACCOUNTING_REQUEST) &&
            (thread_pool.num_queued > (thread_pool.max_queue_size / 2)) &&
            (thread_pool.pps_in.pps_now > thread_pool.pps_out.pps_now)) {
            uint32_t prob;
            int keep;

            /*
             *  Take a random value of how full we
             *  want the queue to be.  It's OK to be
             *  half full, but we get excited over
             *  anything more than that.
             *
             *  keep = half + half * (prob / 1024), where
             *  prob is a random 10-bit value — so the
             *  barrier lies uniformly between 50% and
             *  100% of max_queue_size.
             */
            keep = (thread_pool.max_queue_size / 2);
            prob = fr_rand() & ((1 << 10) - 1);
            keep *= prob;
            keep >>= 10;
            keep += (thread_pool.max_queue_size / 2);

            /*
             *  If the queue is larger than our dice
             *  roll, we throw the packet away.
             */
            if (thread_pool.num_queued > keep) {
                pthread_mutex_unlock(&thread_pool.queue_mutex);
                return 0;
            }
        }

        gettimeofday(&now, NULL);

        /*
         *  Calculate the instantaneous arrival rate into
         *  the queue.
         */
        thread_pool.pps_in.pps = rad_pps(&thread_pool.pps_in.pps_old,
                                         &thread_pool.pps_in.pps_now,
                                         &thread_pool.pps_in.time_old,
                                         &now);

        thread_pool.pps_in.pps_now++;
    }
/*
 *  Add a request to the list of waiting requests.
 *  This function gets called ONLY from the main handler thread.
 *
 *  Returns 1 when the request was queued, 0 when it was dropped.
 */
int request_enqueue(REQUEST *request)
{
    /*
     *  FIX: was "static int"; it is compared against and
     *  assigned from almost_now (a time_t), so use time_t.
     */
    static time_t last_complained = 0;

    almost_now = request->timestamp;

    /*
     *  If we haven't checked the number of child threads
     *  in a while, OR if the thread pool appears to be full,
     *  go manage it.
     */
    if ((last_cleaned < request->timestamp) ||
        (thread_pool.active_threads == thread_pool.total_threads)) {
        thread_pool_manage(request->timestamp);
    }

    pthread_mutex_lock(&thread_pool.queue_mutex);

    thread_pool.request_count++;

    if (thread_pool.num_queued >= thread_pool.max_queue_size) {
        int complain = FALSE;

        /*
         *  FIX: the full-queue rejection used to be guarded by
         *  (last_complained != almost_now), so once we had
         *  complained during the current second, subsequent
         *  requests were silently ENQUEUED past max_queue_size
         *  instead of being dropped.  Always reject when the
         *  queue is full; only the log message is rate-limited
         *  to once per second.
         */
        if (last_complained != almost_now) {
            last_complained = almost_now;
            complain = TRUE;
        }

        pthread_mutex_unlock(&thread_pool.queue_mutex);

        /*
         *  Mark the request as done.
         */
        if (complain) {
            radlog(L_ERR, "Something is blocking the server. There are %d packets in the queue, waiting to be processed. Ignoring the new request.", thread_pool.max_queue_size);
        }

        return 0;
    }

    request->component = "<core>";
    request->module = "<queue>";

    /*
     *  Push the request onto the fifo matching its priority.
     */
    if (!fr_fifo_push(thread_pool.fifo[request->priority], request)) {
        pthread_mutex_unlock(&thread_pool.queue_mutex);
        radlog(L_ERR, "!!! ERROR !!! Failed inserting request %d into the queue", request->number);
        return 0;
    }

    thread_pool.num_queued++;

    pthread_mutex_unlock(&thread_pool.queue_mutex);

    /*
     *  There's one more request in the queue.
     *
     *  Note that we're not touching the queue any more, so the
     *  semaphore post is outside of the mutex.  This also means
     *  that when a worker wakes up and tries to lock the mutex,
     *  it will be unlocked, and there won't be contention.
     */
    sem_post(&thread_pool.semaphore);

    return 1;
}