Example 1
/*
 *	Report the current queue depths and the in/out packets-per-second
 *	rates.  If the thread pool is not initialized, everything is zero.
 */
void thread_pool_queue_stats(int array[RAD_LISTEN_MAX], int pps[2])
{
	int i;

#ifndef WITH_GCD
	if (pool_initialized) {
		struct timeval now;

		for (i = 0; i < RAD_LISTEN_MAX; i++) {
			array[i] = fr_fifo_num_elements(thread_pool.fifo[i]);
		}

		gettimeofday(&now, NULL);

		pps[0] = rad_pps(&thread_pool.pps_in.pps_old,
				 &thread_pool.pps_in.pps_now,
				 &thread_pool.pps_in.time_old,
				 &now);
		pps[1] = rad_pps(&thread_pool.pps_out.pps_old,
				 &thread_pool.pps_out.pps_now,
				 &thread_pool.pps_out.time_old,
				 &now);

	} else
#endif	/* WITH_GCD */
	{
		for (i = 0; i < RAD_LISTEN_MAX; i++) {
			array[i] = 0;
		}

		pps[0] = pps[1] = 0;
	}
}
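
As a usage illustration only, a status handler might call this accessor as sketched below; print_queue_stats is an invented wrapper for this example and is not part of the original file.

static void print_queue_stats(void)
{
	int depth[RAD_LISTEN_MAX];
	int pps[2];
	int i;

	thread_pool_queue_stats(depth, pps);

	for (i = 0; i < RAD_LISTEN_MAX; i++) {
		radlog(L_INFO, "queue[%d] = %d requests", i, depth[i]);
	}
	radlog(L_INFO, "pps in = %d, pps out = %d", pps[0], pps[1]);
}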
Example 2
/*
 *	Remove a request from the queue.
 */
static int request_dequeue(REQUEST **prequest)
{
	time_t blocked;
	static time_t last_complained = 0;
	RAD_LISTEN_TYPE i, start;
	REQUEST *request;

	reap_children();

	pthread_mutex_lock(&thread_pool.queue_mutex);

#ifdef WITH_STATS
#ifdef WITH_ACCOUNTING
	if (thread_pool.auto_limit_acct) {
		struct timeval now;

		gettimeofday(&now, NULL);
		
		thread_pool.pps_out.pps  = rad_pps(&thread_pool.pps_out.pps_old,
						   &thread_pool.pps_out.pps_now,
						   &thread_pool.pps_out.time_old,
						   &now);
		thread_pool.pps_out.pps_now++;
	}
#endif
#endif

	/*
	 *	Clear old requests from all queues.
	 *
	 *	We only do one pass over the queue, in order to
	 *	amortize the work across the child threads.  Since we
	 *	do N checks for one request de-queued, the old
	 *	requests will be quickly cleared.
	 */
	for (i = 0; i < RAD_LISTEN_MAX; i++) {
		request = fr_fifo_peek(thread_pool.fifo[i]);
		if (!request) continue;

		rad_assert(request->magic == REQUEST_MAGIC);

		if (request->master_state != REQUEST_STOP_PROCESSING) {
			continue;
		}

		/*
		 *	This entry was marked to be stopped.  Acknowledge it.
		 */
		request = fr_fifo_pop(thread_pool.fifo[i]);
		rad_assert(request != NULL);
		request->child_state = REQUEST_DONE;
		thread_pool.num_queued--;
	}

	start = 0;
 retry:
	/*
	 *	Pop results from the top of the queue
	 */
	for (i = start; i < RAD_LISTEN_MAX; i++) {
		request = fr_fifo_pop(thread_pool.fifo[i]);
		if (request) {
			start = i;
			break;
		}
	}

	if (!request) {
		pthread_mutex_unlock(&thread_pool.queue_mutex);
		*prequest = NULL;
		return 0;
	}

	rad_assert(thread_pool.num_queued > 0);
	thread_pool.num_queued--;
	*prequest = request;

	rad_assert(*prequest != NULL);
	rad_assert(request->magic == REQUEST_MAGIC);

	request->component = "<core>";
	request->module = "<thread>";

	/*
	 *	If the request has sat in the queue for too long,
	 *	kill it.
	 *
	 *	The main clean-up code can't delete the request from
	 *	the queue, and therefore won't clean it up until we
	 *	have acknowledged it as "done".
	 */
	if (request->master_state == REQUEST_STOP_PROCESSING) {
		request->module = "<done>";
		request->child_state = REQUEST_DONE;
		goto retry;
	}

	/*
	 *	The thread is currently processing a request.
	 */
	thread_pool.active_threads++;

	blocked = time(NULL);
	if ((blocked - request->timestamp) > 5) {
		if (last_complained < blocked) {
			last_complained = blocked;
			blocked -= request->timestamp;
		} else {
			blocked = 0;
		}
	} else {
		blocked = 0;
	}

	pthread_mutex_unlock(&thread_pool.queue_mutex);

	if (blocked) {
		radlog(L_ERR, "(%u) %s has been waiting in the processing queue for %d seconds.  Check that all databases are running properly!",
		       request->number, fr_packet_codes[request->packet->code], (int) blocked);
	}

	return 1;
}
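
For context, the function above is the consumer half of the queue.  A worker thread would typically block on the pool semaphore and only then try to dequeue.  The loop below is a minimal sketch of that pattern, assuming the same thread_pool globals; worker_thread_sketch is an invented name, not the actual handler thread from the original source.

static void *worker_thread_sketch(void *arg)
{
	REQUEST *request;

	for (;;) {
		/*
		 *	Wait until request_enqueue() posts the semaphore,
		 *	i.e. until at least one request has been queued.
		 */
		sem_wait(&thread_pool.semaphore);

		/*
		 *	The queue can still turn out to be empty by the
		 *	time we get here (e.g. the request was cancelled),
		 *	so check the return code.
		 */
		if (!request_dequeue(&request)) continue;

		/* ... process the request, then mark it done ... */
	}

	return NULL;
}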
Example 3
/*
 *	Add a request to the list of waiting requests.
 *	This function gets called ONLY from the main handler thread...
 *
 *	This function should never fail.
 */
int request_enqueue(REQUEST *request)
{
	/*
	 *	If we haven't checked the number of child threads
	 *	in a while, OR if the thread pool appears to be full,
	 *	go manage it.
	 */
	if ((last_cleaned < request->timestamp) ||
	    (thread_pool.active_threads == thread_pool.total_threads)) {
		thread_pool_manage(request->timestamp);
	}

	pthread_mutex_lock(&thread_pool.queue_mutex);

#ifdef WITH_STATS
#ifdef WITH_ACCOUNTING
	if (thread_pool.auto_limit_acct) {
		struct timeval now;

		/*
		 *	Throw away accounting requests if we're too busy.
		 */
		if ((request->packet->code == PW_ACCOUNTING_REQUEST) &&
		    (fr_fifo_num_elements(thread_pool.fifo[RAD_LISTEN_ACCT]) > 0) && 
		    (thread_pool.num_queued > (thread_pool.max_queue_size / 2)) &&
		    (thread_pool.pps_in.pps_now > thread_pool.pps_out.pps_now)) {
			
			pthread_mutex_unlock(&thread_pool.queue_mutex);
			return 0;
		}

		gettimeofday(&now, NULL);
		
		thread_pool.pps_in.pps = rad_pps(&thread_pool.pps_in.pps_old,
						 &thread_pool.pps_in.pps_now,
						 &thread_pool.pps_in.time_old,
						 &now);
		
		thread_pool.pps_in.pps_now++;
	}
#endif	/* WITH_ACCOUNTING */
#endif

	thread_pool.request_count++;

	if (thread_pool.num_queued >= thread_pool.max_queue_size) {
		int complain = FALSE;
		time_t now;
		static time_t last_complained = 0;

		now = time(NULL);
		if (last_complained != now) {
			last_complained = now;
			complain = TRUE;
		}

		pthread_mutex_unlock(&thread_pool.queue_mutex);

		/*
		 *	Mark the request as done.
		 */
		if (complain) {
			radlog(L_ERR, "Something is blocking the server.  There are %d packets in the queue, waiting to be processed.  Ignoring the new request.", thread_pool.max_queue_size);
		}
		return 0;
	}
	request->component = "<core>";
	request->module = "<queue>";

	/*
	 *	Push the request onto the appropriate fifo for its priority.
	 */
	if (!fr_fifo_push(thread_pool.fifo[request->priority], request)) {
		pthread_mutex_unlock(&thread_pool.queue_mutex);
		radlog(L_ERR, "!!! ERROR !!! Failed inserting request %d into the queue", request->number);
		return 0;
	}

	thread_pool.num_queued++;

	pthread_mutex_unlock(&thread_pool.queue_mutex);

	/*
	 *	There's one more request in the queue.
	 *
	 *	Note that we're not touching the queue any more, so
	 *	the semaphore post is outside of the mutex.  This also
	 *	means that when the thread wakes up and tries to lock
	 *	the mutex, it will be unlocked, and there won't be
	 *	contention.
	 */
	sem_post(&thread_pool.semaphore);

	return 1;
}
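
On the enqueue side, a return of 0 means the packet was deliberately refused (queue full, or accounting shed under load), so the caller must not assume a worker thread will ever see it.  A hedged sketch of such a call site follows; dispatch_packet_sketch is an invented name and the clean-up policy is only assumed.

static void dispatch_packet_sketch(REQUEST *request)
{
	if (!request_enqueue(request)) {
		/*
		 *	The queue refused the packet.  The caller keeps
		 *	ownership and is responsible for cleaning it up;
		 *	the NAS is expected to retransmit.
		 */
		return;
	}

	/*
	 *	Success: the request now belongs to the thread pool, and
	 *	a worker thread will pick it up after sem_post().
	 */
}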
Example 4
/*
 *	Add a request to the list of waiting requests.
 *	This function gets called ONLY from the main handler thread...
 *
 *	This function should never fail.
 */
int request_enqueue(REQUEST *request)
{
	/*
	 *	If we haven't checked the number of child threads
	 *	in a while, OR if the thread pool appears to be full,
	 *	go manage it.
	 */
	if ((last_cleaned < request->timestamp) ||
	    (thread_pool.active_threads == thread_pool.total_threads) ||
	    (thread_pool.exited_threads > 0)) {
		thread_pool_manage(request->timestamp);
	}

	pthread_mutex_lock(&thread_pool.queue_mutex);

#ifdef WITH_STATS
#ifdef WITH_ACCOUNTING
	if (thread_pool.auto_limit_acct) {
		struct timeval now;

		/*
		 *	Throw away accounting requests if we're too
		 *	busy.  The NAS should retransmit these, and no
		 *	one should notice.
		 *
		 *	In contrast, we always try to process
		 *	authentication requests.  Those are more time
		 *	critical, and it's harder to determine which
		 *	we can throw away, and which we can keep.
		 *
		 *	We allow the queue to get half full before we
		 *	start worrying.  Even then, we still require
		 *	that the rate of input packets is higher than
		 *	the rate of outgoing packets.  i.e. the queue
		 *	is growing.
		 *
		 *	Once that happens, we roll a die to see where
		 *	the barrier is for "keep" versus "toss".  If
		 *	the queue is smaller than the barrier, we keep
		 *	the packet.  If the queue is larger than the
		 *	barrier, we throw the packet away.
		 *
		 *	i.e. the probability of throwing the packet
		 *	away increases from 0 (queue is half full), to
		 *	100 percent (queue is completely full).
		 *
		 *	A probabilistic approach allows us to process
		 *	SOME of the new accounting packets.
		 */
		if ((request->packet->code == PW_CODE_ACCOUNTING_REQUEST) &&
		    (thread_pool.num_queued > (thread_pool.max_queue_size / 2)) &&
		    (thread_pool.pps_in.pps_now > thread_pool.pps_out.pps_now)) {
			uint32_t prob;
			int keep;

			/*
			 *	Take a random value of how full we
			 *	want the queue to be.  It's OK to be
			 *	half full, but we get excited over
			 *	anything more than that.
			 */
			keep = (thread_pool.max_queue_size / 2);
			prob = fr_rand() & ((1 << 10) - 1);
			keep *= prob;
			keep >>= 10;
			keep += (thread_pool.max_queue_size / 2);
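
			/*
			 *	Worked example (assuming max_queue_size is
			 *	1000): "keep" is 500 plus a random value in
			 *	[0, 499], i.e. roughly uniform over
			 *	[500, 999].  A queue depth of 500 is never
			 *	above "keep" (drop chance ~0%), while a
			 *	depth of 999 almost always is (drop chance
			 *	~100%), so the drop probability grows
			 *	linearly as the queue fills from half full
			 *	to completely full.
			 */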

			/*
			 *	If the queue is larger than our dice
			 *	roll, we throw the packet away.
			 */
			if (thread_pool.num_queued > keep) {
				pthread_mutex_unlock(&thread_pool.queue_mutex);
				return 0;
			}
		}

		gettimeofday(&now, NULL);

		/*
		 *	Calculate the instantaneous arrival rate into
		 *	the queue.
		 */
		thread_pool.pps_in.pps = rad_pps(&thread_pool.pps_in.pps_old,
						 &thread_pool.pps_in.pps_now,
						 &thread_pool.pps_in.time_old,
						 &now);

		thread_pool.pps_in.pps_now++;
	}