Ejemplo n.º 1
0
/**
 * test1	Allocate a DCB, clone it, close both, then run zombie
 *		processing and verify the clone is no longer valid.
 *
 * Exercises: dcb_alloc, dcb_clone, dcb_close, dcb_get_zombies and
 * dcb_process_zombies. Failures abort via the ss_info_dassert macros.
 *
 * @return	0 on success
 */
static int
test1(void)
{
DCB	*dcb, *clone;

        /* Single buffer tests */
        ss_dfprintf(stderr,
                    "testdcb : creating buffer with type DCB_ROLE_SERVICE_LISTENER");
        dcb = dcb_alloc(DCB_ROLE_SERVICE_LISTENER);
        printDCB(dcb);
        ss_info_dassert(dcb_isvalid(dcb), "New DCB must be valid");
        ss_dfprintf(stderr, "\t..done\nAllocated dcb.");
        clone = dcb_clone(dcb);
        ss_dfprintf(stderr, "\t..done\nCloned dcb");
        printAllDCBs();
        /* Tautology kept as a sanity placeholder for the assert machinery */
        ss_info_dassert(true, "Something is true");
        ss_dfprintf(stderr, "\t..done\n");
        dcb_close(dcb);
        ss_dfprintf(stderr, "Freed original dcb");
        ss_info_dassert(!dcb_isvalid(dcb), "Freed DCB must not be valid");
        ss_dfprintf(stderr, "\t..done\nMake clone DCB a zombie");
        /* A DCB must be out of the polling system before it can be zombied */
        clone->state = DCB_STATE_NOPOLLING;
        dcb_close(clone);
        ss_info_dassert(dcb_get_zombies() == clone, "Clone DCB must be start of zombie list now");
        ss_dfprintf(stderr, "\t..done\nProcess the zombies list");
        dcb_process_zombies(0);
        ss_dfprintf(stderr, "\t..done\nCheck clone no longer valid");
        ss_info_dassert(!dcb_isvalid(clone), "After zombie processing, clone DCB must not be valid");
        ss_dfprintf(stderr, "\t..done\n");

	return 0;
}
Ejemplo n.º 2
0
/**
 * The main polling loop
 *
 * This routine does the polling and dispatching of IO events
 * to the DCB's. It may be called either directly or as the entry point
 * of a polling thread within the gateway.
 *
 * The routine will loop as long as the variable "shutdown" is set to zero,
 * setting this to a non-zero value will cause the polling loop to return.
 *
 * There are two options for the polling, a debug option that is only useful if
 * you have a single thread. This blocks in epoll_wait until an event occurs.
 *
 * The non-debug option does an epoll with a time out. This allows the checking of
 * shutdown value to be checked in all threads. The algorithm for polling in this
 * mode is to do a poll with no-wait, if no events are detected then the poll is
 * repeated with a time out. This allows for a quick check before making the call
 * with timeout. The call with the timeout differs in that the Linux scheduler may
 * deschedule a process if a timeout is included, but will not do this if a 0 timeout
 * value is given. This improves performance when the gateway is under heavy load.
 *
 * In order to provide a fairer means of sharing the threads between the different
 * DCB's the poll mechanism has been decoupled from the processing of the events.
 * The events are now received via the epoll_wait call, a queue of DCB's that have
 * events pending is maintained and as new events arrive the DCB is added to the end
 * of this queue. If an event arrives for a DCB already in the queue, then the event
 * bits are added to the DCB but the DCB maintains the same point in the queue unless
 * the original events are already being processed. If they are being processed then
 * the DCB is moved to the back of the queue, this means that a DCB that is receiving
 * events at a high rate will not block the execution of events for other DCB's and
 * should result in a fairer polling strategy.
 *
 * The introduction of the ability to inject "fake" write events into the event queue meant
 * that there was a possibility to "starve" new events since the polling loop would
 * consume the event queue before looking for new events. If the DCB that injected
 * the fake event then injected another fake event as a result of the first, it meant
 * that new events did not get added to the queue. The strategy has been updated to
 * not consume the entire event queue, but process one event before doing a non-blocking
 * call to add any new events before processing any more events. A blocking call to
 * collect events is only made if there are no pending events to be processed on the
 * event queue.
 *
 * Also introduced a "timeout bias" mechanism. This mechanism controls the length
 * of the timeout passed to epoll_wait in blocking calls based on previous behaviour.
 * The initial call will block for 10% of the defined timeout period; this will be
 * increased in increments of 10% until the full timeout value is used. If at any
 * point there is an event to be processed then the value will be reduced to 10% again
 * for the next blocking call.
 *
 * @param arg	The thread ID passed as a void * to satisfy the threading package
 */
void
poll_waitevents(void *arg)
{
struct epoll_event events[MAX_EVENTS];
int		   i, nfds, timeout_bias = 1;
int		   thread_id = (int)arg;
DCB                *zombies = NULL;
int		   poll_spins = 0;

	/** Add this thread to the bitmask of running polling threads */
	bitmask_set(&poll_mask, thread_id);
	if (thread_data)
	{
		thread_data[thread_id].state = THREAD_IDLE;
	}

	/** Init mysql thread context for use with a mysql handle and a parser */
	mysql_thread_init();
	
	while (1)
	{
		if (pollStats.evq_pending == 0 && timeout_bias < 10)
		{
			timeout_bias++;
		}

		atomic_add(&n_waiting, 1);
#if BLOCKINGPOLL
		nfds = epoll_wait(epoll_fd, events, MAX_EVENTS, -1);
		atomic_add(&n_waiting, -1);
#else /* BLOCKINGPOLL */
#if MUTEX_EPOLL
                simple_mutex_lock(&epoll_wait_mutex, TRUE);
#endif
		if (thread_data)
		{
			thread_data[thread_id].state = THREAD_POLLING;
		}
                
		atomic_add(&pollStats.n_polls, 1);
		if ((nfds = epoll_wait(epoll_fd, events, MAX_EVENTS, 0)) == -1)
		{
			atomic_add(&n_waiting, -1);
                        int eno = errno;
                        errno = 0;
                        LOGIF(LD, (skygw_log_write(
                                LOGFILE_DEBUG,
                                "%lu [poll_waitevents] epoll_wait returned "
                                "%d, errno %d",
                                pthread_self(),
                                nfds,
                                eno)));
			atomic_add(&n_waiting, -1);
		}
		/*
		 * If there are no new descriptors from the non-blocking call
		 * and nothing to process on the event queue then for do a
		 * blocking call to epoll_wait.
		 *
		 * We calculate a timeout bias to alter the length of the blocking
		 * call based on the time since we last received an event to process
		 */
		else if (nfds == 0 && pollStats.evq_pending == 0 && poll_spins++ > number_poll_spins)
		{
			atomic_add(&pollStats.blockingpolls, 1);
			nfds = epoll_wait(epoll_fd,
                                                  events,
                                                  MAX_EVENTS,
                                                  (max_poll_sleep * timeout_bias) / 10);
			if (nfds == 0 && pollStats.evq_pending)
			{
				atomic_add(&pollStats.wake_evqpending, 1);
				poll_spins = 0;
			}
		}
		else
		{
			atomic_add(&n_waiting, -1);
		}

		if (n_waiting == 0)
			atomic_add(&pollStats.n_nothreads, 1);
#if MUTEX_EPOLL
                simple_mutex_unlock(&epoll_wait_mutex);
#endif
#endif /* BLOCKINGPOLL */
		if (nfds > 0)
		{
			timeout_bias = 1;
			if (poll_spins <= number_poll_spins + 1)
				atomic_add(&pollStats.n_nbpollev, 1);
			poll_spins = 0;
                        LOGIF(LD, (skygw_log_write(
                                LOGFILE_DEBUG,
                                "%lu [poll_waitevents] epoll_wait found %d fds",
                                pthread_self(),
                                nfds)));
			atomic_add(&pollStats.n_pollev, 1);
			if (thread_data)
			{
				thread_data[thread_id].n_fds = nfds;
				thread_data[thread_id].cur_dcb = NULL;
				thread_data[thread_id].event = 0;
				thread_data[thread_id].state = THREAD_PROCESSING;
			}

			pollStats.n_fds[(nfds < MAXNFDS ? (nfds - 1) : MAXNFDS - 1)]++;

			load_average = (load_average * load_samples + nfds)
						/ (load_samples + 1);
			atomic_add(&load_samples, 1);
			atomic_add(&load_nfds, nfds);

			/*
			 * Process every DCB that has a new event and add
			 * it to the poll queue.
			 * If the DCB is currently being processed then we
			 * or in the new eent bits to the pending event bits
			 * and leave it in the queue.
			 * If the DCB was not already in the queue then it was
			 * idle and is added to the queue to process after
			 * setting the event bits.
			 */
			for (i = 0; i < nfds; i++)
			{
				DCB 	*dcb = (DCB *)events[i].data.ptr;
				__uint32_t	ev = events[i].events;

				spinlock_acquire(&pollqlock);
				if (DCB_POLL_BUSY(dcb))
				{
					if (dcb->evq.pending_events == 0)
					{
						pollStats.evq_pending++;
						dcb->evq.inserted = hkheartbeat;
					}
					dcb->evq.pending_events |= ev;
				}
				else
				{
					dcb->evq.pending_events = ev;
					if (eventq)
					{
						dcb->evq.prev = eventq->evq.prev;
						eventq->evq.prev->evq.next = dcb;
						eventq->evq.prev = dcb;
						dcb->evq.next = eventq;
					}
					else
					{
						eventq = dcb;
						dcb->evq.prev = dcb;
						dcb->evq.next = dcb;
					}
					pollStats.evq_length++;
					pollStats.evq_pending++;
					dcb->evq.inserted = hkheartbeat;
					if (pollStats.evq_length > pollStats.evq_max)
					{
						pollStats.evq_max = pollStats.evq_length;
					}
				}
				spinlock_release(&pollqlock);
			}
		}

		/*
		 * Process of the queue of waiting requests
		 * This is done without checking the evq_pending count as a
		 * precautionary measure to avoid issues if the house keeping
		 * of the count goes wrong.
		 */
		if (process_pollq(thread_id))
			timeout_bias = 1;

		if (thread_data)
			thread_data[thread_id].state = THREAD_ZPROCESSING;
		zombies = dcb_process_zombies(thread_id);
		if (thread_data)
			thread_data[thread_id].state = THREAD_IDLE;

		if (do_shutdown)
		{
                        /*<
                         * Remove the thread from the bitmask of running
                         * polling threads.
                         */
			if (thread_data)
			{
				thread_data[thread_id].state = THREAD_STOPPED;
			}
			bitmask_clear(&poll_mask, thread_id);
			/** Release mysql thread context */
			mysql_thread_end();
			return;
		}
		if (thread_data)
		{
			thread_data[thread_id].state = THREAD_IDLE;
		}
	} /*< while(1) */
}