Example #1
/**
 * @brief Context switch to the highest priority task while saving off the 
 * current task state.
 *
 * This function needs to be externally synchronized.
 * We could be switching from the idle task.  The priority searcher has been tuned
 * to return IDLE_PRIO for a completely empty run_queue case.
 */
void dispatch_save(void)
{
	if(cur_tcb->cur_prio == HIGHEST_PRIO)
		return;

	tcb_t *dest, *temp;
	temp = cur_tcb;
	uint8_t hp = highest_prio();

	/* If highest_prio() came back with IDLE_PRIO the run queue is
	   empty, so keep running the current task (which may already
	   be the idle task) */
	if(hp == IDLE_PRIO)
		dest = cur_tcb;
	else
		dest = runqueue_remove(hp);

	/* Set cur_tcb to the task that we are about to run */
	cur_tcb = dest;

	/* Set the cur_kstack var of the task that we are about to run */
	cur_kstack = (int)dest->kstack_high;
	
	/* Add the task that we just switched from back to the queue */
	runqueue_add(temp, temp->cur_prio);

	ctx_switch_full(&(dest->context), &(temp->context));
}
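
The comment above mentions a priority searcher tuned to return IDLE_PRIO for an empty run queue. As a rough illustration (not the actual kernel code), highest_prio() could be a simple bitmap scan over the runnable priorities with the idle priority as the fallback; run_bits and the at-most-32-priorities assumption are illustrative only:

/* Hedged sketch: bit i set => priority i runnable; lower numbers are
 * assumed to mean higher priority, and IDLE_PRIO is assumed <= 31. */
static uint32_t run_bits;

uint8_t highest_prio(void)
{
	uint8_t prio;

	for (prio = 0; prio < IDLE_PRIO; prio++) {
		if (run_bits & (1u << prio))
			return prio;
	}
	/* completely empty run queue: the ever-present idle task wins */
	return IDLE_PRIO;
}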
Example #2
/**
 * @brief Context switch to the highest priority task while saving off the 
 * current task state.
 *
 * This function needs to be externally synchronized.
 * We could be switching from the idle task.  The priority searcher has been tuned
 * to return IDLE_PRIO for a completely empty run_queue case.
 */
void dispatch_save(void)
{
	uint8_t next_prio;
	tcb_t *next_tcb, *saved_cur_tcb;

//	printf("inside dispatch save\n");

//	printf("added cur_tcb %u %p to run queue\n", cur_tcb->cur_prio, cur_tcb);
	next_prio = highest_prio();
	/*
	 * add the current task to the run queue...
	 */
	runqueue_add(cur_tcb, cur_tcb->cur_prio);
//	printf("next_prio is %u\n", next_prio);
		

	next_tcb = runqueue_remove(next_prio);
//	printf(" d save: removed next_tcb %u %p from run queue\n", next_tcb->cur_prio, next_tcb);
//	print_run_queue();
	saved_cur_tcb = cur_tcb;
	cur_tcb = next_tcb;
#if 0
	printf("before calling ctx sw full, cur->context is %p\n", &(saved_cur_tcb->context));
	printf("hexdump of cur->context is\n");
	hexdump(&saved_cur_tcb->context, 160);
	printf("before calling ctx sw full, next->context is %p\n", &(next_tcb->context));
	printf("hexdump of next->context is\n");
	hexdump(&next_tcb->context, 160);
#endif
//	disable_interrupts();
	ctx_switch_full((volatile void *)(&(next_tcb->context)),
					(volatile void *)(&(saved_cur_tcb->context)));
//	while(1);
}
Example #3
/**
 * @brief Context switch to the highest priority task that is not this task -- 
 * and save the current task but don't mark it runnable.
 *
 * There is always an idle task to switch to.
 */
void dispatch_sleep(void)
{
	uint8_t next_prio;
	tcb_t *next_tcb, *saved_cur_tcb;

//	printf("inside dispatch sleep\n");
	next_prio = highest_prio();
//	printf("next_prio is %u\n", next_prio);
		
	next_tcb = runqueue_remove(next_prio);
//	printf("d sleep: removed next_tcb %u %p from run queue\n", next_tcb->cur_prio, next_tcb);
//	print_run_queue();
	saved_cur_tcb = cur_tcb;
	cur_tcb = next_tcb;
//	printf("before calling ctx sw full, next->context->sp is %p\n", next_tcb->context.sp);
//	printf("before calling ctx sw full, cur->context->sp is %p\n", saved_cur_tcb->context.sp);
//	printf("in dispatch sleep, sp is %u\n", get_kernel_sp());
//	hexdump(get_kernel_sp(), 200);
//	disable_interrupts();
//	printf("inside dispatch sleep hexdump of cur->context is\n");
//	hexdump(&saved_cur_tcb->context, 160);
	ctx_switch_full((volatile void *)(&(next_tcb->context)),
					(volatile void *)(&(saved_cur_tcb->context)));
//	while(1);
	
}
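
dispatch_sleep() never calls runqueue_add() for the departing task, so a blocking primitive is expected to park the TCB somewhere it can be found again. A minimal sketch of such a caller, assuming an illustrative event_t type with a sleep_queue list (neither is from the original kernel):

typedef struct event {
	tcb_t *sleep_queue;	/* assumed: singly linked list of waiters */
} event_t;

void event_wait(event_t *ev)
{
	tcb_t *me = get_cur_tcb();

	/* push ourselves onto the event's wait list... */
	me->sleep_queue = ev->sleep_queue;
	ev->sleep_queue = me;

	/* ...then switch away without becoming runnable; the full
	 * context switch saves our state, so a later wake-up
	 * (runqueue_add() + dispatch_save()) resumes right here */
	dispatch_sleep();
}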
Example #4
int mutex_lock_syscall(int mutex)
{
	//printf("mutex_lock, %d\n", mutex);
	// if mutex invalid
	if (mutex >= OS_NUM_MUTEX || gtMutex[mutex].bAvailable == 1) {
		return EINVAL;
	}
	
	if (gtMutex[mutex].pHolding_Tcb == get_cur_tcb())
		return EDEADLOCK;
	
	// if blocked
	if (gtMutex[mutex].bLock == 1) {
		tcb_t** tmp = &(gtMutex[mutex].pSleep_queue);
		while (1) {
			if (*tmp == (void *)0) {
				gtMutex[mutex].pSleep_queue = runqueue_remove(get_cur_prio());
				break;
			}
			if((*tmp)->sleep_queue == (void *)0) {
				(*tmp)->sleep_queue = runqueue_remove(get_cur_prio());
				/* terminate the list at the newly appended TCB */
				(*tmp)->sleep_queue->sleep_queue = (void *)0;
				break;
			}
			tmp = (tcb_t **)&((*tmp)->sleep_queue);
		}
		//printf("blocked!!!!!!!!!\n");
		//printf("mutex holding tcb: %d\n", (gtMutex[mutex].pHolding_Tcb)->native_prio);
		dispatch_save();
	}

	// acquire the mutex (reached directly, or after being woken up)
	gtMutex[mutex].bLock = 1;
	gtMutex[mutex].pHolding_Tcb = get_cur_tcb();
	
	//printf("mutex holding tcb: %d\n", (gtMutex[mutex].pHolding_Tcb)->native_prio);
	//printf("mutex sleep queue\n");
	/*
	tcb_t* tmp = gtMutex[mutex].pSleep_queue;
	while (tmp != (void *)0) {
		printf("sleep %d\n", tmp->native_prio);
		tmp = tmp->sleep_queue;
	}
	*/
	return 0;

}
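
For context, a hedged sketch of the matching unlock path, written against the same gtMutex layout used above (EPERM and the exact wake-up policy are assumptions, not the original code):

int mutex_unlock_syscall(int mutex)
{
	if (mutex < 0 || mutex >= OS_NUM_MUTEX || gtMutex[mutex].bAvailable == 1)
		return EINVAL;

	/* only the holder may unlock */
	if (gtMutex[mutex].pHolding_Tcb != get_cur_tcb())
		return EPERM;

	gtMutex[mutex].bLock = 0;
	gtMutex[mutex].pHolding_Tcb = (void *)0;

	/* wake the head of the sleep queue; it re-acquires the lock
	 * itself when it resumes after dispatch_save() in
	 * mutex_lock_syscall() above */
	tcb_t *waiter = gtMutex[mutex].pSleep_queue;
	if (waiter != (void *)0) {
		gtMutex[mutex].pSleep_queue = waiter->sleep_queue;
		waiter->sleep_queue = (void *)0;
		runqueue_add(waiter, waiter->cur_prio);
	}
	return 0;
}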
Example #5
/**
 * @brief Context switch to the highest priority task that is not this task -- 
 * don't save the current task state.
 *
 * There is always an idle task to switch to.
 */
void dispatch_nosave(void)
{
	/* Get the highest priority */
	uint8_t prio = highest_prio();
	/* Get the corresponding task */
	tcb_t *tcb = runqueue_remove(prio);
	/* Make it current and switch without saving the old context */
	cur_tcb = tcb;
	ctx_switch_half(&(cur_tcb->context));
}
Example #6
/**
 * @brief Context switch to the highest priority task that is not this task -- 
 * don't save the current task state.
 *
 * There is always an idle task to switch to.
 */
void dispatch_nosave(void)
{
  uint8_t prio = highest_prio();
  tcb_t *next_task = runqueue_remove(prio);

  // Re-enqueue the current task so it stays runnable
  runqueue_add(cur_tcb, cur_tcb->cur_prio);
  cur_tcb = next_task;
  ctx_switch_half(&(next_task->context));

}
Example #7
/**
 * @brief Context switch to the highest priority task that is not this task -- 
 * and save the current task but don't mark it runnable.
 *
 * There is always an idle task to switch to.
 */
void dispatch_sleep(void)
{
	tcb_t *dest, *temp;
	temp = cur_tcb;
	uint8_t cp = get_cur_prio(); /* i.e. the task that was just put to sleep */
	uint8_t hp = highest_prio(); /* next task to run */

	/* Run the idle task if there is no other task to run */
	if(cp == hp)
		dest = runqueue_remove(IDLE_PRIO);
	else
		dest = runqueue_remove(hp);

	/* Set cur_tcb to the task that we are about to run */
	cur_tcb = dest;
	
	/* Set the cur_kstack var of the task that we are about to run */
	cur_kstack = (int)dest->kstack_high;
	
	ctx_switch_full(&(dest->context), &(temp->context));
}
Example #8
/**
 * @brief Context switch to the highest priority task that is not this task -- 
 * and save the current task but don't mark it runnable.
 *
 * There is always an idle task to switch to.
 */
void dispatch_sleep(void)
{
  uint8_t prio = highest_prio();
  //printf("Highest prio is %d\n",prio);
  tcb_t *next_task = runqueue_remove(prio);
  
  tcb_t *tmp_task = cur_tcb;
  
  cur_tcb = next_task;
  
  ctx_switch_full(&(next_task->context), &(tmp_task->context));
	
}
Example #9
/**
 * @brief Context switch to the highest priority task that is not this task -- 
 * and save the current task but don't mark it runnable.
 *
 * There is always an idle task to switch to.
 */
void dispatch_sleep(void)
{
	/* Same idea as dispatch_save, but let the current task sleep:
	 * do not add it back to the run queue.
	 */
	uint8_t prio = highest_prio();
	tcb_t *new_task;

	new_task = runqueue_remove(prio);

	sched_context_t *cur_ctx = &(cur_tcb->context);
	cur_tcb = new_task;
	ctx_switch_full(&(new_task->context), cur_ctx);

}
Example #10
/**
 * @brief Context switch to the highest priority task that is not this task -- 
 * don't save the current task state.
 *
 * There is always an idle task to switch to.
 */
void dispatch_nosave(void)
{
	/*
	   This executes the first task
	   This ends up being the initial high prio task
	 */

	/* Set cur_tcb to the task that we are about to run */
	cur_tcb = runqueue_remove(highest_prio());

	/* Set the cur_kstack var of the task that we are about to run */
	cur_kstack = (int)cur_tcb->kstack_high;

	ctx_switch_half(&(cur_tcb->context));
}
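
Since dispatch_nosave() saves nothing, it is the natural way to launch the very first task once the run queue has been populated. A hedged sketch of such a bootstrap (runqueue_init() and idle_task_create() are illustrative placeholders, not the original API):

void sched_start(void)
{
	runqueue_init();		/* assumed setup helper */
	idle_task_create();		/* guarantees IDLE_PRIO is always runnable */
	/* ... runqueue_add() each created task at its priority ... */

	/* no current task exists yet, so there is no context worth
	 * saving: a half switch into the winner is all that is needed */
	dispatch_nosave();		/* does not return */
}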
Example #11
/**
 * @brief Context switch to the highest priority task while saving off the 
 * current task state.
 *
 * This function needs to be externally synchronized.
 * We could be switching from the idle task.  The priority searcher has been tuned
 * to return IDLE_PRIO for a completely empty run_queue case.
 */
void dispatch_save(void)
{
	tcb_t *new_task;
	tcb_t *old_task = cur_tcb;
	uint8_t prio = highest_prio();
	/* Switch only if some runnable task has a higher (numerically
	   lower) priority than the current task */
	if (old_task->cur_prio > prio) {

		new_task = runqueue_remove(prio);
		sched_context_t *old_ctx = &(old_task->context);
		runqueue_add(old_task, old_task->cur_prio);
		cur_tcb = new_task;
		/* !!! ctx_switch_full has not yet been implemented */
		ctx_switch_full(&(new_task->context), old_ctx);
	}
}
Example #12
/**
 * @brief Context switch to the highest priority task that is not this task -- 
 * don't save the current task state.
 *
 * There is always an idle task to switch to.
 */
void dispatch_nosave(void)
{
	uint8_t next_prio;
	tcb_t *next_tcb;
//	printf("inside dispatch no save\n");
	next_prio = highest_prio();
//	printf("next_prio is %u\n", next_prio);
		
	/*
	 * manage the run queue...
	 */
	next_tcb = runqueue_remove(next_prio);
//	printf("d nosave: removed next_tcb %u %p from run queue\n", next_tcb->cur_prio, next_tcb);
//	print_run_queue();
	cur_tcb = next_tcb;
//	printf("before calling ctx sw half, context->sp is %p\n", next_tcb->context.sp);
	ctx_switch_half((volatile void *)(&(next_tcb->context)));
}
Example #13
void async_loop() {

	if (uwsgi.async < 2) {
		uwsgi_log("the async loop engine requires async mode (--async <n>)\n");
		exit(1);
	}

	int interesting_fd, i;
	struct uwsgi_rb_timer *min_timeout;
	int timeout;
	int is_a_new_connection;
	int proto_parser_status;

	uint64_t now;

	struct uwsgi_async_request *current_request = NULL;

	void *events = event_queue_alloc(64);
	struct uwsgi_socket *uwsgi_sock;

	uwsgi.async_runqueue = NULL;

	uwsgi.wait_write_hook = async_wait_fd_write;
	uwsgi.wait_read_hook = async_wait_fd_read;

	if (uwsgi.signal_socket > -1) {
		event_queue_add_fd_read(uwsgi.async_queue, uwsgi.signal_socket);
		event_queue_add_fd_read(uwsgi.async_queue, uwsgi.my_signal_socket);
	}

	// set a default request manager
	if (!uwsgi.schedule_to_req)
		uwsgi.schedule_to_req = async_schedule_to_req;

	if (!uwsgi.schedule_to_main) {
		uwsgi_log("*** DANGER *** async mode without coroutine/greenthread engine loaded !!!\n");
	}

	while (uwsgi.workers[uwsgi.mywid].manage_next_request) {

		now = (uint64_t) uwsgi_now();
		if (uwsgi.async_runqueue) {
			timeout = 0;
		}
		else {
			min_timeout = uwsgi_min_rb_timer(uwsgi.rb_async_timeouts, NULL);
			if (min_timeout) {
				timeout = min_timeout->value - now;
				if (timeout <= 0) {
					async_expire_timeouts(now);
					timeout = 0;
				}
			}
			else {
				timeout = -1;
			}
		}

		uwsgi.async_nevents = event_queue_wait_multi(uwsgi.async_queue, timeout, events, 64);

		now = (uint64_t) uwsgi_now();
		// timeout ???
		if (uwsgi.async_nevents == 0) {
			async_expire_timeouts(now);
		}


		for (i = 0; i < uwsgi.async_nevents; i++) {
			// manage events
			interesting_fd = event_queue_interesting_fd(events, i);

			// signals are executed in the main stack... in the future we could have dedicated stacks for them
			if (uwsgi.signal_socket > -1 && (interesting_fd == uwsgi.signal_socket || interesting_fd == uwsgi.my_signal_socket)) {
				uwsgi_receive_signal(interesting_fd, "worker", uwsgi.mywid);
				continue;
			}

			is_a_new_connection = 0;

			// new request coming in ?
			uwsgi_sock = uwsgi.sockets;
			while (uwsgi_sock) {

				if (interesting_fd == uwsgi_sock->fd) {

					is_a_new_connection = 1;

					uwsgi.wsgi_req = find_first_available_wsgi_req();
					if (uwsgi.wsgi_req == NULL) {
						uwsgi_async_queue_is_full((time_t)now);
						break;
					}

					// on error re-insert the request in the queue
					wsgi_req_setup(uwsgi.wsgi_req, uwsgi.wsgi_req->async_id, uwsgi_sock);
					if (wsgi_req_simple_accept(uwsgi.wsgi_req, interesting_fd)) {
						uwsgi.async_queue_unused_ptr++;
						uwsgi.async_queue_unused[uwsgi.async_queue_unused_ptr] = uwsgi.wsgi_req;
						break;
					}

					if (wsgi_req_async_recv(uwsgi.wsgi_req)) {
						uwsgi.async_queue_unused_ptr++;
						uwsgi.async_queue_unused[uwsgi.async_queue_unused_ptr] = uwsgi.wsgi_req;
						break;
					}

					// by default the core is in UWSGI_AGAIN mode
					uwsgi.wsgi_req->async_status = UWSGI_AGAIN;
					// some protocols (like zeromq) do not need additional parsing, just push the request onto the runqueue
					if (uwsgi.wsgi_req->do_not_add_to_async_queue) {
						runqueue_push(uwsgi.wsgi_req);
					}

					break;
				}

				uwsgi_sock = uwsgi_sock->next;
			}

			if (!is_a_new_connection) {
				// proto event
				uwsgi.wsgi_req = find_wsgi_req_proto_by_fd(interesting_fd);
				if (uwsgi.wsgi_req) {
					proto_parser_status = uwsgi.wsgi_req->socket->proto(uwsgi.wsgi_req);
					// reset timeout
					async_reset_request(uwsgi.wsgi_req);
					// parsing complete
					if (!proto_parser_status) {
						// remove fd from event poll and fd proto table 
						uwsgi.async_proto_fd_table[interesting_fd] = NULL;
						event_queue_del_fd(uwsgi.async_queue, interesting_fd, event_queue_read());
						// put request in the runqueue
						runqueue_push(uwsgi.wsgi_req);
						continue;
					}
					else if (proto_parser_status < 0) {
						uwsgi.async_proto_fd_table[interesting_fd] = NULL;
						close(interesting_fd);
						continue;
					}
					// re-add timer
					async_add_timeout(uwsgi.wsgi_req, uwsgi.shared->options[UWSGI_OPTION_SOCKET_TIMEOUT]);
					continue;
				}

				// app-registered event
				uwsgi.wsgi_req = find_wsgi_req_by_fd(interesting_fd);
				// unknown fd, remove it (for safety)
				if (uwsgi.wsgi_req == NULL) {
					close(interesting_fd);
					continue;
				}

				// remove all the fd monitors and timeout
				async_reset_request(uwsgi.wsgi_req);
				uwsgi.wsgi_req->async_ready_fd = 1;
				uwsgi.wsgi_req->async_last_ready_fd = interesting_fd;

				// put the request in the runqueue again
				runqueue_push(uwsgi.wsgi_req);
			}
		}


		// event queue managed, give cpu to runqueue
		current_request = uwsgi.async_runqueue;

		while(current_request) {

			// current_request could be nulled on error/end of request
			struct uwsgi_async_request *next_request = current_request->next;

			uwsgi.wsgi_req = current_request->wsgi_req;
			uwsgi.schedule_to_req();
			uwsgi.wsgi_req->switches++;

			// request ended ?
			if (uwsgi.wsgi_req->async_status <= UWSGI_OK ||
				uwsgi.wsgi_req->waiting_fds || uwsgi.wsgi_req->async_timeout) {
				// remove from the runqueue
				runqueue_remove(current_request);
			}
			current_request = next_request;
		}

	}

}
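
The loop above only touches the run queue through uwsgi.async_runqueue, current_request->wsgi_req and current_request->next, handing nodes to runqueue_push()/runqueue_remove(). A plausible shape for those helpers, inferred from that usage (the prev link, the async_runqueue_last tail pointer and the uwsgi_malloc() allocation are assumptions; the real uWSGI implementation may differ):

static void runqueue_push(struct wsgi_request *wsgi_req)
{
	struct uwsgi_async_request *item = uwsgi_malloc(sizeof(struct uwsgi_async_request));
	item->wsgi_req = wsgi_req;
	item->next = NULL;
	/* append at the tail so requests run in FIFO order */
	item->prev = uwsgi.async_runqueue_last;
	if (item->prev)
		item->prev->next = item;
	else
		uwsgi.async_runqueue = item;
	uwsgi.async_runqueue_last = item;
	uwsgi.async_runqueue_cnt++;
}

static void runqueue_remove(struct uwsgi_async_request *item)
{
	/* unlink from the doubly linked list and fix up head/tail */
	if (item->prev)
		item->prev->next = item->next;
	else
		uwsgi.async_runqueue = item->next;
	if (item->next)
		item->next->prev = item->prev;
	else
		uwsgi.async_runqueue_last = item->prev;
	uwsgi.async_runqueue_cnt--;
	free(item);
}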
Example #14
void *async_loop(void *arg1) {

	struct uwsgi_async_fd *tmp_uaf;
	int interesting_fd, i;
	struct uwsgi_rb_timer *min_timeout;
	int timeout;
	int is_a_new_connection;
	int proto_parser_status;

	time_t now, last_now = 0;

	static struct uwsgi_async_request *current_request = NULL, *next_async_request = NULL;

	void *events = event_queue_alloc(64);
	struct uwsgi_socket *uwsgi_sock;

	uwsgi.async_runqueue = NULL;
	uwsgi.async_runqueue_cnt = 0;

	if (uwsgi.signal_socket > -1) {
		event_queue_add_fd_read(uwsgi.async_queue, uwsgi.signal_socket);
		event_queue_add_fd_read(uwsgi.async_queue, uwsgi.my_signal_socket);
	}

	// set a default request manager
	if (!uwsgi.schedule_to_req)
		uwsgi.schedule_to_req = async_schedule_to_req;

	while (uwsgi.workers[uwsgi.mywid].manage_next_request) {

		if (uwsgi.async_runqueue_cnt) {
			timeout = 0;
		}
		else {
			min_timeout = uwsgi_min_rb_timer(uwsgi.rb_async_timeouts);
			if (min_timeout) {
				timeout = min_timeout->key - time(NULL);
				if (timeout <= 0) {
					async_expire_timeouts();
					timeout = 0;
				}
			}
			else {
				timeout = -1;
			}
		}

		uwsgi.async_nevents = event_queue_wait_multi(uwsgi.async_queue, timeout, events, 64);

		// timeout ???
		if (uwsgi.async_nevents == 0) {
			async_expire_timeouts();
		}


		for (i = 0; i < uwsgi.async_nevents; i++) {
			// manage events
			interesting_fd = event_queue_interesting_fd(events, i);

			if (uwsgi.signal_socket > -1 && (interesting_fd == uwsgi.signal_socket || interesting_fd == uwsgi.my_signal_socket)) {
				uwsgi_receive_signal(interesting_fd, "worker", uwsgi.mywid);
				continue;
			}

			is_a_new_connection = 0;

			// new request coming in ?

			uwsgi_sock = uwsgi.sockets;
			while(uwsgi_sock) {

				if (interesting_fd == uwsgi_sock->fd) {

					is_a_new_connection = 1;	

					uwsgi.wsgi_req = find_first_available_wsgi_req();
					if (uwsgi.wsgi_req == NULL) {
						now = time(NULL);
						if (now > last_now) {
							uwsgi_log("async queue is full !!!\n");
							last_now = now;
						}
						break;
					}

					wsgi_req_setup(uwsgi.wsgi_req, uwsgi.wsgi_req->async_id, uwsgi_sock );
					if (wsgi_req_simple_accept(uwsgi.wsgi_req, interesting_fd)) {
#ifdef UWSGI_EVENT_USE_PORT
						event_queue_add_fd_read(uwsgi.async_queue, interesting_fd);
#endif
						uwsgi.async_queue_unused_ptr++;
						uwsgi.async_queue_unused[uwsgi.async_queue_unused_ptr] = uwsgi.wsgi_req;
						break;
					}
#ifdef UWSGI_EVENT_USE_PORT
					event_queue_add_fd_read(uwsgi.async_queue, interesting_fd);
#endif

// on linux we do not need to reset the socket to blocking state
#ifndef __linux__
					/* re-set blocking socket */
					int arg = uwsgi_sock->arg;
					arg &= (~O_NONBLOCK);
					if (fcntl(uwsgi.wsgi_req->poll.fd, F_SETFL, arg) < 0) {
						uwsgi_error("fcntl()");
						uwsgi.async_queue_unused_ptr++;
						uwsgi.async_queue_unused[uwsgi.async_queue_unused_ptr] = uwsgi.wsgi_req;
						break;
					}
#endif

					if (wsgi_req_async_recv(uwsgi.wsgi_req)) {
						uwsgi.async_queue_unused_ptr++;
						uwsgi.async_queue_unused[uwsgi.async_queue_unused_ptr] = uwsgi.wsgi_req;
						break;
					}

					if (uwsgi.wsgi_req->do_not_add_to_async_queue) {
						runqueue_push(uwsgi.wsgi_req);
					}

					break;
				}

				uwsgi_sock = uwsgi_sock->next;
			}

			if (!is_a_new_connection) {
				// proto event
				uwsgi.wsgi_req = find_wsgi_req_proto_by_fd(interesting_fd);
				if (uwsgi.wsgi_req) {
					proto_parser_status = uwsgi.wsgi_req->socket->proto(uwsgi.wsgi_req);
					// reset timeout
					rb_erase(&uwsgi.wsgi_req->async_timeout->rbt, uwsgi.rb_async_timeouts);
					free(uwsgi.wsgi_req->async_timeout);
					uwsgi.wsgi_req->async_timeout = NULL;
					// parsing complete
					if (!proto_parser_status) {
						// remove fd from event poll and fd proto table	
#ifndef UWSGI_EVENT_USE_PORT
						event_queue_del_fd(uwsgi.async_queue, interesting_fd, event_queue_read());
#endif
						uwsgi.async_proto_fd_table[interesting_fd] = NULL;
						// put request in the runqueue
						runqueue_push(uwsgi.wsgi_req);
						continue;
					}
					else if (proto_parser_status < 0) {
						if (proto_parser_status == -1)
							uwsgi_log("error parsing request\n");
						uwsgi.async_proto_fd_table[interesting_fd] = NULL;
						close(interesting_fd);
						continue;
					}
					// re-add timer
					async_add_timeout(uwsgi.wsgi_req, uwsgi.shared->options[UWSGI_OPTION_SOCKET_TIMEOUT]);
					continue;
				}

				// app event
				uwsgi.wsgi_req = find_wsgi_req_by_fd(interesting_fd);
				// unknown fd, remove it (for safety)
				if (uwsgi.wsgi_req == NULL) {
					close(interesting_fd);
					continue;
				}

				// remove all the fd monitors and timeout
				while (uwsgi.wsgi_req->waiting_fds) {
#ifndef UWSGI_EVENT_USE_PORT
					event_queue_del_fd(uwsgi.async_queue, uwsgi.wsgi_req->waiting_fds->fd, uwsgi.wsgi_req->waiting_fds->event);
#endif
					tmp_uaf = uwsgi.wsgi_req->waiting_fds;
					uwsgi.async_waiting_fd_table[tmp_uaf->fd] = NULL;
					uwsgi.wsgi_req->waiting_fds = tmp_uaf->next;
					free(tmp_uaf);
				}
				uwsgi.wsgi_req->waiting_fds = NULL;
				if (uwsgi.wsgi_req->async_timeout) {
					rb_erase(&uwsgi.wsgi_req->async_timeout->rbt, uwsgi.rb_async_timeouts);
					free(uwsgi.wsgi_req->async_timeout);
					uwsgi.wsgi_req->async_timeout = NULL;
				}

				uwsgi.wsgi_req->async_ready_fd = 1;
				uwsgi.wsgi_req->async_last_ready_fd = interesting_fd;

				// put the request in the runqueue again
				runqueue_push(uwsgi.wsgi_req);
			}
		}

		// event queue managed, give cpu to runqueue
		if (!current_request)
			current_request = uwsgi.async_runqueue;

		if (uwsgi.async_runqueue_cnt) {

			uwsgi.wsgi_req = current_request->wsgi_req;

			uwsgi.schedule_to_req();
			uwsgi.wsgi_req->switches++;

			next_async_request = current_request->next;
			// request ended ?
			if (uwsgi.wsgi_req->async_status <= UWSGI_OK) {
				// remove all the monitored fds and timeout
				while(uwsgi.wsgi_req->waiting_fds) {
#ifndef UWSGI_EVENT_USE_PORT
					event_queue_del_fd(uwsgi.async_queue, uwsgi.wsgi_req->waiting_fds->fd, uwsgi.wsgi_req->waiting_fds->event);
#endif
					tmp_uaf = uwsgi.wsgi_req->waiting_fds;
					uwsgi.async_waiting_fd_table[tmp_uaf->fd] = NULL;
					uwsgi.wsgi_req->waiting_fds = tmp_uaf->next;
					free(tmp_uaf);
				}
				uwsgi.wsgi_req->waiting_fds = NULL;
				if (uwsgi.wsgi_req->async_timeout) {
					rb_erase(&uwsgi.wsgi_req->async_timeout->rbt, uwsgi.rb_async_timeouts);
					free(uwsgi.wsgi_req->async_timeout);
					uwsgi.wsgi_req->async_timeout = NULL;
				}

				// remove from the list
				runqueue_remove(current_request);

				uwsgi_close_request(uwsgi.wsgi_req);

				// push wsgi_request in the unused stack
				uwsgi.async_queue_unused_ptr++;
				uwsgi.async_queue_unused[uwsgi.async_queue_unused_ptr] = uwsgi.wsgi_req;

			}
			else if (uwsgi.wsgi_req->waiting_fds || uwsgi.wsgi_req->async_timeout) {
				// remove this request from suspended list	
				runqueue_remove(current_request);
			}

			current_request = next_async_request;

		}


	}

	return NULL;

}
Example #15
void async_loop() {

	if (uwsgi.async < 2) {
		uwsgi_log("the async loop engine requires async mode (--async <n>)\n");
		exit(1);
	}

	struct uwsgi_async_fd *tmp_uaf;
	int interesting_fd, i;
	struct uwsgi_rb_timer *min_timeout;
	int timeout;
	int is_a_new_connection;
	int proto_parser_status;

	uint64_t now;

	static struct uwsgi_async_request *current_request = NULL, *next_async_request = NULL;

	void *events = event_queue_alloc(64);
	struct uwsgi_socket *uwsgi_sock;

	uwsgi.async_runqueue = NULL;
	uwsgi.async_runqueue_cnt = 0;

	uwsgi.wait_write_hook = async_wait_fd_write;
	uwsgi.wait_read_hook = async_wait_fd_read;

	if (uwsgi.signal_socket > -1) {
		event_queue_add_fd_read(uwsgi.async_queue, uwsgi.signal_socket);
		event_queue_add_fd_read(uwsgi.async_queue, uwsgi.my_signal_socket);
	}

	// set a default request manager
	if (!uwsgi.schedule_to_req)
		uwsgi.schedule_to_req = async_schedule_to_req;

	if (!uwsgi.schedule_to_main) {
		uwsgi_log("*** WARNING *** async mode without coroutine/greenthread engine loaded !!!\n");
	}

	while (uwsgi.workers[uwsgi.mywid].manage_next_request) {

		now = (uint64_t) uwsgi_now();
		if (uwsgi.async_runqueue_cnt) {
			timeout = 0;
		}
		else {
			min_timeout = uwsgi_min_rb_timer(uwsgi.rb_async_timeouts, NULL);
			if (min_timeout) {
				timeout = min_timeout->value - now;
				if (timeout <= 0) {
					async_expire_timeouts(now);
					timeout = 0;
				}
			}
			else {
				timeout = -1;
			}
		}

		uwsgi.async_nevents = event_queue_wait_multi(uwsgi.async_queue, timeout, events, 64);

		now = (uint64_t) uwsgi_now();
		// timeout ???
		if (uwsgi.async_nevents == 0) {
			async_expire_timeouts(now);
		}


		for (i = 0; i < uwsgi.async_nevents; i++) {
			// manage events
			interesting_fd = event_queue_interesting_fd(events, i);

			if (uwsgi.signal_socket > -1 && (interesting_fd == uwsgi.signal_socket || interesting_fd == uwsgi.my_signal_socket)) {
				uwsgi_receive_signal(interesting_fd, "worker", uwsgi.mywid);
				continue;
			}


			is_a_new_connection = 0;

			// new request coming in ?

			uwsgi_sock = uwsgi.sockets;
			while (uwsgi_sock) {

				if (interesting_fd == uwsgi_sock->fd) {

					is_a_new_connection = 1;

					uwsgi.wsgi_req = find_first_available_wsgi_req();
					if (uwsgi.wsgi_req == NULL) {
						uwsgi_async_queue_is_full((time_t)now);
						break;
					}

					wsgi_req_setup(uwsgi.wsgi_req, uwsgi.wsgi_req->async_id, uwsgi_sock);
					if (wsgi_req_simple_accept(uwsgi.wsgi_req, interesting_fd)) {
						uwsgi.async_queue_unused_ptr++;
						uwsgi.async_queue_unused[uwsgi.async_queue_unused_ptr] = uwsgi.wsgi_req;
						break;
					}

					if (wsgi_req_async_recv(uwsgi.wsgi_req)) {
						uwsgi.async_queue_unused_ptr++;
						uwsgi.async_queue_unused[uwsgi.async_queue_unused_ptr] = uwsgi.wsgi_req;
						break;
					}

					if (uwsgi.wsgi_req->do_not_add_to_async_queue) {
						runqueue_push(uwsgi.wsgi_req);
					}

					break;
				}

				uwsgi_sock = uwsgi_sock->next;
			}

			if (!is_a_new_connection) {
				// proto event
				uwsgi.wsgi_req = find_wsgi_req_proto_by_fd(interesting_fd);
				if (uwsgi.wsgi_req) {
					proto_parser_status = uwsgi.wsgi_req->socket->proto(uwsgi.wsgi_req);
					// reset timeout
					uwsgi_del_rb_timer(uwsgi.rb_async_timeouts, uwsgi.wsgi_req->async_timeout);
					free(uwsgi.wsgi_req->async_timeout);
					uwsgi.wsgi_req->async_timeout = NULL;
					// parsing complete
					if (!proto_parser_status) {
						// remove fd from event poll and fd proto table 
						uwsgi.async_proto_fd_table[interesting_fd] = NULL;
						event_queue_del_fd(uwsgi.async_queue, interesting_fd, event_queue_read());
						// put request in the runqueue
						runqueue_push(uwsgi.wsgi_req);
						continue;
					}
					else if (proto_parser_status < 0) {
						if (proto_parser_status == -1)
							uwsgi_log("error parsing request\n");
						uwsgi.async_proto_fd_table[interesting_fd] = NULL;
						close(interesting_fd);
						continue;
					}
					// re-add timer
					async_add_timeout(uwsgi.wsgi_req, uwsgi.shared->options[UWSGI_OPTION_SOCKET_TIMEOUT]);
					continue;
				}

				// app event
				uwsgi.wsgi_req = find_wsgi_req_by_fd(interesting_fd);
				// unknown fd, remove it (for safety)
				if (uwsgi.wsgi_req == NULL) {
					close(interesting_fd);
					continue;
				}

				// remove all the fd monitors and timeout
				while (uwsgi.wsgi_req->waiting_fds) {
					tmp_uaf = uwsgi.wsgi_req->waiting_fds;
					uwsgi.async_waiting_fd_table[tmp_uaf->fd] = NULL;
					event_queue_del_fd(uwsgi.async_queue, tmp_uaf->fd, tmp_uaf->event);
					uwsgi.wsgi_req->waiting_fds = tmp_uaf->next;
					free(tmp_uaf);
				}
				uwsgi.wsgi_req->waiting_fds = NULL;
				if (uwsgi.wsgi_req->async_timeout) {
					uwsgi_del_rb_timer(uwsgi.rb_async_timeouts, uwsgi.wsgi_req->async_timeout);
					free(uwsgi.wsgi_req->async_timeout);
					uwsgi.wsgi_req->async_timeout = NULL;
				}

				uwsgi.wsgi_req->async_ready_fd = 1;
				uwsgi.wsgi_req->async_last_ready_fd = interesting_fd;

				// put the request in the runqueue again
				runqueue_push(uwsgi.wsgi_req);
				// avoid managing other enqueued events...
				break;
			}
		}

		// event queue managed, give cpu to runqueue
		if (!current_request)
			current_request = uwsgi.async_runqueue;

		if (uwsgi.async_runqueue_cnt) {

			uwsgi.wsgi_req = current_request->wsgi_req;

			uwsgi.schedule_to_req();
			uwsgi.wsgi_req->switches++;

			next_async_request = current_request->next;
			// request ended ?
			if (uwsgi.wsgi_req->async_status <= UWSGI_OK) {
				// remove all the monitored fds and timeout
				while (uwsgi.wsgi_req->waiting_fds) {
					tmp_uaf = uwsgi.wsgi_req->waiting_fds;
					uwsgi.async_waiting_fd_table[tmp_uaf->fd] = NULL;
					event_queue_del_fd(uwsgi.async_queue, tmp_uaf->fd, tmp_uaf->event);
					uwsgi.wsgi_req->waiting_fds = tmp_uaf->next;
					free(tmp_uaf);
				}
				uwsgi.wsgi_req->waiting_fds = NULL;
				if (uwsgi.wsgi_req->async_timeout) {
					uwsgi_del_rb_timer(uwsgi.rb_async_timeouts, uwsgi.wsgi_req->async_timeout);
					free(uwsgi.wsgi_req->async_timeout);
					uwsgi.wsgi_req->async_timeout = NULL;
				}

				// remove from the list
				runqueue_remove(current_request);

				uwsgi_close_request(uwsgi.wsgi_req);

				// push wsgi_request in the unused stack
				uwsgi.async_queue_unused_ptr++;
				uwsgi.async_queue_unused[uwsgi.async_queue_unused_ptr] = uwsgi.wsgi_req;

			}
			else if (uwsgi.wsgi_req->waiting_fds || uwsgi.wsgi_req->async_timeout) {
				// remove this request from suspended list      
				runqueue_remove(current_request);
			}

			current_request = next_async_request;

		}


	}

}