void global_cached_io::process_all_requests()
{
	// We first process the completed requests from the disk.
	// It will add completed user requests and pending requests to queues
	// for further processing.
	while (!completed_disk_queue.is_empty()) {
		int num = completed_disk_queue.get_num_entries();
		stack_array<io_request> reqs(num);
		int ret = completed_disk_queue.fetch(reqs.data(), num);
		process_disk_completed_requests(reqs.data(), ret);
	}

	// Process the requests that are pending on the pages.
	// It may add completed user requests to queues for further processing. 
	if (!pending_requests.is_empty())
		handle_pending_requests();

	// Process buffered user requests.
	// It may add completed user requests to queues for further processing. 
	process_user_reqs();

	std::vector<io_request> requests;
	// Process the completed requests served in the cache directly.
	process_cached_reqs(requests);

	// Process completed user requests.
	process_completed_requests(requests);

	// Process requests issued in the user compute.
	// We gather the requests here so that they can be merged. However, we
	// only see this object's local collection of requests; merging requests
	// across different global_cached_io instances is still left to the OS's
	// elevator algorithm.
	access(requests.data(), requests.size(), NULL);

	// Processing the pending requests on the pages might issue
	// more I/O requests.
	flush_requests();
}
Example #2
/* Requires <poll.h>, <stdio.h>, <string.h>, and <errno.h>. */
int serve(struct server *srv)
{
	static const int CONTROL_PFD = 0;
	struct pollfd pfd[1];
	memset(pfd, 0, sizeof(pfd));

	pfd[CONTROL_PFD].fd = srv->ctl_fd;
	pfd[CONTROL_PFD].events = POLLIN;

	while (!srv->bquit) {
		/* Block until the control descriptor becomes readable. */
		int n = poll(pfd, sizeof(pfd) / sizeof(pfd[0]), -1);
		if (n < 0) {
			if (errno == EINTR)
				continue;	/* interrupted by a signal: retry */
			perror("poll");
			return -1;
		}
		if (n == 0)
			continue;	/* timeout: cannot happen with an infinite timeout */

		process_conns();
		process_client_reqs();
		process_user_reqs(&pfd[CONTROL_PFD], &pfd[CONTROL_PFD].fd);
	}
	return 0;
}
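
A minimal sketch of how this loop might be driven, assuming only the two
struct server fields visible above (ctl_fd and bquit). The pipe-based wakeup
and the helper names server_init and server_request_quit are illustrative
additions, not part of the original code.

#include <string.h>
#include <unistd.h>

struct server {
	int ctl_fd;	/* descriptor watched by serve() */
	int bquit;	/* set to non-zero to leave the loop */
};

/* Hypothetical setup: the read end of a pipe serves as the control fd. */
static int server_init(struct server *srv, int pipefd[2])
{
	if (pipe(pipefd) < 0)
		return -1;
	memset(srv, 0, sizeof(*srv));
	srv->ctl_fd = pipefd[0];
	return 0;
}

/* Hypothetical shutdown: set the flag, then write a byte to the pipe's write
 * end so poll() wakes up and serve() re-checks bquit on the next iteration. */
static void server_request_quit(struct server *srv, int wake_fd)
{
	srv->bquit = 1;
	(void)write(wake_fd, "q", 1);
}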