Example #1
void global_cached_io::process_user_reqs()
{
	std::vector<thread_safe_page *> dirty_pages;

	// If we haven't finished processing a request, we need to continue
	// processing it.
	if (!processing_req.is_empty())
		process_user_req(dirty_pages, NULL);

	// Now we can process the remaining requests.
	// If processing_req isn't empty, it's likely because there are too many
	// referenced pages in the cache and we can't evict a page from a page set.
	// So we can stop processing the remaining requests for now.
	while (processing_req.is_empty() && !user_requests.is_empty()) {
		io_request req = user_requests.pop_front();
		// We don't allow the user's requests to be extended requests.
		assert(!req.is_extended_req());
		// TODO right now it only supports single-buf requests.
		assert(req.get_num_bufs() == 1);

		if (req.is_flush()) {
			num_completed_areqs.inc(1);
			continue;
		}
		processing_req.init(req);
		process_user_req(dirty_pages, NULL);
	}

	get_global_cache()->mark_dirty_pages(dirty_pages.data(),
				dirty_pages.size(), underlying);

	flush_requests();
}
Example #2
static int ixp_recover(struct ixpfront_info *info)
{
	int i;
	struct ixp_request *req;
	struct ixp_shadow *copy;
	int j;

	/* Stage 1: Make a safe copy of the shadow state. */
	copy = kmalloc(sizeof(info->shadow),
		       GFP_NOIO | __GFP_REPEAT | __GFP_HIGH);
	if (!copy)
		return -ENOMEM;
	memcpy(copy, info->shadow, sizeof(info->shadow));

	/* Stage 2: Set up free list. */
	memset(&info->shadow, 0, sizeof(info->shadow));
	for (i = 0; i < IXP_RING_SIZE; i++)
		info->shadow[i].req.id = i+1;
	info->shadow_free = info->ring.req_prod_pvt;
	info->shadow[IXP_RING_SIZE-1].req.id = 0x0fffffff;

	/* Stage 3: Find pending requests and requeue them. */
	for (i = 0; i < IXP_RING_SIZE; i++) {
		/* Not in use? */
		if (copy[i].req_page == NULL)
			continue;

		/* Grab a request slot and copy shadow state into it. */
		req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
		*req = copy[i].req;

		/* We get a new request id, and must reset the shadow state. */
		req->id = get_id_from_freelist(info);
		memcpy(&info->shadow[req->id], &copy[i], sizeof(copy[i]));

		/* Rewrite any grant references invalidated by susp/resume. */
		for (j = 0; j < req->nr_segments; j++)
			gnttab_grant_foreign_access_ref(
				req->seg[j].gref,
				info->xbdev->otherend_id,
				pfn_to_mfn(info->shadow[req->id].frame[j]),
				0);
		info->shadow[req->id].req = *req;

		info->ring.req_prod_pvt++;
	}

	kfree(copy);

	xenbus_switch_state(info->xbdev, XenbusStateConnected);

	/* Now safe for us to use the shared ring */
	info->connected = IXP_STATE_CONNECTED;

	/* Send off requeued requests */
	flush_requests(info);

	return 0;
}
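
get_id_from_freelist() is called in Stage 3 above (and again in Example #7), but its body is not part of this listing. In the xen-blkfront driver that this frontend closely mirrors, the free list rebuilt in Stage 2 is consumed by a small helper along the following lines; the body below is a sketch under that assumption, not code taken from this tree.

/* Sketch (assumed implementation): pop the next free shadow slot.  Each
 * free slot's req.id stores the index of the following free slot, which
 * is exactly the chain Stage 2 above rebuilds. */
static unsigned long get_id_from_freelist(struct ixpfront_info *info)
{
	unsigned long free = info->shadow_free;

	BUG_ON(free >= IXP_RING_SIZE);
	info->shadow_free = info->shadow[free].req.id;
	info->shadow[free].req.id = 0x0fffffee; /* mark the slot as in use */
	return free;
}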
Example #3
void global_cached_io::access(io_request *requests, int num, io_status *status)
{
	if (num == 0)
		return;

	ASSERT_EQ(get_thread(), thread::get_curr_thread());
	num_issued_areqs.inc(num);

	bool syncd = false;
	std::vector<thread_safe_page *> dirty_pages;

	if (!processing_req.is_empty()) {
		process_user_req(dirty_pages, NULL);
		if (!processing_req.is_empty()) {
			user_requests.add(requests, num);
			goto end;
		}
	}

	for (int i = 0; i < num; i++) {
		// We don't allow the user's requests to be extended requests.
		assert(!requests[i].is_extended_req());
		// TODO right now it only supports single-buf requests.
		assert(requests[i].get_num_bufs() == 1);

		if (requests[i].is_flush()) {
			syncd = true;
			num_completed_areqs.inc(1);
			continue;
		}
		else if (requests[i].is_sync()) {
			syncd = true;
		}
		assert(processing_req.is_empty());
		processing_req.init(requests[i]);
		io_status *stat_p = NULL;
		if (status)
			stat_p = &status[i];
		process_user_req(dirty_pages, stat_p);
		// If we can't process all the requests, queue the remaining ones.
		if (!processing_req.is_empty() && i < num - 1) {
			user_requests.add(&requests[i + 1], num - i - 1);
			break;
		}
	}

end:
	get_global_cache()->mark_dirty_pages(dirty_pages.data(),
				dirty_pages.size(), underlying);

	if (syncd)
		flush_requests();
}
Example #4
/**
 * We wait for at least the specified number of requests to complete.
 */
int global_cached_io::wait4complete(int num_to_complete)
{
	flush_requests();
	int pending = num_pending_ios();
	num_to_complete = min(pending, num_to_complete);

	process_all_requests();
	while (pending - num_pending_ios() < num_to_complete) {
		// We only wait when there are pending requests in the underlying IO.
		if (num_to_underlying.get() - num_from_underlying.get() > 0) {
			get_thread()->wait();
		}
		process_all_requests();
	}
	return pending - num_pending_ios();
}
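
Together with access() from Example #3, this supports the usual issue-then-wait pattern. The caller below is hypothetical (the construction of the global_cached_io instance and of the io_request array is not shown in these examples); only access(), wait4complete() and num_pending_ios() come from the code above.

// Hypothetical caller: issue a batch of asynchronous requests, then block
// until at least half of them have completed.
void issue_and_wait(global_cached_io &io, io_request reqs[], int num)
{
	// Hand the whole batch to the cached IO layer.  Requests that cannot
	// be processed immediately are buffered internally by access().
	io.access(reqs, num, NULL);

	// Block until at least num / 2 of the requests finish.  wait4complete()
	// caps the target at num_pending_ios(), so it never waits for more
	// completions than are actually outstanding.
	io.wait4complete(num / 2);
}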
Example #5
/*
 * do_blkif_request
 *  read a block; request is in a request queue
 */
static void do_blkif_request(struct request_queue *rq)
{
	struct blkfront_info *info = NULL;
	struct request *req;
	int queued;

	pr_debug("Entered do_blkif_request\n");

	queued = 0;

	while ((req = blk_peek_request(rq)) != NULL) {
		info = req->rq_disk->private_data;

		if (RING_FULL(&info->ring))
			goto wait;

		blk_start_request(req);

		if (!blk_fs_request(req)) {
			__blk_end_request_all(req, -EIO);
			continue;
		}

		pr_debug("do_blk_req %p: cmd %p, sec %lx, "
			 "(%u/%u) buffer:%p [%s]\n",
			 req, req->cmd, (unsigned long)blk_rq_pos(req),
			 blk_rq_cur_sectors(req), blk_rq_sectors(req),
			 req->buffer, rq_data_dir(req) ? "write" : "read");

		if (blkif_queue_request(req)) {
			blk_requeue_request(rq, req);
wait:
			/* Avoid pointless unplugs. */
			blk_stop_queue(rq);
			break;
		}

		queued++;
	}

	if (queued != 0)
		flush_requests(info);
}
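
flush_requests(info) itself does not appear in this listing. In the mainline xen-blkfront driver that do_blkif_request() comes from, it is a small helper roughly like the sketch below: it publishes the privately produced requests and notifies the backend only when a notification is actually needed. Treat this as an illustration of what the call does, not as the exact code of this tree.

/* Sketch of the helper called above: push the requests produced through
 * req_prod_pvt onto the shared ring, then send an event-channel
 * notification only if the ring macros report that the backend needs one. */
static inline void flush_requests(struct blkfront_info *info)
{
	int notify;

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->ring, notify);

	if (notify)
		notify_remote_via_irq(info->irq);
}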
Example #6
void global_cached_io::process_all_requests()
{
	// We first process the completed requests from the disk.
	// It will add completed user requests and pending requests to queues
	// for further processing.
	while (!completed_disk_queue.is_empty()) {
		int num = completed_disk_queue.get_num_entries();
		stack_array<io_request> reqs(num);
		int ret = completed_disk_queue.fetch(reqs.data(), num);
		process_disk_completed_requests(reqs.data(), ret);
	}

	// Process the requests that are pending on the pages.
	// It may add completed user requests to queues for further processing. 
	if (!pending_requests.is_empty())
		handle_pending_requests();

	// Process buffered user requests.
	// It may add completed user requests to queues for further processing. 
	process_user_reqs();

	std::vector<io_request> requests;
	// Process the completed requests served in the cache directly.
	process_cached_reqs(requests);

	// Process completed user requests.
	process_completed_requests(requests);

	// Process requests issued in the user compute.
	// We try to gather all requests so we can merge them. However, we only
	// have the local collection of the requests. We still need to rely on
	// the OS's elevator algorithm to merge the requests from different
	// global_cached_io.
	access(requests.data(), requests.size(), NULL);

	// Processing the pending requests on the pages might issue
	// more I/O requests.
	flush_requests();
}
Example #7
/*
 * ixp_queue_request
 *
 * Queue a 3DES encryption request on the shared ring.
 *
 * app_req: the key, IV and message buffers to encrypt, plus the
 *   completion parameters (presp, callbk_tag) recorded in the shadow
 *   state for the response handler.
 * metadata: the ixpfront_info instance the request is issued against.
 *
 * Returns 0 on success, or 1 if the request cannot be queued right now
 * (frontend not connected, ring full, or no grant references/memory).
 */
int ixp_queue_request(struct app_request *app_req, void *metadata)
{
	struct ixpfront_info *info = (struct ixpfront_info *) metadata;
	unsigned long buffer_mfn;
	struct ixp_request *ring_req;
	char *req_page = NULL, *curr_pos;
	unsigned long id;
	int ref, err;
	grant_ref_t gref_head;

	if (unlikely(info->connected != IXP_STATE_CONNECTED))
		return 1;

	if (RING_FULL(&info->ring)) {
		printk(KERN_ERR "%s: Ring full - returning backpressure\n", __func__);
		return 1;
	}

	if (gnttab_alloc_grant_references(
		IXPIF_MAX_SEGMENTS_PER_REQUEST, &gref_head) < 0) {
		/*gnttab_request_free_callback(
			&info->callback,
			ixp_restart_queue_callback,
			info,
			IXP_MAX_SEGMENTS_PER_REQUEST);*/
		return 1; 
	}

	/* Fill out a communications ring structure. */
	ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
	id = get_id_from_freelist(info);

	ring_req->id = id;
	ring_req->handle = info->handle;

	ring_req->operation = IXP_OP_3DES_ENCRYPT;

	ring_req->nr_segments = 1;
	BUG_ON(ring_req->nr_segments > IXPIF_MAX_SEGMENTS_PER_REQUEST);

	req_page = (char *)__get_free_page(GFP_NOIO | __GFP_HIGH);
	if (req_page == NULL) {
		printk(KERN_ERR "ixp_queue_request: error allocating memory\n");
		return 1;
	}

	((struct des_request *)req_page)->key_size = app_req->key_size;
	((struct des_request *)req_page)->iv_size = app_req->iv_size;
	((struct des_request *)req_page)->msg_size = app_req->msg_size;

	curr_pos = req_page + sizeof(struct des_request);
	memcpy(curr_pos, app_req->key, app_req->key_size);
	curr_pos += app_req->key_size;

	memcpy(curr_pos, app_req->iv, app_req->iv_size);
	curr_pos += app_req->iv_size;

	memcpy(curr_pos, app_req->msg, app_req->msg_size);
	curr_pos += app_req->msg_size;

	buffer_mfn = virt_to_mfn(req_page);

	/* Install a grant reference. */
	ref = gnttab_claim_grant_reference(&gref_head);
	BUG_ON(ref == -ENOSPC);

	gnttab_grant_foreign_access_ref(
	      ref,
	      info->xbdev->otherend_id,
	      buffer_mfn,
	      0);
	
	info->shadow[id].r_params.presp = app_req->presp;
	info->shadow[id].r_params.callbk_tag = app_req->callbk_tag;
	info->shadow[id].frame[0] = mfn_to_pfn(buffer_mfn);
	info->shadow[id].req_page = req_page;

	ring_req->seg[0] =
	      (struct ixp_request_segment) {
		.gref       = ref
	      };

	info->ring.req_prod_pvt++;

	/* Keep a private copy so we can reissue requests when recovering. */
	info->shadow[id].req = *ring_req;

	flush_requests(info);

	//gnttab_free_grant_references(gref_head);

	return 0;
}