Example 1
// TODO: right now there is one channel (remote), in the future, the caller
// may specify local which will cause it to give up the core to do the work.
// creation of additional remote channel also allows the caller to prioritize
// work, because the default policy for the kernel is to roundrobin between them.
int async_syscall(arsc_channel_t* chan, syscall_req_t* req, syscall_desc_t** desc_ptr2)
{
	// Note that this assumes one global frontring (TODO)
	// abort if there is no room for our request.  ring size is currently 64.
	// we could spin til it's free, but that could deadlock if this same thread
	// is supposed to consume the requests it is waiting on later.
	syscall_desc_t* desc = malloc(sizeof(syscall_desc_t));
	if (!desc) {
		errno = ENOMEM;
		return -1;
	}
	desc->channel = chan;
	syscall_front_ring_t *fr = &(desc->channel->sysfr);
	//TODO: can do it locklessly using CAS, but could change with local async calls
	struct mcs_lock_qnode local_qn = {0};
	mcs_lock_lock(&(chan->aclock), &local_qn);
	if (RING_FULL(fr)) {
		/* drop the lock and the descriptor before bailing out */
		mcs_lock_unlock(&chan->aclock, &local_qn);
		free(desc);
		errno = EBUSY;
		return -1;
	}
	// req_prod_pvt comes in as the previously produced item.  need to
	// increment to the next available spot, which is the one we'll work on.
	// at some point, we need to listen for the responses.
	desc->idx = ++(fr->req_prod_pvt);
	syscall_req_t* r = RING_GET_REQUEST(fr, desc->idx);
	// CAS on the req->status perhaps
	req->status = REQ_alloc;

	memcpy(r, req, sizeof(syscall_req_t));
	r->status = REQ_ready;
	// push our updates to syscallfrontring.req_prod_pvt
	// note: it is ok to push without protection since it is atomic and kernel
	// won't process any requests until they are marked REQ_ready (also atomic)
	RING_PUSH_REQUESTS(fr);
	//cprintf("DEBUG: sring->req_prod: %d, sring->rsp_prod: %d\n", 
	mcs_lock_unlock(&desc->channel->aclock, &local_qn);
	*desc_ptr2 = desc;
	return 0;
}
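For reference, a hedged sketch of how a caller might handle the EBUSY back-pressure above; the wrapper function and the back-off strategy are placeholders, not part of the original code:

/* Hypothetical caller: retry while the shared ring reports EBUSY. */
static int submit_with_retry(arsc_channel_t *chan, syscall_req_t *req,
                             syscall_desc_t **desc_ptr)
{
	while (async_syscall(chan, req, desc_ptr) != 0) {
		if (errno != EBUSY)
			return -1;	/* real failure, give up */
		/* ring full: back off briefly and retry (strategy left to the caller) */
	}
	return 0;
}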
Example 2
struct blktap_request*
blktap_ring_make_request(struct blktap *tap)
{
	struct blktap_ring *ring = &tap->ring;
	struct blktap_request *request;
	int usr_idx;

	if (RING_FULL(&ring->ring))
		return ERR_PTR(-ENOSPC);

	request = blktap_request_alloc(tap);
	if (!request)
		return ERR_PTR(-ENOMEM);

	for (usr_idx = 0; usr_idx < BLKTAP_RING_SIZE; usr_idx++)
		if (!ring->pending[usr_idx])
			break;

	BUG_ON(usr_idx >= BLKTAP_RING_SIZE);

	request->tap     = tap;
	request->usr_idx = usr_idx;

	ring->pending[usr_idx] = request;
	ring->n_pending++;

	return request;
}
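A caller of blktap_ring_make_request() is expected to decode the ERR_PTR-encoded return value; a minimal sketch, assuming a hypothetical submit helper:

/* Hypothetical caller: IS_ERR()/PTR_ERR() decode the error returns above. */
static int blktap_try_make_request(struct blktap *tap,
				   struct blktap_request **out)
{
	struct blktap_request *request;

	request = blktap_ring_make_request(tap);
	if (IS_ERR(request))
		return PTR_ERR(request);	/* -ENOSPC (ring full) or -ENOMEM */

	*out = request;
	return 0;
}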
Example 3
static void kick_pending_request_queues(struct blkfront_info *info)
{
	if (!RING_FULL(&info->ring)) {
		/* Re-enable calldowns. */
		blk_start_queue(info->rq);
		/* Kick things off immediately. */
		do_blkif_request(info->rq);
	}
}
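kick_pending_request_queues() only restarts the queue once the ring is no longer full, so it is normally called from the response-handling path after slots have been freed; a hedged sketch (the lock and the surrounding handler are assumptions, not the driver's actual code):

static DEFINE_SPINLOCK(blkif_io_lock);		/* illustrative lock */

/* Hypothetical completion path: consume responses, then restart the queue. */
static void blkif_handle_responses(struct blkfront_info *info)
{
	unsigned long flags;

	spin_lock_irqsave(&blkif_io_lock, flags);
	/* ... RING_GET_RESPONSE() / end each completed request here ... */
	kick_pending_request_queues(info);	/* no-op while RING_FULL() */
	spin_unlock_irqrestore(&blkif_io_lock, flags);
}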
Example 4
static void blkfront_wait_slot(struct blkfront_dev *dev)
{
    /* Wait for a slot */
    if (RING_FULL(&dev->ring)) {
	unsigned long flags;
	DEFINE_WAIT(w);
	local_irq_save(flags);
	while (1) {
	    blkfront_aio_poll(dev);
	    if (!RING_FULL(&dev->ring))
		break;
	    /* Really no slot, go to sleep. */
	    add_waiter(w, blkfront_queue);
	    local_irq_restore(flags);
	    schedule();
	    local_irq_save(flags);
	}
	remove_waiter(w, blkfront_queue);
	local_irq_restore(flags);
    }
}
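blkfront_wait_slot() is meant to be called just before producing a request; a hedged sketch of a surrounding submit path (the copy-in and the notification hook are placeholders):

/* Hypothetical submit path: block until a slot is free, then produce into it. */
static void blkfront_submit(struct blkfront_dev *dev, blkif_request_t *src)
{
	blkif_request_t *req;
	RING_IDX i;
	int notify;

	blkfront_wait_slot(dev);			/* returns once !RING_FULL */
	i = dev->ring.req_prod_pvt++;
	req = RING_GET_REQUEST(&dev->ring, i);
	*req = *src;					/* fill the shared slot */
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&dev->ring, notify);
	if (notify) {
		/* send the event-channel notification here (API left out) */
	}
}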
Example 5
static int write_block(FILE *p, blkif_sector_t sector, size_t amt)
{
  static uint64_t next_reqid = 1;
  blkif_response_t *rsp;
  blkif_request_t *req;
  int notify, work_to_do;
  uint64_t reqid;
  RING_IDX i;

  /* wait until we can write something */
  while(RING_FULL(&p->ring)) runtime_block(1);

  /* write out the request */
  i = p->ring.req_prod_pvt++;
  req = RING_GET_REQUEST(&p->ring, i);
  memset(req, 0, sizeof(blkif_request_t));
  req->operation         = BLKIF_OP_WRITE;
  req->nr_segments       = 1;
  req->handle            = p->disk_handle;
  req->id                = reqid = next_reqid++;
  req->sector_number     = sector;
  req->seg[0].gref       = p->block_grant;
  req->seg[0].first_sect = 0;
  req->seg[0].last_sect  = (amt - 1) / 512;
  wmb();
  RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&p->ring, notify);
  if(notify) channel_send(p->chan);

  /* wait for it to be satisfied */
  do {
    while(!RING_HAS_UNCONSUMED_RESPONSES(&p->ring))
      runtime_block(1);
    i = p->ring.rsp_cons++;
    rsp = RING_GET_RESPONSE(&p->ring, i);
  } while(rsp->id != reqid);

  /* was it successful? */
  if(rsp->status != BLKIF_RSP_OKAY) {
    printf("PROFILING: Block write failed!\n");
    return 0;
  }

  /* we do writes one at a time, synchronously, so work_to_do should always
     be false */
  RING_FINAL_CHECK_FOR_RESPONSES(&p->ring, work_to_do);
  assert(!work_to_do);

  return 1;
}
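Note that write_block() is deliberately synchronous: it blocks while the ring is full, submits exactly one request, then consumes responses until its own id comes back, which is why the final RING_FINAL_CHECK_FOR_RESPONSES() can assert that no further work is pending.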
Example 6
static int scsifront_do_request(struct vscsifrnt_info *info,
				struct vscsifrnt_shadow *shadow)
{
	struct vscsiif_front_ring *ring = &(info->ring);
	struct vscsiif_request *ring_req;
	struct scsi_cmnd *sc = shadow->sc;
	uint32_t id;
	int i, notify;

	if (RING_FULL(&info->ring))
		return -EBUSY;

	id = scsifront_get_rqid(info);	/* use id in response */
	if (id >= VSCSIIF_MAX_REQS)
		return -EBUSY;

	info->shadow[id] = shadow;
	shadow->rqid = id;

	ring_req = RING_GET_REQUEST(&(info->ring), ring->req_prod_pvt);
	ring->req_prod_pvt++;

	ring_req->rqid        = id;
	ring_req->act         = shadow->act;
	ring_req->ref_rqid    = shadow->ref_rqid;
	ring_req->nr_segments = shadow->nr_segments;

	ring_req->id      = sc->device->id;
	ring_req->lun     = sc->device->lun;
	ring_req->channel = sc->device->channel;
	ring_req->cmd_len = sc->cmd_len;

	BUG_ON(sc->cmd_len > VSCSIIF_MAX_COMMAND_SIZE);

	memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len);

	ring_req->sc_data_direction   = (uint8_t)sc->sc_data_direction;
	ring_req->timeout_per_command = sc->request->timeout / HZ;

	for (i = 0; i < (shadow->nr_segments & ~VSCSIIF_SG_GRANT); i++)
		ring_req->seg[i] = shadow->seg[i];

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify);
	if (notify)
		notify_remote_via_irq(info->irq);

	return 0;
}
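scsifront_do_request() signals a full ring (or an exhausted request-id pool) with -EBUSY; a hedged sketch of how a queuecommand-style caller might translate that for the SCSI midlayer (the wrapper itself is hypothetical):

/* Hypothetical wrapper: translate -EBUSY into midlayer back-pressure. */
static int scsifront_submit(struct vscsifrnt_info *info,
			    struct vscsifrnt_shadow *shadow)
{
	int err = scsifront_do_request(info, shadow);

	if (err == -EBUSY)
		return SCSI_MLQUEUE_HOST_BUSY;	/* ask the midlayer to retry */
	return err;
}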
Example 7
/*
 * do_blkif_request
 *  read a block; request is in a request queue
 */
static void do_blkif_request(struct request_queue *rq)
{
	struct blkfront_info *info = NULL;
	struct request *req;
	int queued;

	pr_debug("Entered do_blkif_request\n");

	queued = 0;

	while ((req = blk_peek_request(rq)) != NULL) {
		info = req->rq_disk->private_data;

		if (RING_FULL(&info->ring))
			goto wait;

		blk_start_request(req);

		if (!blk_fs_request(req)) {
			__blk_end_request_all(req, -EIO);
			continue;
		}

		pr_debug("do_blk_req %p: cmd %p, sec %lx, "
			 "(%u/%u) buffer:%p [%s]\n",
			 req, req->cmd, (unsigned long)blk_rq_pos(req),
			 blk_rq_cur_sectors(req), blk_rq_sectors(req),
			 req->buffer, rq_data_dir(req) ? "write" : "read");

		if (blkif_queue_request(req)) {
			blk_requeue_request(rq, req);
wait:
			/* Avoid pointless unplugs. */
			blk_stop_queue(rq);
			break;
		}

		queued++;
	}

	if (queued != 0)
		flush_requests(info);
}
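This is the producer side of the pattern in Example 3: when RING_FULL() hits, the queue is stopped with blk_stop_queue(), and kick_pending_request_queues() later restarts it (re-entering do_blkif_request()) once responses have freed ring slots.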
Example 8
/* walk dirty map and enqueue read requests.
 * returns:  0 when entire bitmap has been enqueued,
 *           1 when the ring is full
 *          -1 on error
 */
static int writelog_enqueue_requests(struct writelog* wl)
{
  struct disk_range* range = wl->shm;
  log_request_t* req;

  for (range = wl->cur; (void*)range < bmend(wl->shm); range++) {
    if (!range->count)
      break;

    if (RING_FULL(&wl->fring))
	break;

    /* insert range into request stream */
    /* 1. get next request slot from ring */
    /* 2. ensure enough shm space is available */
    
    BDPRINTF("enqueueing dirty extent: %"PRIu64":%u (ring space: %d/%d)",
	     range->sector, range->count, RING_FREE_REQUESTS(&wl->fring),
	     RING_SIZE(&wl->fring));

    req = RING_GET_REQUEST(&wl->fring, wl->fring.req_prod_pvt);

    req->sector = range->sector;
    req->count = range->count;
    /* ... */
    req->offset = 0;

    wl->fring.req_prod_pvt++;
    wl->inflight++;
  }

  wl->cur = range;

  if (range->count)
    return 1;

  return 0;
}
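The return value of 1 tells the caller that the ring filled before the bitmap was exhausted; a hedged sketch of a surrounding flush loop (the push, notify, and drain details are placeholders):

/* Hypothetical flush loop: keep enqueueing until the whole bitmap is pushed. */
static int writelog_flush(struct writelog *wl)
{
	int more;

	do {
		more = writelog_enqueue_requests(wl);
		if (more < 0)
			return -1;			/* error from enqueue */
		RING_PUSH_REQUESTS(&wl->fring);
		/* notify the backend and wait for responses here so that
		 * RING_FULL() can clear before the next pass (details omitted) */
	} while (more == 1);

	return 0;
}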
Example 9
/*
 * blkif_queue_request
 *
 * request block io
 *
 * id: for guest use only.
 * operation: BLKIF_OP_{READ,WRITE,PROBE}
 * buffer: buffer to read/write into. this should be a
 *   virtual address in the guest os.
 */
int ixp_queue_request(struct app_request *app_req, void *metadata)
{
	struct ixpfront_info *info = (struct ixpfront_info *) metadata;
	unsigned long buffer_mfn;
	struct ixp_request *ring_req;
  	char *req_page = 0, *curr_pos;
	unsigned long id;
	int ref, err;
	grant_ref_t gref_head;

	if (unlikely(info->connected != IXP_STATE_CONNECTED))
		return 1;

  	if (RING_FULL(&info->ring)) {
		printk(KERN_ERR "%s:Ring full - returning backpressure\n", __FUNCTION__);
		return 1;
	}

	if (gnttab_alloc_grant_references(
		IXPIF_MAX_SEGMENTS_PER_REQUEST, &gref_head) < 0) {
		/*gnttab_request_free_callback(
			&info->callback,
			ixp_restart_queue_callback,
			info,
			IXP_MAX_SEGMENTS_PER_REQUEST);*/
		return 1; 
	}

	/* Fill out a communications ring structure. */
	ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
	id = get_id_from_freelist(info);

	ring_req->id = id;
	ring_req->handle = info->handle;

	ring_req->operation = IXP_OP_3DES_ENCRYPT;

	ring_req->nr_segments = 1;
	BUG_ON(ring_req->nr_segments > IXPIF_MAX_SEGMENTS_PER_REQUEST);

	req_page = (char *)__get_free_page(GFP_NOIO | __GFP_HIGH);

	if(req_page == 0) {
	  printk(KERN_ERR "ixp_queue_request: error allocating memory\n");
	  gnttab_free_grant_references(gref_head);	/* release the grants reserved above */
	  return 1;
	}

	((struct des_request *)req_page)->key_size = app_req->key_size;
	((struct des_request *)req_page)->iv_size = app_req->iv_size;
	((struct des_request *)req_page)->msg_size = app_req->msg_size;

	curr_pos = req_page + sizeof(struct des_request);
	memcpy(curr_pos, app_req->key, app_req->key_size);
	curr_pos += app_req->key_size;

	memcpy(curr_pos, app_req->iv, app_req->iv_size);
	curr_pos += app_req->iv_size;

	memcpy(curr_pos, app_req->msg, app_req->msg_size);
	curr_pos += app_req->msg_size;

	buffer_mfn = virt_to_mfn(req_page);

 	/* install a grant reference. */
	ref = gnttab_claim_grant_reference(&gref_head);
  	BUG_ON(ref == -ENOSPC);

	gnttab_grant_foreign_access_ref(
	      ref,
	      info->xbdev->otherend_id,
	      buffer_mfn,
	      0);
	
	info->shadow[id].r_params.presp = app_req->presp;
	info->shadow[id].r_params.callbk_tag = app_req->callbk_tag;
	info->shadow[id].frame[0] = mfn_to_pfn(buffer_mfn);
	info->shadow[id].req_page = req_page;

	ring_req->seg[0] =
	      (struct ixp_request_segment) {
		.gref       = ref
	      };

	info->ring.req_prod_pvt++;

	/* Keep a private copy so we can reissue requests when recovering. */
	info->shadow[id].req = *ring_req;

  	flush_requests(info);

	//gnttab_free_grant_references(gref_head);

	return 0;
}
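Taken together, the examples share one front-ring producer pattern; the following distillation is a sketch only, with the my_* ring and request types standing in for whatever DEFINE_RING_TYPES() would generate in a real driver and the notification left as a comment:

/* Generic producer: check for space, fill a slot, publish, maybe notify. */
static int ring_produce(my_front_ring_t *fring, const my_request_t *src)
{
	my_request_t *slot;
	int notify;

	if (RING_FULL(fring))
		return -EBUSY;				/* back-pressure the caller */

	slot = RING_GET_REQUEST(fring, fring->req_prod_pvt);
	*slot = *src;					/* copy into the shared page */
	fring->req_prod_pvt++;

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(fring, notify);
	if (notify) {
		/* send the event-channel / irq notification here */
	}

	return 0;
}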