Example #1
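This is the data-mapping path of the Xen paravirtual SCSI frontend (it matches map_data_for_request() in the kernel's drivers/scsi/xen-scsifront.c). It counts how many grant references the command's scatter-gather list needs, switches to an externally allocated segment table when the ring-embedded one is too small, and grants the backend access to every data page, read-only when the transfer direction is DMA_TO_DEVICE.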
static int map_data_for_request(struct vscsifrnt_info *info,
				struct scsi_cmnd *sc,
				struct vscsiif_request *ring_req,
				struct vscsifrnt_shadow *shadow)
{
	grant_ref_t gref_head;
	struct page *page;
	int err, ref, ref_cnt = 0;
	int grant_ro = (sc->sc_data_direction == DMA_TO_DEVICE);
	unsigned int i, off, len, bytes;
	unsigned int data_len = scsi_bufflen(sc);
	unsigned int data_grants = 0, seg_grants = 0;
	struct scatterlist *sg;
	unsigned long mfn;
	struct scsiif_request_segment *seg;

	ring_req->nr_segments = 0;
	if (sc->sc_data_direction == DMA_NONE || !data_len)
		return 0;

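	/* One grant is needed for every memory page each sg element spans. */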
	scsi_for_each_sg(sc, sg, scsi_sg_count(sc), i)
		data_grants += PFN_UP(sg->offset + sg->length);

	if (data_grants > VSCSIIF_SG_TABLESIZE) {
		if (data_grants > info->host->sg_tablesize) {
			shost_printk(KERN_ERR, info->host, KBUILD_MODNAME
			     "Unable to map request_buffer for command!\n");
			return -E2BIG;
		}
		seg_grants = vscsiif_grants_sg(data_grants);
		shadow->sg = kcalloc(data_grants,
			sizeof(struct scsiif_request_segment), GFP_ATOMIC);
		if (!shadow->sg)
			return -ENOMEM;
	}
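	/* Use the external segment table when one was allocated; the GNU
	 * "?:" shorthand falls back to the ring-embedded array otherwise. */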
	seg = shadow->sg ? : ring_req->seg;

	err = gnttab_alloc_grant_references(seg_grants + data_grants,
					    &gref_head);
	if (err) {
		kfree(shadow->sg);
		shost_printk(KERN_ERR, info->host, KBUILD_MODNAME
			     "gnttab_alloc_grant_references() error\n");
		return -ENOMEM;
	}

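	/*
	 * The pages holding the external segment table must themselves be
	 * granted (read-only) so the backend can fetch the segment
	 * descriptors straight from frontend memory.
	 */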
	if (seg_grants) {
		page = virt_to_page(seg);
		off = (unsigned long)seg & ~PAGE_MASK;
		len = sizeof(struct scsiif_request_segment) * data_grants;
		while (len > 0) {
			bytes = min_t(unsigned int, len, PAGE_SIZE - off);

			ref = gnttab_claim_grant_reference(&gref_head);
			BUG_ON(ref == -ENOSPC);

			mfn = pfn_to_mfn(page_to_pfn(page));
			gnttab_grant_foreign_access_ref(ref,
				info->dev->otherend_id, mfn, 1);
			shadow->gref[ref_cnt] = ref;
			ring_req->seg[ref_cnt].gref   = ref;
			ring_req->seg[ref_cnt].offset = (uint16_t)off;
			ring_req->seg[ref_cnt].length = (uint16_t)bytes;

			page++;
			len -= bytes;
			off = 0;
			ref_cnt++;
		}
		BUG_ON(seg_grants < ref_cnt);
		seg_grants = ref_cnt;
	}

	scsi_for_each_sg(sc, sg, scsi_sg_count(sc), i) {
		page = sg_page(sg);
		off = sg->offset;
		len = sg->length;

		while (len > 0 && data_len > 0) {
			/*
			 * sg sends a scatterlist that is larger than
			 * the data_len it wants transferred for certain
			 * IO sizes.
			 */
			bytes = min_t(unsigned int, len, PAGE_SIZE - off);
			bytes = min(bytes, data_len);

			ref = gnttab_claim_grant_reference(&gref_head);
			BUG_ON(ref == -ENOSPC);

			mfn = pfn_to_mfn(page_to_pfn(page));
			gnttab_grant_foreign_access_ref(ref,
				info->dev->otherend_id, mfn, grant_ro);

			shadow->gref[ref_cnt] = ref;
			seg->gref   = ref;
			seg->offset = (uint16_t)off;
			seg->length = (uint16_t)bytes;

			page++;
			seg++;
			len -= bytes;
			data_len -= bytes;
			off = 0;
			ref_cnt++;
		}
	}

	if (seg_grants)
		ring_req->nr_segments = VSCSIIF_SG_GRANT | seg_grants;
	else
		ring_req->nr_segments = (uint8_t)ref_cnt;
	shadow->nr_grants = ref_cnt;

	return 0;
}
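For completeness, every grant taken above has to be revoked once the backend's response arrives. A minimal sketch of that completion-side counterpart, assuming the same shadow bookkeeping (the function name here is illustrative; the real driver does the equivalent in its response handler):

static void unmap_data_for_response(struct vscsifrnt_info *info,
				    struct vscsifrnt_shadow *shadow)
{
	unsigned int i;

	/* Revoke every grant handed out by map_data_for_request(). */
	for (i = 0; i < shadow->nr_grants; i++) {
		if (unlikely(gnttab_query_foreign_access(shadow->gref[i])))
			shost_printk(KERN_ALERT, info->host, KBUILD_MODNAME
				     "grant still in use by backend\n");
		gnttab_end_foreign_access(shadow->gref[i], 0, 0UL);
	}

	/* Drop the external segment table, if one was allocated. */
	kfree(shadow->sg);
	shadow->sg = NULL;
}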
Example #2
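The second example applies the same grant-table API in a Xen split-driver frontend for an IXP crypto device: the request (key, IV and message for a 3DES encryption) is marshalled into a single freshly allocated page, the backend is granted read-write access to that page, and a one-segment request is pushed onto the shared ring, with a shadow copy kept so requests can be reissued after recovery.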
/*
 * ixp_queue_request
 *
 * Queue a 3DES encryption request to the backend.
 *
 * app_req: key, IV and message to encrypt, plus completion parameters.
 * metadata: the ixpfront_info of the device the request is issued on.
 *
 * Returns 0 on success, 1 if the request cannot be queued right now.
 */
int ixp_queue_request(struct app_request *app_req, void *metadata)
{
	struct ixpfront_info *info = (struct ixpfront_info *) metadata;
	unsigned long buffer_mfn;
	struct ixp_request *ring_req;
	char *req_page = NULL, *curr_pos;
	unsigned long id;
	int ref;
	grant_ref_t gref_head;

	if (unlikely(info->connected != IXP_STATE_CONNECTED))
		return 1;

	if (RING_FULL(&info->ring)) {
		printk(KERN_ERR "%s: ring full - returning backpressure\n", __func__);
		return 1;
	}

	if (gnttab_alloc_grant_references(
		IXPIF_MAX_SEGMENTS_PER_REQUEST, &gref_head) < 0) {
		/*
		 * TODO: register ixp_restart_queue_callback via
		 * gnttab_request_free_callback() so the request is
		 * retried once grant references become available.
		 */
		return 1;
	}

	/* The marshalled request (header, key, IV, message) must fit in
	 * the single page granted below. */
	if (sizeof(struct des_request) + app_req->key_size +
	    app_req->iv_size + app_req->msg_size > PAGE_SIZE) {
		printk(KERN_ERR "%s: request too large for one page\n", __func__);
		gnttab_free_grant_references(gref_head);
		return 1;
	}

	req_page = (char *)__get_free_page(GFP_NOIO | __GFP_HIGH);
	if (req_page == NULL) {
		printk(KERN_ERR "%s: error allocating memory\n", __func__);
		gnttab_free_grant_references(gref_head);
		return 1;
	}

	/* Fill out a communications ring structure. */
	ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
	id = get_id_from_freelist(info);

	ring_req->id = id;
	ring_req->handle = info->handle;
	ring_req->operation = IXP_OP_3DES_ENCRYPT;

	ring_req->nr_segments = 1;
	BUG_ON(ring_req->nr_segments > IXPIF_MAX_SEGMENTS_PER_REQUEST);

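	/* Lay out the request in the page: a des_request header followed
	 * by the key, IV and message bytes, packed back to back. */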
	((struct des_request *)req_page)->key_size = app_req->key_size;
	((struct des_request *)req_page)->iv_size = app_req->iv_size;
	((struct des_request *)req_page)->msg_size = app_req->msg_size;

	curr_pos = req_page + sizeof(struct des_request);
	memcpy(curr_pos, app_req->key, app_req->key_size);
	curr_pos += app_req->key_size;

	memcpy(curr_pos, app_req->iv, app_req->iv_size);
	curr_pos += app_req->iv_size;

	memcpy(curr_pos, app_req->msg, app_req->msg_size);
	curr_pos += app_req->msg_size;

	buffer_mfn = virt_to_mfn(req_page);

	/* Install a grant reference for the request page. */
	ref = gnttab_claim_grant_reference(&gref_head);
	BUG_ON(ref == -ENOSPC);

	gnttab_grant_foreign_access_ref(ref, info->xbdev->otherend_id,
					buffer_mfn, 0 /* read-write */);
	
	info->shadow[id].r_params.presp = app_req->presp;
	info->shadow[id].r_params.callbk_tag = app_req->callbk_tag;
	info->shadow[id].frame[0] = mfn_to_pfn(buffer_mfn);
	info->shadow[id].req_page = req_page;

	ring_req->seg[0] =
	      (struct ixp_request_segment) {
		.gref       = ref
	      };

	info->ring.req_prod_pvt++;

	/* Keep a private copy so we can reissue requests when recovering. */
	info->shadow[id].req = *ring_req;

	flush_requests(info);

	/* Return the grant references we allocated but never claimed. */
	gnttab_free_grant_references(gref_head);

	return 0;
}
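The example omits the completion side. A minimal sketch under the same structure, with assumed names (ixp_complete_request and struct ixp_response are illustrative, not from the original source): the grant must be revoked and the request page freed before the shadow slot is recycled.

static void ixp_complete_request(struct ixpfront_info *info,
				 struct ixp_response *resp)
{
	unsigned long id = resp->id;	/* assumed to echo ring_req->id */
	grant_ref_t ref = info->shadow[id].req.seg[0].gref;

	/* Revoke the backend's access before the page is touched again. */
	gnttab_end_foreign_access(ref, 0, 0UL);

	free_page((unsigned long)info->shadow[id].req_page);
	info->shadow[id].req_page = NULL;
}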