Example #1
static int
virtio_net_alloc_bufs(void)
{
	data_vir = alloc_contig(PACKET_BUF_SZ, 0, &data_phys);

	if (!data_vir)
		return ENOMEM;

	hdrs_vir = alloc_contig(BUF_PACKETS * sizeof(hdrs_vir[0]),
				 0, &hdrs_phys);

	if (!hdrs_vir) {
		free_contig(data_vir, PACKET_BUF_SZ);
		return ENOMEM;
	}

	packets = malloc(BUF_PACKETS * sizeof(packets[0]));

	if (!packets) {
		free_contig(data_vir, PACKET_BUF_SZ);
		free_contig(hdrs_vir, BUF_PACKETS * sizeof(hdrs_vir[0]));
		return ENOMEM;
	}

	memset(data_vir, 0, PACKET_BUF_SZ);
	memset(hdrs_vir, 0, BUF_PACKETS * sizeof(hdrs_vir[0]));
	memset(packets, 0, BUF_PACKETS * sizeof(packets[0]));

	return OK;
}
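A hypothetical teardown counterpart (not in the source) helps illustrate the pairing rules above: buffers from alloc_contig() must be released with free_contig() and their exact size, while the malloc()'d array goes back through free().

static void
virtio_net_free_bufs(void)
{
	/* Release in reverse order of allocation; note the mixed
	 * allocators: free() for malloc(), free_contig() for alloc_contig().
	 */
	free(packets);
	free_contig(hdrs_vir, BUF_PACKETS * sizeof(hdrs_vir[0]));
	free_contig(data_vir, PACKET_BUF_SZ);
}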
Example #2
File: atl2.c Project: ssinghi/minix
/*===========================================================================*
 *				atl2_alloc_dma				     *
 *===========================================================================*/
static int atl2_alloc_dma(void)
{
	/* Allocate DMA ring buffers.
	 */

	state.txd_base = alloc_contig(ATL2_TXD_BUFSIZE,
		AC_ALIGN4K, &state.txd_phys);
	state.txs_base = alloc_contig(ATL2_TXS_COUNT * sizeof(u32_t),
		AC_ALIGN4K, &state.txs_phys);

	/* The data buffer in each RxD descriptor must be 128-byte aligned.
	 * The two Tx buffers merely require a 4-byte start alignment.
	 */
	state.rxd_align = 128 - offsetof(rxd_t, data);
	state.rxd_base_u =
		alloc_contig(state.rxd_align + ATL2_RXD_COUNT * ATL2_RXD_SIZE,
		AC_ALIGN4K, &state.rxd_phys);

	/* Unlike mmap, alloc_contig returns NULL on failure. */
	if (!state.txd_base || !state.txs_base || !state.rxd_base_u)
		return ENOMEM;

	state.rxd_base = (rxd_t *) (state.rxd_base_u + state.rxd_align);
	state.rxd_phys += state.rxd_align;

	/* Zero out just in case. */
	memset(state.txd_base, 0, ATL2_TXD_BUFSIZE);
	memset(state.txs_base, 0, ATL2_TXS_COUNT * sizeof(u32_t));
	memset(state.rxd_base, 0, ATL2_RXD_COUNT * ATL2_RXD_SIZE);

	return OK;
}
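Why "rxd_align = 128 - offsetof(rxd_t, data)" works: the allocation itself is 4K-aligned (AC_ALIGN4K), so shifting the base by that amount puts the data member exactly on the next 128-byte boundary. A standalone check of the arithmetic, with rxd_t replaced by a hypothetical stand-in layout:

#include <assert.h>
#include <stddef.h>

/* Hypothetical stand-in for the driver's RxD descriptor layout. */
typedef struct { unsigned status; char data[1528]; } rxd_t;

int main(void)
{
	/* Any 4K-aligned base, as AC_ALIGN4K guarantees. */
	unsigned long base = 0x1000;
	unsigned long align = 128 - offsetof(rxd_t, data);

	/* base + align + offsetof(...) == base + 128: always 128-aligned. */
	assert((base + align + offsetof(rxd_t, data)) % 128 == 0);
	return 0;
}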
Example #3
/*===========================================================================*
 *				vbox_init				     *
 *===========================================================================*/
static int vbox_init(int UNUSED(type), sef_init_info_t *UNUSED(info))
{
	/* Initialize the device. */
	int devind;
	u16_t vid, did;
	struct VMMDevReportGuestInfo *req;
	int r;

	interval = DEFAULT_INTERVAL;
	drift = DEFAULT_DRIFT;

	if (env_argc > 1)
		optset_parse(optset_table, env_argv[1]);

	pci_init();

	r = pci_first_dev(&devind, &vid, &did);

	for (;;) {
		if (r != 1)
			panic("backdoor device not found");

		if (vid == VMMDEV_PCI_VID && did == VMMDEV_PCI_DID)
			break;

		r = pci_next_dev(&devind, &vid, &did);
	}

	pci_reserve(devind);

	port = pci_attr_r32(devind, PCI_BAR) & PCI_BAR_IO_MASK;

	irq = pci_attr_r8(devind, PCI_ILR);
	hook_id = 0;

	if ((r = sys_irqsetpolicy(irq, 0 /* IRQ_REENABLE */, &hook_id)) != OK)
		panic("unable to register IRQ: %d", r);

	if ((r = sys_irqenable(&hook_id)) != OK)
		panic("unable to enable IRQ: %d", r);

	if ((vir_ptr = alloc_contig(VMMDEV_BUF_SIZE, 0, &phys_ptr)) == NULL)
		panic("unable to allocate memory");

	req = (struct VMMDevReportGuestInfo *) vir_ptr;
	req->add_version = VMMDEV_GUEST_VERSION;
	req->os_type = VMMDEV_GUEST_OS_OTHER;

	if ((r = vbox_request(&req->header, phys_ptr,
			VMMDEV_REQ_REPORTGUESTINFO, sizeof(*req))) !=
			VMMDEV_ERR_OK)
		panic("backdoor device not functioning");

	ticks = sys_hz() * interval;

	sys_setalarm(ticks, 0);

	return OK;
}
Example #4
PRIVATE int init_buffers(sub_dev_t *sub_dev_ptr)
{
#if (CHIP == INTEL)
	char *base;
	size_t size;
	unsigned left;
	int i;
	phys_bytes ph;

	/* allocate dma buffer space */
	size= sub_dev_ptr->DmaSize + 64 * 1024;
	base= alloc_contig(size, AC_ALIGN4K, &ph);
	if (!base) {
		error("%s: failed to allocate dma buffer for channel %d\n", 
				drv.DriverName,i);
		return EIO;
	}
	sub_dev_ptr->DmaBuf= base;

	tell_dev((vir_bytes)base, size, 0, 0, 0);

	/* allocate extra buffer space */
	if (!(sub_dev_ptr->ExtraBuf = malloc(sub_dev_ptr->NrOfExtraBuffers * 
					sub_dev_ptr->DmaSize / 
					sub_dev_ptr->NrOfDmaFragments))) {
		error("%s failed to allocate extra buffer for channel %d\n", 
				drv.DriverName,i);
		return EIO;
	}

	sub_dev_ptr->DmaPtr = sub_dev_ptr->DmaBuf;
	i = sys_umap(SELF, D,
			(vir_bytes) sub_dev_ptr->DmaBuf,
			(phys_bytes) size,	/* the buffer size, not sizeof(pointer) */
			&(sub_dev_ptr->DmaPhys));

	if (i != OK) {
		return EIO;
	}

	if ((left = dma_bytes_left(sub_dev_ptr->DmaPhys)) < 
			sub_dev_ptr->DmaSize) {
		/* First half of buffer crosses a 64K boundary,
		 * can't DMA into that */
		sub_dev_ptr->DmaPtr += left;
		sub_dev_ptr->DmaPhys += left;
	}
	/* write the physical dma address and size to the device */
	drv_set_dma(sub_dev_ptr->DmaPhys, 
			sub_dev_ptr->DmaSize, sub_dev_ptr->Nr);
	return OK;

#else /* CHIP != INTEL */
	error("%s: init_buffer() failed, CHIP != INTEL", drv.DriverName);
	return EIO;
#endif /* CHIP == INTEL */
}
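The 64 KB boundary test above hinges on dma_bytes_left(). A minimal sketch of what such a helper computes, assuming classic ISA-style DMA that cannot cross 64 KB pages (the actual MINIX definition may differ):

/* Bytes remaining from 'phys' to the end of its 64 KB DMA page. */
static unsigned dma_bytes_left(phys_bytes phys)
{
	return 64 * 1024 - (unsigned) (phys & (64 * 1024 - 1));
}

Allocating DmaSize + 64 KB up front, as init_buffers() does, guarantees that if the first candidate position crosses a boundary, skipping forward by dma_bytes_left() still leaves a full DmaSize worth of room.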
Example #5
/*============================================================================*
 *				lan8710a_init_desc			      *
 *============================================================================*/
static void
lan8710a_init_desc(void)
{
	lan8710a_desc_t *p_rx_desc;
	lan8710a_desc_t *p_tx_desc;
	phys_bytes   buf_phys_addr;
	u8_t i;

	/* Attempt to allocate. */
	if ((lan8710a_state.p_rx_buf = alloc_contig((LAN8710A_NUM_RX_DESC
			* LAN8710A_IOBUF_SIZE), AC_ALIGN4K,
			&buf_phys_addr)) == NULL) {
		panic("failed to allocate RX buffers.");
	}
	for (i = 0; i < LAN8710A_NUM_RX_DESC; i++) {
		p_rx_desc = &(lan8710a_state.rx_desc[i]);
		memset(p_rx_desc, 0x0, sizeof(lan8710a_desc_t));
		p_rx_desc->pkt_len_flags = LAN8710A_DESC_FLAG_OWN;
		p_rx_desc->buffer_length_off = LAN8710A_IOBUF_SIZE;
		p_rx_desc->buffer_pointer = (u32_t)(buf_phys_addr +
						(i * LAN8710A_IOBUF_SIZE));

		p_rx_desc->next_pointer =
		   (u32_t)((i == (LAN8710A_NUM_RX_DESC - 1)) ?
			   (lan8710a_state.rx_desc_phy) :
			   (lan8710a_state.rx_desc_phy +
			     ((i + 1) * sizeof(lan8710a_desc_t))));
	}

	/* Attempt to allocate. */
	if ((lan8710a_state.p_tx_buf = alloc_contig((LAN8710A_NUM_TX_DESC
			* LAN8710A_IOBUF_SIZE), AC_ALIGN4K,
			&buf_phys_addr)) == NULL) {
		panic("failed to allocate TX buffers");
	}
	for (i = 0; i < LAN8710A_NUM_TX_DESC; i++) {
		p_tx_desc = &(lan8710a_state.tx_desc[i]);
		memset(p_tx_desc, 0x0, sizeof(lan8710a_desc_t));
		p_tx_desc->buffer_pointer = (u32_t)(buf_phys_addr +
				(i * LAN8710A_IOBUF_SIZE));
	}
	lan8710a_state.rx_desc_idx = 0;
	lan8710a_state.tx_desc_idx = 0;
}
Example #6
static int
virtio_blk_alloc_requests(void)
{
	/* Allocate memory for request headers and status field */

	hdrs_vir = alloc_contig(VIRTIO_BLK_NUM_THREADS * sizeof(hdrs_vir[0]),
				AC_ALIGN4K, &hdrs_phys);

	if (!hdrs_vir)
		return ENOMEM;

	status_vir = alloc_contig(VIRTIO_BLK_NUM_THREADS * sizeof(status_vir[0]),
				  AC_ALIGN4K, &status_phys);

	if (!status_vir) {
		free_contig(hdrs_vir, VIRTIO_BLK_NUM_THREADS * sizeof(hdrs_vir[0]));
		return ENOMEM;
	}

	return OK;
}
Example #7
static int
init_indirect_desc_table(struct indirect_desc_table *desc)
{
	desc->in_use = 0;
	desc->len = (MAPVEC_NR + MAPVEC_NR / 2) * sizeof(struct vring_desc);

	desc->descs = alloc_contig(desc->len, AC_ALIGN4K, &desc->paddr);

	/* Check for failure before touching the memory. */
	if (desc->descs == NULL)
		return ENOMEM;

	memset(desc->descs, 0, desc->len);

	return OK;
}
Example #8
static int
alloc_phys_queue(struct virtio_queue *q)
{
	assert(q != NULL);

	/* How much memory do we need? */
	q->ring_size = vring_size(q->num, PAGE_SIZE);

	q->vaddr = alloc_contig(q->ring_size, AC_ALIGN4K, &q->paddr);

	if (q->vaddr == NULL)
		return ENOMEM;

	q->data = alloc_contig(sizeof(q->data[0]) * q->num, AC_ALIGN4K, NULL);

	if (q->data == NULL) {
		free_contig(q->vaddr, q->ring_size);
		q->vaddr = NULL;
		q->paddr = 0;
		return ENOMEM;
	}

	return OK;
}
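A hypothetical cleanup counterpart (not shown in the source) would release the two allocations in reverse, mirroring the error path above:

static void
free_phys_queue(struct virtio_queue *q)
{
	assert(q != NULL);

	/* Undo alloc_phys_queue(): per-slot data first, then the ring. */
	free_contig(q->data, sizeof(q->data[0]) * q->num);
	free_contig(q->vaddr, q->ring_size);
	q->vaddr = NULL;
	q->paddr = 0;
}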
Example #9
/*===========================================================================*
 *				flt_malloc				     *
 *===========================================================================*/
char *flt_malloc(size_t size, char *sbuf, size_t ssize)
{
	/* Allocate a buffer for 'size' bytes. If 'size' is equal to or less
	 * than 'ssize', return the static buffer 'sbuf'; otherwise, use
	 * alloc_contig() to allocate memory dynamically.
	 */
	char *p;

	if (size <= ssize)
		return sbuf;

	if(!(p = alloc_contig(size, 0, NULL)))
		panic("out of memory: %d", size);

	return p;
}
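The strategy above implies that callers may only release buffers that did not come from the static area. A hedged sketch of the matching release helper (name and shape assumed, not taken from the source):

void flt_free(char *p, size_t size, const char *sbuf)
{
	/* The static buffer was never allocated, so never free it. */
	if (p != sbuf)
		free_contig(p, size);
}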
Example #10
File: fbd.c Project: Hooman3/minix
/*===========================================================================*
 *				sef_cb_init_fresh			     *
 *===========================================================================*/
static int sef_cb_init_fresh(int type, sef_init_info_t *UNUSED(info))
{
	clock_t uptime;
	int r;

	/* Parse the given parameters. */
	if (env_argc > 1)
		optset_parse(optset_table, env_argv[1]);

	if (driver_label[0] == '\0')
		panic("no driver label given");

	if (ds_retrieve_label_endpt(driver_label, &driver_endpt))
		panic("unable to resolve driver label");

	if (driver_minor > 255)
		panic("no or invalid driver minor given");

#if DEBUG
	printf("FBD: driver label '%s' (endpt %d), minor %d\n",
		driver_label, driver_endpt, driver_minor);
#endif

	/* Initialize resources. */
	fbd_buf = alloc_contig(BUF_SIZE, 0, NULL);

	assert(fbd_buf != NULL);

	if ((r = getticks(&uptime)) != OK)
		panic("getuptime failed (%d)\n", r);

	srand48(uptime);

	/* Announce we are up! */
	blockdriver_announce(type);

	return OK;
}
Example #11
/*===========================================================================*
 *				get_block				     *
 *===========================================================================*/
struct buf *get_block(
  register dev_t dev,		/* on which device is the block? */
  register block_t block,	/* which block is wanted? */
  int only_search		/* if NO_READ, don't read, else act normal */
)
{
/* Check to see if the requested block is in the block cache.  If so, return
 * a pointer to it.  If not, evict some other block and fetch it (unless
 * 'only_search' is NO_READ).  All the blocks in the cache that are not in use
 * are linked together in a chain, with 'front' pointing to the least recently
 * used block and 'rear' to the most recently used block.  If 'only_search' is
 * NO_READ, the block being requested will be overwritten in its entirety, so
 * it is only necessary to see if it is in the cache; if it is not, any free
 * buffer will do.  It is not necessary to actually read the block in from
 * disk.  If 'only_search' is PREFETCH, the block need not be read from the
 * disk, and the device is not to be marked on the block, so callers can tell
 * if the block returned is valid.
 * In addition to the LRU chain, there is also a hash chain to link together
 * blocks whose block numbers end with the same bit strings, for fast lookup.
 */

  int b;
  static struct buf *bp, *prev_ptr;
  u64_t yieldid = VM_BLOCKID_NONE, getid = make64(dev, block);

  assert(buf_hash);
  assert(buf);
  assert(nr_bufs > 0);

  ASSERT(fs_block_size > 0);

  /* Search the hash chain for (dev, block). do_read() can use
   * get_block(NO_DEV ...) to get an unnamed block to fill with zeros when
   * someone wants to read from a hole in a file, in which case this search
   * is skipped.
   */
  if (dev != NO_DEV) {
	b = BUFHASH(block);
	bp = buf_hash[b];
	while (bp != NULL) {
		if (bp->b_blocknr == block && bp->b_dev == dev) {
			/* Block needed has been found. */
			if (bp->b_count == 0) rm_lru(bp);
			bp->b_count++;	/* record that block is in use */
			ASSERT(bp->b_bytes == fs_block_size);
			ASSERT(bp->b_dev == dev);
			ASSERT(bp->b_dev != NO_DEV);
			ASSERT(bp->bp);
			return(bp);
		} else {
			/* This block is not the one sought. */
			bp = bp->b_hash; /* move to next block on hash chain */
		}
	}
  }

  /* Desired block is not on available chain.  Take oldest block ('front'). */
  if ((bp = front) == NULL) panic("all buffers in use: %d", nr_bufs);

  if(bp->b_bytes < fs_block_size) {
	ASSERT(!bp->bp);
	ASSERT(bp->b_bytes == 0);
	if(!(bp->bp = alloc_contig( (size_t) fs_block_size, 0, NULL))) {
		printf("MFS: couldn't allocate a new block.\n");
		for(bp = front;
			bp && bp->b_bytes < fs_block_size; bp = bp->b_next)
			;
		if(!bp) {
			panic("no buffer available");
		}
	} else {
  		bp->b_bytes = fs_block_size;
	}
  }

  ASSERT(bp);
  ASSERT(bp->bp);
  ASSERT(bp->b_bytes == fs_block_size);
  ASSERT(bp->b_count == 0);

  rm_lru(bp);

  /* Remove the block that was just taken from its hash chain. */
  b = BUFHASH(bp->b_blocknr);
  prev_ptr = buf_hash[b];
  if (prev_ptr == bp) {
	buf_hash[b] = bp->b_hash;
  } else {
	/* The block just taken is not on the front of its hash chain. */
	while (prev_ptr->b_hash != NULL)
		if (prev_ptr->b_hash == bp) {
			prev_ptr->b_hash = bp->b_hash;	/* found it */
			break;
		} else {
			prev_ptr = prev_ptr->b_hash;	/* keep looking */
		}
  }

  /* If the block taken is dirty, make it clean by writing it to the disk.
   * Avoid hysteresis by flushing all other dirty blocks for the same device.
   */
  if (bp->b_dev != NO_DEV) {
	if (ISDIRTY(bp)) flushall(bp->b_dev);

	/* Are we throwing out a block that contained something?
	 * Give it to VM for the second-layer cache.
	 */
	yieldid = make64(bp->b_dev, bp->b_blocknr);
	assert(bp->b_bytes == fs_block_size);
	BP_CLEARDEV(bp);
  }

  /* Fill in block's parameters and add it to the hash chain where it goes. */
  if(dev == NO_DEV) BP_CLEARDEV(bp);
  else BP_SETDEV(bp, dev);
  bp->b_blocknr = block;	/* fill in block number */
  bp->b_count++;		/* record that block is being used */
  b = BUFHASH(bp->b_blocknr);
  bp->b_hash = buf_hash[b];

  buf_hash[b] = bp;		/* add to hash list */

  if(dev == NO_DEV) {
	if(vmcache && cmp64(yieldid, VM_BLOCKID_NONE) != 0) {
		vm_yield_block_get_block(yieldid, VM_BLOCKID_NONE,
			bp->bp, fs_block_size);
	}
	return(bp);	/* If the caller wanted a NO_DEV block, work is done. */
  }

  /* Go get the requested block unless searching or prefetching. */
  if(only_search == PREFETCH || only_search == NORMAL) {
	/* Block is not found in our cache, but we do want it
	 * if it's in the vm cache.
	 */
	if(vmcache) {
		/* If we can satisfy the PREFETCH or NORMAL request 
		 * from the vm cache, work is done.
		 */
		if(vm_yield_block_get_block(yieldid, getid,
			bp->bp, fs_block_size) == OK) {
			return bp;
		}
	}
  }

  if(only_search == PREFETCH) {
	/* PREFETCH: don't do i/o. */
	BP_CLEARDEV(bp);
  } else if (only_search == NORMAL) {
	read_block(bp);
  } else if(only_search == NO_READ) {
	/* we want this block, but its contents
	 * will be overwritten. VM has to forget
	 * about it.
	 */
	if(vmcache) {
		vm_forgetblock(getid);
	}
  } else
	panic("unexpected only_search value: %d", only_search);

  assert(bp->bp);

  return(bp);			/* return the newly acquired block */
}
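For reference, the hash chains walked above are indexed by BUFHASH. A minimal sketch of such a macro, assuming the simple modulo scheme the surrounding code permits (the real MFS definition may differ):

/* Map a block number to a hash chain index (assumed definition). */
#define BUFHASH(b) ((b) % nr_bufs)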
Example #12
/*
 * Initialize receive and transmit buffers.
 */
static void
e1000_init_buf(e1000_t * e)
{
	phys_bytes rx_desc_p, rx_buff_p;
	phys_bytes tx_desc_p, tx_buff_p;
	int i;

	/* Number of descriptors. */
	e->rx_desc_count = E1000_RXDESC_NR;
	e->tx_desc_count = E1000_TXDESC_NR;

	/* Allocate receive descriptors. */
	if ((e->rx_desc = alloc_contig(sizeof(e1000_rx_desc_t) *
	    e->rx_desc_count, AC_ALIGN4K, &rx_desc_p)) == NULL)
		panic("failed to allocate RX descriptors");

	memset(e->rx_desc, 0, sizeof(e1000_rx_desc_t) * e->rx_desc_count);

	/* Allocate receive buffers. */
	e->rx_buffer_size = E1000_RXDESC_NR * E1000_IOBUF_SIZE;

	if ((e->rx_buffer = alloc_contig(e->rx_buffer_size, AC_ALIGN4K,
	    &rx_buff_p)) == NULL)
		panic("failed to allocate RX buffers");

	/* Set up receive descriptors. */
	for (i = 0; i < E1000_RXDESC_NR; i++)
		e->rx_desc[i].buffer = rx_buff_p + i * E1000_IOBUF_SIZE;

	/* Allocate transmit descriptors. */
	if ((e->tx_desc = alloc_contig(sizeof(e1000_tx_desc_t) *
	    e->tx_desc_count, AC_ALIGN4K, &tx_desc_p)) == NULL)
		panic("failed to allocate TX descriptors");

	memset(e->tx_desc, 0, sizeof(e1000_tx_desc_t) * e->tx_desc_count);

	/* Allocate transmit buffers. */
	e->tx_buffer_size = E1000_TXDESC_NR * E1000_IOBUF_SIZE;

	if ((e->tx_buffer = alloc_contig(e->tx_buffer_size, AC_ALIGN4K,
	    &tx_buff_p)) == NULL)
		panic("failed to allocate TX buffers");

	/* Set up transmit descriptors. */
	for (i = 0; i < E1000_TXDESC_NR; i++)
		e->tx_desc[i].buffer = tx_buff_p + i * E1000_IOBUF_SIZE;

	/* Set up the receive ring registers. */
	e1000_reg_write(e, E1000_REG_RDBAL, rx_desc_p);
	e1000_reg_write(e, E1000_REG_RDBAH, 0);
	e1000_reg_write(e, E1000_REG_RDLEN,
	    e->rx_desc_count * sizeof(e1000_rx_desc_t));
	e1000_reg_write(e, E1000_REG_RDH, 0);
	e1000_reg_write(e, E1000_REG_RDT, e->rx_desc_count - 1);
	e1000_reg_unset(e, E1000_REG_RCTL, E1000_REG_RCTL_BSIZE);
	e1000_reg_set(e, E1000_REG_RCTL, E1000_REG_RCTL_EN);

	/* Set up the transmit ring registers. */
	e1000_reg_write(e, E1000_REG_TDBAL, tx_desc_p);
	e1000_reg_write(e, E1000_REG_TDBAH, 0);
	e1000_reg_write(e, E1000_REG_TDLEN,
	    e->tx_desc_count * sizeof(e1000_tx_desc_t));
	e1000_reg_write(e, E1000_REG_TDH, 0);
	e1000_reg_write(e, E1000_REG_TDT, 0);
	e1000_reg_set(e, E1000_REG_TCTL,
	    E1000_REG_TCTL_EN | E1000_REG_TCTL_PSP);
}
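After this setup, the NIC considers the descriptors between head and tail to be its work queue. A sketch of how a transmit path might hand one filled descriptor to the hardware, reusing the register helpers above (this function is not part of the driver code shown):

static void
e1000_tx_advance(e1000_t * e, unsigned int tail)
{
	/* Advancing TDT past the newly filled slot tells the NIC
	 * to start transmitting it.
	 */
	e1000_reg_write(e, E1000_REG_TDT, (tail + 1) % e->tx_desc_count);
}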
Example #13
File: fbd.c Project: Hooman3/minix
/*===========================================================================*
 *				fbd_transfer_copy			     *
 *===========================================================================*/
static ssize_t fbd_transfer_copy(int do_write, u64_t position,
	endpoint_t endpt, iovec_t *iov, unsigned int count, size_t size,
	int flags)
{
	/* Interpose on the request. */
	iovec_s_t iovec[NR_IOREQS];
	struct vscp_vec vscp_vec[SCPVEC_NR];
	cp_grant_id_t grant;
	size_t off, len;
	message m;
	char *ptr;
	int i, j, r;
	ssize_t rsize;

	assert(count > 0 && count <= SCPVEC_NR);

	if (size > BUF_SIZE) {
		printf("FBD: allocating memory for %d bytes\n", size);

		ptr = alloc_contig(size, 0, NULL);

		assert(ptr != NULL);
	}
	else ptr = fbd_buf;

	/* For write operations, first copy in the data to write. */
	if (do_write) {
		for (i = off = 0; i < count; i++) {
			len = iov[i].iov_size;

			vscp_vec[i].v_from = endpt;
			vscp_vec[i].v_to = SELF;
			vscp_vec[i].v_gid = iov[i].iov_addr;
			vscp_vec[i].v_offset = 0;
			vscp_vec[i].v_addr = (vir_bytes) (ptr + off);
			vscp_vec[i].v_bytes = len;

			off += len;
		}

		if ((r = sys_vsafecopy(vscp_vec, i)) != OK)
			panic("vsafecopy failed (%d)\n", r);

		/* Trigger write hook. */
		rule_io_hook(ptr, size, position, FBD_FLAG_WRITE);
	}

	/* Allocate grants for the data, in the same chunking as the original
	 * vector. This avoids performance fluctuations with bad hardware as
	 * observed with the filter driver.
	 */
	for (i = off = 0; i < count; i++) {
		len = iov[i].iov_size;

		iovec[i].iov_size = len;
		iovec[i].iov_grant = cpf_grant_direct(driver_endpt,
			(vir_bytes) (ptr + off), len,
			do_write ? CPF_READ : CPF_WRITE);
		assert(iovec[i].iov_grant != GRANT_INVALID);

		off += len;
	}

	grant = cpf_grant_direct(driver_endpt, (vir_bytes) iovec,
		count * sizeof(iovec[0]), CPF_READ);
	assert(grant != GRANT_INVALID);

	m.m_type = do_write ? BDEV_SCATTER : BDEV_GATHER;
	m.m_lbdev_lblockdriver_msg.minor = driver_minor;
	m.m_lbdev_lblockdriver_msg.count = count;
	m.m_lbdev_lblockdriver_msg.grant = grant;
	m.m_lbdev_lblockdriver_msg.flags = flags;
	m.m_lbdev_lblockdriver_msg.id = 0;
	m.m_lbdev_lblockdriver_msg.pos = position;

	if ((r = ipc_sendrec(driver_endpt, &m)) != OK)
		panic("ipc_sendrec to driver failed (%d)\n", r);

	if (m.m_type != BDEV_REPLY)
		panic("invalid reply from driver (%d)\n", m.m_type);

	cpf_revoke(grant);

	for (i = 0; i < count; i++)
		cpf_revoke(iovec[i].iov_grant);

	/* For read operations, finish by copying out the data read. */
	if (!do_write) {
		/* Trigger read hook. */
		rule_io_hook(ptr, size, position, FBD_FLAG_READ);

		/* Upon success, copy back whatever has been processed. */
		rsize = m.m_lblockdriver_lbdev_reply.status;
		for (i = j = off = 0; rsize > 0 && i < count; i++) {
			len = MIN(rsize, iov[i].iov_size);

			vscp_vec[j].v_from = SELF;
			vscp_vec[j].v_to = endpt;
			vscp_vec[j].v_gid = iov[i].iov_addr;
			vscp_vec[j].v_offset = 0;
			vscp_vec[j].v_addr = (vir_bytes) (ptr + off);
			vscp_vec[j].v_bytes = len;

			off += len;
			rsize -= len;
			j++;
		}

		if (j > 0 && (r = sys_vsafecopy(vscp_vec, j)) != OK)
			panic("vsafecopy failed (%d)\n", r);
	}

	if (ptr != fbd_buf)
		free_contig(ptr, size);

	return m.m_lblockdriver_lbdev_reply.status;
}