Example #1
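virtio_net_refill_rx_queue() posts free packet buffers to the receive queue of a virtio network device, two descriptors per packet (header and payload).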
static void
virtio_net_refill_rx_queue(void)
{
	struct vumap_phys phys[2];
	struct packet *p;

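	/* Top up the RX queue to at most half of all packet buffers,
	 * as long as free buffers remain.
	 */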
	while ((in_rx < BUF_PACKETS / 2) && !STAILQ_EMPTY(&free_list)) {
		/* peek */
		p = STAILQ_FIRST(&free_list);
		/* remove */
		STAILQ_REMOVE_HEAD(&free_list, next);

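		/* Two descriptors per packet: the virtio net header and the
		 * payload.  Buffer addresses must be even, because the low
		 * bit of vp_addr doubles as the write flag (set below).
		 */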
		phys[0].vp_addr = p->phdr;
		assert(!(phys[0].vp_addr & 1));
		phys[0].vp_size = sizeof(struct virtio_net_hdr);

		phys[1].vp_addr = p->pdata;
		assert(!(phys[1].vp_addr & 1));
		phys[1].vp_size = MAX_PACK_SIZE;

		/* RX queue needs write */
		phys[0].vp_addr |= 1;
		phys[1].vp_addr |= 1;

		virtio_to_queue(net_dev, RX_Q, phys, 2, p);
		in_rx++;
	}

	if (in_rx == 0 && STAILQ_EMPTY(&free_list)) {
		dput(("warning: rx queue underflow!"));
		virtio_net_stats.ets_fifoUnder++;
	}
}
Example #2
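virtio_net_send() copies a packet from the network stack into a free buffer and queues it for transmission, suspending the caller when no buffers are available.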
/*
 * Put user bytes into a free packet buffer, forward this packet to the TX
 * queue, and return OK.  If there are no free packet buffers, return SUSPEND.
 */
static int
virtio_net_send(struct netdriver_data *data, size_t len)
{
	struct vumap_phys phys[2];
	struct packet *p;

	if (STAILQ_EMPTY(&free_list))
		return SUSPEND;

	p = STAILQ_FIRST(&free_list);
	STAILQ_REMOVE_HEAD(&free_list, next);

	if (len > MAX_PACK_SIZE)
		panic("%s: packet too large to send: %zu", name, len);

	netdriver_copyin(data, 0, p->vdata, len);

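	/* Two descriptors: the virtio net header, then the payload.  The
	 * write flag (low bit of vp_addr) stays clear, since the host only
	 * reads TX buffers.
	 */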
	phys[0].vp_addr = p->phdr;
	assert(!(phys[0].vp_addr & 1));
	phys[0].vp_size = sizeof(struct virtio_net_hdr);
	phys[1].vp_addr = p->pdata;
	assert(!(phys[1].vp_addr & 1));
	phys[1].vp_size = len;
	virtio_to_queue(net_dev, TX_Q, phys, 2, p);

	return OK;
}
Example #3
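virtio_blk_flush() issues a VIRTIO_BLK_T_FLUSH request (optionally marked as a barrier) and sleeps until the host has written the status byte.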
static int
virtio_blk_flush(void)
{
	struct vumap_phys phys[2];
	size_t phys_cnt = sizeof(phys) / sizeof(phys[0]);

	/* Which thread is doing this request? */
	thread_id_t tid = blockdriver_mt_get_tid();

	/* Host may not support flushing */
	if (!virtio_host_supports(blk_dev, VIRTIO_BLK_F_FLUSH))
		return EOPNOTSUPP;

	/* Prepare the header */
	memset(&hdrs_vir[tid], 0, sizeof(hdrs_vir[0]));
	hdrs_vir[tid].type = VIRTIO_BLK_T_FLUSH;

	/* Let this be a barrier if the host supports it */
	if (virtio_host_supports(blk_dev, VIRTIO_BLK_F_BARRIER))
		hdrs_vir[tid].type |= VIRTIO_BLK_T_BARRIER;

	/* Header and status for the queue */
	phys[0].vp_addr = hdrs_phys + tid * sizeof(hdrs_vir[0]);
	phys[0].vp_size = sizeof(hdrs_vir[0]);
	phys[1].vp_addr = status_phys + tid * sizeof(status_vir[0]);
	phys[1].vp_size = 1;

	/* Status always needs write access */
	phys[1].vp_addr |= 1;

	/* Send flush request to queue */
	virtio_to_queue(blk_dev, 0, phys, phys_cnt, &tid);

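	/* Block until the request has completed and the status byte
	 * is valid.
	 */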
	blockdriver_mt_sleep();

	/* All was good */
	if (mystatus(tid) == VIRTIO_BLK_S_OK)
		return OK;

	/* Error path */
	dprintf(("ERROR status=%02x op=flush t=%d", mystatus(tid), tid));

	return virtio_blk_status2error(mystatus(tid));
}
Example #4
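virtio_net_cpy_from_user() is a message-based variant of the TX path: it gathers the caller's iovec, copies the data into a free packet buffer, and hands the buffer to the TX queue.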
static int
virtio_net_cpy_from_user(message *m)
{
	/* Put user bytes into a free packet buffer and then forward this
	 * packet to the TX queue.
	 */
	int r;
	iovec_s_t iovec[NR_IOREQS];
	struct vumap_phys phys[2];
	struct packet *p;
	size_t bytes;

	/* This should only be called if free_list has some entries */
	assert(!STAILQ_EMPTY(&free_list));

	p = STAILQ_FIRST(&free_list);
	STAILQ_REMOVE_HEAD(&free_list, next);

	virtio_net_fetch_iovec(iovec, m, m->m_net_netdrv_dl_writev_s.grant,
		 m->m_net_netdrv_dl_writev_s.count);

	r = sys_easy_vsafecopy_from(m->m_source, iovec,
		m->m_net_netdrv_dl_writev_s.count, (vir_bytes)p->vdata,
		MAX_PACK_SIZE, &bytes);

	if (r != OK)
		panic("%s: copy from %d failed", name, m->m_source);

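	/* Same two-descriptor layout as the RX path: header first, then
	 * the payload, this time without the write flag.
	 */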
	phys[0].vp_addr = p->phdr;
	assert(!(phys[0].vp_addr & 1));
	phys[0].vp_size = sizeof(struct virtio_net_hdr);
	phys[1].vp_addr = p->pdata;
	assert(!(phys[1].vp_addr & 1));
	phys[1].vp_size = bytes;
	virtio_to_queue(net_dev, TX_Q, phys, 2, p);
	return bytes;
}
Example #5
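virtio_blk_transfer() performs a full read/write transfer: it validates and truncates the request against the partition, maps the caller's virtual buffers to physical addresses, and brackets them on the queue with a request header and a status byte.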
static ssize_t
virtio_blk_transfer(dev_t minor, int write, u64_t position, endpoint_t endpt,
		    iovec_t *iovec, unsigned int cnt, int flags)
{
	/* Need to translate vir to phys */
	struct vumap_vir vir[NR_IOREQS];

	/* Physical addresses of buffers, including header and trailer */
	struct vumap_phys phys[NR_IOREQS + 2];

	/* Which thread is doing the transfer? */
	thread_id_t tid = blockdriver_mt_get_tid();

	vir_bytes size = 0;
	vir_bytes size_tmp = 0;
	struct device *dv;
	u64_t sector;
	u64_t end_part;
	int r, pcnt = sizeof(phys) / sizeof(phys[0]);

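	/* For a disk write the host reads the caller's buffers, so the
	 * mapping needs read access; for a disk read it needs write access.
	 */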
	iovec_s_t *iv = (iovec_s_t *)iovec;
	int access = write ? VUA_READ : VUA_WRITE;

	/* Make sure we don't touch this one anymore */
	iovec = NULL;

	if (cnt > NR_IOREQS)
		return EINVAL;

	/* position greater than capacity? */
	if (position >= blk_config.capacity * VIRTIO_BLK_BLOCK_SIZE)
		return 0;

	dv = virtio_blk_part(minor);

	/* Does device exist? */
	if (!dv)
		return ENXIO;

	position += dv->dv_base;
	end_part = dv->dv_base + dv->dv_size;

	/* Hmmm, AHCI tries to fix this up, but let's just say everything
	 * needs to be sector (512 byte) aligned...
	 */
	if (position % VIRTIO_BLK_BLOCK_SIZE) {
		dprintf(("Non sector-aligned access %016llx", position));
		return EINVAL;
	}

	sector = position / VIRTIO_BLK_BLOCK_SIZE;

	r = prepare_vir_vec(endpt, vir, iv, cnt, &size);

	if (r != OK)
		return r;

	if (position >= end_part)
		return 0;

	/* Truncate if the partition is smaller than that */
	if (position + size > end_part - 1) {
		size = end_part - position;

		/* Fix up later */
		size_tmp = 0;
		cnt = 0;
	} else {
		/* Use all buffers */
		size_tmp = size;
	}

	/* Fix up the number of vectors if size was truncated */
	while (size_tmp < size)
		size_tmp += vir[cnt++].vv_size;

	/* If the last vector was too big, just truncate it */
	if (size_tmp > size) {
		vir[cnt - 1].vv_size -= size_tmp - size;
		size_tmp -= size_tmp - size;
	}

	if (size % VIRTIO_BLK_BLOCK_SIZE) {
		dprintf(("non-sector sized read (%lu) from %d", size, endpt));
		return EINVAL;
	}

	/* Map vir to phys */
	if ((r = sys_vumap(endpt, vir, cnt, 0, access,
			   &phys[1], &pcnt)) != OK) {

		dprintf(("Unable to map memory from %d (%d)", endpt, r));
		return r;
	}

	/* Prepare the header */
	memset(&hdrs_vir[tid], 0, sizeof(hdrs_vir[0]));

	if (write)
		hdrs_vir[tid].type = VIRTIO_BLK_T_OUT;
	else
		hdrs_vir[tid].type = VIRTIO_BLK_T_IN;

	hdrs_vir[tid].ioprio = 0;
	hdrs_vir[tid].sector = sector;

	/* First the header */
	phys[0].vp_addr = hdrs_phys + tid * sizeof(hdrs_vir[0]);
	phys[0].vp_size = sizeof(hdrs_vir[0]);

	/* Put the physical buffers into phys */
	if ((r = prepare_bufs(vir, &phys[1], pcnt, write)) != OK)
		return r;

	/* Put the status at the end */
	phys[pcnt + 1].vp_addr = status_phys + tid * sizeof(status_vir[0]);
	phys[pcnt + 1].vp_size = sizeof(u8_t);

	/* Status always needs write access */
	phys[pcnt + 1].vp_addr |= 1;

	/* Send addresses to queue */
	virtio_to_queue(blk_dev, 0, phys, 2 + pcnt, &tid);

	/* Wait for completion */
	blockdriver_mt_sleep();

	/* All was good */
	if (mystatus(tid) == VIRTIO_BLK_S_OK)
		return size;

	/* Error path */
	dprintf(("ERROR status=%02x sector=%llu len=%lx cnt=%d op=%s t=%d",
		 mystatus(tid), sector, size, pcnt,
		 write ? "write" : "read", tid));

	return virtio_blk_status2error(mystatus(tid));
}