Example #1
void
vnet_rx_vio_desc_data(struct vnet_softc *sc, struct vio_msg_tag *tag)
{
	struct vnet_desc_msg *dm = (struct vnet_desc_msg *)tag;
	struct ldc_conn *lc = &sc->sc_lc;
	struct ldc_map *map = sc->sc_lm;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	caddr_t buf;
	paddr_t pa;
	psize_t nbytes;
	u_int cons;
	int err;

	switch(tag->stype) {
	case VIO_SUBTYPE_INFO:
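		/*
		 * The peer handed us a frame in a single descriptor:
		 * copy it in over the LDC channel, pass it to the
		 * network stack and ACK the message.
		 */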
		/* Reject oversized frames before grabbing a buffer. */
		if (dm->nbytes > (ETHER_MAX_LEN - ETHER_CRC_LEN)) {
			ifp->if_ierrors++;
			goto skip;
		}

		buf = pool_get(&sc->sc_pool, PR_NOWAIT|PR_ZERO);
		if (buf == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}
		nbytes = roundup(dm->nbytes, 8);

		pmap_extract(pmap_kernel(), (vaddr_t)buf, &pa);
		err = hv_ldc_copy(lc->lc_id, LDC_COPY_IN,
		    dm->cookie[0].addr, pa, nbytes, &nbytes);
		if (err != H_EOK) {
			pool_put(&sc->sc_pool, buf);
			ifp->if_ierrors++;
			goto skip;
		}

		/* Stupid OBP doesn't align properly. */
		m = m_devget(buf, dm->nbytes, ETHER_ALIGN);
		pool_put(&sc->sc_pool, buf);
		if (m == NULL) {
			ifp->if_ierrors++;
			goto skip;
		}

		/* Pass it on. */
		ml_enqueue(&ml, m);
		if_input(ifp, &ml);

	skip:
		dm->tag.stype = VIO_SUBTYPE_ACK;
		dm->tag.sid = sc->sc_local_sid;
		vnet_sendmsg(sc, dm, sizeof(*dm));
		break;

	case VIO_SUBTYPE_ACK:
		DPRINTF(("DATA/ACK/DESC_DATA\n"));

		if (dm->desc_handle != sc->sc_tx_cons) {
			printf("out of order\n");
			return;
		}

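		/* Release the LDC map entry and the transmit buffer. */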
		cons = sc->sc_tx_cons & (sc->sc_vd->vd_nentries - 1);

		map->lm_slot[sc->sc_vsd[cons].vsd_map_idx].entry = 0;
		atomic_dec_int(&map->lm_count);

		pool_put(&sc->sc_pool, sc->sc_vsd[cons].vsd_buf);
		ifp->if_opackets++;

		sc->sc_tx_cons++;
		break;

	case VIO_SUBTYPE_NACK:
		DPRINTF(("DATA/NACK/DESC_DATA\n"));
		break;

	default:
		DPRINTF(("DATA/0x%02x/DESC_DATA\n", tag->stype));
		break;
	}
}
Example #2
void
vnet_rx_vio_dring_data(struct vnet_softc *sc, struct vio_msg_tag *tag)
{
	struct vio_dring_msg *dm = (struct vio_dring_msg *)tag;
	struct ldc_conn *lc = &sc->sc_lc;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mbuf *m;
	paddr_t pa;
	psize_t nbytes;
	int err;

	switch(tag->stype) {
	case VIO_SUBTYPE_INFO:
	{
		struct vnet_desc desc;
		uint64_t cookie;
		paddr_t desc_pa;
		int idx, ack_end_idx = -1;
		struct mbuf_list ml = MBUF_LIST_INITIALIZER();

		idx = dm->start_idx;
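		/*
		 * Walk the peer's descriptor ring starting at start_idx,
		 * copying in each ready descriptor and its frame data.
		 */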
		for (;;) {
			cookie = sc->sc_peer_dring_cookie.addr;
			cookie += idx * sc->sc_peer_desc_size;
			nbytes = sc->sc_peer_desc_size;
			pmap_extract(pmap_kernel(), (vaddr_t)&desc, &desc_pa);
			err = hv_ldc_copy(lc->lc_id, LDC_COPY_IN, cookie,
			    desc_pa, nbytes, &nbytes);
			if (err != H_EOK) {
				printf("hv_ldc_copy_in %d\n", err);
				break;
			}

			if (desc.hdr.dstate != VIO_DESC_READY)
				break;

			if (desc.nbytes > (ETHER_MAX_LEN - ETHER_CRC_LEN)) {
				ifp->if_ierrors++;
				goto skip;
			}

			m = MCLGETI(NULL, M_DONTWAIT, NULL, desc.nbytes);
			if (!m)
				break;
			m->m_len = m->m_pkthdr.len = desc.nbytes;
			nbytes = roundup(desc.nbytes + VNET_ETHER_ALIGN, 8);

			pmap_extract(pmap_kernel(), (vaddr_t)m->m_data, &pa);
			err = hv_ldc_copy(lc->lc_id, LDC_COPY_IN,
			    desc.cookie[0].addr, pa, nbytes, &nbytes);
			if (err != H_EOK) {
				m_freem(m);
				goto skip;
			}
			m->m_data += VNET_ETHER_ALIGN;

			ml_enqueue(&ml, m);

		skip:
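			/* Mark the descriptor done and write it back. */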
			desc.hdr.dstate = VIO_DESC_DONE;
			nbytes = sc->sc_peer_desc_size;
			err = hv_ldc_copy(lc->lc_id, LDC_COPY_OUT, cookie,
			    desc_pa, nbytes, &nbytes);
			if (err != H_EOK)
				printf("hv_ldc_copy_out %d\n", err);

			ack_end_idx = idx;
			if (++idx == sc->sc_peer_dring_nentries)
				idx = 0;
		}

		if_input(ifp, &ml);

		if (ack_end_idx == -1) {
			dm->tag.stype = VIO_SUBTYPE_NACK;
		} else {
			dm->tag.stype = VIO_SUBTYPE_ACK;
			dm->end_idx = ack_end_idx;
		}
		dm->tag.sid = sc->sc_local_sid;
		dm->proc_state = VIO_DP_STOPPED;
		vnet_sendmsg(sc, dm, sizeof(*dm));
		break;
	}

	case VIO_SUBTYPE_ACK:
	{
		struct ldc_map *map = sc->sc_lm;
		u_int cons, count;

		sc->sc_peer_state = dm->proc_state;

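		/* Reclaim transmit descriptors the peer has completed. */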
		cons = sc->sc_tx_cons & (sc->sc_vd->vd_nentries - 1);
		while (sc->sc_vd->vd_desc[cons].hdr.dstate == VIO_DESC_DONE) {
			map->lm_slot[sc->sc_vsd[cons].vsd_map_idx].entry = 0;
			atomic_dec_int(&map->lm_count);

			pool_put(&sc->sc_pool, sc->sc_vsd[cons].vsd_buf);
			ifp->if_opackets++;

			sc->sc_vd->vd_desc[cons].hdr.dstate = VIO_DESC_FREE;
			sc->sc_tx_cons++;
			cons = sc->sc_tx_cons & (sc->sc_vd->vd_nentries - 1);
		}

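		/*
		 * If descriptors are still outstanding and the peer has
		 * stopped processing, prod it again, then reopen the
		 * transmit queue if there is room.
		 */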
		count = sc->sc_tx_prod - sc->sc_tx_cons;
		if (count > 0 && sc->sc_peer_state != VIO_DP_ACTIVE)
			vnet_send_dring_data(sc, cons);

		KERNEL_LOCK();
		if (count < (sc->sc_vd->vd_nentries - 1))
			ifp->if_flags &= ~IFF_OACTIVE;
		if (count == 0)
			ifp->if_timer = 0;

		vnet_start(ifp);
		KERNEL_UNLOCK();
		break;
	}

	case VIO_SUBTYPE_NACK:
		DPRINTF(("DATA/NACK/DRING_DATA\n"));
		sc->sc_peer_state = VIO_DP_STOPPED;
		break;

	default:
		DPRINTF(("DATA/0x%02x/DRING_DATA\n", tag->stype));
		break;
	}
}
Example #3
void
vdsp_rx_vio_dring_data(struct vdsp_softc *sc, struct vio_msg_tag *tag)
{
	struct vio_dring_msg *dm = (struct vio_dring_msg *)tag;
	struct vd_desc *vd;
	vaddr_t va;
	paddr_t pa;
	uint64_t size, off;
	psize_t nbytes;
	int err;

	switch(tag->stype) {
	case VIO_SUBTYPE_INFO:
		DPRINTF(("DATA/INFO/DRING_DATA\n"));

		if (dm->dring_ident != sc->sc_dring_ident ||
		    dm->start_idx >= sc->sc_num_descriptors) {
			dm->tag.stype = VIO_SUBTYPE_NACK;
			vdsp_sendmsg(sc, dm, sizeof(*dm), 0);
			return;
		}

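		/*
		 * Copy the descriptor in from the guest's descriptor
		 * ring, bounded by page boundaries.
		 */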
		off = dm->start_idx * sc->sc_descriptor_size;
		vd = (struct vd_desc *)(sc->sc_vd + off);
		va = (vaddr_t)vd;
		size = sc->sc_descriptor_size;
		while (size > 0) {
			pmap_extract(pmap_kernel(), va, &pa);
			nbytes = MIN(size, PAGE_SIZE - (off & PAGE_MASK));
			err = hv_ldc_copy(sc->sc_lc.lc_id, LDC_COPY_IN,
			    sc->sc_dring_cookie.addr + off, pa,
			    nbytes, &nbytes);
			if (err != H_EOK) {
				printf("%s: hv_ldc_copy %d\n", __func__, err);
				return;
			}
			va += nbytes;
			size -= nbytes;
			off += nbytes;
		}

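		/* Hand the descriptor to the processing task. */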
		sc->sc_vd_ring[sc->sc_vd_prod % sc->sc_num_descriptors] = vd;
		membar_producer();
		sc->sc_vd_prod++;
		task_add(systq, &sc->sc_vd_task);

		break;

	case VIO_SUBTYPE_ACK:
		DPRINTF(("DATA/ACK/DRING_DATA\n"));
		break;

	case VIO_SUBTYPE_NACK:
		DPRINTF(("DATA/NACK/DRING_DATA\n"));
		break;

	default:
		DPRINTF(("DATA/0x%02x/DRING_DATA\n", tag->stype));
		break;
	}
}
Example #4
void
vdsp_write_dring(void *arg1, void *arg2)
{
	struct vdsp_softc *sc = arg1;
	struct ldc_conn *lc = &sc->sc_lc;
	struct vd_desc *vd = arg2;
	struct proc *p = curproc;
	struct iovec iov;
	struct uio uio;
	caddr_t buf;
	vaddr_t va;
	paddr_t pa;
	uint64_t size, off;
	psize_t nbytes;
	int err, i;

	if (sc->sc_vp == NULL)
		return;

	buf = malloc(vd->size, M_DEVBUF, M_WAITOK);

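	/*
	 * Gather the guest's data from its LDC cookies into a local
	 * buffer, without holding the kernel lock.
	 */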
	KERNEL_UNLOCK();
	i = 0;
	va = (vaddr_t)buf;
	size = vd->size;
	off = 0;
	while (size > 0 && i < vd->ncookies) {
		pmap_extract(pmap_kernel(), va, &pa);
		nbytes = MIN(size, vd->cookie[i].size - off);
		nbytes = MIN(nbytes, PAGE_SIZE - (off & PAGE_MASK));
		err = hv_ldc_copy(lc->lc_id, LDC_COPY_IN,
		    vd->cookie[i].addr + off, pa, nbytes, &nbytes);
		if (err != H_EOK) {
			printf("%s: hv_ldc_copy: %d\n", __func__, err);
			vd->status = EIO;
			KERNEL_LOCK();
			goto fail;
		}
		va += nbytes;
		size -= nbytes;
		off += nbytes;
		if (off >= vd->cookie[i].size) {
			off = 0;
			i++;
		}
	}
	KERNEL_LOCK();

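	/* Write the buffer to the backing vnode. */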
	iov.iov_base = buf;
	iov.iov_len = vd->size;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = vd->offset * DEV_BSIZE;
	uio.uio_resid = vd->size;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_procp = p;

	vn_lock(sc->sc_vp, LK_EXCLUSIVE | LK_RETRY, p);
	vd->status = VOP_WRITE(sc->sc_vp, &uio, 0, p->p_ucred);
	VOP_UNLOCK(sc->sc_vp, 0, p);

fail:
	free(buf, M_DEVBUF, 0);

	/* ACK the descriptor. */
	vd->hdr.dstate = VIO_DESC_DONE;
	vdsp_ack_desc(sc, vd);
}
Example #5
void
vdsp_read_desc(struct vdsp_softc *sc, struct vdsk_desc_msg *dm)
{
	struct ldc_conn *lc = &sc->sc_lc;
	struct proc *p = curproc;
	struct iovec iov;
	struct uio uio;
	caddr_t buf;
	vaddr_t va;
	paddr_t pa;
	uint64_t size, off;
	psize_t nbytes;
	int err, i;

	if (sc->sc_vp == NULL)
		return;

	buf = malloc(dm->size, M_DEVBUF, M_WAITOK);

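	/* Read the requested range from the backing vnode. */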
	iov.iov_base = buf;
	iov.iov_len = dm->size;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = dm->offset * DEV_BSIZE;
	uio.uio_resid = dm->size;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_procp = p;

	vn_lock(sc->sc_vp, LK_EXCLUSIVE | LK_RETRY, p);
	dm->status = VOP_READ(sc->sc_vp, &uio, 0, p->p_ucred);
	VOP_UNLOCK(sc->sc_vp, 0, p);

	KERNEL_UNLOCK();
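	/*
	 * On success, copy the data out to the guest through its LDC
	 * cookies, one page-bounded chunk at a time.
	 */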
	if (dm->status == 0) {
		i = 0;
		va = (vaddr_t)buf;
		size = dm->size;
		off = 0;
		while (size > 0 && i < dm->ncookies) {
			pmap_extract(pmap_kernel(), va, &pa);
			nbytes = MIN(size, dm->cookie[i].size - off);
			nbytes = MIN(nbytes, PAGE_SIZE - (off & PAGE_MASK));
			err = hv_ldc_copy(lc->lc_id, LDC_COPY_OUT,
			    dm->cookie[i].addr + off, pa, nbytes, &nbytes);
			if (err != H_EOK) {
				printf("%s: hv_ldc_copy: %d\n", __func__, err);
				dm->status = EIO;
				KERNEL_LOCK();
				goto fail;
			}
			va += nbytes;
			size -= nbytes;
			off += nbytes;
			if (off >= dm->cookie[i].size) {
				off = 0;
				i++;
			}
		}
	}
	KERNEL_LOCK();

fail:
	free(buf, M_DEVBUF, 0);

	/* ACK the descriptor. */
	dm->tag.stype = VIO_SUBTYPE_ACK;
	dm->tag.sid = sc->sc_local_sid;
	vdsp_sendmsg(sc, dm, sizeof(*dm) +
	    (dm->ncookies - 1) * sizeof(struct ldc_cookie), 1);
}
Example #6
void
vdsp_rx_vio_dring_data(struct vdsp_softc *sc, struct vio_msg_tag *tag)
{
	struct vio_dring_msg *dm = (struct vio_dring_msg *)tag;
	struct vd_desc *vd;
	struct task *task;
	vaddr_t va;
	paddr_t pa;
	uint64_t size, off;
	psize_t nbytes;
	int err;

	switch(tag->stype) {
	case VIO_SUBTYPE_INFO:
		DPRINTF(("DATA/INFO/DRING_DATA\n"));

		if (dm->dring_ident != sc->sc_dring_ident ||
		    dm->start_idx >= sc->sc_num_descriptors) {
			dm->tag.stype = VIO_SUBTYPE_NACK;
			vdsp_sendmsg(sc, dm, sizeof(*dm), 0);
			return;
		}

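		/*
		 * Copy the descriptor in from the guest's descriptor
		 * ring, bounded by page boundaries.
		 */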
		off = dm->start_idx * sc->sc_descriptor_size;
		vd = (struct vd_desc *)(sc->sc_vd + off);
		va = (vaddr_t)vd;
		size = sc->sc_descriptor_size;
		while (size > 0) {
			pmap_extract(pmap_kernel(), va, &pa);
			nbytes = MIN(size, PAGE_SIZE - (off & PAGE_MASK));
			err = hv_ldc_copy(sc->sc_lc.lc_id, LDC_COPY_IN,
			    sc->sc_dring_cookie.addr + off, pa,
			    nbytes, &nbytes);
			if (err != H_EOK) {
				printf("%s: hv_ldc_copy %d\n", __func__, err);
				return;
			}
			va += nbytes;
			size -= nbytes;
			off += nbytes;
		}
		task = &sc->sc_vd_task[dm->start_idx];

		DPRINTF(("%s: start_idx %d, end_idx %d, operation %x\n",
		    sc->sc_dv.dv_xname, dm->start_idx, dm->end_idx,
		    vd->operation));
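		/* Dispatch the requested operation to the system task queue. */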
		switch (vd->operation) {
		case VD_OP_BREAD:
			task_set(task, vdsp_read_dring, sc, vd);
			break;
		case VD_OP_BWRITE:
			task_set(task, vdsp_write_dring, sc, vd);
			break;
		case VD_OP_FLUSH:
			task_set(task, vdsp_flush_dring, sc, vd);
			break;
		case VD_OP_GET_VTOC:
			task_set(task, vdsp_get_vtoc, sc, vd);
			break;
		case VD_OP_SET_VTOC:
			task_set(task, vdsp_set_vtoc, sc, vd);
			break;
		case VD_OP_GET_DISKGEOM:
			task_set(task, vdsp_get_diskgeom, sc, vd);
			break;
		case VD_OP_GET_WCE:
		case VD_OP_SET_WCE:
		case VD_OP_GET_DEVID:
			/*
			 * Solaris issues VD_OP_GET_DEVID despite the
			 * fact that we don't advertise it.  It seems
			 * to be able to handle failure just fine, so
			 * we silently ignore it.
			 */
			task_set(task, vdsp_unimp, sc, vd);
			break;
		default:
			printf("%s: unsupported operation 0x%02x\n",
			    sc->sc_dv.dv_xname, vd->operation);
			task_set(task, vdsp_unimp, sc, vd);
			break;
		}
		task_add(systq, task);
		break;

	case VIO_SUBTYPE_ACK:
		DPRINTF(("DATA/ACK/DRING_DATA\n"));
		break;

	case VIO_SUBTYPE_NACK:
		DPRINTF(("DATA/NACK/DRING_DATA\n"));
		break;

	default:
		DPRINTF(("DATA/0x%02x/DRING_DATA\n", tag->stype));
		break;
	}
}
Example #7
int
vldcpioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
{
	struct vldcp_softc *sc;
	struct ldc_conn *lc;
	struct hv_io *hi = (struct hv_io *)data;
	paddr_t pa, offset;
	psize_t nbytes;
	caddr_t buf;
	size_t size;
	int err;

	sc = vldcp_lookup(dev);
	if (sc == NULL)
		return (ENXIO);
	lc = &sc->sc_lc;

	switch (cmd) {
	case HVIOCREAD:
	case HVIOCWRITE:
		break;
	default:
		device_unref(&sc->sc_dv);
		return (ENOTTY);
	}

	buf = malloc(PAGE_SIZE, M_DEVBUF, M_WAITOK);

	switch(cmd) {
	case HVIOCREAD:
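		/*
		 * Copy in from the LDC channel into the bounce buffer,
		 * then out to userland, one page at a time.
		 */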
		size = hi->hi_len;
		offset = 0;
		while (size > 0) {
			pmap_extract(pmap_kernel(), (vaddr_t)buf, &pa);
			nbytes = min(PAGE_SIZE, size);
			err = hv_ldc_copy(lc->lc_id, LDC_COPY_IN,
			    hi->hi_cookie + offset, pa, nbytes, &nbytes);
			if (err != H_EOK) {
				printf("hv_ldc_copy %d\n", err);
				free(buf, M_DEVBUF);
				device_unref(&sc->sc_dv);
				return (EINVAL);
			}
			err = copyout(buf, (caddr_t)hi->hi_addr + offset, nbytes);
			if (err) {
				free(buf, M_DEVBUF);
				device_unref(&sc->sc_dv);
				return (err);
			}
			size -= nbytes;
			offset += nbytes;
		}
		break;
	case HVIOCWRITE:
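		/*
		 * Copy in from userland into the bounce buffer, then out
		 * over the LDC channel, one page at a time.
		 */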
		size = hi->hi_len;
		offset = 0;
		while (size > 0) {
			pmap_extract(pmap_kernel(), (vaddr_t)buf, &pa);
			nbytes = min(PAGE_SIZE, size);
			err = copyin((caddr_t)hi->hi_addr + offset, buf, nbytes);
			if (err) {
				free(buf, M_DEVBUF);
				device_unref(&sc->sc_dv);
				return (err);
			}
			err = hv_ldc_copy(lc->lc_id, LDC_COPY_OUT,
			    hi->hi_cookie + offset, pa, nbytes, &nbytes);
			if (err != H_EOK) {
				printf("hv_ldc_copy %d\n", err);
				free(buf, M_DEVBUF);
				device_unref(&sc->sc_dv);
				return (EINVAL);
			}
			size -= nbytes;
			offset += nbytes;
		}
		break;

	}

	free(buf, M_DEVBUF);

	device_unref(&sc->sc_dv);
	return (0);
}