Example #1
static void
disk_foo(struct somestat *tbuf)
{
	struct cdevsw *cdevsw;
	struct cdev *dev;

	/* Lie about disk drives which are character devices
	 * in FreeBSD but block devices under Linux.
	 */
	if (S_ISCHR(tbuf->st_mode) &&
	    (dev = findcdev(tbuf->st_rdev)) != NULL) {
		cdevsw = dev_refthread(dev);
		if (cdevsw != NULL) {
			if (cdevsw->d_flags & D_DISK) {
				tbuf->st_mode &= ~S_IFMT;
				tbuf->st_mode |= S_IFBLK;

				/* XXX this may not be quite right */
				/* Map major number to 0 */
				tbuf->st_dev = minor(tbuf->st_dev) & 0xf;
				tbuf->st_rdev = tbuf->st_rdev & 0xff;
			}
			dev_relthread(dev);
		}
	}
}
Example #2
/* See: old_dev_pager_ctor() in device_pager.c as an example. */
static int
cheri_compositor_cfb_pg_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred, u_short *color)
{
	struct cfb_vm_object *cfb_vm_obj;
	struct cdev *dev;
	struct cheri_compositor_softc *sc;
	struct cdevsw *csw;
	vm_ooffset_t top_offset; /* offset of the first byte of the last page. */
	unsigned int npages;
	int ref;
	int retval = 0;

	cfb_vm_obj = handle;
	dev = cfb_vm_obj->dev;
	sc = dev->si_drv1;

	CHERI_COMPOSITOR_DEBUG(sc,
	    "handle: %p, size: %lu, prot: %i, foff: %lu, cred: %p",
	    handle, size, prot, foff, cred);

	/* Make sure this device can be mapped. */
	csw = dev_refthread(dev, &ref);
	if (csw == NULL) {
		retval = ENXIO;
		goto done_unreffed;
	}

	/* Protection, alignment and bounds checks. */
	npages = OFF_TO_IDX(size);
	top_offset = foff + (npages - 1) * PAGE_SIZE;

	retval = validate_prot_and_offset(sc, cfb_vm_obj->pool->mapped_fd, prot,
	    top_offset);
	if (retval != 0) {
		goto done;
	}

	/* Hold a reference to the device until this mapping is destroyed in
	 * cheri_compositor_cfb_pg_dtor(). */
	dev_ref(dev);

	/* All compositor pages are uncached, so colouring them (to reduce cache
	 * collisions; see
	 * http://docs.freebsd.org/doc/4.4-RELEASE/usr/share/doc/en/articles/vm-design/x103.html)
	 * is pointless. */
	*color = 0;

	/* Success. */
	retval = 0;

done:
	dev_relthread(dev, ref);
done_unreffed:
	CHERI_COMPOSITOR_DEBUG(sc,
	    "Finished with color: %u (retval: %u).", *color, retval);

	return (retval);
}
Example #3
int
nvme_ns_physio(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct uio		uio_tmp;
	struct iovec		uio_iov_tmp;
	struct nvme_namespace	*ns;
	struct mtx		*mtx;
	int			i, nvme_err, physio_err = 0;
#if __FreeBSD_version > 900017
	int			ref;
#endif

	PHOLD(curproc);

	ns = dev->si_drv1;
	mtx = mtx_pool_find(mtxpool_sleep, &uio_tmp);

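	/*
	 * Take a temporary reference on the device so that it cannot be
	 * destroyed while I/O is in progress; the two-argument form of
	 * dev_refthread() only exists on newer kernels.
	 */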
#if __FreeBSD_version > 900017
	dev_refthread(dev, &ref);
#else
	dev_refthread(dev);
#endif

	/*
	 * NVM Express doesn't really support true SGLs.  All SG elements
	 *  must be PAGE_SIZE, except for the first and last element.
	 *  Because of this, we need to break up each iovec into a separate
	 *  NVMe command - otherwise we could end up with sub-PAGE_SIZE
	 *  elements in the middle of an SGL which is not allowed.
	 */
	uio_tmp.uio_iov = &uio_iov_tmp;
	uio_tmp.uio_iovcnt = 1;
	uio_tmp.uio_offset = uio->uio_offset;
	uio_tmp.uio_segflg = uio->uio_segflg;
	uio_tmp.uio_rw = uio->uio_rw;
	uio_tmp.uio_td = uio->uio_td;

	for (i = 0; i < uio->uio_iovcnt; i++) {
		uio_iov_tmp.iov_base = uio->uio_iov[i].iov_base;
		uio_iov_tmp.iov_len = uio->uio_iov[i].iov_len;
		uio_tmp.uio_resid = uio_iov_tmp.iov_len;

		mtx_lock(mtx);

		if (uio->uio_rw == UIO_READ)
			nvme_err = nvme_read_uio(ns, &uio_tmp);
		else
			nvme_err = nvme_write_uio(ns, &uio_tmp);

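		/*
		 * On successful submission, sleep until the completion
		 * handler wakes us up on &uio_tmp.
		 */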
		if (nvme_err == 0)
			msleep(&uio_tmp, mtx, PRIBIO, "nvme_physio", 0);

		mtx_unlock(mtx);

		if (uio_tmp.uio_resid == 0) {
			uio->uio_resid -= uio_iov_tmp.iov_len;
			uio->uio_offset += uio_iov_tmp.iov_len;
		} else {
			physio_err = EFAULT;
			break;
		}

		uio_tmp.uio_offset += uio_iov_tmp.iov_len;
	}

#if __FreeBSD_version > 900017
	dev_relthread(dev, ref);
#else
	dev_relthread(dev);
#endif

	PRELE(curproc);
	return (physio_err);
}
Example #4
static void
nvme_ns_bio_test(void *arg)
{
	struct nvme_io_test_internal	*io_test = arg;
	struct cdevsw			*csw;
	struct mtx			*mtx;
	struct bio			*bio;
	struct cdev			*dev;
	void				*buf;
	struct timeval			t;
	uint64_t			offset;
	uint32_t			idx, io_completed = 0;
#if __FreeBSD_version >= 900017
	int				ref;
#endif

	buf = malloc(io_test->size, M_NVME, M_WAITOK);
	idx = atomic_fetchadd_int(&io_test->td_idx, 1);
	dev = io_test->ns->cdev;

	offset = idx * 2048 * nvme_ns_get_sector_size(io_test->ns);

	while (1) {
		bio = g_alloc_bio();

		memset(bio, 0, sizeof(*bio));
		bio->bio_cmd = (io_test->opc == NVME_OPC_READ) ?
		    BIO_READ : BIO_WRITE;
		bio->bio_done = nvme_ns_bio_test_cb;
		bio->bio_dev = dev;
		bio->bio_offset = offset;
		bio->bio_data = buf;
		bio->bio_bcount = io_test->size;

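		/*
		 * Optionally take a temporary device reference with
		 * dev_refthread() before dispatching the bio; otherwise
		 * use the cached cdevsw pointer directly.
		 */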
		if (io_test->flags & NVME_TEST_FLAG_REFTHREAD) {
#if __FreeBSD_version >= 900017
			csw = dev_refthread(dev, &ref);
#else
			csw = dev_refthread(dev);
#endif
		} else
			csw = dev->si_devsw;

		mtx = mtx_pool_find(mtxpool_sleep, bio);
		mtx_lock(mtx);
		(*csw->d_strategy)(bio);
		msleep(bio, mtx, PRIBIO, "biotestwait", 0);
		mtx_unlock(mtx);

		if (io_test->flags & NVME_TEST_FLAG_REFTHREAD) {
#if __FreeBSD_version >= 900017
			dev_relthread(dev, ref);
#else
			dev_relthread(dev);
#endif
		}

		if ((bio->bio_flags & BIO_ERROR) || (bio->bio_resid > 0))
			break;

		g_destroy_bio(bio);

		io_completed++;

		getmicrouptime(&t);
		timevalsub(&t, &io_test->start);

		if (t.tv_sec >= io_test->time)
			break;

		offset += io_test->size;
		if ((offset + io_test->size) > nvme_ns_get_size(io_test->ns))
			offset = 0;
	}

	io_test->io_completed[idx] = io_completed;
	wakeup_one(io_test);

	free(buf, M_NVME);

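	/*
	 * Mark this worker as finished; the memory barrier publishes
	 * the update before the thread exits.
	 */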
	atomic_subtract_int(&io_test->td_active, 1);
	mb();

#if __FreeBSD_version >= 800000
	kthread_exit();
#else
	kthread_exit(0);
#endif
}
Example #5
/* See: old_dev_pager_fault() in device_pager.c as an example. */
static int
cheri_compositor_cfb_pg_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot,
    vm_page_t *mres)
{
	vm_pindex_t pidx;
	vm_paddr_t paddr;
	vm_page_t page;
	struct cfb_vm_object *cfb_vm_obj;
	struct cdev *dev;
	struct cheri_compositor_softc *sc;
	struct cdevsw *csw;
	vm_memattr_t memattr;
	int ref;
	int retval;

	pidx = OFF_TO_IDX(offset);

	VM_OBJECT_WUNLOCK(vm_obj);

	cfb_vm_obj = vm_obj->handle;
	dev = cfb_vm_obj->dev;
	sc = dev->si_drv1;

	retval = VM_PAGER_OK;

	CHERI_COMPOSITOR_DEBUG(sc, "vm_obj: %p, offset: %lu, prot: %i", vm_obj,
	    offset, prot);

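	/* Pin the device so its driver state stays valid while in use. */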
	csw = dev_refthread(dev, &ref);

	if (csw == NULL) {
		retval = VM_PAGER_FAIL;
		goto done_unlocked;
	}

	/* Traditional d_mmap() call. */
	CHERI_COMPOSITOR_DEBUG(sc, "offset: %lu, nprot: %i", offset, prot);

	if (validate_prot_and_offset(sc, cfb_vm_obj->pool->mapped_fd,
	    prot, offset) != 0) {
		retval = VM_PAGER_FAIL;
		goto done_unlocked;
	}

	paddr = calculate_physical_address(sc, cfb_vm_obj->pool, offset);
	memattr = VM_MEMATTR_UNCACHEABLE;

	CHERI_COMPOSITOR_DEBUG(sc, "paddr: %p, memattr: %i",
	    (void *) paddr, memattr);

	dev_relthread(dev, ref);

	/* Sanity checks. */
	KASSERT((((*mres)->flags & PG_FICTITIOUS) == 0),
	    ("Expected non-fictitious page."));

	/*
	 * Replace the passed in reqpage page with our own fake page and
	 * free up all of the original pages.
	 */
	page = vm_page_getfake(paddr, memattr);
	VM_OBJECT_WLOCK(vm_obj);
	vm_page_lock(*mres);
	vm_page_free(*mres);
	vm_page_unlock(*mres);
	*mres = page;
	vm_page_insert(page, vm_obj, pidx);

	page->valid = VM_PAGE_BITS_ALL;

	/* Success! */
	retval = VM_PAGER_OK;
	goto done;

done_unlocked:
	VM_OBJECT_WLOCK(vm_obj);
done:
	CHERI_COMPOSITOR_DEBUG(sc, "Finished with mres: %p (retval: %i)", *mres,
	    retval);

	return (retval);
}