Example #1
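FreeBSD's memrw() for /dev/mem and /dev/kmem, apparently from the powerpc port (note hw_direct_map and pmap_dev_direct_mapped()). Physical ranges are copied through the direct map when one exists; otherwise a stack-allocated vm_page is handed to uiomove_fromphys().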
/* ARGSUSED */
int
memrw(struct cdev *dev, struct uio *uio, int flags)
{
	struct iovec *iov;
	int error = 0;
	vm_offset_t va, eva, off, v;
	vm_prot_t prot;
	struct vm_page m;
	vm_page_t marr;
	vm_size_t cnt;

	cnt = 0;
	error = 0;

	while (uio->uio_resid > 0 && !error) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("memrw");
			continue;
		}
		if (dev2unit(dev) == CDEV_MINOR_MEM) {
			v = uio->uio_offset;

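			/*
			 * Also entered via goto from the KMEM branch below;
			 * v holds a physical address at this point.
			 */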
kmem_direct_mapped:	off = v & PAGE_MASK;
			cnt = PAGE_SIZE - ((vm_offset_t)iov->iov_base &
			    PAGE_MASK);
			cnt = min(cnt, PAGE_SIZE - off);
			cnt = min(cnt, iov->iov_len);

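			/* A non-zero return from mem_valid() marks the range invalid. */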
			if (mem_valid(v, cnt)) {
				error = EFAULT;
				break;
			}
	
			if (hw_direct_map && !pmap_dev_direct_mapped(v, cnt)) {
				error = uiomove((void *)PHYS_TO_DMAP(v), cnt,
				    uio);
			} else {
				m.phys_addr = trunc_page(v);
				marr = &m;
				error = uiomove_fromphys(&marr, off, cnt, uio);
			}
		} else if (dev2unit(dev) == CDEV_MINOR_KMEM) {
			va = uio->uio_offset;

			if ((va < VM_MIN_KERNEL_ADDRESS) || (va > virtual_end)) {
				v = DMAP_TO_PHYS(va);
				goto kmem_direct_mapped;
			}

			va = trunc_page(uio->uio_offset);
			eva = round_page(uio->uio_offset
			    + iov->iov_len);

			/* 
			 * Make sure that all the pages are currently resident
			 * so that we don't create any zero-fill pages.
			 */

			for (; va < eva; va += PAGE_SIZE)
				if (pmap_extract(kernel_pmap, va) == 0)
					return (EFAULT);

			prot = (uio->uio_rw == UIO_READ)
			    ? VM_PROT_READ : VM_PROT_WRITE;

			va = uio->uio_offset;
			if (kernacc((void *) va, iov->iov_len, prot)
			    == FALSE)
				return (EFAULT);

			error = uiomove((void *)va, iov->iov_len, uio);

			continue;
		}
	}

	return (error);
}
Example #2
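An older 32-bit (i386-style) variant: each physical page is validated against dump_avail[], temporarily mapped at _tmppt under tmppt_lock, copied, and unmapped again.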
/* ARGSUSED */
int
memrw(struct cdev *dev, struct uio *uio, int flags)
{
	int o;
	u_int c = 0, v;
	struct iovec *iov;
	int error = 0;
	vm_offset_t addr, eaddr;

	while (uio->uio_resid > 0 && error == 0) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("memrw");
			continue;
		}
		if (dev2unit(dev) == CDEV_MINOR_MEM) {
			int i;
			int address_valid = 0;

			v = uio->uio_offset;
			v &= ~PAGE_MASK;
			for (i = 0; dump_avail[i] || dump_avail[i + 1];
			    i += 2) {
				if (v >= dump_avail[i] &&
				    v < dump_avail[i + 1]) {
					address_valid = 1;
					break;
				}
			}
			if (!address_valid)
				return (EINVAL);
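			/* Serialize use of the single shared _tmppt mapping page. */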
			sx_xlock(&tmppt_lock);
			pmap_kenter((vm_offset_t)_tmppt, v);
			o = (int)uio->uio_offset & PAGE_MASK;
			c = (u_int)(PAGE_SIZE - ((int)iov->iov_base & PAGE_MASK));
			c = min(c, (u_int)(PAGE_SIZE - o));
			c = min(c, (u_int)iov->iov_len);
			error = uiomove((caddr_t)&_tmppt[o], (int)c, uio);
			pmap_qremove((vm_offset_t)_tmppt, 1);
			sx_xunlock(&tmppt_lock);
			continue;
		} else if (dev2unit(dev) == CDEV_MINOR_KMEM) {
			c = iov->iov_len;

			/*
			 * Make sure that all of the pages are currently
			 * resident so that we don't create any zero-fill
			 * pages.
			 */
			addr = trunc_page(uio->uio_offset);
			eaddr = round_page(uio->uio_offset + c);

			for (; addr < eaddr; addr += PAGE_SIZE)
				if (pmap_extract(kernel_pmap, addr) == 0)
					return (EFAULT);
			if (!kernacc((caddr_t)(int)uio->uio_offset, c,
			    uio->uio_rw == UIO_READ ?
			    VM_PROT_READ : VM_PROT_WRITE))
				return (EFAULT);
			error = uiomove((caddr_t)(int)uio->uio_offset, (int)c, uio);
			continue;
		}
		/* else panic! */
	}
	return (error);
}
Example #3
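The amd64 implementation; the original listing attributed it to mem.c in the 2asoft/freebsd mirror. /dev/kmem addresses are served from the direct map when possible, or translated with pmap_extract() and fed to the /dev/mem path, which uses PHYS_TO_DMAP() or a transient pmap_mapdev() mapping.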
/* ARGSUSED */
int
memrw(struct cdev *dev, struct uio *uio, int flags)
{
	struct iovec *iov;
	void *p;
	ssize_t orig_resid;
	u_long v, vd;
	u_int c;
	int error;

	error = 0;
	orig_resid = uio->uio_resid;
	while (uio->uio_resid > 0 && error == 0) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("memrw");
			continue;
		}
		v = uio->uio_offset;
		c = ulmin(iov->iov_len, PAGE_SIZE - (u_int)(v & PAGE_MASK));

		switch (dev2unit(dev)) {
		case CDEV_MINOR_KMEM:
			/*
			 * Since c is clamped to be less than or equal to
			 * PAGE_SIZE, the uiomove() call does not access past
			 * the end of the direct map.
			 */
			if (v >= DMAP_MIN_ADDRESS &&
			    v < DMAP_MIN_ADDRESS + dmaplimit) {
				error = uiomove((void *)v, c, uio);
				break;
			}

			if (!kernacc((void *)v, c, uio->uio_rw == UIO_READ ?
			    VM_PROT_READ : VM_PROT_WRITE)) {
				error = EFAULT;
				break;
			}

			/*
			 * If the extracted address is not accessible
			 * through the direct map, then we make a
			 * private (uncached) mapping because we can't
			 * depend on the existing kernel mapping
			 * remaining valid until the completion of
			 * uiomove().
			 *
			 * XXX We cannot provide access to the
			 * physical page 0 mapped into KVA.
			 */
			v = pmap_extract(kernel_pmap, v);
			if (v == 0) {
				error = EFAULT;
				break;
			}
			/* FALLTHROUGH */
		case CDEV_MINOR_MEM:
			if (v < dmaplimit) {
				vd = PHYS_TO_DMAP(v);
				error = uiomove((void *)vd, c, uio);
				break;
			}
			if (v >= (1ULL << cpu_maxphyaddr)) {
				error = EFAULT;
				break;
			}
			p = pmap_mapdev(v, PAGE_SIZE);
			error = uiomove(p, c, uio);
			pmap_unmapdev((vm_offset_t)p, PAGE_SIZE);
			break;
		}
	}
	/*
	 * Don't return error if any byte was written.  Read and write
	 * can return error only if no i/o was performed.
	 */
	if (uio->uio_resid != orig_resid)
		error = 0;
	return (error);
}
Example #4
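An earlier amd64 variant: it still runs under Giant, marks the thread with TDP_DEVMEMIO for the duration of the copy, and redirects /dev/kmem offsets that fall inside the DMAP to the shared kmemphys path.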
/* ARGSUSED */
int
memrw(struct cdev *dev, struct uio *uio, int flags)
{
	struct iovec *iov;
	u_long c, v;
	int error, o, sflags;
	vm_offset_t addr, eaddr;

	GIANT_REQUIRED;

	error = 0;
	c = 0;
	sflags = curthread_pflags_set(TDP_DEVMEMIO);
	while (uio->uio_resid > 0 && error == 0) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("memrw");
			continue;
		}
		if (dev2unit(dev) == CDEV_MINOR_MEM) {
			v = uio->uio_offset;
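			/*
			 * Shared physical-copy path; also entered via goto
			 * from the KMEM branch with v already a physical
			 * address.
			 */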
kmemphys:
			o = v & PAGE_MASK;
			c = min(uio->uio_resid, (u_int)(PAGE_SIZE - o));
			v = PHYS_TO_DMAP(v);
			if (v < DMAP_MIN_ADDRESS ||
			    (v > DMAP_MIN_ADDRESS + dmaplimit &&
			    v <= DMAP_MAX_ADDRESS) ||
			    pmap_kextract(v) == 0) {
				error = EFAULT;
				goto ret;
			}
			error = uiomove((void *)v, (int)c, uio);
			continue;
		} else if (dev2unit(dev) == CDEV_MINOR_KMEM) {
			v = uio->uio_offset;

			if (v >= DMAP_MIN_ADDRESS && v < DMAP_MAX_ADDRESS) {
				v = DMAP_TO_PHYS(v);
				goto kmemphys;
			}

			c = iov->iov_len;

			/*
			 * Make sure that all of the pages are currently
			 * resident so that we don't create any zero-fill
			 * pages.
			 */
			addr = trunc_page(v);
			eaddr = round_page(v + c);

			if (addr < VM_MIN_KERNEL_ADDRESS) {
				error = EFAULT;
				goto ret;
			}
			for (; addr < eaddr; addr += PAGE_SIZE) {
				if (pmap_extract(kernel_pmap, addr) == 0) {
					error = EFAULT;
					goto ret;
				}
			}
			if (!kernacc((caddr_t)(long)v, c,
			    uio->uio_rw == UIO_READ ? 
			    VM_PROT_READ : VM_PROT_WRITE)) {
				error = EFAULT;
				goto ret;
			}

			error = uiomove((caddr_t)(long)v, (int)c, uio);
			continue;
		}
		/* else panic! */
	}
ret:
	curthread_pflags_restore(sflags);
	return (error);
}
Example #5
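An older powerpc variant of Example #1: /dev/mem always copies via uiomove_fromphys(), and /dev/kmem only checks residency and permissions for offsets that lie inside the kernel map.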
/* ARGSUSED */
int
memrw(struct cdev *dev, struct uio *uio, int flags)
{
    struct iovec *iov;
    int error = 0;
    vm_offset_t va, eva, off, v;
    vm_prot_t prot;
    struct vm_page m;
    vm_page_t marr;
    vm_size_t cnt;

    cnt = 0;
    error = 0;

    GIANT_REQUIRED;

    while (uio->uio_resid > 0 && !error) {
        iov = uio->uio_iov;
        if (iov->iov_len == 0) {
            uio->uio_iov++;
            uio->uio_iovcnt--;
            if (uio->uio_iovcnt < 0)
                panic("memrw");
            continue;
        }
        if (dev2unit(dev) == CDEV_MINOR_MEM) {
            v = uio->uio_offset;

            off = uio->uio_offset & PAGE_MASK;
            cnt = PAGE_SIZE - ((vm_offset_t)iov->iov_base &
                               PAGE_MASK);
            cnt = min(cnt, PAGE_SIZE - off);
            cnt = min(cnt, iov->iov_len);

            m.phys_addr = trunc_page(v);
            marr = &m;
            error = uiomove_fromphys(&marr, off, cnt, uio);
        } else if (dev2unit(dev) == CDEV_MINOR_KMEM) {
            va = trunc_page(uio->uio_offset);
            eva = round_page(uio->uio_offset
                             + iov->iov_len);

            /*
             * Make sure that all the pages are currently resident
             * so that we don't create any zero-fill pages.
             */
            if (va >= VM_MIN_KERNEL_ADDRESS &&
                    eva <= VM_MAX_KERNEL_ADDRESS) {
                for (; va < eva; va += PAGE_SIZE)
                    if (pmap_extract(kernel_pmap, va) == 0)
                        return (EFAULT);

                prot = (uio->uio_rw == UIO_READ)
                       ? VM_PROT_READ : VM_PROT_WRITE;

                va = uio->uio_offset;
                if (kernacc((void *) va, iov->iov_len, prot)
                        == FALSE)
                    return (EFAULT);
            }

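            /*
             * Note: offsets outside the checked kernel range are copied
             * below without any validation; a bad address can fault in
             * the kernel.
             */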
            va = uio->uio_offset;
            error = uiomove((void *)va, iov->iov_len, uio);
            continue;
        }
    }

    return (error);
}
Example #6
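A DMAP-centric port (arm64-style, note VIRT_IN_DMAP()/PHYS_IN_DMAP()): both minors clamp each copy to a single page, prefer the direct map, and fall back to uiomove_fromphys().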
int
memrw(struct cdev *dev, struct uio *uio, int flags)
{
	struct iovec *iov;
	struct vm_page m;
	vm_page_t marr;
	vm_offset_t off, v;
	u_int cnt;
	int error;

	error = 0;

	while (uio->uio_resid > 0 && error == 0) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("memrw");
			continue;
		}

		v = uio->uio_offset;
		off = v & PAGE_MASK;
		cnt = ulmin(iov->iov_len, PAGE_SIZE - (u_int)off);
		if (cnt == 0)
			continue;

		switch (dev2unit(dev)) {
		case CDEV_MINOR_KMEM:
			/* If the address is in the DMAP just copy it */
			if (VIRT_IN_DMAP(v)) {
				error = uiomove((void *)v, cnt, uio);
				break;
			}

			if (!kernacc((void *)v, cnt, uio->uio_rw == UIO_READ ?
			    VM_PROT_READ : VM_PROT_WRITE)) {
				error = EFAULT;
				break;
			}

			/* Get the physical address to read */
			v = pmap_extract(kernel_pmap, v);
			if (v == 0) {
				error = EFAULT;
				break;
			}

			/* FALLTHROUGH */
		case CDEV_MINOR_MEM:
			/* If within the DMAP use this to copy from */
			if (PHYS_IN_DMAP(v)) {
				v = PHYS_TO_DMAP(v);
				error = uiomove((void *)v, cnt, uio);
				break;
			}

			/* Have uiomove_fromphys handle the data */
			m.phys_addr = trunc_page(v);
			marr = &m;
			error = uiomove_fromphys(&marr, off, cnt, uio);
			break;
		}
	}

	return (error);
}
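
For context, a minimal userland sketch of how these handlers get exercised: reading /dev/mem turns the file offset into uio->uio_offset inside memrw(). The physical address below is purely illustrative, and opening /dev/mem requires whatever privileges the kernel demands for it.

#include <sys/types.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	unsigned char buf[256];
	off_t phys = 0x1000;		/* illustrative physical address */
	ssize_t n;
	int fd;

	fd = open("/dev/mem", O_RDONLY);
	if (fd == -1) {
		perror("open(/dev/mem)");
		return (1);
	}
	/* The seek offset becomes uio->uio_offset in memrw(). */
	if (lseek(fd, phys, SEEK_SET) == -1) {
		perror("lseek");
		close(fd);
		return (1);
	}
	n = read(fd, buf, sizeof(buf));	/* dispatches into memrw() */
	if (n == -1)
		perror("read");		/* e.g. EFAULT from the range checks */
	else
		printf("read %zd bytes at offset 0x%jx\n", n, (uintmax_t)phys);
	close(fd);
	return (0);
}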