/* drm_open_helper is called whenever a process opens /dev/drm. */
int drm_open_helper(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p,
		    struct drm_device *dev)
{
	struct drm_file *priv;
	int m = dev2unit(kdev);
	int retcode;

	if (flags & O_EXCL)
		return EBUSY; /* No exclusive opens */
	dev->flags = flags;

	DRM_DEBUG("pid = %d, minor = %d\n", DRM_CURRENTPID, m);

	priv = malloc(sizeof(*priv), DRM_MEM_FILES, M_NOWAIT | M_ZERO);
	if (priv == NULL) {
		return ENOMEM;
	}

	retcode = devfs_set_cdevpriv(priv, drm_close);
	if (retcode != 0) {
		free(priv, DRM_MEM_FILES);
		return retcode;
	}

	DRM_LOCK();
	priv->dev		= dev;
	priv->uid		= p->td_ucred->cr_svuid;
	priv->pid		= p->td_proc->p_pid;
	priv->minor		= m;
	priv->ioctl_count 	= 0;

	/* for compatibility root is always authenticated */
	priv->authenticated	= DRM_SUSER(p);

	if (dev->driver->open) {
		/* shared code returns -errno */
		retcode = -dev->driver->open(dev, priv);
		if (retcode != 0) {
			devfs_clear_cdevpriv();
			free(priv, DRM_MEM_FILES);
			DRM_UNLOCK();
			return retcode;
		}
	}

	/* first opener automatically becomes master */
	priv->master = TAILQ_EMPTY(&dev->files);

	TAILQ_INSERT_TAIL(&dev->files, priv, link);
	DRM_UNLOCK();
	kdev->si_drv1 = dev;
	return 0;
}
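
For orientation, a minimal sketch (not the stock DRM entry point) of how a d_open handler could resolve the unit and hand off to drm_open_helper(); devclass_get_softc() is standard newbus, and drm_devclass is an assumed driver-side devclass:

static int
drm_open(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p)
{
	struct drm_device *dev;

	/* Map the minor back to the driver instance (assumed devclass). */
	dev = devclass_get_softc(drm_devclass, dev2unit(kdev));
	if (dev == NULL)
		return (ENXIO);
	return (drm_open_helper(kdev, flags, fmt, p, dev));
}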
Example 2
int
cpuctl_ioctl(struct cdev *dev, u_long cmd, caddr_t data,
    int flags, struct thread *td)
{
	int cpu, ret;

	cpu = dev2unit(dev);
	if (cpu > mp_maxid || !cpu_enabled(cpu)) {
		DPRINTF("[cpuctl,%d]: bad cpu number %d\n", __LINE__, cpu);
		return (ENXIO);
	}
	/* Require write flag for "write" requests. */
	if ((cmd == CPUCTL_MSRCBIT || cmd == CPUCTL_MSRSBIT ||
	    cmd == CPUCTL_UPDATE || cmd == CPUCTL_WRMSR ||
	    cmd == CPUCTL_EVAL_CPU_FEATURES) &&
	    (flags & FWRITE) == 0)
		return (EPERM);
	switch (cmd) {
	case CPUCTL_RDMSR:
		ret = cpuctl_do_msr(cpu, (cpuctl_msr_args_t *)data, cmd, td);
		break;
	case CPUCTL_MSRSBIT:
	case CPUCTL_MSRCBIT:
	case CPUCTL_WRMSR:
		ret = priv_check(td, PRIV_CPUCTL_WRMSR);
		if (ret != 0)
			goto fail;
		ret = cpuctl_do_msr(cpu, (cpuctl_msr_args_t *)data, cmd, td);
		break;
	case CPUCTL_CPUID:
		ret = cpuctl_do_cpuid(cpu, (cpuctl_cpuid_args_t *)data, td);
		break;
	case CPUCTL_UPDATE:
		ret = priv_check(td, PRIV_CPUCTL_UPDATE);
		if (ret != 0)
			goto fail;
		ret = cpuctl_do_update(cpu, (cpuctl_update_args_t *)data, td);
		break;
	case CPUCTL_CPUID_COUNT:
		ret = cpuctl_do_cpuid_count(cpu,
		    (cpuctl_cpuid_count_args_t *)data, td);
		break;
	case CPUCTL_EVAL_CPU_FEATURES:
		ret = cpuctl_do_eval_cpu_features(cpu, td);
		break;
	default:
		ret = EINVAL;
		break;
	}
fail:
	return (ret);
}
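
As a usage illustration, here is a hedged userland sketch (not from the source) of driving the read path above: CPUCTL_RDMSR is not on the FWRITE-gated list, so an O_RDONLY open suffices; the cpuctl_msr_args_t field names follow cpuctl(4), and the MSR number is an arbitrary x86 example.

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/cpuctl.h>
#include <err.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	cpuctl_msr_args_t args;
	int fd;

	fd = open("/dev/cpuctl0", O_RDONLY);
	if (fd == -1)
		err(1, "open");
	args.msr = 0x10;	/* TSC MSR on x86, purely illustrative */
	if (ioctl(fd, CPUCTL_RDMSR, &args) == -1)
		err(1, "CPUCTL_RDMSR");
	printf("msr 0x%x = 0x%016jx\n", (unsigned)args.msr,
	    (uintmax_t)args.data);
	close(fd);
	return (0);
}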
Example 3
static	int
lptopen(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	struct lpt_softc *sc = dev->si_drv1;
	int s;
	int port;

	if (sc->sc_port == 0)
		return (ENXIO);

	if (sc->sc_state) {
		lprintf(("lp: still open %x\n", sc->sc_state));
		return(EBUSY);
	} else
		sc->sc_state |= INIT;

	sc->sc_flags = dev2unit(dev);

	/* Check for open with BYPASS flag set. */
	if (sc->sc_flags & LP_BYPASS) {
		sc->sc_state = OPEN;
		return(0);
	}

	s = spltty();
	lprintf(("lp flags 0x%x\n", sc->sc_flags));
	port = sc->sc_port;

	/* set IRQ status according to ENABLE_IRQ flag */
	if (sc->sc_irq & LP_ENABLE_IRQ)
		sc->sc_irq |= LP_USE_IRQ;
	else
		sc->sc_irq &= ~LP_USE_IRQ;

	/* init printer */
	sc->sc_state = OPEN;
	sc->sc_inbuf = malloc(BUFSIZE, M_DEVBUF, M_WAITOK);
	sc->sc_xfercnt = 0;
	splx(s);

	/* only use timeout if using interrupt */
	lprintf(("irq %x\n", sc->sc_irq));
	if (sc->sc_irq & LP_USE_IRQ) {
		sc->sc_state |= TOUT;
		timeout(lptout, (caddr_t)sc,
		    (sc->sc_backoff = hz / LPTOUTINITIAL));
	}

	lprintf(("opened.\n"));
	return(0);
}
Example 4
/*
 * allow user processes to MMAP some memory sections
 * instead of going through read/write
 */
int
memmmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr,
    int prot, vm_memattr_t *memattr)
{
	int i;

	/*
	 * /dev/mem is the only one that makes sense through this
	 * interface.  For /dev/kmem any physaddr we return here
	 * could be transient and hence incorrect or invalid at
	 * a later time.
	 */
	if (dev2unit(dev) != CDEV_MINOR_MEM)
		return (-1);

	/* Only direct-mapped addresses. */
	if (mem_valid(offset, 0) && pmap_dev_direct_mapped(offset, 0))
		return (EFAULT);

	*paddr = offset;

	for (i = 0; i < mem_range_softc.mr_ndesc; i++) {
		if (!(mem_range_softc.mr_desc[i].mr_flags & MDF_ACTIVE))
			continue;

		if (offset >= mem_range_softc.mr_desc[i].mr_base &&
		    offset < mem_range_softc.mr_desc[i].mr_base +
		    mem_range_softc.mr_desc[i].mr_len) {
			switch (mem_range_softc.mr_desc[i].mr_flags &
			    MDF_ATTRMASK) {
			case MDF_WRITEBACK:
				*memattr = VM_MEMATTR_WRITE_BACK;
				break;
			case MDF_WRITECOMBINE:
				*memattr = VM_MEMATTR_WRITE_COMBINING;
				break;
			case MDF_UNCACHEABLE:
				*memattr = VM_MEMATTR_UNCACHEABLE;
				break;
			case MDF_WRITETHROUGH:
				*memattr = VM_MEMATTR_WRITE_THROUGH;
				break;
			}

			break;
		}
	}

	return (0);
}
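
This handler is reached through an ordinary mmap(2) of the device node; a hedged userland sketch, with the physical offset chosen purely as a placeholder:

#include <sys/mman.h>
#include <err.h>
#include <fcntl.h>
#include <unistd.h>

int
main(void)
{
	void *p;
	int fd;

	fd = open("/dev/mem", O_RDONLY);
	if (fd == -1)
		err(1, "open");
	/* Offset 0x100000 (1 MiB) is only a placeholder address. */
	p = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0x100000);
	if (p == MAP_FAILED)
		err(1, "mmap");
	/* ... inspect the mapped page ... */
	munmap(p, 4096);
	close(fd);
	return (0);
}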
Example 5
/*
 * allow user processes to MMAP some memory sections
 * instead of going through read/write
 */
int
memmmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr,
    int prot, vm_memattr_t *memattr)
{
	/*
	 * /dev/mem is the only one that makes sense through this
	 * interface.  For /dev/kmem any physaddr we return here
	 * could be transient and hence incorrect or invalid at
	 * a later time.
	 */
	if (dev2unit(dev) != CDEV_MINOR_MEM)
		return (-1);

	*paddr = offset;

	return (0);
}
Example 6
static int
fw_open (struct cdev *dev, int flags, int fmt, fw_proc *td)
{
	int err = 0;
	int unit = DEV2UNIT(dev);
	struct fw_drv1 *d;
	struct firewire_softc *sc;

	if (DEV_FWMEM(dev))
		return fwmem_open(dev, flags, fmt, td);

	sc = devclass_get_softc(firewire_devclass, unit);
	if (sc == NULL)
		return (ENXIO);

	FW_GLOCK(sc->fc);
	if (dev->si_drv1 != NULL) {
		FW_GUNLOCK(sc->fc);
		return (EBUSY);
	}
	/* Park a dummy value in si_drv1 so a racing open sees EBUSY
	 * while we sleep in malloc() below. */
	dev->si_drv1 = (void *)-1;
	FW_GUNLOCK(sc->fc);

	/* M_WAITOK malloc never returns NULL, so no error check is needed. */
	dev->si_drv1 = malloc(sizeof(struct fw_drv1), M_FW, M_WAITOK | M_ZERO);

#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
	if ((dev->si_flags & SI_NAMED) == 0) {
		int unit = DEV2UNIT(dev);
		int sub = DEV2SUB(dev);

		make_dev(&firewire_cdevsw, dev2unit(dev),
			UID_ROOT, GID_OPERATOR, 0660,
			"fw%d.%d", unit, sub);
	}
#endif
	d = (struct fw_drv1 *)dev->si_drv1;
	d->fc = sc->fc;
	STAILQ_INIT(&d->binds);
	STAILQ_INIT(&d->rq);

	return err;
}
Example 7
/*
 * allow user processes to MMAP some memory sections
 * instead of going through read/write
 */
int
memmmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr,
    int prot, vm_memattr_t *memattr)
{
	/*
	 * /dev/mem is the only one that makes sense through this
	 * interface.  For /dev/kmem any physaddr we return here
	 * could be transient and hence incorrect or invalid at
	 * a later time.
	 */
	if (dev2unit(dev) != CDEV_MINOR_MEM)
		return (-1);

	/*
	 * Allow access only in RAM.
	 */
	if ((prot & ia64_pa_access(atop((vm_offset_t)offset))) != prot)
		return (-1);
	*paddr = IA64_PHYS_TO_RR7(offset);
	return (0);
}
Example 8
/*
 * allow user processes to MMAP some memory sections
 * instead of going through read/write
 */
int
memmmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr,
    int prot, vm_memattr_t *memattr)
{
	int i;

	if (dev2unit(dev) == CDEV_MINOR_MEM)
		*paddr = offset;
	else
		return (EFAULT);

	for (i = 0; i < mem_range_softc.mr_ndesc; i++) {
		if (!(mem_range_softc.mr_desc[i].mr_flags & MDF_ACTIVE))
			continue;

		if (offset >= mem_range_softc.mr_desc[i].mr_base &&
		    offset < mem_range_softc.mr_desc[i].mr_base +
		    mem_range_softc.mr_desc[i].mr_len) {
			switch (mem_range_softc.mr_desc[i].mr_flags &
			    MDF_ATTRMASK) {
			case MDF_WRITEBACK:
				*memattr = VM_MEMATTR_WRITE_BACK;
				break;
			case MDF_WRITECOMBINE:
				*memattr = VM_MEMATTR_WRITE_COMBINING;
				break;
			case MDF_UNCACHEABLE:
				*memattr = VM_MEMATTR_UNCACHEABLE;
				break;
			case MDF_WRITETHROUGH:
				*memattr = VM_MEMATTR_WRITE_THROUGH;
				break;
			}

			break;
		}
	}

	return (0);
}
Example 9
int
nsmb_dev_open(dev_t dev, int oflags, int devtype, struct proc *p)
{
	struct smb_dev *sdp;
	struct ucred *cred = p->p_ucred;	/* used by make_dev() below */
	int s;

	sdp = SMB_GETDEV(dev);
	if (sdp && (sdp->sd_flags & NSMBFL_OPEN))
		return EBUSY;
	if (sdp == NULL) {
		sdp = malloc(sizeof(*sdp), M_SMBDATA, M_WAITOK);
		smb_devtbl[minor(dev)] = (void*)sdp;
	}

#ifndef __NetBSD__
	/*
	 * XXX: this is just crazy - make a device for an already passed device...
	 * someone should take care of it.
	 */
	if ((dev->si_flags & SI_NAMED) == 0)
		make_dev(&nsmb_cdevsw, minor(dev), cred->cr_uid, cred->cr_gid, 0700,
		    NSMB_NAME"%d", dev2unit(dev));
#endif /* !__NetBSD__ */

	bzero(sdp, sizeof(*sdp));
/*
	STAILQ_INIT(&sdp->sd_rqlist);
	STAILQ_INIT(&sdp->sd_rplist);
	bzero(&sdp->sd_pollinfo, sizeof(struct selinfo));
*/
	s = splnet();
	sdp->sd_level = -1;
	sdp->sd_flags |= NSMBFL_OPEN;
	splx(s);
	return 0;
}
Example 10
/* ARGSUSED */
int
memrw(struct cdev *dev, struct uio *uio, int flags)
{
	int o;
	u_int c = 0;
	vm_paddr_t pa;
	struct iovec *iov;
	int error = 0;
	vm_offset_t addr;

	/* XXX UPS Why ? */
	GIANT_REQUIRED;

	if (dev2unit(dev) != CDEV_MINOR_MEM && dev2unit(dev) != CDEV_MINOR_KMEM)
		return (EIO);

	if (dev2unit(dev) == CDEV_MINOR_KMEM && uio->uio_resid > 0) {
		if (uio->uio_offset < (vm_offset_t)VADDR(PTDPTDI, 0))
				return (EFAULT);

		if (!kernacc((caddr_t)(int)uio->uio_offset, uio->uio_resid,
		    uio->uio_rw == UIO_READ ?  VM_PROT_READ : VM_PROT_WRITE))
			return (EFAULT);
	}

	while (uio->uio_resid > 0 && error == 0) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("memrw");
			continue;
		}
		if (dev2unit(dev) == CDEV_MINOR_MEM) {
			pa = uio->uio_offset;
			pa &= ~PAGE_MASK;
		} else {
			/*
			 * Extract the physical page since the mapping may
			 * change at any time. This avoids panics on page 
			 * fault in this case but will cause reading/writing
			 * to the wrong page.
			 * Hopefully an application will notice the wrong
			 * data on read access and refrain from writing.
			 * This should be replaced by a special uiomove
			 * type function that just returns an error if there
			 * is a page fault on a kernel page. 
			 */
			addr = trunc_page(uio->uio_offset);
			pa = pmap_extract(kernel_pmap, addr);
			if (pa == 0)
				return (EFAULT);

		}
		
		/* 
		 * XXX UPS This should just use sf_buf_alloc.
		 * Unfortunately sf_buf_alloc needs a vm_page
		 * and we may want to look at memory not covered
		 * by the page array.
		 */

		sx_xlock(&memsxlock);
		pmap_kenter((vm_offset_t)ptvmmap, pa);
		pmap_invalidate_page(kernel_pmap,(vm_offset_t)ptvmmap);

		o = (int)uio->uio_offset & PAGE_MASK;
		c = PAGE_SIZE - o;
		c = min(c, (u_int)iov->iov_len);
		error = uiomove((caddr_t)&ptvmmap[o], (int)c, uio);
		pmap_qremove((vm_offset_t)ptvmmap, 1);
		sx_xunlock(&memsxlock);
		
	}

	return (error);
}
Example 11
/* ARGSUSED */
int
memrw(struct cdev *dev, struct uio *uio, int flags)
{
	struct iovec *iov;
	off_t ofs;
	vm_offset_t addr;
	void *ptr;
	u_long limit;
	int count, error, phys, rw;

	error = 0;
	rw = (uio->uio_rw == UIO_READ) ? VM_PROT_READ : VM_PROT_WRITE;

	while (uio->uio_resid > 0 && !error) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("memrw");
			continue;
		}

		ofs = uio->uio_offset;

		phys = (dev2unit(dev) == CDEV_MINOR_MEM) ? 1 : 0;
		if (phys == 0 && ofs >= IA64_RR_BASE(6)) {
			ofs = IA64_RR_MASK(ofs);
			phys++;
		}

		if (phys) {
			error = mem_phys2virt(ofs, rw, &ptr, &limit);
			if (error)
				return (error);

			count = min(uio->uio_resid, limit);
			error = uiomove(ptr, count, uio);
		} else {
			ptr = (void *)ofs;
			count = iov->iov_len;

			/*
			 * Make sure that all of the pages are currently
			 * resident so that we don't create any zero-fill
			 * pages.
			 */
			limit = round_page(ofs + count);
			addr = trunc_page(ofs);
			if (addr < VM_MAXUSER_ADDRESS)
				return (EINVAL);
			for (; addr < limit; addr += PAGE_SIZE) {
				if (pmap_kextract(addr) == 0)
					return (EFAULT);
			}
			if (!kernacc(ptr, count, rw))
				return (EFAULT);
			error = uiomove(ptr, count, uio);
		}
		/* else panic! */
	}
	return (error);
}
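
Several of the memrw() variants in these examples repeat the same guard before touching /dev/kmem addresses; a condensed sketch of that pattern (assuming only the standard pmap_extract()/kernacc() interfaces) for reference:

/*
 * Verify that every page in [va, va + len) is resident, so uiomove()
 * cannot fault in zero-fill pages, and that the access is permitted.
 */
static int
kmem_span_ok(vm_offset_t va, vm_size_t len, vm_prot_t prot)
{
	vm_offset_t addr, eaddr;

	addr = trunc_page(va);
	eaddr = round_page(va + len);
	for (; addr < eaddr; addr += PAGE_SIZE)
		if (pmap_extract(kernel_pmap, addr) == 0)
			return (EFAULT);
	if (!kernacc((void *)va, len, prot))
		return (EFAULT);
	return (0);
}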
Example 12
int
memrw(struct cdev *dev, struct uio *uio, int flags)
{
	struct iovec *iov;
	struct vm_page m;
	vm_page_t marr;
	vm_offset_t off, v;
	u_int cnt;
	int error;

	error = 0;

	while (uio->uio_resid > 0 && error == 0) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("memrw");
			continue;
		}

		v = uio->uio_offset;
		off = v & PAGE_MASK;
		cnt = ulmin(iov->iov_len, PAGE_SIZE - (u_int)off);
		if (cnt == 0)
			continue;

		switch (dev2unit(dev)) {
		case CDEV_MINOR_KMEM:
			/* If the address is in the DMAP just copy it */
			if (VIRT_IN_DMAP(v)) {
				error = uiomove((void *)v, cnt, uio);
				break;
			}

			if (!kernacc((void *)v, cnt, uio->uio_rw == UIO_READ ?
			    VM_PROT_READ : VM_PROT_WRITE)) {
				error = EFAULT;
				break;
			}

			/* Get the physical address to read */
			v = pmap_extract(kernel_pmap, v);
			if (v == 0) {
				error = EFAULT;
				break;
			}

			/* FALLTHROUGH */
		case CDEV_MINOR_MEM:
			/* If within the DMAP use this to copy from */
			if (PHYS_IN_DMAP(v)) {
				v = PHYS_TO_DMAP(v);
				error = uiomove((void *)v, cnt, uio);
				break;
			}

			/* Have uiomove_fromphys handle the data */
			m.phys_addr = trunc_page(v);
			marr = &m;
			error = uiomove_fromphys(&marr, off, cnt, uio);
			break;
		}
	}

	return (error);
}
Example 13
/*
 * We only need open() and close() routines.  open() calls socreate()
 * to allocate a "real" object behind the stream and mallocs some state
 * info for use by the svr4 emulator;  close() deallocates the state
 * information and passes the underlying object to the normal socket close
 * routine.
 */
static  int
streamsopen(struct cdev *dev, int oflags, int devtype, struct thread *td)
{
	struct filedesc *fdp;
	struct svr4_strm *st;
	struct socket *so;
	struct file *fp;
	int family, type, protocol;
	int error, fd;
	
	if (td->td_dupfd >= 0)
	  return ENODEV;

	switch (dev2unit(dev)) {
	case dev_udp:
	  family = AF_INET;
	  type = SOCK_DGRAM;
	  protocol = IPPROTO_UDP;
	  break;

	case dev_tcp:
	  family = AF_INET;
	  type = SOCK_STREAM;
	  protocol = IPPROTO_TCP;
	  break;

	case dev_ip:
	case dev_rawip:
	  family = AF_INET;
	  type = SOCK_RAW;
	  protocol = IPPROTO_IP;
	  break;

	case dev_icmp:
	  family = AF_INET;
	  type = SOCK_RAW;
	  protocol = IPPROTO_ICMP;
	  break;

	case dev_unix_dgram:
	  family = AF_LOCAL;
	  type = SOCK_DGRAM;
	  protocol = 0;
	  break;

	case dev_unix_stream:
	case dev_unix_ord_stream:
	  family = AF_LOCAL;
	  type = SOCK_STREAM;
	  protocol = 0;
	  break;

	case dev_ptm:
	  return svr4_ptm_alloc(td);

	default:
	  return EOPNOTSUPP;
	}

	fdp = td->td_proc->p_fd;
	if ((error = falloc(td, &fp, &fd, 0)) != 0)
	  return error;
	/* An extra reference on `fp' has been held for us by falloc(). */

	error = socreate(family, &so, type, protocol, td->td_ucred, td);
	if (error) {
	   fdclose(fdp, fp, fd, td);
	   fdrop(fp, td);
	   return error;
	}

	finit(fp, FREAD | FWRITE, DTYPE_SOCKET, so, &svr4_netops);

	/*
	 * Allocate a stream structure and attach it to this socket.
	 * We don't bother locking so_emuldata for SVR4 stream sockets as
	 * its value is constant for the lifetime of the stream once it
	 * is initialized here.
	 */
	st = malloc(sizeof(struct svr4_strm), M_TEMP, M_WAITOK);
	st->s_family = so->so_proto->pr_domain->dom_family;
	st->s_cmd = ~0;
	st->s_afd = -1;
	st->s_eventmask = 0;
	so->so_emuldata = st;

	fdrop(fp, td);
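	/*
	 * Returning ENXIO with td_dupfd set is the *fdopen() cloning
	 * convention: the open(2) path sees the pair and dups the
	 * descriptor set up above back to the caller, so the "error"
	 * below is actually how the socket reaches userland.
	 */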
	td->td_dupfd = fd;
	return ENXIO;
}
Example 14
/* ARGSUSED */
int
memrw(struct cdev *dev, struct uio *uio, int flags)
{
	struct iovec *iov;
	vm_offset_t addr, eaddr, o, v;
	int c, error, rw;

	error = 0;
	while (uio->uio_resid > 0 && !error) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("memrw");
			continue;
		}

		if (dev2unit(dev) == CDEV_MINOR_MEM) {
			v = uio->uio_offset;
kmemphys:
			/* Allow reads only in RAM. */
			rw = (uio->uio_rw == UIO_READ)
			    ? VM_PROT_READ : VM_PROT_WRITE;
			if ((ia64_pa_access(v) & rw) != rw) {
				error = EFAULT;
				c = 0;
				break;
			}

			o = uio->uio_offset & PAGE_MASK;
			c = min(uio->uio_resid, (int)(PAGE_SIZE - o));
			error = uiomove((caddr_t)IA64_PHYS_TO_RR7(v), c, uio);
			continue;
		}
		else if (dev2unit(dev) == CDEV_MINOR_KMEM) {
			v = uio->uio_offset;

			if (v >= IA64_RR_BASE(6)) {
				v = IA64_RR_MASK(v);
				goto kmemphys;
			}

			c = min(iov->iov_len, MAXPHYS);

			/*
			 * Make sure that all of the pages are currently
			 * resident so that we don't create any zero-fill
			 * pages.
			 */
			addr = trunc_page(v);
			eaddr = round_page(v + c);
			for (; addr < eaddr; addr += PAGE_SIZE) {
				if (pmap_extract(kernel_pmap, addr) == 0)
					return (EFAULT);
			}
			if (!kernacc((caddr_t)v, c, (uio->uio_rw == UIO_READ)
			    ? VM_PROT_READ : VM_PROT_WRITE))
				return (EFAULT);
			error = uiomove((caddr_t)v, c, uio);
			continue;
		}
		/* else panic! */
	}
	return (error);
}
Example 15
/* ARGSUSED */
int
memrw(struct cdev *dev, struct uio *uio, int flags)
{
	struct iovec *iov;
	int error = 0;
	vm_offset_t va, eva, off, v;
	vm_prot_t prot;
	struct vm_page m;
	vm_page_t marr;
	vm_size_t cnt;

	cnt = 0;
	error = 0;

	while (uio->uio_resid > 0 && !error) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("memrw");
			continue;
		}
		if (dev2unit(dev) == CDEV_MINOR_MEM) {
			v = uio->uio_offset;

kmem_direct_mapped:	off = v & PAGE_MASK;
			cnt = PAGE_SIZE - ((vm_offset_t)iov->iov_base &
			    PAGE_MASK);
			cnt = min(cnt, PAGE_SIZE - off);
			cnt = min(cnt, iov->iov_len);

			if (mem_valid(v, cnt)) {
				error = EFAULT;
				break;
			}
	
			if (hw_direct_map && !pmap_dev_direct_mapped(v, cnt)) {
				error = uiomove((void *)PHYS_TO_DMAP(v), cnt,
				    uio);
			} else {
				m.phys_addr = trunc_page(v);
				marr = &m;
				error = uiomove_fromphys(&marr, off, cnt, uio);
			}
		}
		else if (dev2unit(dev) == CDEV_MINOR_KMEM) {
			va = uio->uio_offset;

			if ((va < VM_MIN_KERNEL_ADDRESS) || (va > virtual_end)) {
				v = DMAP_TO_PHYS(va);
				goto kmem_direct_mapped;
			}

			va = trunc_page(uio->uio_offset);
			eva = round_page(uio->uio_offset
			    + iov->iov_len);

			/* 
			 * Make sure that all the pages are currently resident
			 * so that we don't create any zero-fill pages.
			 */

			for (; va < eva; va += PAGE_SIZE)
				if (pmap_extract(kernel_pmap, va) == 0)
					return (EFAULT);

			prot = (uio->uio_rw == UIO_READ)
			    ? VM_PROT_READ : VM_PROT_WRITE;

			va = uio->uio_offset;
			if (kernacc((void *) va, iov->iov_len, prot)
			    == FALSE)
				return (EFAULT);

			error = uiomove((void *)va, iov->iov_len, uio);

			continue;
		}
	}

	return (error);
}
Example 16
/* ARGSUSED */
int
memrw(struct cdev *dev, struct uio *uio, int flags)
{
	int o;
	u_int c = 0, v;
	struct iovec *iov;
	int error = 0;
	vm_offset_t addr, eaddr;

	while (uio->uio_resid > 0 && error == 0) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("memrw");
			continue;
		}
		if (dev2unit(dev) == CDEV_MINOR_MEM) {
			int i;
			int address_valid = 0;

			v = uio->uio_offset;
			v &= ~PAGE_MASK;
			for (i = 0; dump_avail[i] || dump_avail[i + 1];
			    i += 2) {
				if (v >= dump_avail[i] &&
				    v < dump_avail[i + 1]) {
					address_valid = 1;
					break;
				}
			}
			if (!address_valid)
				return (EINVAL);
			sx_xlock(&tmppt_lock);
			pmap_kenter((vm_offset_t)_tmppt, v);
			o = (int)uio->uio_offset & PAGE_MASK;
			c = (u_int)(PAGE_SIZE - ((int)iov->iov_base & PAGE_MASK));
			c = min(c, (u_int)(PAGE_SIZE - o));
			c = min(c, (u_int)iov->iov_len);
			error = uiomove((caddr_t)&_tmppt[o], (int)c, uio);
			pmap_qremove((vm_offset_t)_tmppt, 1);
			sx_xunlock(&tmppt_lock);
			continue;
		}
		else if (dev2unit(dev) == CDEV_MINOR_KMEM) {
			c = iov->iov_len;

			/*
			 * Make sure that all of the pages are currently
			 * resident so that we don't create any zero-fill
			 * pages.
			 */
			addr = trunc_page(uio->uio_offset);
			eaddr = round_page(uio->uio_offset + c);

			for (; addr < eaddr; addr += PAGE_SIZE)
				if (pmap_extract(kernel_pmap, addr) == 0)
					return (EFAULT);
			if (!kernacc((caddr_t)(int)uio->uio_offset, c,
			    uio->uio_rw == UIO_READ ?
			    VM_PROT_READ : VM_PROT_WRITE))
					return (EFAULT);
			error = uiomove((caddr_t)(int)uio->uio_offset, (int)c, uio);
			continue;
		}
		/* else panic! */
	}
	return (error);
}
Example 17
/* ARGSUSED */
int
memrw(struct cdev *dev, struct uio *uio, int flags)
{
	struct iovec *iov;
	int error = 0;
	vm_offset_t va, eva, off, v;
	vm_prot_t prot;
	struct vm_page m;
	vm_page_t marr;
	vm_size_t cnt;

	cnt = 0;
	error = 0;

	GIANT_REQUIRED;

	while (uio->uio_resid > 0 && !error) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("memrw");
			continue;
		}
		if (dev2unit(dev) == CDEV_MINOR_MEM) {
			v = uio->uio_offset;

			off = uio->uio_offset & PAGE_MASK;
			cnt = PAGE_SIZE - ((vm_offset_t)iov->iov_base &
			    PAGE_MASK);
			cnt = min(cnt, PAGE_SIZE - off);
			cnt = min(cnt, iov->iov_len);

			m.phys_addr = trunc_page(v);
			marr = &m;
			error = uiomove_fromphys(&marr, off, cnt, uio);
		}
		else if (dev2unit(dev) == CDEV_MINOR_KMEM) {
			va = uio->uio_offset;

			va = trunc_page(uio->uio_offset);
			eva = round_page(uio->uio_offset
			    + iov->iov_len);

			/* 
			 * Make sure that all the pages are currently resident
			 * so that we don't create any zero-fill pages.
			 */
			if (va >= VM_MIN_KERNEL_ADDRESS &&
			    eva <= VM_MAX_KERNEL_ADDRESS) {
				for (; va < eva; va += PAGE_SIZE)
					if (pmap_extract(kernel_pmap, va) == 0)
						return (EFAULT);

				prot = (uio->uio_rw == UIO_READ)
				    ? VM_PROT_READ : VM_PROT_WRITE;

				va = uio->uio_offset;
				if (kernacc((void *) va, iov->iov_len, prot)
				    == FALSE)
					return (EFAULT);
			}

			va = uio->uio_offset;
			error = uiomove((void *)va, iov->iov_len, uio);
			continue;
		}
	}

	return (error);
}
Example 18
/* ARGSUSED */
int
memrw(struct cdev *dev, struct uio *uio, int flags)
{
	struct iovec *iov;
	u_long c, v;
	int error, o, sflags;
	vm_offset_t addr, eaddr;

	GIANT_REQUIRED;

	error = 0;
	c = 0;
	sflags = curthread_pflags_set(TDP_DEVMEMIO);
	while (uio->uio_resid > 0 && error == 0) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("memrw");
			continue;
		}
		if (dev2unit(dev) == CDEV_MINOR_MEM) {
			v = uio->uio_offset;
kmemphys:
			o = v & PAGE_MASK;
			c = min(uio->uio_resid, (u_int)(PAGE_SIZE - o));
			v = PHYS_TO_DMAP(v);
			if (v < DMAP_MIN_ADDRESS ||
			    (v > DMAP_MIN_ADDRESS + dmaplimit &&
			    v <= DMAP_MAX_ADDRESS) ||
			    pmap_kextract(v) == 0) {
				error = EFAULT;
				goto ret;
			}
			error = uiomove((void *)v, (int)c, uio);
			continue;
		}
		else if (dev2unit(dev) == CDEV_MINOR_KMEM) {
			v = uio->uio_offset;

			if (v >= DMAP_MIN_ADDRESS && v < DMAP_MAX_ADDRESS) {
				v = DMAP_TO_PHYS(v);
				goto kmemphys;
			}

			c = iov->iov_len;

			/*
			 * Make sure that all of the pages are currently
			 * resident so that we don't create any zero-fill
			 * pages.
			 */
			addr = trunc_page(v);
			eaddr = round_page(v + c);

			if (addr < VM_MIN_KERNEL_ADDRESS) {
				error = EFAULT;
				goto ret;
			}
			for (; addr < eaddr; addr += PAGE_SIZE) {
				if (pmap_extract(kernel_pmap, addr) == 0) {
					error = EFAULT;
					goto ret;
				}
			}
			if (!kernacc((caddr_t)(long)v, c,
			    uio->uio_rw == UIO_READ ? 
			    VM_PROT_READ : VM_PROT_WRITE)) {
				error = EFAULT;
				goto ret;
			}

			error = uiomove((caddr_t)(long)v, (int)c, uio);
			continue;
		}
		/* else panic! */
	}
ret:
	curthread_pflags_restore(sflags);
	return (error);
}
Example 19
/* ARGSUSED */
int
memrw(struct cdev *dev, struct uio *uio, int flags)
{
	struct iovec *iov;
	void *p;
	ssize_t orig_resid;
	u_long v, vd;
	u_int c;
	int error;

	error = 0;
	orig_resid = uio->uio_resid;
	while (uio->uio_resid > 0 && error == 0) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("memrw");
			continue;
		}
		v = uio->uio_offset;
		c = ulmin(iov->iov_len, PAGE_SIZE - (u_int)(v & PAGE_MASK));

		switch (dev2unit(dev)) {
		case CDEV_MINOR_KMEM:
			/*
			 * Since c is clamped to be less or equal than
			 * PAGE_SIZE, the uiomove() call does not
			 * access past the end of the direct map.
			 */
			if (v >= DMAP_MIN_ADDRESS &&
			    v < DMAP_MIN_ADDRESS + dmaplimit) {
				error = uiomove((void *)v, c, uio);
				break;
			}

			if (!kernacc((void *)v, c, uio->uio_rw == UIO_READ ?
			    VM_PROT_READ : VM_PROT_WRITE)) {
				error = EFAULT;
				break;
			}

			/*
			 * If the extracted address is not accessible
			 * through the direct map, then we make a
			 * private (uncached) mapping because we can't
			 * depend on the existing kernel mapping
			 * remaining valid until the completion of
			 * uiomove().
			 *
			 * XXX We cannot provide access to the
			 * physical page 0 mapped into KVA.
			 */
			v = pmap_extract(kernel_pmap, v);
			if (v == 0) {
				error = EFAULT;
				break;
			}
			/* FALLTHROUGH */
		case CDEV_MINOR_MEM:
			if (v < dmaplimit) {
				vd = PHYS_TO_DMAP(v);
				error = uiomove((void *)vd, c, uio);
				break;
			}
			if (v > cpu_getmaxphyaddr()) {
				error = EFAULT;
				break;
			}
			p = pmap_mapdev(v, PAGE_SIZE);
			error = uiomove(p, c, uio);
			pmap_unmapdev((vm_offset_t)p, PAGE_SIZE);
			break;
		}
	}
	/*
	 * Don't return error if any byte was written.  Read and write
	 * can return error only if no i/o was performed.
	 */
	if (uio->uio_resid != orig_resid)
		error = 0;
	return (error);
}
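
Seen from userland, that policy means a transfer straddling the last accessible byte yields a short count instead of failing; a hedged sketch, with the offset an arbitrary placeholder:

#include <sys/types.h>
#include <err.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	char buf[8192];
	ssize_t n;
	int fd;

	fd = open("/dev/mem", O_RDONLY);
	if (fd == -1)
		err(1, "open");
	n = pread(fd, buf, sizeof(buf), 0x9f000);	/* placeholder offset */
	if (n == -1)
		err(1, "pread");	/* nothing at all was transferable */
	if ((size_t)n < sizeof(buf))
		printf("short read: %zd bytes\n", n);
	close(fd);
	return (0);
}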
Example 20
/* ARGSUSED */
int
memrw(struct cdev *dev, struct uio *uio, int flags)
{
	struct iovec *iov;
	vm_offset_t eva;
	vm_offset_t off;
	vm_offset_t ova;
	vm_offset_t va;
	vm_prot_t prot;
	vm_paddr_t pa;
	vm_size_t cnt;
	vm_page_t m;
	int error;
	int i;
	uint32_t colors;

	cnt = 0;
	colors = 1;
	error = 0;
	ova = 0;

	GIANT_REQUIRED;

	while (uio->uio_resid > 0 && error == 0) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("memrw");
			continue;
		}
		if (dev2unit(dev) == CDEV_MINOR_MEM) {
			pa = uio->uio_offset & ~PAGE_MASK;
			if (!is_physical_memory(pa)) {
				error = EFAULT;
				break;
			}

			off = uio->uio_offset & PAGE_MASK;
			cnt = PAGE_SIZE - ((vm_offset_t)iov->iov_base &
			    PAGE_MASK);
			cnt = ulmin(cnt, PAGE_SIZE - off);
			cnt = ulmin(cnt, iov->iov_len);

			m = NULL;
			for (i = 0; phys_avail[i] != 0; i += 2) {
				if (pa >= phys_avail[i] &&
				    pa < phys_avail[i + 1]) {
					m = PHYS_TO_VM_PAGE(pa);
					break;
				}
			}

			if (m != NULL) {
				if (ova == 0) {
					if (dcache_color_ignore == 0)
						colors = DCACHE_COLORS;
					ova = kmem_alloc_wait(kernel_map,
					    PAGE_SIZE * colors);
				}
				if (colors != 1 && m->md.color != -1)
					va = ova + m->md.color * PAGE_SIZE;
				else
					va = ova;
				pmap_qenter(va, &m, 1);
				error = uiomove((void *)(va + off), cnt,
				    uio);
				pmap_qremove(va, 1);
			} else {
				va = TLB_PHYS_TO_DIRECT(pa);
				error = uiomove((void *)(va + off), cnt,
				    uio);
			}
			break;
		} else if (dev2unit(dev) == CDEV_MINOR_KMEM) {
			va = trunc_page(uio->uio_offset);
			eva = round_page(uio->uio_offset + iov->iov_len);

			/*
			 * Make sure that all of the pages are currently
			 * resident so we don't create any zero fill pages.
			 */
			for (; va < eva; va += PAGE_SIZE)
				if (pmap_kextract(va) == 0)
					return (EFAULT);

			prot = (uio->uio_rw == UIO_READ) ? VM_PROT_READ :
			    VM_PROT_WRITE;
			va = uio->uio_offset;
			if (va < VM_MIN_DIRECT_ADDRESS &&
			    kernacc((void *)va, iov->iov_len, prot) == FALSE)
				return (EFAULT);

			error = uiomove((void *)va, iov->iov_len, uio);
			break;
		}
		/* else panic! */
	}
	if (ova != 0)
		kmem_free_wakeup(kernel_map, ova, PAGE_SIZE * colors);
	return (error);
}