static paddr_t
pcimmap(dev_t dev, off_t offset, int prot)
{
	struct pci_softc *sc = device_lookup_private(&pci_cd, minor(dev));
	struct pci_child *child;
	struct pci_range *rng;
	int mapflags = 0;
	int i, j;

	/* Raw bus-space access is restricted to privileged callers. */
	if (kauth_authorize_machdep(kauth_cred_get(),
	    KAUTH_MACHDEP_UNMANAGEDMEM, NULL, NULL, NULL, NULL) != 0)
		return -1;

	/*
	 * The whole bus is mappable here: the caller's offset is taken
	 * to be the address on the bus, and 0 is passed as the offset
	 * into that range.
	 *
	 * XXX Need a way to deal with linear/etc.
	 *
	 * XXX limit enforcement is left to the MD mmap() methods, since
	 * any limits are hidden in *_tag_t structs if they exist at all
	 */
#ifdef PCI_MAGIC_IO_RANGE
	/*
	 * Check first whether the caller is trying to map the IO range.
	 * XXX a 64kB IO space is assumed here even though some machines
	 * can have significantly more than that - macppc's bandit host
	 * bridge allows 8MB IO space and sparc64 may have the entire
	 * 4GB available.  The firmware on both tries to use the lower
	 * 64kB first though, and exhausting it is pretty difficult, so
	 * we should be safe.
	 */
	if (offset >= PCI_MAGIC_IO_RANGE &&
	    offset < PCI_MAGIC_IO_RANGE + 0x10000) {
		return bus_space_mmap(sc->sc_iot,
		    offset - PCI_MAGIC_IO_RANGE, 0, prot, 0);
	}
#endif /* PCI_MAGIC_IO_RANGE */

	/*
	 * Scan every attached child's decoded ranges; when one covers
	 * the requested offset, pick up its mapping flags.
	 */
	for (i = 0; i < __arraycount(sc->sc_devices); i++) {
		child = &sc->sc_devices[i];
		if (child->c_dev == NULL)
			continue;
		for (j = 0; j < __arraycount(child->c_range); j++) {
			rng = &child->c_range[j];
			if (rng->r_size == 0)
				break;
			if (offset >= rng->r_offset &&
			    offset < rng->r_offset + rng->r_size) {
				mapflags = rng->r_flags;
				break;
			}
		}
	}
	return bus_space_mmap(sc->sc_memt, offset, 0, prot, mapflags);
}
static paddr_t
ofb_mmap(void *v, void *vs, off_t offset, int prot)
{
	struct vcons_data *vd = v;
	struct ofb_softc *sc = vd->cookie;
	struct rasops_info *ri;
	u_int32_t *ap = sc->sc_addrs;
	int i;

	if (vd->active == NULL) {
		printf("%s: no active screen.\n", device_xname(sc->sc_dev));
		return -1;
	}

	ri = &vd->active->scr_ri;

	/* framebuffer at offset 0 */
	if ((offset >= 0) && (offset < sc->sc_fbsize))
		return bus_space_mmap(sc->sc_memt, sc->sc_fbaddr, offset,
		    prot, BUS_SPACE_MAP_LINEAR);

	/*
	 * restrict all other mappings to processes with superuser privileges
	 * or the kernel itself
	 */
	if (kauth_authorize_machdep(kauth_cred_get(),
	    KAUTH_MACHDEP_UNMANAGEDMEM, NULL, NULL, NULL, NULL) != 0) {
		printf("%s: mmap() rejected.\n", device_xname(sc->sc_dev));
		return -1;
	}

	/* let them mmap() 0xa0000 - 0xbffff if it's not covered above */
#ifdef OFB_FAKE_VGA_FB
	/*
	 * The legacy VGA window is 0xa0000-0xbffff inclusive; the old
	 * "offset < 0xbffff" comparison was off by one and excluded the
	 * final byte of the range.
	 */
	if (offset >= 0xa0000 && offset < 0xc0000)
		return sc->sc_fbaddr + offset - 0xa0000;
#endif

	/* allow to map our IO space */
	if ((offset >= 0xf2000000) && (offset < 0xf2800000)) {
		return bus_space_mmap(sc->sc_iot, offset - 0xf2000000, 0,
		    prot, BUS_SPACE_MAP_LINEAR);
	}

	/*
	 * Walk the OpenFirmware "assigned-addresses" entries (5 cells
	 * each) and allow mapping of a 32-bit memory BAR that contains
	 * the requested offset.
	 */
	for (i = 0; i < 6; i++) {
		switch (ap[0] & OFW_PCI_PHYS_HI_SPACEMASK) {
		case OFW_PCI_PHYS_HI_SPACE_MEM32:
			if (offset >= ap[2] && offset < ap[2] + ap[4])
				return bus_space_mmap(sc->sc_memt, offset,
				    0, prot, BUS_SPACE_MAP_LINEAR);
			break;
		default:
			break;
		}
		ap += 5;
	}

	return -1;
}
paddr_t
mmmmap(dev_t dev, off_t off, int prot)
{
	struct lwp *l = curlwp;

	/* Only /dev/mem is mappable through this entry point. */
	if (minor(dev) != DEV_MEM)
		return -1;

	/*
	 * Addresses outside of managed RAM may only be mapped by a
	 * caller holding the unmanaged-memory privilege.
	 */
	if (!__mm_mem_addr(off)) {
		if (kauth_authorize_machdep(l->l_cred,
		    KAUTH_MACHDEP_UNMANAGEDMEM, NULL, NULL, NULL,
		    NULL) != 0)
			return -1;
	}

	return sh3_btop((paddr_t)off);
}
/*
 * Set I/O permissions for a process.  Just set the maximum level
 * right away (ignoring the argument), otherwise we would have
 * to rely on I/O permission maps, which are not implemented.
 */
int
linux_sys_iopl(struct lwp *l, const struct linux_sys_iopl_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) level;
	} */
	struct trapframe *tf = l->l_md.md_regs;
	int error;

	/* Any kauth failure maps to EPERM for Linux compatibility. */
	error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_IOPL,
	    NULL, NULL, NULL, NULL);
	if (error != 0)
		return EPERM;

	tf->tf_eflags |= PSL_IOPL;
	*retval = 0;
	return 0;
}
/*
 * See above.  If a root process tries to set access to an I/O port,
 * just let it have the whole range.
 */
int
linux_sys_ioperm(struct lwp *l, const struct linux_sys_ioperm_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(unsigned int) lo;
		syscallarg(unsigned int) hi;
		syscallarg(int) val;
	} */
	struct trapframe *tf = l->l_md.md_regs;
	const int turning_on = SCARG(uap, val);

	/* Setting and clearing permissions are distinct kauth actions. */
	if (kauth_authorize_machdep(l->l_cred,
	    turning_on ? KAUTH_MACHDEP_IOPERM_SET : KAUTH_MACHDEP_IOPERM_GET,
	    NULL, NULL, NULL, NULL) != 0)
		return EPERM;

	if (turning_on)
		tf->tf_eflags |= PSL_IOPL;
	*retval = 0;
	return 0;
}
/*ARGSUSED*/
int
mmopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	static bool initialized;

	/* One-shot lazy setup of the lock and the shared zero page. */
	if (!initialized) {
		/* XXX UNSAFE. Need an mmattach(). */
		initialized = true;
		mutex_init(&mm_lock, MUTEX_DEFAULT, IPL_NONE);
		zeropage = kmem_zalloc(PAGE_SIZE, KM_SLEEP);
	}

	switch (minor(dev)) {
#if defined(COMPAT_10) || defined(COMPAT_FREEBSD)
	/* This is done by i386_iopl(3) now. */
	case DEV_IO:
		if ((flag & FWRITE) != 0) {
			struct trapframe *tf;
			int error;

			error = kauth_authorize_machdep(l->l_cred,
			    KAUTH_MACHDEP_IOPL, NULL, NULL, NULL, NULL);
			if (error != 0)
				return error;
			tf = curlwp->l_md.md_regs;
			tf->tf_eflags |= PSL_IOPL;
		}
		break;
#else
	(void)flag;
#endif
	default:
		break;
	}
	return 0;
}
static paddr_t
drmfb_genfb_mmap(void *v, void *vs, off_t offset, int prot)
{
	struct genfb_softc *const genfb = v;
	struct drmfb_softc *const sc = container_of(genfb,
	    struct drmfb_softc, sc_genfb);

	KASSERT(0 <= offset);

	/* Offsets inside the framebuffer go to the fb-specific hook. */
	if (offset < genfb->sc_fbsize) {
		if (sc->sc_da.da_params->dp_mmapfb == NULL)
			return -1;
		return (*sc->sc_da.da_params->dp_mmapfb)(sc, offset, prot);
	}

	/*
	 * Everything past the framebuffer counts as unmanaged memory
	 * and needs privilege; hand it to the generic mmap hook.
	 */
	if (kauth_authorize_machdep(kauth_cred_get(),
	    KAUTH_MACHDEP_UNMANAGEDMEM, NULL, NULL, NULL, NULL) != 0)
		return -1;
	if (sc->sc_da.da_params->dp_mmap == NULL)
		return -1;
	return (*sc->sc_da.da_params->dp_mmap)(sc, offset, prot);
}
/*
 * 32-bit compat entry point for setting MTRRs: copies an array of
 * struct mtrr32 in from userland, widens each record into the native
 * struct mtrr, and hands the result to mtrr_set().  The count (0 on
 * failure) is copied back to the user-supplied pointer at args32.n.
 */
static int
x86_64_set_mtrr32(struct lwp *l, void *args, register_t *retval)
{
	struct x86_64_set_mtrr_args32 args32;
	struct mtrr32 *m32p, m32;
	struct mtrr *m64p, *mp;
	int error, error2, i;
	int32_t n;
	size_t size;

	m64p = NULL;

	if (mtrr_funcs == NULL)
		return ENOSYS;

	error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_MTRR_SET,
	    NULL, NULL, NULL, NULL);
	if (error != 0)
		return error;

	error = copyin(args, &args32, sizeof args32);
	if (error != 0)
		return error;

	error = copyin((void *)(uintptr_t)args32.n, &n, sizeof n);
	if (error != 0)
		return error;

	if (n <= 0 || n > (MTRR_I686_NFIXED_SOFT + MTRR_I686_NVAR_MAX)) {
		error = EINVAL;
		goto fail;
	}

	/*
	 * n is bounded above, so this multiplication cannot overflow.
	 * KM_SLEEP allocations never fail, so no NULL check is needed
	 * (the old ENOMEM branch was dead code).
	 */
	size = n * sizeof(struct mtrr);
	m64p = kmem_zalloc(size, KM_SLEEP);

	/* Widen each 32-bit record into the native struct mtrr. */
	m32p = (struct mtrr32 *)(uintptr_t)args32.mtrrp;
	mp = m64p;
	for (i = 0; i < n; i++) {
		error = copyin(m32p, &m32, sizeof m32);
		if (error != 0)
			goto fail;
		mp->base = m32.base;
		mp->len = m32.len;
		mp->type = m32.type;
		mp->flags = m32.flags;
		mp->owner = m32.owner;
		m32p++;
		mp++;
	}

	error = mtrr_set(m64p, &n, l->l_proc, 0);
fail:
	if (m64p != NULL)
		kmem_free(m64p, size);
	if (error != 0)
		n = 0;
	/*
	 * Report the resulting count back to userland.  The copyout
	 * result was previously discarded; propagate it, but only when
	 * no earlier (more informative) error occurred.
	 */
	error2 = copyout(&n, (void *)(uintptr_t)args32.n, sizeof n);
	if (error == 0)
		error = error2;
	return error;
}