/*
 * dev_kmem_readwrite: helper for DEV_KMEM (/dev/kmem) case of R/W.
 */
static int
dev_kmem_readwrite(struct uio *uio, struct iovec *iov)
{
	void *addr;
	size_t len, offset;
	vm_prot_t prot;
	int error;
	bool md_kva;

	/* Check for wrap around. */
	addr = (void *)(intptr_t)uio->uio_offset;
	if ((uintptr_t)addr != uio->uio_offset) {
		return EFAULT;
	}
	/*
	 * Handle a non-page-aligned offset.
	 * Otherwise, we operate on a page-by-page basis.
	 */
	offset = uio->uio_offset & PAGE_MASK;
	len = MIN(uio->uio_resid, PAGE_SIZE - offset);
	prot = (uio->uio_rw == UIO_WRITE) ? VM_PROT_WRITE : VM_PROT_READ;

	md_kva = false;

#ifdef __HAVE_MM_MD_DIRECT_MAPPED_IO
	paddr_t paddr;
	/* MD case: is this a directly mapped address? */
	if (mm_md_direct_mapped_io(addr, &paddr)) {
		/* If so, validate the physical address. */
		error = mm_md_physacc(paddr, prot);
		if (error) {
			return error;
		}
		md_kva = true;
	}
#endif
	if (!md_kva) {
		bool checked = false;
#ifdef __HAVE_MM_MD_KERNACC
		/* MD check for the address. */
		error = mm_md_kernacc(addr, prot, &checked);
		if (error) {
			return error;
		}
#endif
		/* UVM check for the address (unless MD already checked it). */
		if (!checked && !uvm_kernacc(addr, len, prot)) {
			return EFAULT;
		}
	}
	error = uiomove(addr, len, uio);
	return error;
}
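/*
 * Illustrative sketch (not part of the original sources): a hypothetical
 * caller loop, named mm_kmem_rw_sketch() here, showing how a dispatcher
 * might drive dev_kmem_readwrite() until the request is drained.  The
 * helper above transfers at most one page per call, so the caller keeps
 * looping over uio_resid.  The function name and structure are
 * assumptions for illustration only.
 */
static int
mm_kmem_rw_sketch(struct uio *uio)
{
	int error = 0;

	while (uio->uio_resid > 0 && error == 0) {
		struct iovec *iov = uio->uio_iov;

		if (iov->iov_len == 0) {
			/* Skip exhausted iovec entries. */
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		/* One page (or less) is transferred per call. */
		error = dev_kmem_readwrite(uio, iov);
	}
	return error;
}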
/*ARGSUSED*/
int
mmrw(dev_t dev, struct uio *uio, int flags)
{
	struct iovec *iov;
	boolean_t allowed;
	int error = 0;
	size_t c;
	vaddr_t v;

	while (uio->uio_resid > 0 && error == 0) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("mmrw");
			continue;
		}
		switch (minor(dev)) {

		/* minor device 0 is physical memory */
		case 0:
			v = uio->uio_offset;
			c = iov->iov_len;
			if (v + c < v || v + c > ptoa((psize_t)physmem))
				return (EFAULT);
			v = (vaddr_t)PHYS_TO_XKPHYS(v, CCA_NONCOHERENT);
			error = uiomove((caddr_t)v, c, uio);
			continue;

		/* minor device 1 is kernel memory */
		case 1:
			v = uio->uio_offset;
			c = ulmin(iov->iov_len, MAXPHYS);

			/* Allow access to RAM through XKPHYS... */
			if (IS_XKPHYS(v))
				allowed = is_memory_range(XKPHYS_TO_PHYS(v),
				    (psize_t)c, 0);
			/* ...or through CKSEG0... */
			else if (v >= CKSEG0_BASE &&
			    v < CKSEG0_BASE + CKSEG_SIZE)
				allowed = is_memory_range(CKSEG0_TO_PHYS(v),
				    (psize_t)c, CKSEG_SIZE);
			/* ...or through CKSEG1... */
			else if (v >= CKSEG1_BASE &&
			    v < CKSEG1_BASE + CKSEG_SIZE)
				allowed = is_memory_range(CKSEG1_TO_PHYS(v),
				    (psize_t)c, CKSEG_SIZE);
			/* ...otherwise, check it's within kernel kvm limits. */
			else
				allowed = uvm_kernacc((caddr_t)v, c,
				    uio->uio_rw == UIO_READ ? B_READ : B_WRITE);

			if (allowed) {
				error = uiomove((caddr_t)v, c, uio);
				continue;
			} else {
				return (EFAULT);
			}

		/* minor device 2 is EOF/RATHOLE */
		case 2:
			if (uio->uio_rw == UIO_WRITE)
				uio->uio_resid = 0;
			return (0);

		/*
		 * minor device 12 (/dev/zero) is source of nulls on read,
		 * rathole on write
		 */
		case 12:
			if (uio->uio_rw == UIO_WRITE) {
				c = iov->iov_len;
				break;
			}
			if (zeropage == NULL)
				zeropage = malloc(PAGE_SIZE, M_TEMP,
				    M_WAITOK | M_ZERO);
			c = ulmin(iov->iov_len, PAGE_SIZE);
			error = uiomove(zeropage, c, uio);
			continue;

		default:
			return (ENODEV);
		}
		if (error)
			break;
		iov->iov_base += c;
		iov->iov_len -= c;
		uio->uio_offset += c;
		uio->uio_resid -= c;
	}
	return error;
}
/*ARGSUSED*/
int
mmrw(dev_t dev, struct uio *uio, int flags)
{
	vaddr_t o, v;
	int c;
	struct iovec *iov;
	int error = 0;
	static int physlock;
	vm_prot_t prot;

	if (minor(dev) == DEV_MEM) {
		/* lock against other uses of shared vmmap */
		while (physlock > 0) {
			physlock++;
			error = tsleep((void *)&physlock, PZERO | PCATCH,
			    "mmrw", 0);
			if (error)
				return error;
		}
		physlock = 1;
	}
	while (uio->uio_resid > 0 && error == 0) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("mmrw");
			continue;
		}
		switch (minor(dev)) {

		case DEV_MEM:
			v = uio->uio_offset;
			/*
			 * Only allow reads in physical RAM.
			 */
			if (v >= 0xFFFFFFFC || v < lowram) {
				error = EFAULT;
				goto unlock;
			}
			prot = uio->uio_rw == UIO_READ ? VM_PROT_READ :
			    VM_PROT_WRITE;
			pmap_enter(pmap_kernel(), (vaddr_t)vmmap,
			    trunc_page(v), prot, prot|PMAP_WIRED);
			pmap_update(pmap_kernel());
			o = m68k_page_offset(uio->uio_offset);
			c = min(uio->uio_resid, (int)(PAGE_SIZE - o));
			error = uiomove(vmmap + o, c, uio);
			pmap_remove(pmap_kernel(), (vaddr_t)vmmap,
			    (vaddr_t)vmmap + PAGE_SIZE);
			pmap_update(pmap_kernel());
			continue;

		case DEV_KMEM:
			v = uio->uio_offset;
			c = min(iov->iov_len, MAXPHYS);
			if (!uvm_kernacc((void *)v, c,
			    uio->uio_rw == UIO_READ ? B_READ : B_WRITE))
				return EFAULT;
			/*
			 * Don't allow reading intio device space.  This
			 * could lead to corruption of device registers.
			 */
			if (ISIIOVA(v))
				return EFAULT;
			error = uiomove((void *)v, c, uio);
			continue;

		case DEV_NULL:
			if (uio->uio_rw == UIO_WRITE)
				uio->uio_resid = 0;
			return 0;

		case DEV_ZERO:
			if (uio->uio_rw == UIO_WRITE) {
				c = iov->iov_len;
				break;
			}
			/*
			 * On the first call, allocate and zero a page
			 * of memory for use with /dev/zero.
			 */
			if (devzeropage == NULL)
				devzeropage = (void *)malloc(PAGE_SIZE,
				    M_TEMP, M_WAITOK|M_ZERO);
			c = min(iov->iov_len, PAGE_SIZE);
			error = uiomove(devzeropage, c, uio);
			continue;

		default:
			return ENXIO;
		}
		if (error)
			break;
		iov->iov_base = (char *)iov->iov_base + c;
		iov->iov_len -= c;
		uio->uio_offset += c;
		uio->uio_resid -= c;
	}
	if (minor(dev) == DEV_MEM) {
unlock:
		if (physlock > 1)
			wakeup((void *)&physlock);
		physlock = 0;
	}
	return error;
}
/*ARGSUSED*/
int
mmrw(dev_t dev, struct uio *uio, int flags)
{
	register vaddr_t o, v;
	register int c;
	register struct iovec *iov;
	int error = 0;
	vm_prot_t prot;

	while (uio->uio_resid > 0 && !error) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("mmrw");
			continue;
		}
		switch (minor(dev)) {

		case DEV_MEM:
			mutex_enter(&mm_lock);
			v = uio->uio_offset;
			prot = uio->uio_rw == UIO_READ ? VM_PROT_READ :
			    VM_PROT_WRITE;
			error = check_pa_acc(uio->uio_offset, prot);
			if (error) {
				mutex_exit(&mm_lock);
				break;
			}
			pmap_enter(pmap_kernel(), (vaddr_t)vmmap,
			    trunc_page(v), prot, PMAP_WIRED|prot);
			o = uio->uio_offset & PGOFSET;
			c = min(uio->uio_resid, (int)(PAGE_SIZE - o));
			error = uiomove((char *)vmmap + o, c, uio);
			pmap_remove(pmap_kernel(), (vaddr_t)vmmap,
			    (vaddr_t)vmmap + PAGE_SIZE);
			mutex_exit(&mm_lock);
			break;

		case DEV_KMEM:
			v = uio->uio_offset;
			c = min(iov->iov_len, MAXPHYS);
			if (v >= (vaddr_t)&start && v < (vaddr_t)kern_end) {
				if (v < (vaddr_t)&__data_start &&
				    uio->uio_rw == UIO_WRITE)
					return EFAULT;
			} else if (v >= lkm_start && v < lkm_end) {
				if (!uvm_map_checkprot(lkm_map, v, v + c,
				    uio->uio_rw == UIO_READ ?
				    VM_PROT_READ : VM_PROT_WRITE))
					return EFAULT;
			} else {
				if (!uvm_kernacc((void *)v, c,
				    uio->uio_rw == UIO_READ ?
				    B_READ : B_WRITE))
					return EFAULT;
			}
			error = uiomove((void *)v, c, uio);
			break;

		case DEV_NULL:
			if (uio->uio_rw == UIO_WRITE)
				uio->uio_resid = 0;
			return (0);

		case DEV_ZERO:
			if (uio->uio_rw == UIO_WRITE) {
				uio->uio_resid = 0;
				return (0);
			}
			c = min(iov->iov_len, PAGE_SIZE);
			error = uiomove(zeropage, c, uio);
			break;

		default:
			return (ENXIO);
		}
	}
	return (error);
}
int
mmrw(dev_t dev, struct uio *uio, int flags)
{
	vaddr_t o, v;
	size_t c;
	struct iovec *iov;
	int error = 0;

	if (minor(dev) == 0) {
		/* lock against other uses of shared vmmap */
		error = rw_enter(&physlock, RW_WRITE | RW_INTR);
		if (error)
			return (error);
	}
	while (uio->uio_resid > 0 && error == 0) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("mmrw");
			continue;
		}
		switch (minor(dev)) {

		/* minor device 0 is physical memory */
		case 0:
			v = uio->uio_offset;
			pmap_enter(pmap_kernel(), (vaddr_t)vmmap,
			    trunc_page(v),
			    uio->uio_rw == UIO_READ ? PROT_READ : PROT_WRITE,
			    PMAP_WIRED);
			pmap_update(pmap_kernel());
			o = uio->uio_offset & PGOFSET;
			c = ulmin(uio->uio_resid, NBPG - o);
			error = uiomove((caddr_t)vmmap + o, c, uio);
			pmap_remove(pmap_kernel(), (vaddr_t)vmmap,
			    (vaddr_t)vmmap + NBPG);
			pmap_update(pmap_kernel());
			continue;

		/* minor device 1 is kernel memory */
		case 1:
			v = uio->uio_offset;
			c = ulmin(iov->iov_len, MAXPHYS);
			if (!uvm_kernacc((caddr_t)v, c,
			    uio->uio_rw == UIO_READ ? B_READ : B_WRITE))
				return (EFAULT);
			error = uiomove((caddr_t)v, c, uio);
			continue;

		/* minor device 2 is /dev/null */
		case 2:
			if (uio->uio_rw == UIO_WRITE)
				uio->uio_resid = 0;
			return (0);

		/* minor device 12 is /dev/zero */
		case 12:
			if (uio->uio_rw == UIO_WRITE) {
				c = iov->iov_len;
				break;
			}
			if (zeropage == NULL) {
				zeropage = malloc(PAGE_SIZE, M_TEMP,
				    M_WAITOK|M_ZERO);
			}
			c = ulmin(iov->iov_len, PAGE_SIZE);
			error = uiomove(zeropage, c, uio);
			continue;

		default:
			return (ENXIO);
		}
		iov->iov_base = (char *)iov->iov_base + c;
		iov->iov_len -= c;
		uio->uio_offset += c;
		uio->uio_resid -= c;
	}
	if (minor(dev) == 0)
		rw_exit(&physlock);
	return (error);
}
/*ARGSUSED*/
int
mmrw(dev_t dev, struct uio *uio, int flags)
{
	struct iovec *iov;
	vaddr_t v, o;
	int c;
	int error = 0;

	while (uio->uio_resid > 0 && !error) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("mmrw");
			continue;
		}

		v = uio->uio_offset;

		switch (minor(dev)) {
		kmemphys:
		case DEV_MEM:
			/* Physical address */
			if (__mm_mem_addr(v)) {
				o = v & PGOFSET;
				c = min(uio->uio_resid, (int)(PAGE_SIZE - o));
				error = uiomove((void *)SH3_PHYS_TO_P1SEG(v),
				    c, uio);
			} else {
				return (EFAULT);
			}
			break;

		case DEV_KMEM:
			/* P0 */
			if (v < SH3_P1SEG_BASE)
				return (EFAULT);
			/* P1 */
			if (v < SH3_P2SEG_BASE) {
				v = SH3_P1SEG_TO_PHYS(v);
				goto kmemphys;
			}
			/* P2 */
			if (v < SH3_P3SEG_BASE)
				return (EFAULT);
			/* P3 */
			c = min(iov->iov_len, MAXPHYS);
			if (!uvm_kernacc((void *)v, c,
			    uio->uio_rw == UIO_READ ? B_READ : B_WRITE))
				return (EFAULT);
			error = uiomove((void *)v, c, uio);
			break;

		case DEV_NULL:
			if (uio->uio_rw == UIO_WRITE)
				uio->uio_resid = 0;
			return (0);

		case DEV_ZERO:
			if (uio->uio_rw == UIO_WRITE) {
				uio->uio_resid = 0;
				return (0);
			}
			if (zeropage == NULL) {
				zeropage = malloc(PAGE_SIZE, M_TEMP, M_WAITOK);
				memset(zeropage, 0, PAGE_SIZE);
			}
			c = min(iov->iov_len, PAGE_SIZE);
			error = uiomove(zeropage, c, uio);
			break;

		default:
			return (ENXIO);
		}
	}

	return (error);
}
/*ARGSUSED*/
int
mmrw(dev_t dev, struct uio *uio, int flags)
{
	extern vaddr_t kern_end;
	vaddr_t v;
	int c;
	struct iovec *iov;
	int error = 0;

	while (uio->uio_resid > 0 && error == 0) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("mmrw");
			continue;
		}
		switch (minor(dev)) {

		/* minor device 0 is physical memory */
		case 0:
			v = PMAP_DIRECT_MAP(uio->uio_offset);
			error = uiomove((caddr_t)v, uio->uio_resid, uio);
			continue;

		/* minor device 1 is kernel memory */
		case 1:
			v = uio->uio_offset;
			c = min(iov->iov_len, MAXPHYS);
			if (v >= (vaddr_t)&start && v < kern_end) {
				if (v < (vaddr_t)&etext &&
				    uio->uio_rw == UIO_WRITE)
					return EFAULT;
#ifdef LKM
			} else if (v >= lkm_start && v < lkm_end) {
				if (!uvm_map_checkprot(lkm_map, v, v + c,
				    uio->uio_rw == UIO_READ ?
				    UVM_PROT_READ : UVM_PROT_WRITE))
					return (EFAULT);
#endif
			} else if ((!uvm_kernacc((caddr_t)v, c,
			    uio->uio_rw == UIO_READ ? B_READ : B_WRITE)) &&
			    (v < PMAP_DIRECT_BASE || v > PMAP_DIRECT_END))
				return (EFAULT);
			error = uiomove((caddr_t)v, c, uio);
			continue;

		/* minor device 2 is EOF/RATHOLE */
		case 2:
			if (uio->uio_rw == UIO_WRITE)
				uio->uio_resid = 0;
			return (0);

		/*
		 * minor device 12 (/dev/zero) is source of nulls on read,
		 * rathole on write
		 */
		case 12:
			if (uio->uio_rw == UIO_WRITE) {
				c = iov->iov_len;
				break;
			}
			if (zeropage == NULL)
				zeropage = (caddr_t)malloc(PAGE_SIZE, M_TEMP,
				    M_WAITOK|M_ZERO);
			c = min(iov->iov_len, PAGE_SIZE);
			error = uiomove(zeropage, c, uio);
			continue;

		default:
			return (ENXIO);
		}
		iov->iov_base = (int8_t *)iov->iov_base + c;
		iov->iov_len -= c;
		uio->uio_offset += c;
		uio->uio_resid -= c;
	}

	return (error);
}
/*ARGSUSED*/
int
mmrw(dev_t dev, struct uio *uio, int flags)
{
	struct iovec *iov;
	int error = 0, c;
	vaddr_t v;

	while (uio->uio_resid > 0 && error == 0) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("mmrw");
			continue;
		}
		switch (minor(dev)) {

		/* minor device 0 is physical memory */
		case 0:
			v = uio->uio_offset;
			c = iov->iov_len;
			if (v + c > ctob(physmem))
				return (EFAULT);
			v = (vaddr_t)PHYS_TO_KSEG0(v);
			error = uiomove((caddr_t)v, c, uio);
			continue;

		/* minor device 1 is kernel memory */
		case 1:
			v = uio->uio_offset;
			c = min(iov->iov_len, MAXPHYS);
			if ((v > KSEG0_BASE &&
			    v + c <= KSEG0_BASE + ctob(physmem)) ||
			    uvm_kernacc((caddr_t)v, c,
			    uio->uio_rw == UIO_READ ? B_READ : B_WRITE)) {
				error = uiomove((caddr_t)v, c, uio);
				continue;
			} else {
				return (EFAULT);
			}

		/* minor device 2 is EOF/RATHOLE */
		case 2:
			if (uio->uio_rw == UIO_WRITE)
				uio->uio_resid = 0;
			return (0);

		/*
		 * minor device 12 (/dev/zero) is source of nulls on read,
		 * rathole on write
		 */
		case 12:
			if (uio->uio_rw == UIO_WRITE) {
				c = iov->iov_len;
				break;
			}
			if (zeropage == NULL) {
				zeropage = (caddr_t)malloc(PAGE_SIZE, M_TEMP,
				    M_WAITOK);
				bzero(zeropage, PAGE_SIZE);
			}
			c = min(iov->iov_len, PAGE_SIZE);
			error = uiomove(zeropage, c, uio);
			continue;

		default:
			return (ENODEV);
		}
		if (error)
			break;
		iov->iov_base += c;
		iov->iov_len -= c;
		uio->uio_offset += c;
		uio->uio_resid -= c;
	}
	return error;
}
/*ARGSUSED*/
int
mmrw(dev_t dev, struct uio *uio, int flags)
{
	vaddr_t va;
	paddr_t pa;
	int o, c;
	struct iovec *iov;
	int error = 0;
	static int physlock;
	vm_prot_t prot;
	extern void *vmmap;

	if (minor(dev) == DEV_MEM) {
		/* lock against other uses of shared vmmap */
		while (physlock > 0) {
			physlock++;
			error = tsleep((void *)&physlock, PZERO | PCATCH,
			    "mmrw", 0);
			if (error)
				return (error);
		}
		physlock = 1;
	}
	while (uio->uio_resid > 0 && error == 0) {
		int n;

		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("mmrw");
			continue;
		}

		/* Note how much is still to go */
		n = uio->uio_resid;

		switch (minor(dev)) {

		case DEV_MEM:
			pa = (paddr_t)uio->uio_offset;
			if (!pmap_pa_exists(pa)) {
				error = EFAULT;
				goto unlock;
			}
			prot = uio->uio_rw == UIO_READ ? VM_PROT_READ :
			    VM_PROT_WRITE;
			pmap_enter(pmap_kernel(), (vaddr_t)vmmap,
			    trunc_page(pa), prot, prot|PMAP_WIRED);
			pmap_update(pmap_kernel());
			o = uio->uio_offset & PGOFSET;
			c = min(uio->uio_resid, (int)(PAGE_SIZE - o));
			error = uiomove((char *)vmmap + o, c, uio);
			pmap_remove(pmap_kernel(), (vaddr_t)vmmap,
			    (vaddr_t)vmmap + PAGE_SIZE);
			pmap_update(pmap_kernel());
			break;

		case DEV_KMEM:
			va = (vaddr_t)uio->uio_offset;
			if (va >= MSGBUF_VA && va < MSGBUF_VA + PAGE_SIZE) {
				c = min(iov->iov_len, 4096);
			} else if (va >= prom_vstart && va < prom_vend &&
			    uio->uio_rw == UIO_READ) {
				/* Allow read-only access to the PROM */
				c = min(iov->iov_len, prom_vend - prom_vstart);
			} else {
				c = min(iov->iov_len, MAXPHYS);
				if (!uvm_kernacc((void *)va, c,
				    uio->uio_rw == UIO_READ ?
				    B_READ : B_WRITE))
					return (EFAULT);
			}
			error = uiomove((void *)va, c, uio);
			break;

		case DEV_NULL:
			if (uio->uio_rw == UIO_WRITE)
				uio->uio_resid = 0;
			return (0);

/* XXX should add sbus, etc */

#if defined(SUN4)
		case DEV_EEPROM:
			if (cputyp == CPU_SUN4)
				error = eeprom_uio(uio);
			else
				error = ENXIO;
			break;
#endif /* SUN4 */

		case DEV_ZERO:
			if (uio->uio_rw == UIO_WRITE) {
				uio->uio_resid = 0;
				return (0);
			}
			if (zeropage == NULL) {
				zeropage = (void *)malloc(PAGE_SIZE, M_TEMP,
				    M_WAITOK);
				bzero(zeropage, PAGE_SIZE);
			}
			c = min(iov->iov_len, PAGE_SIZE);
			error = uiomove(zeropage, c, uio);
			break;

		default:
			return (ENXIO);
		}

		/* If we didn't make any progress (i.e. EOF), we're done here */
		if (n == uio->uio_resid)
			break;
	}
	if (minor(dev) == DEV_MEM) {
unlock:
		if (physlock > 1)
			wakeup((void *)&physlock);
		physlock = 0;
	}
	return (error);
}
/*ARGSUSED*/
int
mmrw(dev_t dev, struct uio *uio, int flags)
{
	vaddr_t o, v;
	int c;
	struct iovec *iov;
	int error = 0;
	static int physlock;
	vm_prot_t prot;

	if (minor(dev) == DEV_MEM) {
		/* lock against other uses of shared vmmap */
		while (physlock > 0) {
			physlock++;
			error = tsleep(&physlock, PZERO | PCATCH, "mmrw", 0);
			if (error)
				return (error);
		}
		physlock = 1;
	}
	while (uio->uio_resid > 0 && error == 0) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("mmrw");
			continue;
		}
		switch (minor(dev)) {

		case DEV_MEM:
			v = uio->uio_offset;
#ifndef DEBUG
			/* allow reads only in RAM (except for DEBUG) */
			if (v >= 0xFFFFFFFC || v < lowram) {
				error = EFAULT;
				goto unlock;
			}
#endif
			prot = uio->uio_rw == UIO_READ ? VM_PROT_READ :
			    VM_PROT_WRITE;
			pmap_enter(pmap_kernel(), (vaddr_t)vmmap,
			    trunc_page(v), prot, prot|PMAP_WIRED);
			pmap_update(pmap_kernel());
			o = uio->uio_offset & PGOFSET;
			c = min(uio->uio_resid, (int)(PAGE_SIZE - o));
			error = uiomove(vmmap + o, c, uio);
			pmap_remove(pmap_kernel(), (vaddr_t)vmmap,
			    (vaddr_t)vmmap + PAGE_SIZE);
			pmap_update(pmap_kernel());
			continue;

		case DEV_KMEM:
			v = uio->uio_offset;
			c = min(iov->iov_len, MAXPHYS);
			if (!uvm_kernacc((void *)v, c,
			    uio->uio_rw == UIO_READ ? B_READ : B_WRITE))
				return (EFAULT);
			error = uiomove((void *)v, c, uio);
			continue;

		case DEV_NULL:
			if (uio->uio_rw == UIO_WRITE)
				uio->uio_resid = 0;
			return (0);

		case DEV_ZERO:
			if (uio->uio_rw == UIO_WRITE) {
				c = iov->iov_len;
				break;
			}
			/*
			 * On the first call, allocate and zero a page
			 * of memory for use with /dev/zero.
			 *
			 * XXX on the hp300 we already know where there
			 * is a global zeroed page, the null segment table.
			 */
			if (devzeropage == NULL) {
				extern void *Segtabzero;

				devzeropage = Segtabzero;
			}
			c = min(iov->iov_len, PAGE_SIZE);
			error = uiomove(devzeropage, c, uio);
			continue;

		default:
			return (ENXIO);
		}
		if (error)
			break;
		iov->iov_base = (char *)iov->iov_base + c;
		iov->iov_len -= c;
		uio->uio_offset += c;
		uio->uio_resid -= c;
	}
	if (minor(dev) == DEV_MEM) {
#ifndef DEBUG
unlock:
#endif
		if (physlock > 1)
			wakeup((void *)&physlock);
		physlock = 0;
	}
	return (error);
}