/*
 * Handle the ARM_SYNC_ICACHE sysarch(2) operation: synchronize the
 * instruction cache with the data cache over a user-supplied address
 * range, raising SIGSEGV for invalid or unmapped ranges.
 */
static int
arm32_sync_icache(struct thread *td, void *args)
{
	struct arm_sync_icache_args ua;
	int error;
	ksiginfo_t ksi;
#if __ARM_ARCH >= 6
	vm_offset_t rv;
#endif

	if ((error = copyin(args, &ua, sizeof(ua))) != 0)
		return (error);

	if (ua.len == 0) {
		td->td_retval[0] = 0;
		return (0);
	}

	/*
	 * Validate arguments. Address and length are unsigned,
	 * so we can use a wrapped-overflow check.
	 */
	if (((ua.addr + ua.len) < ua.addr) ||
	    ((ua.addr + ua.len) > VM_MAXUSER_ADDRESS)) {
		ksiginfo_init_trap(&ksi);
		ksi.ksi_signo = SIGSEGV;
		ksi.ksi_code = SEGV_ACCERR;
		ksi.ksi_addr = (void *)max(ua.addr, VM_MAXUSER_ADDRESS);
		trapsignal(td, &ksi);
		return (EINVAL);
	}

#if __ARM_ARCH >= 6
	/* sync_icache() returns 1 on success; otherwise the failing address. */
	rv = sync_icache(ua.addr, ua.len);
	if (rv != 1) {
		ksiginfo_init_trap(&ksi);
		ksi.ksi_signo = SIGSEGV;
		ksi.ksi_code = SEGV_MAPERR;
		ksi.ksi_addr = (void *)rv;
		trapsignal(td, &ksi);
		return (EINVAL);
	}
#else
	cpu_icache_sync_range(ua.addr, ua.len);
#endif

	td->td_retval[0] = 0;
	return (0);
}
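/*
 * A minimal userland sketch of how the handler above is reached (not
 * part of this file): after patching code in place, a process asks the
 * kernel to synchronize the I-cache for that range via sysarch(2).
 * Assumes FreeBSD/arm's <machine/sysarch.h>, which provides sysarch(),
 * ARM_SYNC_ICACHE, and struct arm_sync_icache_args; the helper name
 * flush_icache_range is illustrative only.
 */
#include <machine/sysarch.h>
#include <stddef.h>
#include <stdint.h>

static int
flush_icache_range(void *start, size_t len)
{
	struct arm_sync_icache_args args;

	args.addr = (uintptr_t)start;	/* start of the patched code */
	args.len = len;			/* number of bytes to synchronize */

	/* Returns 0 on success; the kernel raises SIGSEGV for bad ranges. */
	return (sysarch(ARM_SYNC_ICACHE, &args));
}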
/*
 * Perform I/O to a given process. This will return EIO if we detect
 * corrupt memory and ENXIO if there is no such mapped address in the
 * user process's address space.
 */
static int
urw(proc_t *p, int writing, void *buf, size_t len, uintptr_t a)
{
	caddr_t addr = (caddr_t)a;
	caddr_t page;
	caddr_t vaddr;
	struct seg *seg;
	int error = 0;
	int err = 0;
	uint_t prot;
	uint_t prot_rw = writing ? PROT_WRITE : PROT_READ;
	int protchanged;
	on_trap_data_t otd;
	int retrycnt;
	struct as *as = p->p_as;
	enum seg_rw rw;

	/*
	 * Locate segment containing address of interest.
	 */
	page = (caddr_t)(uintptr_t)((uintptr_t)addr & PAGEMASK);
	retrycnt = 0;
	AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
retry:
	if ((seg = as_segat(as, page)) == NULL ||
	    !page_valid(seg, page)) {
		AS_LOCK_EXIT(as, &as->a_lock);
		return (ENXIO);
	}
	SEGOP_GETPROT(seg, page, 0, &prot);

	protchanged = 0;
	if ((prot & prot_rw) == 0) {
		protchanged = 1;
		err = SEGOP_SETPROT(seg, page, PAGESIZE, prot | prot_rw);
		if (err == IE_RETRY) {
			protchanged = 0;
			ASSERT(retrycnt == 0);
			retrycnt++;
			goto retry;
		}

		if (err != 0) {
			AS_LOCK_EXIT(as, &as->a_lock);
			return (ENXIO);
		}
	}

	/*
	 * segvn may do a copy-on-write for the F_SOFTLOCK/S_READ case to
	 * break sharing, to avoid a copy-on-write of a softlocked page by
	 * another thread. But since we locked the address space as a writer,
	 * no other thread can cause a copy-on-write. S_READ_NOCOW is passed
	 * as the access type to tell segvn that it's ok not to do a
	 * copy-on-write for this SOFTLOCK fault.
	 */
	if (writing)
		rw = S_WRITE;
	else if (seg->s_ops == &segvn_ops)
		rw = S_READ_NOCOW;
	else
		rw = S_READ;

	if (SEGOP_FAULT(as->a_hat, seg, page, PAGESIZE, F_SOFTLOCK, rw)) {
		if (protchanged)
			(void) SEGOP_SETPROT(seg, page, PAGESIZE, prot);
		AS_LOCK_EXIT(as, &as->a_lock);
		return (ENXIO);
	}
	CPU_STATS_ADD_K(vm, softlock, 1);

	/*
	 * Make sure we're not trying to read or write off the end of
	 * the page.
	 */
	ASSERT(len <= page + PAGESIZE - addr);

	/*
	 * Map in the locked page, copy the data to or from our local
	 * buffer, then map the page out and unlock it.
	 */
	vaddr = mapin(as, addr, writing);

	/*
	 * Since we are copying memory on behalf of the user process,
	 * protect against memory error correction faults.
	 */
	if (!on_trap(&otd, OT_DATA_EC)) {
		if (seg->s_ops == &segdev_ops) {
			/*
			 * Device memory can behave strangely; invoke
			 * a segdev-specific copy operation instead.
			 */
			if (writing) {
				if (segdev_copyto(seg, addr, buf, vaddr, len))
					error = ENXIO;
			} else {
				if (segdev_copyfrom(seg, addr, vaddr, buf, len))
					error = ENXIO;
			}
		} else {
			if (writing)
				bcopy(buf, vaddr, len);
			else
				bcopy(vaddr, buf, len);
		}
	} else {
		error = EIO;
	}
	no_trap();

	/*
	 * If we're writing to an executable page, we may need to synchronize
	 * the I$ with the modifications we made through the D$.
	 */
	if (writing && (prot & PROT_EXEC))
		sync_icache(vaddr, (uint_t)len);

	mapout(as, addr, vaddr, writing);

	if (rw == S_READ_NOCOW)
		rw = S_READ;

	(void) SEGOP_FAULT(as->a_hat, seg, page, PAGESIZE, F_SOFTUNLOCK, rw);

	if (protchanged)
		(void) SEGOP_SETPROT(seg, page, PAGESIZE, prot);

	AS_LOCK_EXIT(as, &as->a_lock);

	return (error);
}
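/*
 * A hypothetical caller sketch (prusrio_sketch and its signature are
 * illustrative, not this file's real API): urw() asserts that a request
 * never crosses a page boundary, so an arbitrary-length transfer has to
 * be split at page boundaries before each call. PAGESIZE and PAGEOFFSET
 * are the usual machine parameters (PAGEOFFSET == PAGESIZE - 1).
 */
static int
prusrio_sketch(proc_t *p, int writing, void *buf, size_t len, uintptr_t addr)
{
	size_t chunk;
	int error = 0;

	while (len != 0 && error == 0) {
		/* Bytes remaining in the page containing 'addr'. */
		chunk = PAGESIZE - (addr & PAGEOFFSET);
		if (chunk > len)
			chunk = len;

		error = urw(p, writing, buf, chunk, addr);

		buf = (caddr_t)buf + chunk;
		addr += chunk;
		len -= chunk;
	}

	return (error);
}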