/*
 * Determine whether the kernel may access the byte range [addr, addr + len)
 * with the protections requested in rw (a VM_PROT_* mask).  Returns TRUE
 * when the entire range is accessible, FALSE otherwise.
 *
 * No requirements.
 */
int
kernacc(c_caddr_t addr, int len, int rw)
{
	vm_offset_t start, end;
	vm_prot_t wanted;
	boolean_t ok;

	/* Only VM_PROT_* bits are legal in rw. */
	KASSERT((rw & (~VM_PROT_ALL)) == 0,
	    ("illegal ``rw'' argument to kernacc (%x)", rw));

	/*
	 * The globaldata space is not part of the kernel_map proper,
	 * check access separately.
	 */
	if (is_globaldata_space((vm_offset_t)addr,
				(vm_offset_t)(addr + len)))
		return (TRUE);

	/*
	 * Nominal kernel memory access - check access via kernel_map.
	 * Reject ranges past the map limit or that wrap around.
	 */
	if ((vm_offset_t)addr + len > kernel_map.max_offset ||
	    (vm_offset_t)addr + len < (vm_offset_t)addr)
		return (FALSE);

	wanted = rw;
	start = trunc_page((vm_offset_t)addr);
	end = round_page((vm_offset_t)addr + len);
	ok = vm_map_check_protection(&kernel_map, start, end, wanted, FALSE);

	return (ok == TRUE);
}
/*
 * useracc() - check whether the current process may access the user
 * address range [addr, addr + len) with the protections given in rw
 * (a VM_PROT_* mask).  Returns TRUE when the whole range passes the
 * protection check, FALSE otherwise.
 *
 * No requirements.
 */
int
useracc(c_caddr_t addr, int len, int rw)
{
	boolean_t rv;
	vm_prot_t prot;
	vm_map_t map;
	vm_map_entry_t save_hint;
	vm_offset_t wrap;

	/* Only VM_PROT_* bits are legal in rw. */
	KASSERT((rw & (~VM_PROT_ALL)) == 0,
	    ("illegal ``rw'' argument to useracc (%x)", rw));
	prot = rw;
	/*
	 * XXX - check separately to disallow access to user area and user
	 * page tables - they are in the map.
	 */
	wrap = (vm_offset_t)addr + len;
	if (wrap > VM_MAX_USER_ADDRESS || wrap < (vm_offset_t)addr) {
		/* Range extends past the user VA limit or wraps around. */
		return (FALSE);
	}
	map = &curproc->p_vmspace->vm_map;
	vm_map_lock_read(map);

	/*
	 * We save the map hint, and restore it.  Useracc appears to distort
	 * the map hint unnecessarily.
	 *
	 * NOTE(review): map->hint is written back while only the read lock
	 * is held - presumably racy hint updates are tolerated here; confirm
	 * against the vm_map locking rules before changing this.
	 */
	save_hint = map->hint;
	rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
				     round_page(wrap), prot, TRUE);
	map->hint = save_hint;
	vm_map_unlock_read(map);

	return (rv == TRUE);
}
/*
 * Check whether the current task's map grants read (B_READ) or write
 * access over the page-rounded range covering [addr, addr + len).
 */
int
useracc(
	user_addr_t	addr,
	user_size_t	len,
	int		prot)
{
	vm_prot_t access;

	/* B_READ maps to a read check; anything else is a write check. */
	access = (prot == B_READ) ? VM_PROT_READ : VM_PROT_WRITE;

	return (vm_map_check_protection(current_map(),
		    vm_map_trunc_page(addr),
		    vm_map_round_page(addr + len),
		    access));
}
/* * munmap system call handler * * munmap_args(void *addr, size_t len) * * No requirements */ int sys_munmap(struct munmap_args *uap) { struct proc *p = curproc; vm_offset_t addr; vm_offset_t tmpaddr; vm_size_t size, pageoff; vm_map_t map; addr = (vm_offset_t) uap->addr; size = uap->len; pageoff = (addr & PAGE_MASK); addr -= pageoff; size += pageoff; size = (vm_size_t) round_page(size); if (size < uap->len) /* wrap */ return(EINVAL); tmpaddr = addr + size; /* workaround gcc4 opt */ if (tmpaddr < addr) /* wrap */ return(EINVAL); if (size == 0) return (0); /* * Check for illegal addresses. Watch out for address wrap... Note * that VM_*_ADDRESS are not constants due to casts (argh). */ if (VM_MAX_USER_ADDRESS > 0 && tmpaddr > VM_MAX_USER_ADDRESS) return (EINVAL); if (VM_MIN_USER_ADDRESS > 0 && addr < VM_MIN_USER_ADDRESS) return (EINVAL); map = &p->p_vmspace->vm_map; /* map->token serializes between the map check and the actual unmap */ lwkt_gettoken(&map->token); /* * Make sure entire range is allocated. */ if (!vm_map_check_protection(map, addr, addr + size, VM_PROT_NONE, FALSE)) { lwkt_reltoken(&map->token); return (EINVAL); } /* returns nothing but KERN_SUCCESS anyway */ vm_map_remove(map, addr, addr + size); lwkt_reltoken(&map->token); return (0); }
/*
 * Check whether the current task's map grants read (B_READ) or write
 * access over the range [addr, addr + len), rounded out to the map's
 * page boundaries.
 */
int
useracc(
	user_addr_t	addr,
	user_size_t	len,
	int		prot)
{
	vm_map_t map;
	vm_prot_t access;

	map = current_map();
	/* B_READ maps to a read check; anything else is a write check. */
	access = (prot == B_READ) ? VM_PROT_READ : VM_PROT_WRITE;

	return (vm_map_check_protection(map,
		    vm_map_trunc_page(addr, vm_map_page_mask(map)),
		    vm_map_round_page(addr + len, vm_map_page_mask(map)),
		    access));
}
/*
 * useracc() - check whether the current process may access the user
 * address range [addr, addr + len) with the protections given in rw
 * (a VM_PROT_* mask).  Returns TRUE on success, FALSE otherwise.
 *
 * No requirements.
 */
int
useracc(c_caddr_t addr, int len, int rw)
{
	vm_map_t map;
	vm_prot_t wanted;
	vm_offset_t end;
	vm_offset_t gpa;
	boolean_t ok;

	/* Only VM_PROT_* bits are legal in rw. */
	KASSERT((rw & (~VM_PROT_ALL)) == 0,
	    ("illegal ``rw'' argument to useracc (%x)", rw));
	wanted = rw;

	/* Under VMM the supplied address must first be translated to a GPA. */
	if (curthread->td_vmm) {
		if (vmm_vm_get_gpa(curproc, (register_t *)&gpa,
		    (register_t) addr))
			panic("%s: could not get GPA\n", __func__);
		addr = (c_caddr_t) gpa;
	}

	/*
	 * XXX - check separately to disallow access to user area and user
	 * page tables - they are in the map.
	 */
	end = (vm_offset_t)addr + len;
	if (end > VM_MAX_USER_ADDRESS || end < (vm_offset_t)addr)
		return (FALSE);

	map = &curproc->p_vmspace->vm_map;
	vm_map_lock_read(map);
	ok = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
				     round_page(end), wanted, TRUE);
	vm_map_unlock_read(map);

	return (ok == TRUE);
}