/*
 * mprotect system call: change the protection of a range of pages in the
 * calling process's address space.
 *
 * Returns:
 *	0	 on success
 *	EINVAL	 if the address is not page-aligned, or on an unexpected
 *		 Mach error
 *	EACCES	 if the requested protection exceeds the maximum allowed
 *	ENOMEM	 for an invalid address range (per the UNIX spec)
 *	other	 a MAC policy error, when CONFIG_MACF is enabled
 */
int
mprotect(__unused proc_t p, struct mprotect_args *uap, __unused register_t *retval)
{
	vm_prot_t		new_prot;
	mach_vm_offset_t	addr;
	mach_vm_size_t		size;
	kern_return_t		kr;
	vm_map_t		map;
#if CONFIG_MACF
	int			mac_error;
#endif

	AUDIT_ARG(addr, uap->addr);
	AUDIT_ARG(len, uap->len);
	AUDIT_ARG(value, uap->prot);

	addr = (mach_vm_offset_t)uap->addr;
	size = (mach_vm_size_t)uap->len;
	/* Only the standard protection bits are honored; extra bits are dropped. */
	new_prot = (vm_prot_t)(uap->prot & VM_PROT_ALL);

	/* UNIX SPEC: user address is not page-aligned, return EINVAL */
	if (addr & PAGE_MASK_64)
		return EINVAL;

#ifdef notyet
/* Hmm .. */
#if defined(VM_PROT_READ_IS_EXEC)
	if (new_prot & VM_PROT_READ)
		new_prot |= VM_PROT_EXECUTE;
#endif
#endif /* notyet */

#if 3936456
	/* Write or execute permission implies read (3936456 is presumably a
	 * bug-tracker ID -- the guard is always true). */
	if (new_prot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
		new_prot |= VM_PROT_READ;
#endif /* 3936456 */

	map = current_map();

#if CONFIG_MACF
	/*
	 * The MAC check for mprotect is of limited use for 2 reasons:
	 * Without mmap revocation, the caller could have asked for the max
	 * protections initially instead of a reduced set, so a mprotect
	 * check would offer no new security.
	 * It is not possible to extract the vnode from the pager object(s)
	 * of the target memory range.
	 * However, the MAC check may be used to prevent a process from,
	 * e.g., making the stack executable.
	 */
	mac_error = mac_proc_check_mprotect(p, addr, size, new_prot);
	if (mac_error)
		return (mac_error);
#endif

	kr = mach_vm_protect(map, addr, size, FALSE, new_prot);
	if (kr == KERN_SUCCESS)
		return (0);
	if (kr == KERN_PROTECTION_FAILURE)
		return (EACCES);
	if (kr == KERN_INVALID_ADDRESS) {
		/* UNIX SPEC: for an invalid address range, return ENOMEM */
		return ENOMEM;
	}
	return (EINVAL);
}
/*
 * mprotect system call: change the protection attributes of a range of
 * the calling process's address space.
 *
 * In addition to the standard POSIX bits (VM_PROT_ALL), two special flag
 * bits are accepted from userspace here: VM_PROT_TRUSTED (request that
 * the range be treated as if it carried a valid code signature -- JIT
 * support) and VM_PROT_STRIP_READ (remove read permission; honored on
 * arm64 only, see below).
 *
 * Returns:
 *	0	 on success
 *	EINVAL	 if the address is not map-page-aligned, or on an
 *		 unexpected Mach error
 *	EACCES	 if the requested protection exceeds the maximum allowed
 *	ENOMEM	 for an invalid address range (per the UNIX spec)
 *	ENOTSUP	 if VM_PROT_TRUSTED is requested but
 *		 CONFIG_DYNAMIC_CODE_SIGNING is not built in
 *	other	 a MAC policy error, when CONFIG_MACF is enabled
 */
int
mprotect(__unused proc_t p, struct mprotect_args *uap, __unused int32_t *retval)
{
	vm_prot_t prot;
	mach_vm_offset_t	user_addr;
	mach_vm_size_t	user_size;
	kern_return_t	result;
	vm_map_t	user_map;
#if CONFIG_MACF
	int error;
#endif

	AUDIT_ARG(addr, uap->addr);
	AUDIT_ARG(len, uap->len);
	AUDIT_ARG(value32, uap->prot);

	user_map = current_map();
	user_addr = (mach_vm_offset_t) uap->addr;
	user_size = (mach_vm_size_t) uap->len;
	/* Keep only the bits this syscall understands; other bits are
	 * silently discarded rather than rejected. */
	prot = (vm_prot_t)(uap->prot & (VM_PROT_ALL | VM_PROT_TRUSTED | VM_PROT_STRIP_READ));

	if (user_addr & vm_map_page_mask(user_map)) {
		/* UNIX SPEC: user address is not page-aligned, return EINVAL */
		return EINVAL;
	}
#ifdef notyet
/* Hmm .. */
#if defined(VM_PROT_READ_IS_EXEC)
	if (prot & VM_PROT_READ)
		prot |= VM_PROT_EXECUTE;
#endif
#endif /* notyet */

#if 3936456
	/* Write or execute permission implies read (3936456 is presumably a
	 * bug-tracker ID -- the guard is always true). */
	if (prot & (VM_PROT_EXECUTE | VM_PROT_WRITE))
		prot |= VM_PROT_READ;
#endif	/* 3936456 */

#if defined(__arm64__)
	/* VM_PROT_STRIP_READ asks that the read bit be removed (it may have
	 * just been re-added above); the request flag itself is cleared too,
	 * since it is not a real protection bit. */
	if (prot & VM_PROT_STRIP_READ)
		prot &= ~(VM_PROT_READ | VM_PROT_STRIP_READ);
#endif

#if CONFIG_MACF
	/*
	 * The MAC check for mprotect is of limited use for 2 reasons:
	 * Without mmap revocation, the caller could have asked for the max
	 * protections initially instead of a reduced set, so a mprotect
	 * check would offer no new security.
	 * It is not possible to extract the vnode from the pager object(s)
	 * of the target memory range.
	 * However, the MAC check may be used to prevent a process from,
	 * e.g., making the stack executable.
	 */
	error = mac_proc_check_mprotect(p, user_addr, user_size, prot);
	if (error)
		return (error);
#endif

	if(prot & VM_PROT_TRUSTED) {
#if CONFIG_DYNAMIC_CODE_SIGNING
		/* CODE SIGNING ENFORCEMENT - JIT support */
		/* The special protection value VM_PROT_TRUSTED requests that we treat
		 * this page as if it had a valid code signature.
		 * If this is enabled, there MUST be a MAC policy implementing the
		 * mac_proc_check_mprotect() hook above. Otherwise, Codesigning will be
		 * compromised because the check would always succeed and thusly any
		 * process could sign dynamically. */
		result = vm_map_sign(
			user_map,
			vm_map_trunc_page(user_addr,
					  vm_map_page_mask(user_map)),
			vm_map_round_page(user_addr+user_size,
					  vm_map_page_mask(user_map)));
		switch (result) {
			case KERN_SUCCESS:
				break;
			case KERN_INVALID_ADDRESS:
				/* UNIX SPEC: for an invalid address range, return ENOMEM */
				return ENOMEM;
			default:
				return EINVAL;
		}
#else
		/* Dynamic code signing not compiled in: refuse the request
		 * instead of silently ignoring the flag. */
		return ENOTSUP;
#endif
	}
	/* VM_PROT_TRUSTED is not a real protection bit; strip it before
	 * handing the request to the Mach VM layer. */
	prot &= ~VM_PROT_TRUSTED;

	result = mach_vm_protect(user_map, user_addr, user_size,
				 FALSE, prot);
	switch (result) {
	case KERN_SUCCESS:
		return (0);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	case KERN_INVALID_ADDRESS:
		/* UNIX SPEC: for an invalid address range, return ENOMEM */
		return ENOMEM;
	}
	return (EINVAL);
}