void lkdtm_ACCESS_USERSPACE(void)
{
	unsigned long user_addr, tmp = 0;
	unsigned long *ptr;

	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_addr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		return;
	}

	if (copy_to_user((void __user *)user_addr, &tmp, sizeof(tmp))) {
		pr_warn("copy_to_user failed\n");
		vm_munmap(user_addr, PAGE_SIZE);
		return;
	}

	ptr = (unsigned long *)user_addr;

	pr_info("attempting bad read at %p\n", ptr);
	tmp = *ptr;
	tmp += 0xc0dec0de;

	pr_info("attempting bad write at %p\n", ptr);
	*ptr = tmp;

	vm_munmap(user_addr, PAGE_SIZE);
}
void lkdtm_EXEC_USERSPACE(void)
{
	unsigned long user_addr;

	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_addr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		return;
	}
	execute_user_location((void *)user_addr);
	vm_munmap(user_addr, PAGE_SIZE);
}
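The EXEC_USERSPACE paths hand the freshly mapped page to execute_user_location(), which is not shown here. A minimal sketch of such a helper, assuming the do_nothing() stub and EXEC_SIZE constant used elsewhere in lkdtm, might look like this:

/*
 * Sketch only: copy a harmless kernel function into the user page and
 * then call it, so a kernel with SMEP/PXN should fault on the branch.
 * do_nothing() and EXEC_SIZE are assumed lkdtm helpers.
 */
static void execute_user_location(void *dst)
{
	/* Intentionally crossing the kernel/user memory boundary. */
	void (*func)(void) = dst;

	if (copy_to_user((void __user *)dst, do_nothing, EXEC_SIZE))
		return;
	flush_icache_range((unsigned long)dst,
			   (unsigned long)dst + EXEC_SIZE);
	pr_info("attempting bad execution at %p\n", func);
	func();
}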
static noinline void __init copy_user_test(void)
{
	char *kmem;
	char __user *usermem;
	size_t size = 10;
	int unused;

	kmem = kmalloc(size, GFP_KERNEL);
	if (!kmem)
		return;

	usermem = (char __user *)vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (IS_ERR(usermem)) {
		pr_err("Failed to allocate user memory\n");
		kfree(kmem);
		return;
	}

	pr_info("out-of-bounds in copy_from_user()\n");
	unused = copy_from_user(kmem, usermem, size + 1);

	pr_info("out-of-bounds in copy_to_user()\n");
	unused = copy_to_user(usermem, kmem, size + 1);

	pr_info("out-of-bounds in __copy_from_user()\n");
	unused = __copy_from_user(kmem, usermem, size + 1);

	pr_info("out-of-bounds in __copy_to_user()\n");
	unused = __copy_to_user(usermem, kmem, size + 1);

	pr_info("out-of-bounds in __copy_from_user_inatomic()\n");
	unused = __copy_from_user_inatomic(kmem, usermem, size + 1);

	pr_info("out-of-bounds in __copy_to_user_inatomic()\n");
	unused = __copy_to_user_inatomic(usermem, kmem, size + 1);

	pr_info("out-of-bounds in strncpy_from_user()\n");
	unused = strncpy_from_user(kmem, usermem, size + 1);

	vm_munmap((unsigned long)usermem, PAGE_SIZE);
	kfree(kmem);
}
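copy_user_test() is only useful when run from a test module's init path, so KASAN can flag the deliberate off-by-one copies at load time. A minimal, hypothetical wrapper (the kasan_usercopy_test_init name is illustrative, not the kernel's actual symbol) could be:

/* Sketch only: run the out-of-bounds usercopy checks once at load. */
static int __init kasan_usercopy_test_init(void)
{
	copy_user_test();
	return 0;
}
module_init(kasan_usercopy_test_init);
MODULE_LICENSE("GPL");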
/*
 * With 32-bit mode, a bounds directory is 4MB, and the size of each
 * bounds table is 16KB. With 64-bit mode, a bounds directory is 2GB,
 * and the size of each bounds table is 4MB.
 */
static int allocate_bt(struct mm_struct *mm, long __user *bd_entry)
{
	unsigned long expected_old_val = 0;
	unsigned long actual_old_val = 0;
	unsigned long bt_addr;
	unsigned long bd_new_entry;
	int ret = 0;

	/*
	 * Carve the virtual space out of userspace for the new
	 * bounds table:
	 */
	bt_addr = mpx_mmap(mpx_bt_size_bytes(mm));
	if (IS_ERR((void *)bt_addr))
		return PTR_ERR((void *)bt_addr);
	/*
	 * Set the valid flag (kinda like _PAGE_PRESENT in a pte)
	 */
	bd_new_entry = bt_addr | MPX_BD_ENTRY_VALID_FLAG;

	/*
	 * Go poke the address of the new bounds table into the
	 * bounds directory entry out in userspace memory. Note:
	 * we may race with another CPU instantiating the same table.
	 * In that case the cmpxchg will see an unexpected
	 * 'actual_old_val'.
	 *
	 * This can fault, but that's OK because we do not hold
	 * mmap_sem at this point, unlike some of the other parts
	 * of the MPX code that have to pagefault_disable().
	 */
	ret = mpx_cmpxchg_bd_entry(mm, &actual_old_val, bd_entry,
				   expected_old_val, bd_new_entry);
	if (ret)
		goto out_unmap;

	/*
	 * The user_atomic_cmpxchg_inatomic() will only return nonzero
	 * for faults, *not* if the cmpxchg itself fails. Now we must
	 * verify that the cmpxchg itself completed successfully.
	 */
	/*
	 * We expected an empty 'expected_old_val', but instead found
	 * an apparently valid entry. Assume we raced with another
	 * thread to instantiate this table and declare success.
	 */
	if (actual_old_val & MPX_BD_ENTRY_VALID_FLAG) {
		ret = 0;
		goto out_unmap;
	}
	/*
	 * We found a non-empty bd_entry but it did not have the
	 * VALID_FLAG set. Return an error which will result in
	 * a SEGV since this probably means that somebody scribbled
	 * some invalid data into a bounds table.
	 */
	if (expected_old_val != actual_old_val) {
		ret = -EINVAL;
		goto out_unmap;
	}
	trace_mpx_new_bounds_table(bt_addr);
	return 0;
out_unmap:
	vm_munmap(bt_addr, mpx_bt_size_bytes(mm));
	return ret;
}
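allocate_bt() relies on mpx_cmpxchg_bd_entry() to publish the new table pointer atomically in the user-mapped bounds directory. A rough sketch of what that wrapper does for a 64-bit mm, built on user_atomic_cmpxchg_inatomic() and omitting the 32-bit narrowing path a real version needs, might be:

/*
 * Sketch only: atomically swap the bounds-directory entry in user
 * memory and report the value actually found there. Returns nonzero
 * only if the user access itself faulted, not if the cmpxchg "lost".
 */
static int mpx_cmpxchg_bd_entry(struct mm_struct *mm,
				unsigned long *actual_old_val_ptr,
				long __user *bd_entry_addr,
				unsigned long expected_old_val,
				unsigned long new_bd_entry)
{
	/* 64-bit mms only in this sketch. */
	return user_atomic_cmpxchg_inatomic(actual_old_val_ptr,
					    bd_entry_addr,
					    expected_old_val,
					    new_bd_entry);
}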
static void lkdtm_do_action(enum ctype which)
{
	switch (which) {
	case CT_PANIC:
		panic("dumptest");
		break;
	case CT_BUG:
		BUG();
		break;
	case CT_WARNING:
		WARN_ON(1);
		break;
	case CT_EXCEPTION:
		*((int *) 0) = 0;
		break;
	case CT_LOOP:
		for (;;)
			;
		break;
	case CT_OVERFLOW:
		(void) recursive_loop(recur_count);
		break;
	case CT_CORRUPT_STACK:
		corrupt_stack();
		break;
	case CT_UNALIGNED_LOAD_STORE_WRITE: {
		static u8 data[5] __attribute__((aligned(4))) = {1, 2, 3, 4, 5};
		u32 *p;
		u32 val = 0x12345678;

		p = (u32 *)(data + 1);
		if (*p == 0)
			val = 0x87654321;
		*p = val;
		break;
	}
	case CT_OVERWRITE_ALLOCATION: {
		size_t len = 1020;
		u32 *data = kmalloc(len, GFP_KERNEL);

		data[1024 / sizeof(u32)] = 0x12345678;
		kfree(data);
		break;
	}
	case CT_WRITE_AFTER_FREE: {
		size_t len = 1024;
		u32 *data = kmalloc(len, GFP_KERNEL);

		kfree(data);
		schedule();
		memset(data, 0x78, len);
		break;
	}
	case CT_SOFTLOCKUP:
		preempt_disable();
		for (;;)
			cpu_relax();
		break;
	case CT_HARDLOCKUP:
		local_irq_disable();
		for (;;)
			cpu_relax();
		break;
	case CT_SPINLOCKUP:
		/* Must be called twice to trigger. */
		spin_lock(&lock_me_up);
		/* Let sparse know we intended to exit holding the lock. */
		__release(&lock_me_up);
		break;
	case CT_HUNG_TASK:
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule();
		break;
	case CT_EXEC_DATA:
		execute_location(data_area);
		break;
	case CT_EXEC_STACK: {
		u8 stack_area[EXEC_SIZE];

		execute_location(stack_area);
		break;
	}
	case CT_EXEC_KMALLOC: {
		u32 *kmalloc_area = kmalloc(EXEC_SIZE, GFP_KERNEL);

		execute_location(kmalloc_area);
		kfree(kmalloc_area);
		break;
	}
	case CT_EXEC_VMALLOC: {
		u32 *vmalloc_area = vmalloc(EXEC_SIZE);

		execute_location(vmalloc_area);
		vfree(vmalloc_area);
		break;
	}
	case CT_EXEC_USERSPACE: {
		unsigned long user_addr;

		user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
				    PROT_READ | PROT_WRITE | PROT_EXEC,
				    MAP_ANONYMOUS | MAP_PRIVATE, 0);
		if (user_addr >= TASK_SIZE) {
			pr_warn("Failed to allocate user memory\n");
			return;
		}
		execute_user_location((void *)user_addr);
		vm_munmap(user_addr, PAGE_SIZE);
		break;
	}
	case CT_ACCESS_USERSPACE: {
		unsigned long user_addr, tmp;
		unsigned long *ptr;

		user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
				    PROT_READ | PROT_WRITE | PROT_EXEC,
				    MAP_ANONYMOUS | MAP_PRIVATE, 0);
		if (user_addr >= TASK_SIZE) {
			pr_warn("Failed to allocate user memory\n");
			return;
		}

		ptr = (unsigned long *)user_addr;

		pr_info("attempting bad read at %p\n", ptr);
		tmp = *ptr;
		tmp += 0xc0dec0de;

		pr_info("attempting bad write at %p\n", ptr);
		*ptr = tmp;

		vm_munmap(user_addr, PAGE_SIZE);
		break;
	}
	case CT_WRITE_RO: {
		unsigned long *ptr;

		ptr = (unsigned long *)&rodata;

		pr_info("attempting bad write at %p\n", ptr);
		*ptr ^= 0xabcd1234;
		break;
	}
	case CT_WRITE_KERN: {
		size_t size;
		unsigned char *ptr;

		size = (unsigned long)do_overwritten -
		       (unsigned long)do_nothing;
		ptr = (unsigned char *)do_overwritten;

		pr_info("attempting bad %zu byte write at %p\n", size, ptr);
		memcpy(ptr, (unsigned char *)do_nothing, size);
		flush_icache_range((unsigned long)ptr,
				   (unsigned long)(ptr + size));

		do_overwritten();
		break;
	}
	case CT_NONE:
	default:
		break;
	}
}
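These crash modes are normally exercised from userspace by writing the crash-type name to lkdtm's debugfs interface. A small trigger sketch follows; /sys/kernel/debug/provoke-crash/DIRECT is lkdtm's conventional entry point, but the exact path depends on where debugfs is mounted on your system:

/*
 * Userspace trigger sketch: write a crash-type name such as
 * "ACCESS_USERSPACE" to lkdtm's DIRECT file. The kernel is expected
 * to oops or panic, so run this only on a disposable test VM.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	const char *mode = argc > 1 ? argv[1] : "ACCESS_USERSPACE";
	int fd = open("/sys/kernel/debug/provoke-crash/DIRECT", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, mode, strlen(mode)) < 0)
		perror("write");
	close(fd);
	return 0;
}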
int munmap(void *addr, size_t len)
{
	int ret;

	blocking_notifier_call_chain(&munmap_notifier, 0, addr);
	ret = vm_munmap((unsigned long)addr, len);
	return ret;
}
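The munmap_notifier chain above lets other code observe an unmap before the region is torn down. A minimal client sketch, assuming munmap_notifier is a visible blocking notifier head and using an illustrative my_munmap_cb name:

/*
 * Sketch only: the callback receives the 'addr' argument passed to
 * blocking_notifier_call_chain() above; the action value is 0 here.
 */
static int my_munmap_cb(struct notifier_block *nb,
			unsigned long action, void *addr)
{
	pr_debug("about to munmap %p\n", addr);
	return NOTIFY_OK;
}

static struct notifier_block my_munmap_nb = {
	.notifier_call = my_munmap_cb,
};

/* Somewhere during init: */
blocking_notifier_chain_register(&munmap_notifier, &my_munmap_nb);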
static int __init test_user_copy_init(void)
{
	int ret = 0;
	char *kmem;
	char __user *usermem;
	char *bad_usermem;
	unsigned long user_addr;
	unsigned long value = 0x5A;

	kmem = kmalloc(PAGE_SIZE * 2, GFP_KERNEL);
	if (!kmem)
		return -ENOMEM;

	user_addr = vm_mmap(NULL, 0, PAGE_SIZE * 2,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_addr >= (unsigned long)(TASK_SIZE)) {
		pr_warn("Failed to allocate user memory\n");
		kfree(kmem);
		return -ENOMEM;
	}

	usermem = (char __user *)user_addr;
	bad_usermem = (char *)user_addr;

	/* Legitimate usage: none of these should fail. */
	ret |= test(copy_from_user(kmem, usermem, PAGE_SIZE),
		    "legitimate copy_from_user failed");
	ret |= test(copy_to_user(usermem, kmem, PAGE_SIZE),
		    "legitimate copy_to_user failed");
	ret |= test(get_user(value, (unsigned long __user *)usermem),
		    "legitimate get_user failed");
	ret |= test(put_user(value, (unsigned long __user *)usermem),
		    "legitimate put_user failed");

	/* Invalid usage: none of these should succeed. */
	ret |= test(!copy_from_user(kmem, (char __user *)(kmem + PAGE_SIZE),
				    PAGE_SIZE),
		    "illegal all-kernel copy_from_user passed");
	ret |= test(!copy_from_user(bad_usermem, (char __user *)kmem,
				    PAGE_SIZE),
		    "illegal reversed copy_from_user passed");
	ret |= test(!copy_to_user((char __user *)kmem, kmem + PAGE_SIZE,
				  PAGE_SIZE),
		    "illegal all-kernel copy_to_user passed");
	ret |= test(!copy_to_user((char __user *)kmem, bad_usermem,
				  PAGE_SIZE),
		    "illegal reversed copy_to_user passed");
	ret |= test(!get_user(value, (unsigned long __user *)kmem),
		    "illegal get_user passed");
	ret |= test(!put_user(value, (unsigned long __user *)kmem),
		    "illegal put_user passed");

	vm_munmap(user_addr, PAGE_SIZE * 2);
	kfree(kmem);

	if (ret == 0) {
		pr_info("tests passed.\n");
		return 0;
	}

	return -EINVAL;
}
static int __init test_user_copy_init(void)
{
	int ret = 0;
	char *kmem;
	char __user *usermem;
	char *bad_usermem;
	unsigned long user_addr;
	u8 val_u8;
	u16 val_u16;
	u32 val_u32;
#ifdef TEST_U64
	u64 val_u64;
#endif

	kmem = kmalloc(PAGE_SIZE * 2, GFP_KERNEL);
	if (!kmem)
		return -ENOMEM;

	user_addr = vm_mmap(NULL, 0, PAGE_SIZE * 2,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_addr >= (unsigned long)(TASK_SIZE)) {
		pr_warn("Failed to allocate user memory\n");
		kfree(kmem);
		return -ENOMEM;
	}

	usermem = (char __user *)user_addr;
	bad_usermem = (char *)user_addr;

	/*
	 * Legitimate usage: none of these copies should fail.
	 */
	memset(kmem, 0x3a, PAGE_SIZE * 2);
	ret |= test(copy_to_user(usermem, kmem, PAGE_SIZE),
		    "legitimate copy_to_user failed");
	memset(kmem, 0x0, PAGE_SIZE);
	ret |= test(copy_from_user(kmem, usermem, PAGE_SIZE),
		    "legitimate copy_from_user failed");
	ret |= test(memcmp(kmem, kmem + PAGE_SIZE, PAGE_SIZE),
		    "legitimate usercopy failed to copy data");

#define test_legit(size, check)						\
	do {								\
		val_##size = check;					\
		ret |= test(put_user(val_##size, (size __user *)usermem), \
		    "legitimate put_user (" #size ") failed");		\
		val_##size = 0;						\
		ret |= test(get_user(val_##size, (size __user *)usermem), \
		    "legitimate get_user (" #size ") failed");		\
		ret |= test(val_##size != check,			\
		    "legitimate get_user (" #size ") failed to do copy"); \
		if (val_##size != check) {				\
			pr_info("0x%llx != 0x%llx\n",			\
				(unsigned long long)val_##size,		\
				(unsigned long long)check);		\
		}							\
	} while (0)

	test_legit(u8,  0x5a);
	test_legit(u16, 0x5a5b);
	test_legit(u32, 0x5a5b5c5d);
#ifdef TEST_U64
	test_legit(u64, 0x5a5b5c5d6a6b6c6d);
#endif
#undef test_legit

	/*
	 * Invalid usage: none of these copies should succeed.
	 */

	/* Prepare kernel memory with check values. */
	memset(kmem, 0x5a, PAGE_SIZE);
	memset(kmem + PAGE_SIZE, 0, PAGE_SIZE);

	/* Reject kernel-to-kernel copies through copy_from_user(). */
	ret |= test(!copy_from_user(kmem, (char __user *)(kmem + PAGE_SIZE),
				    PAGE_SIZE),
		    "illegal all-kernel copy_from_user passed");

	/* Destination half of buffer should have been zeroed. */
	ret |= test(memcmp(kmem + PAGE_SIZE, kmem, PAGE_SIZE),
		    "zeroing failure for illegal all-kernel copy_from_user");

#if 0
	/*
	 * When running with SMAP/PAN/etc, this will Oops the kernel
	 * due to the zeroing of userspace memory on failure. This needs
	 * to be tested in LKDTM instead, since this test module does not
	 * expect to explode.
	 */
	ret |= test(!copy_from_user(bad_usermem, (char __user *)kmem,
				    PAGE_SIZE),
		    "illegal reversed copy_from_user passed");
#endif
	ret |= test(!copy_to_user((char __user *)kmem, kmem + PAGE_SIZE,
				  PAGE_SIZE),
		    "illegal all-kernel copy_to_user passed");
	ret |= test(!copy_to_user((char __user *)kmem, bad_usermem,
				  PAGE_SIZE),
		    "illegal reversed copy_to_user passed");

#define test_illegal(size, check)					\
	do {								\
		val_##size = (check);					\
		ret |= test(!get_user(val_##size, (size __user *)kmem),	\
		    "illegal get_user (" #size ") passed");		\
		ret |= test(val_##size != (size)0,			\
		    "zeroing failure for illegal get_user (" #size ")"); \
		if (val_##size != (size)0) {				\
			pr_info("0x%llx != 0\n",			\
				(unsigned long long)val_##size);	\
		}							\
		ret |= test(!put_user(val_##size, (size __user *)kmem),	\
		    "illegal put_user (" #size ") passed");		\
	} while (0)

	test_illegal(u8,  0x5a);
	test_illegal(u16, 0x5a5b);
	test_illegal(u32, 0x5a5b5c5d);
#ifdef TEST_U64
	test_illegal(u64, 0x5a5b5c5d6a6b6c6d);
#endif
#undef test_illegal

	vm_munmap(user_addr, PAGE_SIZE * 2);
	kfree(kmem);

	if (ret == 0) {
		pr_info("tests passed.\n");
		return 0;
	}

	return -EINVAL;
}
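Both versions of test_user_copy_init() lean on a test() helper that is not shown here. A plausible sketch, assuming it simply warns and evaluates to nonzero when a check fails so the failures accumulate in ret:

/* Sketch only: report a failed check and fold it into 'ret'. */
#define test(condition, msg)			\
({						\
	int cond = (condition);			\
	if (cond)				\
		pr_warn("%s\n", msg);		\
	cond;					\
})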