/*
 * Atomically cmpxchg a bounds-directory entry in userspace, using an
 * access width that matches the target mm's pointer size.
 *
 * user_atomic_cmpxchg_inatomic() infers the access size from
 * sizeof(*ptr), so a 32-bit mm must be serviced through a u32 pointer;
 * passing the native 'unsigned long' pointer would touch 64 bits of
 * userspace data and corrupt/misread the neighboring entry.
 *
 * On return, *curval holds the value observed at @addr (zero-extended
 * from 32 bits for a legacy mm).  Returns 0 on success or the negative
 * error from the underlying cmpxchg (e.g. -EFAULT).
 */
static int mpx_cmpxchg_bd_entry(struct mm_struct *mm, unsigned long *curval,
				unsigned long __user *addr,
				unsigned long old_val, unsigned long new_val)
{
	u32 uninitialized_var(cur32);
	int ret;

	/* 64-bit mm: the native-width cmpxchg is exactly what we want. */
	if (is_64bit_mm(mm))
		return user_atomic_cmpxchg_inatomic(curval, addr,
						    old_val, new_val);

	/* 32-bit mm: narrow every operand to 32 bits before the access. */
	ret = user_atomic_cmpxchg_inatomic(&cur32, (u32 __user *)addr,
					   (u32)old_val, (u32)new_val);
	*curval = cur32;
	return ret;
}
/*
 * Tear down one bounds table: atomically clear its bounds-directory
 * entry in userspace, then unmap the table's virtual address range.
 *
 * @mm:       mm owning the bounds directory and table
 * @bd_entry: userspace address of the bounds-directory entry
 * @bt_addr:  base address of the bounds table being removed
 *
 * Returns 0 on success (including the benign race where somebody else
 * already freed the table), or a negative errno.
 */
static int unmap_single_bt(struct mm_struct *mm,
		long __user *bd_entry, unsigned long bt_addr)
{
	unsigned long expected_old_val = bt_addr | MPX_BD_ENTRY_VALID_FLAG;
	unsigned long actual_old_val = 0;
	int ret;

	while (1) {
		int need_write = 1;

		pagefault_disable();
		/*
		 * Go through mpx_cmpxchg_bd_entry() instead of calling
		 * user_atomic_cmpxchg_inatomic() directly: a directory
		 * entry is only 4 bytes for a 32-bit mm, and a raw
		 * 8-byte cmpxchg here would read and clobber the
		 * neighboring entry.
		 */
		ret = mpx_cmpxchg_bd_entry(mm, &actual_old_val,
					   (unsigned long __user *)bd_entry,
					   expected_old_val, 0);
		pagefault_enable();
		if (!ret)
			break;
		if (ret == -EFAULT)
			ret = mpx_resolve_fault(bd_entry, need_write);
		/*
		 * If we could not resolve the fault, consider it
		 * userspace's fault and error out.
		 */
		if (ret)
			return ret;
	}
	/*
	 * The cmpxchg was performed, check the results.
	 */
	if (actual_old_val != expected_old_val) {
		/*
		 * Someone else raced with us to unmap the table.
		 * There was no bounds table pointed to by the
		 * directory, so declare success.  Somebody freed
		 * it.
		 */
		if (!actual_old_val)
			return 0;
		/*
		 * Something messed with the bounds directory
		 * entry.  We hold mmap_sem for read or write
		 * here, so it could not be a _new_ bounds table
		 * that someone just allocated.  Something is
		 * wrong, so pass up the error and SIGSEGV.
		 */
		return -EINVAL;
	}
	/*
	 * Note, we are likely being called under do_munmap() already. To
	 * avoid recursion, do_munmap() will check whether it comes
	 * from one bounds table through VM_MPX flag.
	 */
	return do_munmap(mm, bt_addr, MPX_BT_SIZE_BYTES);
}
/*
 * With 32-bit mode, a bounds directory is 4MB, and the size of each
 * bounds table is 16KB.  With 64-bit mode, a bounds directory is 2GB,
 * and the size of each bounds table is 4MB.
 */
/*
 * Allocate a new bounds table and publish it in the bounds-directory
 * entry at @bd_entry (userspace memory).
 *
 * Returns 0 on success — including the benign race where another
 * thread instantiated the table first — or a negative errno.
 */
static int allocate_bt(long __user *bd_entry)
{
	/*
	 * allocate_bt() runs in the context of the faulting task
	 * (mpx_mmap() and the user-memory cmpxchg below both operate on
	 * the current process), so current->mm is the mm whose bounds
	 * directory we are updating.
	 */
	struct mm_struct *mm = current->mm;
	unsigned long expected_old_val = 0;
	unsigned long actual_old_val = 0;
	unsigned long bt_addr;
	int ret = 0;

	/*
	 * Carve the virtual space out of userspace for the new
	 * bounds table:
	 */
	bt_addr = mpx_mmap(MPX_BT_SIZE_BYTES);
	if (IS_ERR((void *)bt_addr))
		return PTR_ERR((void *)bt_addr);
	/*
	 * Set the valid flag (kinda like _PAGE_PRESENT in a pte)
	 */
	bt_addr = bt_addr | MPX_BD_ENTRY_VALID_FLAG;

	/*
	 * Go poke the address of the new bounds table in to the
	 * bounds directory entry out in userspace memory.  Note:
	 * we may race with another CPU instantiating the same table.
	 * In that case the cmpxchg will see an unexpected
	 * 'actual_old_val'.
	 *
	 * This can fault, but that's OK because we do not hold
	 * mmap_sem at this point, unlike some of the other parts
	 * of the MPX code that have to pagefault_disable().
	 *
	 * Use mpx_cmpxchg_bd_entry() so the access width matches the
	 * mm's directory-entry size (4 bytes for a 32-bit mm); a raw
	 * 8-byte cmpxchg would clobber the neighboring entry.
	 */
	ret = mpx_cmpxchg_bd_entry(mm, &actual_old_val,
				   (unsigned long __user *)bd_entry,
				   expected_old_val, bt_addr);
	if (ret)
		goto out_unmap;

	/*
	 * The mpx_cmpxchg_bd_entry() will only return nonzero for
	 * faults, *not* if the cmpxchg itself fails.  Now we must
	 * verify that the cmpxchg itself completed successfully.
	 */
	/*
	 * We expected an empty 'expected_old_val', but instead found
	 * an apparently valid entry.  Assume we raced with another
	 * thread to instantiate this table and declare success.
	 */
	if (actual_old_val & MPX_BD_ENTRY_VALID_FLAG) {
		ret = 0;
		goto out_unmap;
	}
	/*
	 * We found a non-empty bd_entry but it did not have the
	 * VALID_FLAG set.  Return an error which will result in
	 * a SEGV since this probably means that somebody scribbled
	 * some invalid data in to a bounds table.
	 */
	if (expected_old_val != actual_old_val) {
		ret = -EINVAL;
		goto out_unmap;
	}
	return 0;
out_unmap:
	vm_munmap(bt_addr & MPX_BT_ADDR_MASK, MPX_BT_SIZE_BYTES);
	return ret;
}