unsigned long __clear_user(void __user *addr, unsigned long n)
{
	/* See rationale for this in __copy_to_user() above. */
	if (n < 64)
		return __clear_user_std(addr, n);
	return __clear_user_memset(addr, n);
}
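/*
 * Illustrative sketch only: __clear_user() is normally reached via a
 * clear_user() wrapper that validates the range first.  The name and
 * the access_ok() signature below are assumptions (the two-argument
 * form is used by recent kernels; older ones took a VERIFY_* flag).
 */
static inline unsigned long clear_user_sketch(void __user *addr,
					      unsigned long n)
{
	/* Only touch addresses inside the user address range. */
	if (access_ok(addr, n))
		n = __clear_user(addr, n);
	/* Return the number of bytes that could not be cleared. */
	return n;
}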
unsigned long arm_clear_user(void __user *addr, unsigned long n)
{
	/* See rationale for this in __copy_to_user() above. */
	if (n < 64) {
		unsigned long ua_flags = uaccess_save_and_enable();
		n = __clear_user_std(addr, n);
		uaccess_restore(ua_flags);
	} else {
		n = __clear_user_memset(addr, n);
	}
	return n;
}
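/*
 * Illustrative sketch only: the __copy_to_user() rationale referenced
 * above is not part of this excerpt.  Presumably it follows the same
 * shape as arm_clear_user(); the function name below is hypothetical,
 * but __copy_to_user_std()/__copy_to_user_memcpy() are the helpers
 * benchmarked in test_size_treshold() below.
 */
unsigned long arm_copy_to_user_sketch(void __user *to, const void *from,
				      unsigned long n)
{
	if (n < 64) {
		/* Small copies: the plain uaccess routine avoids the
		 * setup cost of walking the page tables. */
		unsigned long ua_flags = uaccess_save_and_enable();
		n = __copy_to_user_std(to, from, n);
		uaccess_restore(ua_flags);
	} else {
		/* Large copies: translating the user address once and
		 * using memcpy() amortizes that setup cost. */
		n = __copy_to_user_memcpy(to, from, n);
	}
	return n;
}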
/*
 * Boot-time benchmark: time the memcpy/memset-based routines against
 * the standard uaccess routines for power-of-two sizes from PAGE_SIZE
 * down to 4 bytes, to justify the 64-byte threshold used above.
 */
static int __init test_size_treshold(void)
{
	struct page *src_page, *dst_page;
	void *user_ptr, *kernel_ptr;
	unsigned long long t0, t1, t2;
	int size, ret;

	ret = -ENOMEM;
	src_page = alloc_page(GFP_KERNEL);
	if (!src_page)
		goto no_src;
	dst_page = alloc_page(GFP_KERNEL);
	if (!dst_page)
		goto no_dst;

	kernel_ptr = page_address(src_page);
	/* Map the destination page with user-space-like (write-only)
	 * protections so the uaccess helpers behave realistically. */
	user_ptr = vmap(&dst_page, 1, VM_IOREMAP, __pgprot(__P010));
	if (!user_ptr)
		goto no_vmap;

	/* warm up the src page dcache */
	ret = __copy_to_user_memcpy(user_ptr, kernel_ptr, PAGE_SIZE);

	for (size = PAGE_SIZE; size >= 4; size /= 2) {
		t0 = sched_clock();
		ret |= __copy_to_user_memcpy(user_ptr, kernel_ptr, size);
		t1 = sched_clock();
		ret |= __copy_to_user_std(user_ptr, kernel_ptr, size);
		t2 = sched_clock();
		printk("copy_to_user: %d %llu %llu\n", size, t1 - t0, t2 - t1);
	}

	for (size = PAGE_SIZE; size >= 4; size /= 2) {
		t0 = sched_clock();
		ret |= __clear_user_memset(user_ptr, size);
		t1 = sched_clock();
		ret |= __clear_user_std(user_ptr, size);
		t2 = sched_clock();
		printk("clear_user: %d %llu %llu\n", size, t1 - t0, t2 - t1);
	}

	if (ret)
		ret = -EFAULT;

	vunmap(user_ptr);
no_vmap:
	put_page(dst_page);
no_dst:
	put_page(src_page);
no_src:
	return ret;
}
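/*
 * Sketch of the boot-time registration for the benchmark above; the
 * exact initcall level is an assumption, as the registration line is
 * not part of this excerpt.
 */
subsys_initcall(test_size_treshold);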