/*
 * Non-temporal (cache-bypassing) copy from user space, without zeroing
 * the destination tail on a fault.  Returns the number of bytes that
 * could NOT be copied (0 on complete success).
 *
 * NOTE(review): __copy_user() / __copy_user_intel_nocache() appear to
 * update 'n' to the uncopied byte count — confirm against their
 * definitions elsewhere in this file.
 */
unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *from, unsigned long n)
{
#ifdef CONFIG_X86_INTEL_USERCOPY
	/* Take the non-temporal path only for larger copies on CPUs with
	 * SSE2 (cpu_has_xmm2); small copies go through the generic
	 * string-move helper, where cache pollution is negligible. */
	if (n > 64 && cpu_has_xmm2)
		n = __copy_user_intel_nocache(to, from, n);
	else
		__copy_user(to, from, n);
#else
	__copy_user(to, from, n);
#endif
	return n;
}
/*
 * Copy 'n' bytes from kernel space to user space, first verifying that
 * the destination range is writable.  Returns the number of bytes left
 * uncopied: 'n' unchanged if the access check fails, otherwise whatever
 * __copy_user() leaves in 'n'.
 */
unsigned long __generic_copy_to_user(void *to, const void *from, unsigned long n)
{
	if (!access_ok(VERIFY_WRITE, to, n))
		return n;
	__copy_user(to, from, n);
	return n;
}
/*
 * Copy 'n' bytes to user space.  Returns the number of bytes that
 * could not be copied (0 on success).
 *
 * On CPUs whose WP bit is not honored in supervisor mode
 * (!CONFIG_X86_WP_WORKS_OK and boot_cpu_data.wp_works_ok == 0), a
 * kernel write through a read-only user mapping would not fault, so
 * the copy is done manually one page at a time via get_user_pages()
 * + kmap_atomic(), which forces proper COW handling.
 *
 * FIX: the original text had "&current" mis-encoded as the "¤t"
 * character (HTML &curren; entity corruption) in the four
 * down_read()/up_read() calls; restored here.
 *
 * NOTE(review): a second, newer definition of __copy_to_user_ll (with
 * an in_atomic() guard and is_global_init()) appears later in this
 * file — apparently two kernel versions pasted together; only one
 * definition can be compiled in.
 */
unsigned long __copy_to_user_ll(void __user *to, const void *from, unsigned long n)
{
	BUG_ON((long) n < 0);
#ifndef CONFIG_X86_WP_WORKS_OK
	if (unlikely(boot_cpu_data.wp_works_ok == 0) &&
			((unsigned long )to) < TASK_SIZE) {
		/*
		 * CPU does not honor the WP bit when writing
		 * from supervisory mode, and due to preemption or SMP,
		 * the page tables can change at any time.
		 * Do it manually.	Manfred <*****@*****.**>
		 */
		while (n) {
			unsigned long offset = ((unsigned long)to)%PAGE_SIZE;
			unsigned long len = PAGE_SIZE - offset;
			int retval;
			struct page *pg;
			void *maddr;

			if (len > n)
				len = n;

survive:
			down_read(&current->mm->mmap_sem);
			retval = get_user_pages(current, current->mm,
					(unsigned long )to, 1, 1, 0, &pg, NULL);

			if (retval == -ENOMEM && current->pid == 1) {
				/* init (pid 1) must not see a partial copy on
				 * transient memory pressure: wait for block
				 * congestion to ease and retry. */
				up_read(&current->mm->mmap_sem);
				blk_congestion_wait(WRITE, HZ/50);
				goto survive;
			}

			if (retval != 1) {
				up_read(&current->mm->mmap_sem);
				break;
			}

			maddr = kmap_atomic(pg, KM_USER0);
			memcpy(maddr + offset, from, len);
			kunmap_atomic(maddr, KM_USER0);
			set_page_dirty_lock(pg);
			put_page(pg);
			up_read(&current->mm->mmap_sem);

			from += len;
			to += len;
			n -= len;
		}
		return n;
	}
#endif
	if (movsl_is_ok(to, from, n))
		__copy_user(to, from, n);
	else
		n = __copy_user_intel(to, from, n);
	return n;
}
/*
 * Copy 'n' bytes to user space.  Returns the number of bytes that
 * could not be copied (0 on success).
 *
 * On CPUs whose WP bit is not honored in supervisor mode
 * (!CONFIG_X86_WP_WORKS_OK and boot_cpu_data.wp_works_ok == 0), a
 * kernel write through a read-only user mapping would not fault, so
 * the copy is done manually one page at a time via get_user_pages()
 * + kmap_atomic().  That path can sleep, so it bails out early when
 * called from atomic context, reporting all 'n' bytes as uncopied.
 *
 * FIX: the original text had "&current" mis-encoded as the "¤t"
 * character (HTML &curren; entity corruption) in the four
 * down_read()/up_read() calls; restored here.
 *
 * NOTE(review): an older definition of __copy_to_user_ll appears
 * earlier in this file — apparently two kernel versions pasted
 * together; only one definition can be compiled in.
 */
unsigned long __copy_to_user_ll(void __user *to, const void *from, unsigned long n)
{
#ifndef CONFIG_X86_WP_WORKS_OK
	if (unlikely(boot_cpu_data.wp_works_ok == 0) &&
			((unsigned long)to) < TASK_SIZE) {
		/* The slow path below sleeps (mmap_sem, congestion_wait);
		 * not allowed in atomic context, so report total failure. */
		if (in_atomic())
			return n;

		while (n) {
			unsigned long offset = ((unsigned long)to)%PAGE_SIZE;
			unsigned long len = PAGE_SIZE - offset;
			int retval;
			struct page *pg;
			void *maddr;

			if (len > n)
				len = n;

survive:
			down_read(&current->mm->mmap_sem);
			retval = get_user_pages(current, current->mm,
					(unsigned long)to, 1, 1, 0, &pg, NULL);

			if (retval == -ENOMEM && is_global_init(current)) {
				/* init must not see a partial copy on
				 * transient memory pressure: wait for
				 * congestion to ease and retry. */
				up_read(&current->mm->mmap_sem);
				congestion_wait(BLK_RW_ASYNC, HZ/50);
				goto survive;
			}

			if (retval != 1) {
				up_read(&current->mm->mmap_sem);
				break;
			}

			maddr = kmap_atomic(pg, KM_USER0);
			memcpy(maddr + offset, from, len);
			kunmap_atomic(maddr, KM_USER0);
			set_page_dirty_lock(pg);
			put_page(pg);
			up_read(&current->mm->mmap_sem);

			from += len;
			to += len;
			n -= len;
		}
		return n;
	}
#endif
	if (movsl_is_ok(to, from, n))
		__copy_user(to, from, n);
	else
		n = __copy_user_intel(to, from, n);
	return n;
}
/*
 * Copy from user space without zeroing the destination tail on a
 * fault.  Returns the number of bytes left uncopied (0 on success).
 */
unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from, unsigned long n)
{
	if (movsl_is_ok(to, from, n))
		__copy_user(to, from, n);
	else
		/* NOTE(review): the casts swap the __user annotations so the
		 * to-user helper can be reused for the from-user direction —
		 * presumably its copy loop is direction-agnostic; confirm
		 * against __copy_user_intel's definition. */
		n = __copy_user_intel((void __user *)to, (const void *)from, n);
	return n;
}