/*
 * best effort, GUP based copy_from_user() that is NMI-safe
 */
unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
{
	unsigned long offset, addr = (unsigned long)from;
	unsigned long size, len = 0;
	struct page *page;
	void *map;
	int ret;

	if (__range_not_ok(from, n, TASK_SIZE))
		return len;

	do {
		ret = __get_user_pages_fast(addr, 1, 0, &page);
		if (!ret)
			break;

		offset = addr & (PAGE_SIZE - 1);
		size = min(PAGE_SIZE - offset, n - len);

		map = kmap_atomic(page);
		memcpy(to, map + offset, size);
		kunmap_atomic(map);
		put_page(page);

		len += size;
		to += size;
		addr += size;

	} while (len < n);

	return len;
}
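To make the per-page chunking in the loop above concrete: a copy that starts mid-page is split at page boundaries, with the first chunk limited to the bytes remaining in that page. The following standalone sketch replays the same offset/size arithmetic in userland (PAGE_SIZE assumed to be 4 KiB, the start address is hypothetical):

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Sketch only: replays the chunking arithmetic of the GUP loop above. */
int main(void)
{
	unsigned long addr = 0x7f1200000bb8UL;	/* hypothetical user address */
	unsigned long n = 5000, len = 0;

	while (len < n) {
		unsigned long offset = addr & (PAGE_SIZE - 1);
		unsigned long size = PAGE_SIZE - offset;

		if (size > n - len)
			size = n - len;
		/* first iteration: offset 3000, size 1096; second: 3904 */
		printf("chunk: addr=%#lx offset=%lu size=%lu\n",
		       addr, offset, size);
		len += size;
		addr += size;
	}
	return 0;
}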
/*
 * We rely on the nested NMI work to allow atomic faults from the NMI path; the
 * nested NMI paths are careful to preserve CR2.
 */
unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
{
	unsigned long ret;

	if (__range_not_ok(from, n, TASK_SIZE))
		return n;

	/*
	 * Even though this function is typically called from NMI/IRQ context,
	 * disable pagefaults so that its behaviour is consistent even when
	 * called from other contexts.
	 */
	pagefault_disable();
	ret = __copy_from_user_inatomic(to, from, n);
	pagefault_enable();

	return ret;
}
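Note the changed return convention: like __copy_from_user_inatomic(), this version returns the number of bytes left uncopied, so zero means success, whereas the GUP-based variant above returned the number of bytes copied. As a hedged sketch of a typical NMI-context consumer, here is roughly the shape of a perf-style user callchain walker; struct stack_frame and record_return_address() are illustrative assumptions, not the kernel's exact code:

/* Illustrative caller shape only. */
struct stack_frame {
	struct stack_frame __user *next_fp;	/* saved frame pointer */
	unsigned long return_address;
};

static void walk_user_stack(struct stack_frame __user *fp)
{
	struct stack_frame frame;

	while (fp) {
		/* Non-zero return: the read faulted or was out of range. */
		if (copy_from_user_nmi(&frame, fp, sizeof(frame)) != 0)
			break;
		record_return_address(frame.return_address);	/* hypothetical sink */
		fp = frame.next_fp;
	}
}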
static int
dtrace_copycheck(uintptr_t uaddr, uintptr_t kaddr, size_t size)
{
	if (dtrace_here) {
		printk("copycheck: uaddr=%p kaddr=%p size=%d\n",
		    (void *) uaddr, (void *) kaddr, (int) size);
	}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)
	if (__range_not_ok(uaddr, size)) {
#else
	if (!addr_valid(uaddr) || !addr_valid(uaddr + size)) {
#endif
		//printk("uaddr=%p size=%d\n", uaddr, size);
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[cpu_get_id()].cpuc_dtrace_illval = uaddr;
		return (0);
	}
	return (1);
}
# endif

void
dtrace_copyin(uintptr_t uaddr, uintptr_t kaddr, size_t size,
    volatile uint16_t *flags)
{
	if (dtrace_memcpy_with_error((void *) kaddr, (void *) uaddr, size) == 0) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		return;
	}
}

void
dtrace_copyout(uintptr_t kaddr, uintptr_t uaddr, size_t size,
    volatile uint16_t *flags)
{
	if (dtrace_memcpy_with_error((void *) uaddr, (void *) kaddr, size) == 0) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		return;
	}
}

void
dtrace_copyinstr(uintptr_t uaddr, uintptr_t kaddr, size_t size,
    volatile uint16_t *flags)
{
	if (dtrace_memcpy_with_error((void *) kaddr, (void *) uaddr, size) == 0) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		return;
	}
}

void
dtrace_copyoutstr(uintptr_t kaddr, uintptr_t uaddr, size_t size,
    volatile uint16_t *flags)
{
	/* Copy out: the user address is the destination, the kernel the source. */
	if (dtrace_memcpy_with_error((void *) uaddr, (void *) kaddr, size) == 0) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		return;
	}
}

uint8_t
dtrace_fuword8(void *uaddr)
{
	extern uint8_t dtrace_fuword8_nocheck(void *);

	if (!access_ok(VERIFY_READ, uaddr, 1)) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		printk("dtrace_fuword8: uaddr=%p CPU_DTRACE_BADADDR\n", uaddr);
		cpu_core[cpu_get_id()].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}
	return (dtrace_fuword8_nocheck(uaddr));
}

uint16_t
dtrace_fuword16(void *uaddr)
{
	extern uint16_t dtrace_fuword16_nocheck(void *);

	/* A fetch only reads user memory, so VERIFY_READ is the right check. */
	if (!access_ok(VERIFY_READ, uaddr, 2)) {
		printk("dtrace_fuword16: uaddr=%p CPU_DTRACE_BADADDR\n", uaddr);
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[cpu_get_id()].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}
	return (dtrace_fuword16_nocheck(uaddr));
}

uint32_t
dtrace_fuword32(void *uaddr)
{
	extern uint32_t dtrace_fuword32_nocheck(void *);

	if (!addr_valid(uaddr)) {
		HERE2();
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[cpu_get_id()].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}
	return (dtrace_fuword32_nocheck(uaddr));
}

uint64_t
dtrace_fuword64(void *uaddr)
{
	extern uint64_t dtrace_fuword64_nocheck(void *);

	if (!addr_valid(uaddr)) {
		HERE2();
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[cpu_get_id()].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}
	return (dtrace_fuword64_nocheck(uaddr));
}
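All of the copy routines above delegate fault handling to dtrace_memcpy_with_error(), whose definition is not shown here; the contract they rely on is simply "returns 0 if any byte faults, non-zero on full success". Below is a rough sketch of such a helper, assuming it is built on the kernel's exception-table-backed __get_user()/__put_user() primitives (which skip the access_ok() check and so tolerate kernel-side addresses as well). This illustrates the contract only, not the port's actual implementation:

/*
 * Sketch only: byte-wise probing copy that survives faults on either
 * side. Returns 1 on success, 0 if any access faults.
 */
static int
dtrace_memcpy_with_error_sketch(void *dst, void *src, size_t size)
{
	size_t i;

	for (i = 0; i < size; i++) {
		unsigned char c;

		if (__get_user(c, (unsigned char __user *) src + i))
			return (0);	/* source byte faulted */
		if (__put_user(c, (unsigned char __user *) dst + i))
			return (0);	/* destination byte faulted */
	}
	return (1);
}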