static void kasan_report_error(struct kasan_access_info *info) { unsigned long flags; const char *bug_type; kasan_start_report(&flags); if (info->access_addr < kasan_shadow_to_mem((void *)KASAN_SHADOW_START)) { if ((unsigned long)info->access_addr < PAGE_SIZE) bug_type = "null-ptr-deref"; else if ((unsigned long)info->access_addr < TASK_SIZE) bug_type = "user-memory-access"; else bug_type = "wild-memory-access"; pr_err("BUG: KASAN: %s on address %p\n", bug_type, info->access_addr); pr_err("%s of size %zu by task %s/%d\n", info->is_write ? "Write" : "Read", info->access_size, current->comm, task_pid_nr(current)); dump_stack(); } else { print_error_description(info); print_address_description(info); print_shadow_for_address(info->first_bad_addr); } kasan_end_report(&flags); }
static void print_shadow_for_address(const void *addr) { int i; const void *shadow = kasan_mem_to_shadow(addr); const void *shadow_row; shadow_row = (void *)round_down((unsigned long)shadow, SHADOW_BYTES_PER_ROW) - SHADOW_ROWS_AROUND_ADDR * SHADOW_BYTES_PER_ROW; pr_err("Memory state around the buggy address:\n"); for (i = -SHADOW_ROWS_AROUND_ADDR; i <= SHADOW_ROWS_AROUND_ADDR; i++) { const void *kaddr = kasan_shadow_to_mem(shadow_row); char buffer[4 + (BITS_PER_LONG/8)*2]; snprintf(buffer, sizeof(buffer), (i == 0) ? ">%p: " : " %p: ", kaddr); kasan_disable_current(); print_hex_dump(KERN_ERR, buffer, DUMP_PREFIX_NONE, SHADOW_BYTES_PER_ROW, 1, shadow_row, SHADOW_BYTES_PER_ROW, 0); kasan_enable_current(); if (row_is_guilty(shadow_row, shadow)) pr_err("%*c\n", shadow_pointer_offset(shadow_row, shadow), '^'); shadow_row += SHADOW_BYTES_PER_ROW; } }
/*
 * Software tag-based KASAN access check.
 *
 * Validates the [addr, addr + size) access: extracts the pointer tag,
 * skips native-kernel-tagged pointers, reports out-of-shadow addresses,
 * and otherwise walks the shadow bytes comparing each against the tag.
 * Any mismatch is reported via kasan_report().
 */
void check_memory_region(unsigned long addr, size_t size, bool write,
				unsigned long ret_ip)
{
	u8 *shadow, *shadow_end;
	void *untagged_addr;
	u8 tag;

	if (unlikely(size == 0))
		return;

	tag = get_tag((const void *)addr);

	/*
	 * Ignore accesses for pointers tagged with 0xff (native kernel
	 * pointer tag) to suppress false positives caused by kmap.
	 *
	 * Some kernel code was written to account for archs that don't keep
	 * high memory mapped all the time, but rather map and unmap particular
	 * pages when needed. Instead of storing a pointer to the kernel memory,
	 * this code saves the address of the page structure and offset within
	 * that page for later use. Those pages are then mapped and unmapped
	 * with kmap/kunmap when necessary and virt_to_page is used to get the
	 * virtual address of the page. For arm64 (that keeps the high memory
	 * mapped all the time), kmap is turned into a page_address call.
	 * The issue is that with use of the page_address + virt_to_page
	 * sequence the top byte value of the original pointer gets lost (gets
	 * set to KASAN_TAG_KERNEL (0xFF)).
	 */
	if (tag == KASAN_TAG_KERNEL)
		return;

	untagged_addr = reset_tag((const void *)addr);
	if (unlikely(untagged_addr <
			kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
		/* Below the shadowed range: report immediately. */
		kasan_report(addr, size, write, ret_ip);
		return;
	}

	/* Compare every shadow byte covering the access against the tag. */
	shadow = kasan_mem_to_shadow(untagged_addr);
	shadow_end = kasan_mem_to_shadow(untagged_addr + size - 1);
	while (shadow <= shadow_end) {
		if (*shadow != tag) {
			kasan_report(addr, size, write, ret_ip);
			return;
		}
		shadow++;
	}
}
/*
 * Generic KASAN access check (inlined fast path).
 *
 * Reports the access if the address is below the shadowed range or if
 * any shadow byte covering [addr, addr + size) marks the memory as
 * poisoned; otherwise returns without side effects.
 */
static __always_inline void check_memory_region(unsigned long addr,
						size_t size, bool write)
{
	if (unlikely(size == 0))
		return;

	if (unlikely((void *)addr <
			kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
		/* Address is not covered by shadow memory at all. */
		kasan_report(addr, size, write, _RET_IP_);
		return;
	}

	if (unlikely(memory_is_poisoned(addr, size)))
		kasan_report(addr, size, write, _RET_IP_);
}
static void print_shadow_for_address(const void *addr) { int i; const void *shadow = kasan_mem_to_shadow(addr); const void *shadow_row; shadow_row = (void *)round_down((unsigned long)shadow, SHADOW_BYTES_PER_ROW) - SHADOW_ROWS_AROUND_ADDR * SHADOW_BYTES_PER_ROW; pr_err("Memory state around the buggy address:\n"); for (i = -SHADOW_ROWS_AROUND_ADDR; i <= SHADOW_ROWS_AROUND_ADDR; i++) { const void *kaddr = kasan_shadow_to_mem(shadow_row); char buffer[4 + (BITS_PER_LONG/8)*2]; char shadow_buf[SHADOW_BYTES_PER_ROW]; snprintf(buffer, sizeof(buffer), (i == 0) ? ">%p: " : " %p: ", kaddr); /* * We should not pass a shadow pointer to generic * function, because generic functions may try to * access kasan mapping for the passed address. */ memcpy(shadow_buf, shadow_row, SHADOW_BYTES_PER_ROW); print_hex_dump(KERN_ERR, buffer, DUMP_PREFIX_NONE, SHADOW_BYTES_PER_ROW, 1, shadow_buf, SHADOW_BYTES_PER_ROW, 0); if (row_is_guilty(shadow_row, shadow)) pr_err("%*c\n", shadow_pointer_offset(shadow_row, shadow), '^'); shadow_row += SHADOW_BYTES_PER_ROW; } }
static void kasan_report_error(struct kasan_access_info *info) { unsigned long flags; const char *bug_type; /* * Make sure we don't end up in loop. */ kasan_disable_current(); spin_lock_irqsave(&report_lock, flags); pr_err("==================================================================\n"); if (info->access_addr < kasan_shadow_to_mem((void *)KASAN_SHADOW_START)) { if ((unsigned long)info->access_addr < PAGE_SIZE) bug_type = "null-ptr-deref"; else if ((unsigned long)info->access_addr < TASK_SIZE) bug_type = "user-memory-access"; else bug_type = "wild-memory-access"; pr_err("BUG: KASAN: %s on address %p\n", bug_type, info->access_addr); pr_err("%s of size %zu by task %s/%d\n", info->is_write ? "Write" : "Read", info->access_size, current->comm, task_pid_nr(current)); dump_stack(); } else { print_error_description(info); print_address_description(info); print_shadow_for_address(info->first_bad_addr); } pr_err("==================================================================\n"); add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); spin_unlock_irqrestore(&report_lock, flags); kasan_enable_current(); }
/*
 * Return true if @info->access_addr falls within the address range that
 * is covered by KASAN shadow memory.
 */
static bool addr_has_shadow(struct kasan_access_info *info)
{
	const void *first_shadowed =
		kasan_shadow_to_mem((void *)KASAN_SHADOW_START);

	return info->access_addr >= first_shadowed;
}