static void execute_location(void *dst)
{
	void (*func)(void) = dst;

	pr_info("attempting ok execution at %p\n", do_nothing);
	do_nothing();

	memcpy(dst, do_nothing, EXEC_SIZE);
	flush_icache_range((unsigned long)dst, (unsigned long)dst + EXEC_SIZE);
	pr_info("attempting bad execution at %p\n", func);
	func();
}

/*
 * We could optimize the case where the cache argument is not BCACHE but
 * that seems very atypical use ...
 */
SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
	unsigned int, cache)
{
	if (bytes == 0)
		return 0;
	if (!access_ok(VERIFY_WRITE, (void __user *) addr, bytes))
		return -EFAULT;

	flush_icache_range(addr, addr + bytes);

	return 0;
}

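For context, a minimal user-space sketch of how a cacheflush syscall like the one above is typically reached: a program writes generated instructions into a buffer and asks the kernel to synchronize the caches before jumping to it. This assumes the MIPS-style libc wrapper and the ICACHE/DCACHE/BCACHE constants from <sys/cachectl.h>; other ports expose different wrappers, so treat the names as illustrative.

#include <stdio.h>
#include <stddef.h>
#include <sys/cachectl.h>

static void run_generated_code(void *buf, size_t len)
{
	/* ... instructions have already been written into buf ... */

	/* Push the new code out of the D-cache and invalidate the I-cache. */
	if (cacheflush(buf, len, BCACHE) != 0)
		perror("cacheflush");

	((void (*)(void))buf)();
}
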
/*
 * Some architectures need cache flushes when we set/clear a
 * breakpoint:
 */
static void kgdb_flush_swbreak_addr(unsigned long addr)
{
	if (!CACHE_FLUSH_IS_SAFE)
		return;

	if (current->mm && current->mm->mmap_cache) {
		flush_cache_range(current->mm->mmap_cache,
				  addr, addr + BREAK_INSTR_SIZE);
	}
	/* Force flush instruction cache if it was outside the mm */
	flush_icache_range(addr, addr + BREAK_INSTR_SIZE);
}

/*
 * We could optimize the case where the cache argument is not BCACHE but
 * that seems very atypical use ...
 */
asmlinkage int sys_cacheflush(unsigned long addr,
	unsigned long bytes, unsigned int cache)
{
	if (bytes == 0)
		return 0;
	if (!access_ok(VERIFY_WRITE, (void __user *) addr, bytes))
		return -EFAULT;

	flush_icache_range(addr, addr + bytes);

	return 0;
}

void arch_jump_label_transform(struct jump_entry *e,
			       enum jump_label_type type)
{
	get_online_cpus();
	mutex_lock(&text_mutex);

	__jump_label_transform(e, type);
	flush_icache_range(e->code, e->code + sizeof(tilegx_bundle_bits));

	mutex_unlock(&text_mutex);
	put_online_cpus();
}

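For context, arch_jump_label_transform() above is driven by the generic static-key machinery when a key is flipped; below is a minimal sketch of such a caller, assuming the static_key API of the same kernel era (my_feature_key and do_slow_feature() are illustrative names, not from the original source).

#include <linux/jump_label.h>

static struct static_key my_feature_key = STATIC_KEY_INIT_FALSE;

static void my_fast_path(void)
{
	/* The branch below is a patchable jump-label site. */
	if (static_key_false(&my_feature_key))
		do_slow_feature();	/* hypothetical slow-path helper */
}

static void my_feature_enable(void)
{
	/*
	 * Flipping the key rewrites every site, which on this architecture
	 * ends up in arch_jump_label_transform() and the icache flush above.
	 */
	static_key_slow_inc(&my_feature_key);
}
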
static void setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
			struct pt_regs *regs)
{
	struct sigframe *frame;
	unsigned long retcode;
	int err = 0;

	frame = get_sigframe(ka, regs, sizeof(*frame));

	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		goto segv_and_exit;

	err |= setup_sigcontext(&frame->sc, /*&frame->fpstate,*/ regs, set->sig[0]);

	if (_NSIG_WORDS > 1) {
		err |= __copy_to_user(frame->extramask, &set->sig[1],
				      sizeof(frame->extramask));
	}

	/* Set up to return from userspace.  If provided, use a stub
	 * already in userspace.
	 */
	if (ka->sa.sa_flags & SA_RESTORER) {
		retcode = (unsigned long)ka->sa.sa_restorer;
	} else {
		retcode = (unsigned long)&frame->retcode;
		err |= __put_user(SWI_SYS_SIGRETURN, &frame->retcode);
		flush_icache_range(retcode, retcode + 4);
	}

	if (err)
		goto segv_and_exit;

	if (current->exec_domain && current->exec_domain->signal_invmap && sig < 32)
		regs->ARM_r0 = current->exec_domain->signal_invmap[sig];
	else
		regs->ARM_r0 = sig;

	regs->ARM_sp = (unsigned long)frame;
	regs->ARM_lr = retcode;
	regs->ARM_pc = (unsigned long)ka->sa.sa_handler;

#if defined(CONFIG_CPU_32)
	/* Maybe we need to deliver a 32-bit signal to a 26-bit task. */
	if (ka->sa.sa_flags & SA_THIRTYTWO)
		regs->ARM_cpsr = USR_MODE;
#endif

	if (valid_user_regs(regs))
		return;

segv_and_exit:
	if (sig == SIGSEGV)
		ka->sa.sa_handler = SIG_DFL;
	force_sig(SIGSEGV, current);
}

void machine_kexec(struct kimage *image)
{
	unsigned long page_list;
	unsigned long reboot_code_buffer_phys;
	void *reboot_code_buffer;

	arch_kexec();

	page_list = image->head & PAGE_MASK;

	/* we need both effective and real address here */
	reboot_code_buffer_phys =
	    page_to_pfn(image->control_code_page) << PAGE_SHIFT;
	reboot_code_buffer = page_address(image->control_code_page);

	/* Prepare parameters for reboot_code_buffer */
	mem_text_write_kernel_word(&kexec_start_address, image->start);
	mem_text_write_kernel_word(&kexec_indirection_page, page_list);
	mem_text_write_kernel_word(&kexec_mach_type, machine_arch_type);
	mem_text_write_kernel_word(&kexec_boot_atags,
		image->start - KEXEC_ARM_ZIMAGE_OFFSET + KEXEC_ARM_ATAGS_OFFSET);
#ifdef CONFIG_KEXEC_HARDBOOT
	mem_text_write_kernel_word(&kexec_hardboot, image->hardboot);
#endif

	/* copy our kernel relocation code to the control code page */
	memcpy(reboot_code_buffer,
	       relocate_new_kernel, relocate_new_kernel_size);

	flush_icache_range((unsigned long) reboot_code_buffer,
			   (unsigned long) reboot_code_buffer + KEXEC_CONTROL_PAGE_SIZE);
	printk(KERN_INFO "Bye!\n");

	if (kexec_reinit)
		kexec_reinit();
	local_irq_disable();
	local_fiq_disable();
	setup_mm_for_reboot(0); /* mode is not used, so just pass 0 */

#ifdef CONFIG_KEXEC_HARDBOOT
	if (image->hardboot && kexec_hardboot_hook)
		/* Run any final machine-specific shutdown code. */
		kexec_hardboot_hook();
#endif

	flush_cache_all();
	outer_flush_all();
	outer_disable();
	cpu_proc_fin();
	outer_inv_all();
	flush_cache_all();
	__virt_to_phys(cpu_reset)(reboot_code_buffer_phys);
}

void machine_kexec(struct kimage *image)
{
	unsigned long page_list;
	unsigned long reboot_code_buffer_phys;
	void *reboot_code_buffer;

	arch_kexec();

	page_list = image->head & PAGE_MASK;

	/* we need both effective and real address here */
	reboot_code_buffer_phys =
	    page_to_pfn(image->control_code_page) << PAGE_SHIFT;
	reboot_code_buffer = page_address(image->control_code_page);

	/* Prepare parameters for reboot_code_buffer */
#ifdef CONFIG_KEXEC_HARDBOOT
	mem_text_write_kernel_word(&kexec_start_address, image->start);
	mem_text_write_kernel_word(&kexec_indirection_page, page_list);
	mem_text_write_kernel_word(&kexec_mach_type, machine_arch_type);
	if (!kexec_boot_atags)
		mem_text_write_kernel_word(&kexec_boot_atags,
			image->start - KEXEC_ARM_ZIMAGE_OFFSET + KEXEC_ARM_ATAGS_OFFSET);
	mem_text_write_kernel_word(&kexec_hardboot, image->hardboot);
#else
	kexec_start_address = image->start;
	kexec_indirection_page = page_list;
	kexec_mach_type = machine_arch_type;
	kexec_boot_atags =
		image->start - KEXEC_ARM_ZIMAGE_OFFSET + KEXEC_ARM_ATAGS_OFFSET;
#endif

	/* copy our kernel relocation code to the control code page */
	memcpy(reboot_code_buffer,
	       relocate_new_kernel, relocate_new_kernel_size);

	flush_icache_range((unsigned long) reboot_code_buffer,
			   (unsigned long) reboot_code_buffer + KEXEC_CONTROL_PAGE_SIZE);
	printk(KERN_INFO "Bye!\n");

	if (kexec_reinit)
		kexec_reinit();

#ifdef CONFIG_KEXEC_HARDBOOT
	/* Run any final machine-specific shutdown code. */
	if (image->hardboot && kexec_hardboot_hook)
		kexec_hardboot_hook();
#endif

	soft_restart(reboot_code_buffer_phys);
}

static void kvm_patch_ins_wrtee(u32 *inst, u32 rt, int imm_one)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_wrtee_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_wrtee_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_wrtee, kvm_emulate_wrtee_len * 4);
	p[kvm_emulate_wrtee_branch_offs] |= distance_end & KVM_INST_B_MASK;

	if (imm_one) {
		p[kvm_emulate_wrtee_reg_offs] =
			KVM_INST_LI | __PPC_RT(R30) | MSR_EE;
	} else {
		/* Make clobbered registers work too */
		switch (get_rt(rt)) {
		case 30:
			kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
					 magic_var(scratch2), KVM_RT_30);
			break;
		case 31:
			kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
					 magic_var(scratch1), KVM_RT_30);
			break;
		default:
			p[kvm_emulate_wrtee_reg_offs] |= rt;
			break;
		}
	}

	p[kvm_emulate_wrtee_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrtee_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

static void copy_page_dma(void *to, void *from)
{
	/*
	 * This doesn't seem to get triggered until further along in the
	 * boot process, at which point the DMAC is already initialized.
	 * Fix this in the same fashion as clear_page_dma() in the event
	 * that this crashes due to the DMAC not being initialized.
	 */
	flush_icache_range((unsigned long)from, PAGE_SIZE);
	dma_write_page(dma_channel, (unsigned long)from, (unsigned long)to);
	dma_wait_for_completion(dma_channel);
}

//#define FLUSH_FOR_REAL

asmlinkage int sys_cacheflush(unsigned long addr,
	unsigned long bytes, unsigned int cache)
{
	if (bytes == 0)
		return 0;
	if (!access_ok(VERIFY_WRITE, (void __user *) addr, bytes))
		return -EFAULT;
#ifdef FLUSH_FOR_REAL
	if (cache & ICACHE)
		flush_icache_range(addr, addr + bytes);
	if (cache & DCACHE) {
		unsigned long start_addr;

		for (start_addr = addr; start_addr < (addr + bytes);
		     start_addr += PAGE_SIZE)
			flush_data_cache_page(start_addr);
	}
#else
	flush_icache_range(addr, addr + bytes);
#endif
	return 0;
}

static unsigned long load_aout_interp(struct exec *interp_ex,
				      int interpreter_fd)
{
	unsigned long text_data, offset, elf_entry = ~0UL;
	char *addr;
	int retval;

	printf("WARNING: load_aout_interp() has not been tested at all!\n");

	current->end_code = interp_ex->a_text;
	text_data = interp_ex->a_text + interp_ex->a_data;
	current->end_data = text_data;
	current->brk = interp_ex->a_bss + text_data;

	switch (N_MAGIC(*interp_ex)) {
	case OMAGIC:
		offset = 32;
		addr = (char *) 0;
		break;
	case ZMAGIC:
	case QMAGIC:
		offset = N_TXTOFF(*interp_ex);
		addr = (char *) N_TXTADDR(*interp_ex);
		break;
	default:
		goto out;
	}

	if ((unsigned long)addr + text_data < text_data)
		goto out;

	do_mmap(-1, 0, text_data,
		PROT_READ|PROT_WRITE|PROT_EXEC,
		MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, 0);
	retval = read_exec(interpreter_fd, offset, addr, text_data, 0);
	if (retval < 0)
		goto out;
#if 0
	flush_icache_range((unsigned long)addr,
			   (unsigned long)addr + text_data);
#endif

	do_mmap(-1, ELF_PAGESTART(text_data + ELF_EXEC_PAGESIZE - 1),
		interp_ex->a_bss,
		PROT_READ|PROT_WRITE|PROT_EXEC,
		MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, 0);
	elf_entry = interp_ex->a_entry;

out:
	return elf_entry;
}

void ia64_env_setup(struct ia64_boot_param *boot_param,
	struct kexec_boot_params *params)
{
	unsigned long len;
	efi_system_table_t *systab;
	efi_runtime_services_t *runtime;
	unsigned long *set_virtual_address_map;
	char *command_line = (char *)params->command_line;
	uint64_t command_line_len = params->command_line_len;
	struct ia64_boot_param *new_boot_param =
		(struct ia64_boot_param *) params->boot_param_base;

	memcpy(new_boot_param, boot_param, 4096);

	/*
	 * patch efi_runtime->set_virtual_address_map to a dummy function
	 *
	 * The EFI specification mandates that set_virtual_address_map only
	 * takes effect the first time that it is called, and that
	 * subsequent calls will return error.  By replacing it with a
	 * dummy function the new OS can think it is calling it again
	 * without either the OS or any buggy EFI implementations getting
	 * upset.
	 *
	 * Note: as the EFI specification says that set_virtual_address_map
	 * will only take effect the first time it is called, the mapping
	 * can't be updated, and thus the mapping of the old and new OS
	 * really needs to be the same.
	 */
	len = __dummy_efi_function_end - __dummy_efi_function;
	memcpy(command_line + command_line_len, __dummy_efi_function, len);
	systab = (efi_system_table_t *)new_boot_param->efi_systab;
	runtime = (efi_runtime_services_t *)PA(systab->runtime);
	set_virtual_address_map =
		(unsigned long *)PA(runtime->set_virtual_address_map);
	*(set_virtual_address_map) =
		(unsigned long)(command_line + command_line_len);
	flush_icache_range(command_line + command_line_len, len);

	patch_efi_memmap(params, new_boot_param);

	new_boot_param->efi_memmap = params->efi_memmap_base;
	new_boot_param->command_line = params->command_line;
	new_boot_param->console_info.orig_x = 0;
	new_boot_param->console_info.orig_y = 0;
	new_boot_param->initrd_start = params->ramdisk_base;
	new_boot_param->initrd_size = params->ramdisk_size;
	new_boot_param->vmcode_start = params->vmcode_base;
	new_boot_param->vmcode_size = params->vmcode_size;
}

static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
				unsigned long uaddr, void *kaddr,
				unsigned long len)
{
	if (vma->vm_flags & VM_EXEC) {
		unsigned long addr = (unsigned long)kaddr;

		if (icache_is_aliasing()) {
			__flush_dcache_area(kaddr, len);
			__flush_icache_all();
		} else {
			flush_icache_range(addr, addr + len);
		}
	}
}

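For context, flush_ptrace_access() above is reached when ptrace (or uprobes) writes into another task's text through a kernel mapping. Below is a sketch of that wiring, modeled on arm64's copy_to_user_page(); the exact signature and surrounding locking vary by kernel version, so treat this as illustrative rather than a copy of the original source.

/*
 * Sketch only: copy ptrace's data into the kernel-mapped page, then let
 * flush_ptrace_access() make the new instructions visible if the VMA is
 * executable.
 */
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long uaddr, void *dst, const void *src,
		       unsigned long len)
{
	memcpy(dst, src, len);
	flush_ptrace_access(vma, page, uaddr, dst, len);
}
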
static void undo_single_step(struct pt_regs *regs)
{
	unsigned long addr_wr;

	if (stepped_instr == 0)
		return;

	addr_wr = writable_address(stepped_addr);
	probe_kernel_write((char *)addr_wr, (char *)&stepped_instr,
			   BREAK_INSTR_SIZE);
	stepped_instr = 0;
	smp_wmb();
	flush_icache_range(stepped_addr, stepped_addr + BREAK_INSTR_SIZE);
}

static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long dc;

	pr_debug("resuming execution at PC=%08lx\n", regs->pc);

	dc = __mfdr(DBGREG_DC);
	dc &= ~DC_SS;
	__mtdr(DBGREG_DC, dc);

	*p->addr = BREAKPOINT_INSTRUCTION;
	flush_icache_range((unsigned long)p->addr,
			   (unsigned long)p->addr + sizeof(kprobe_opcode_t));
}

static int g2_xfer_dma(struct dma_channel *chan)
{
	unsigned int chan_nr = chan->chan;

	if (chan->sar & 31) {
		printk("g2dma: unaligned source 0x%lx\n", chan->sar);
		return -EINVAL;
	}

	if (chan->dar & 31) {
		printk("g2dma: unaligned dest 0x%lx\n", chan->dar);
		return -EINVAL;
	}

	if (chan->count & 31)
		chan->count = (chan->count + (32 - 1)) & ~(32 - 1);

	chan->dar += 0xa0800000;
	chan->mode = !chan->mode;

	flush_icache_range((unsigned long)chan->sar, chan->count);

	g2_disable_dma(chan);

	g2_dma->channel[chan_nr].g2_addr   = chan->dar & 0x1fffffe0;
	g2_dma->channel[chan_nr].root_addr = chan->sar & 0x1fffffe0;
	g2_dma->channel[chan_nr].size      = (chan->count & ~31) | 0x80000000;
	g2_dma->channel[chan_nr].direction = chan->mode;
	g2_dma->channel[chan_nr].ctrl      = 5;

	g2_enable_dma(chan);

	pr_debug("count, sar, dar, mode, ctrl, chan, xfer: %ld, 0x%08lx, "
		 "0x%08lx, %ld, %ld, %ld, %ld\n",
		 g2_dma->channel[chan_nr].size,
		 g2_dma->channel[chan_nr].root_addr,
		 g2_dma->channel[chan_nr].g2_addr,
		 g2_dma->channel[chan_nr].direction,
		 g2_dma->channel[chan_nr].ctrl,
		 g2_dma->channel[chan_nr].chan_enable,
		 g2_dma->channel[chan_nr].xfer_enable);

	return 0;
}

static void execute_user_location(void *dst)
{
	/* Intentionally crossing kernel/user memory boundary. */
	void (*func)(void) = dst;

	pr_info("attempting ok execution at %p\n", do_nothing);
	do_nothing();

	if (copy_to_user((void __user *)dst, do_nothing, EXEC_SIZE))
		return;
	flush_icache_range((unsigned long)dst, (unsigned long)dst + EXEC_SIZE);
	pr_info("attempting bad execution at %p\n", func);
	func();
}

static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long dc;

	pr_debug("resuming execution at PC=%08lx\n", regs->pc);

	dc = ocd_read(DC);
	dc &= ~(1 << OCD_DC_SS_BIT);
	ocd_write(DC, dc);

	*p->addr = BREAKPOINT_INSTRUCTION;
	flush_icache_range((unsigned long)p->addr,
			   (unsigned long)p->addr + sizeof(kprobe_opcode_t));
}

void lkdtm_WRITE_KERN(void)
{
	size_t size;
	unsigned char *ptr;

	size = (unsigned long)do_overwritten - (unsigned long)do_nothing;
	ptr = (unsigned char *)do_overwritten;

	pr_info("attempting bad %zu byte write at %p\n", size, ptr);
	memcpy(ptr, (unsigned char *)do_nothing, size);
	flush_icache_range((unsigned long)ptr, (unsigned long)(ptr + size));

	do_overwritten();
}

static noinline void execute_location(void *dst, bool write)
{
	void (*func)(void) = dst;

	pr_info("attempting ok execution at %p\n", do_nothing);
	do_nothing();

	if (write == CODE_WRITE) {
		memcpy(dst, do_nothing, EXEC_SIZE);
		flush_icache_range((unsigned long)dst,
				   (unsigned long)dst + EXEC_SIZE);
	}
	pr_info("attempting bad execution at %p\n", func);
	func();
}

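For context, a sketch of a typical lkdtm entry point that exercises execute_location(); this is modeled on the EXEC_KMALLOC permission test, so the exact name and surrounding plumbing may differ between kernel versions.

#include <linux/slab.h>

void lkdtm_EXEC_KMALLOC(void)
{
	/* Heap memory should never be executable; this is expected to oops. */
	u32 *kmalloc_area = kmalloc(EXEC_SIZE, GFP_KERNEL);

	if (!kmalloc_area)
		return;
	execute_location(kmalloc_area, CODE_WRITE);
	kfree(kmalloc_area);
}
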
int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
{
	int err;
	unsigned long addr_wr = writable_address(bpt->bpt_addr);

	if (addr_wr == 0)
		return -1;

	err = probe_kernel_write((char *)addr_wr, (char *)bpt->saved_instr,
				 BREAK_INSTR_SIZE);
	smp_wmb();
	flush_icache_range((unsigned long)bpt->bpt_addr,
			   (unsigned long)bpt->bpt_addr + BREAK_INSTR_SIZE);

	return err;
}

void __kmc_copy_supcode(unsigned long elf_brk)
{
	char __user *usrcode;
	int ret;

	if (__kmc_sup_size) {
		usrcode = (char *)(PAGE_ALIGN(elf_brk) - PAGE_SIZE);
		ret = copy_to_user(usrcode, __kmc_sup_start, __kmc_sup_size);
		flush_icache_range((unsigned long)usrcode,
				   (unsigned long)(usrcode + __kmc_sup_size));
		sys_mlock((unsigned long)usrcode, PAGE_SIZE);
		sys_mprotect((unsigned long)usrcode, PAGE_SIZE,
			     PROT_READ | PROT_WRITE | PROT_EXEC);
	}
	return;
}

/*
 * Syscall implementation for icache flush
 */
asmlinkage int sys_cacheflush(void *__addr, __const int __nbytes,
			      __const int __op)
{
	int ret = 0;

	switch (__op) {
	case ICACHE:
		flush_icache_range((unsigned long)__addr,
				   (unsigned long)__addr + __nbytes);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

/*
 * Replace the next instruction after the current instruction with a
 * breakpoint instruction.
 */
static void do_single_step(struct pt_regs *regs)
{
	unsigned long addr_wr;

	/* Determine where the target instruction will send us to. */
	stepped_addr = get_step_address(regs);
	probe_kernel_read((char *)&stepped_instr, (char *)stepped_addr,
			  BREAK_INSTR_SIZE);

	addr_wr = writable_address(stepped_addr);
	probe_kernel_write((char *)addr_wr, (char *)&singlestep_insn,
			   BREAK_INSTR_SIZE);
	smp_wmb();
	flush_icache_range(stepped_addr, stepped_addr + BREAK_INSTR_SIZE);
}

/*
 * Do whatever setup is needed on the image and the
 * reboot code buffer to allow us to avoid allocations
 * later.
 */
int machine_kexec_prepare(struct kimage *image)
{
	void *control_code_buffer;
	const unsigned long *func;

	func = (unsigned long *)&relocate_new_kernel;

	/* Pre-load control code buffer to minimize work in kexec path */
	control_code_buffer = page_address(image->control_code_page);
	memcpy((void *)control_code_buffer, (const void *)func[0],
	       relocate_new_kernel_size);
	flush_icache_range((unsigned long)control_code_buffer,
			   (unsigned long)control_code_buffer + relocate_new_kernel_size);
	ia64_kimage = image;

	return 0;
}

int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
{
	u32 *tp = addr;
	int ret;

	/* A64 instructions must be word aligned */
	if ((uintptr_t)tp & 0x3)
		return -EINVAL;

	ret = aarch64_insn_write(tp, insn);
	if (ret == 0)
		flush_icache_range((uintptr_t)tp,
				   (uintptr_t)tp + AARCH64_INSN_SIZE);

	return ret;
}

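For context, a minimal sketch of a caller that retires an instruction to a NOP through this helper; aarch64_insn_gen_nop() is the arm64 NOP encoder, and the wrapper name below is illustrative rather than taken from the original source.

#include <asm/insn.h>

/*
 * Sketch only: overwrite the instruction at @addr with a NOP and rely on
 * aarch64_insn_patch_text_nosync() for the write plus icache maintenance.
 * Safe only when no other CPU can execute the old instruction concurrently.
 */
static int patch_site_to_nop(void *addr)
{
	return aarch64_insn_patch_text_nosync(addr, aarch64_insn_gen_nop());
}
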
static unsigned long load_aout_interp(struct exec *interp_ex,
				      struct file *interpreter)
{
	unsigned long text_data, elf_entry = ~0UL;
	char *addr;
	loff_t offset;

	current->mm->end_code = interp_ex->a_text;
	text_data = interp_ex->a_text + interp_ex->a_data;
	current->mm->end_data = text_data;
	current->mm->brk = interp_ex->a_bss + text_data;

	switch (N_MAGIC(*interp_ex)) {
	case OMAGIC:
		offset = 32;
		addr = (char *) 0;
		break;
	case ZMAGIC:
	case QMAGIC:
		offset = N_TXTOFF(*interp_ex);
		addr = (char *) N_TXTADDR(*interp_ex);
		break;
	default:
		goto out;
	}

	down_write(&current->mm->mmap_sem);
	do_brk(0, text_data);
	up_write(&current->mm->mmap_sem);
	if (!interpreter->f_op || !interpreter->f_op->read)
		goto out;
	if (interpreter->f_op->read(interpreter, addr, text_data, &offset) < 0)
		goto out;
	flush_icache_range((unsigned long)addr,
			   (unsigned long)addr + text_data);

	down_write(&current->mm->mmap_sem);
	do_brk(ELF_PAGESTART(text_data + ELF_MIN_ALIGN - 1),
	       interp_ex->a_bss);
	up_write(&current->mm->mmap_sem);
	elf_entry = interp_ex->a_entry;

out:
	return elf_entry;
}

int __init kvmppc_booke_init(void)
{
	unsigned long ivor[16];
	unsigned long max_ivor = 0;
	int i;

	/* We install our own exception handlers by hijacking IVPR. IVPR must
	 * be 16-bit aligned, so we need a 64KB allocation. */
	kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
						 VCPU_SIZE_ORDER);
	if (!kvmppc_booke_handlers)
		return -ENOMEM;

	/* XXX make sure our handlers are smaller than Linux's */

	/* Copy our interrupt handlers to match host IVORs. That way we don't
	 * have to swap the IVORs on every guest/host transition. */
	ivor[0] = mfspr(SPRN_IVOR0);
	ivor[1] = mfspr(SPRN_IVOR1);
	ivor[2] = mfspr(SPRN_IVOR2);
	ivor[3] = mfspr(SPRN_IVOR3);
	ivor[4] = mfspr(SPRN_IVOR4);
	ivor[5] = mfspr(SPRN_IVOR5);
	ivor[6] = mfspr(SPRN_IVOR6);
	ivor[7] = mfspr(SPRN_IVOR7);
	ivor[8] = mfspr(SPRN_IVOR8);
	ivor[9] = mfspr(SPRN_IVOR9);
	ivor[10] = mfspr(SPRN_IVOR10);
	ivor[11] = mfspr(SPRN_IVOR11);
	ivor[12] = mfspr(SPRN_IVOR12);
	ivor[13] = mfspr(SPRN_IVOR13);
	ivor[14] = mfspr(SPRN_IVOR14);
	ivor[15] = mfspr(SPRN_IVOR15);

	for (i = 0; i < 16; i++) {
		if (ivor[i] > max_ivor)
			max_ivor = ivor[i];

		memcpy((void *)kvmppc_booke_handlers + ivor[i],
		       kvmppc_handlers_start + i * kvmppc_handler_len,
		       kvmppc_handler_len);
	}
	flush_icache_range(kvmppc_booke_handlers,
			   kvmppc_booke_handlers + max_ivor + kvmppc_handler_len);

	return 0;
}

void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
				  void *src, unsigned long len)
{
	void *kaddr;

	/* Initialize the slot */
	kaddr = kmap_atomic(page);
	memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len);
	kunmap_atomic(kaddr);

	/*
	 * The MIPS version of flush_icache_range will operate safely on
	 * user space addresses and more importantly, it doesn't require a
	 * VMA argument.
	 */
	flush_icache_range(vaddr, vaddr + len);
}