/*
 * copy_user_page
 * @to: kernel logical address
 * @from: kernel logical address
 * @address: U0 address to be mapped
 * @page: page (virt_to_page(to))
 */
void copy_user_page(void *to, void *from, unsigned long address, struct page *page)
{
	/* Assembly page copy that goes through the write-back cache. */
	extern void __copy_page_wb(void *to, void *from);

	/*
	 * When the kernel destination and the user address fall in the
	 * same cache alias set, a direct copy cannot create stale alias
	 * lines, so the fast path is safe.
	 */
	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
		__copy_page_wb(to, from);
	else {
		/* Kernel RW, cacheable, pre-dirtied mapping flags. */
		pgprot_t pgprot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_CACHABLE | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_HW_SHARED | _PAGE_FLAGS_HARD);
		unsigned long phys_addr = virt_to_phys(to);
		/* P3 window slot that shares @address's alias colour. */
		unsigned long p3_addr = P3SEG + (address & CACHE_ALIAS);
		pgd_t *pgd = pgd_offset_k(p3_addr);
		pud_t *pud = pud_offset(pgd, p3_addr);
		pmd_t *pmd = pmd_offset(pud, p3_addr);
		pte_t *pte = pte_offset_kernel(pmd, p3_addr);
		pte_t entry;
		unsigned long flags;

		entry = pfn_pte(phys_addr >> PAGE_SHIFT, pgprot);
		/*
		 * Assert exclusive use of this alias slot while preemption is
		 * disabled.  NOTE(review): "concurreny_check" is a misspelling
		 * of "concurrency"; the array is defined elsewhere, so the
		 * name is left as-is.
		 */
		inc_preempt_count();
		BUG_ON(atomic_inc_return(&concurreny_check[(address & CACHE_ALIAS)>>12]) != 1);
		set_pte(pte, entry);
		local_irq_save(flags);
		flush_tlb_one(get_asid(), p3_addr);
		local_irq_restore(flags);
		update_mmu_cache(NULL, p3_addr, entry);
		/* Copy through the aliased P3 mapping so the cache sees the
		 * same lines the user mapping will. */
		__copy_user_page((void *)p3_addr, from, to);
		pte_clear(&init_mm, p3_addr, pte);
		atomic_dec(&concurreny_check[(address & CACHE_ALIAS)>>12]);
		dec_preempt_count();
	}
}
/*
 * copy_user_page
 * @to: P1 address
 * @from: P1 address
 * @address: U0 address to be mapped
 * @page: page (virt_to_page(to))
 */
void copy_user_page(void *to, void *from, unsigned long address, struct page *page)
{
	/* Record that this page now has a kernel-visible mapping. */
	__set_bit(PG_mapped, &page->flags);

	/* Same cache alias colour: a plain copy cannot create stale lines. */
	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
		copy_page(to, from);
	else {
		/* Kernel RW, cacheable, pre-dirtied mapping flags. */
		pgprot_t pgprot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_CACHABLE | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_HW_SHARED | _PAGE_FLAGS_HARD);
		unsigned long phys_addr = PHYSADDR(to);
		/* P3 window slot that shares @address's alias colour. */
		unsigned long p3_addr = P3SEG + (address & CACHE_ALIAS);
		pgd_t *dir = pgd_offset_k(p3_addr);
		pmd_t *pmd = pmd_offset(dir, p3_addr);
		pte_t *pte = pte_offset_kernel(pmd, p3_addr);
		pte_t entry;
		unsigned long flags;

		entry = pfn_pte(phys_addr >> PAGE_SHIFT, pgprot);
		/* One semaphore per alias slot serialises users of this window. */
		down(&p3map_sem[(address & CACHE_ALIAS)>>12]);
		set_pte(pte, entry);
		local_irq_save(flags);
		__flush_tlb_page(get_asid(), p3_addr);
		local_irq_restore(flags);
		update_mmu_cache(NULL, p3_addr, entry);
		/* Copy through the aliased P3 mapping. */
		__copy_user_page((void *)p3_addr, from, to);
		pte_clear(&init_mm, p3_addr, pte);
		up(&p3map_sem[(address & CACHE_ALIAS)>>12]);
	}
}
/*
 * copy_user_page
 * @to: P1 address
 * @from: P1 address
 * @address: U0 address to be mapped
 * @page: page (virt_to_page(to))
 */
void copy_user_page(void *to, void *from, unsigned long address, struct page *page)
{
	/* Record that this page now has a kernel-visible mapping. */
	__set_bit(PG_mapped, &page->flags);

	/* Same cache alias colour: a plain copy cannot create stale lines. */
	if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
		copy_page(to, from);
	else {
		unsigned long phys_addr = PHYSADDR(to);
		/* P3 window slot that shares @address's alias colour. */
		unsigned long p3_addr = P3SEG + (address & CACHE_ALIAS);
		pgd_t *pgd = pgd_offset_k(p3_addr);
		pud_t *pud = pud_offset(pgd, p3_addr);
		pmd_t *pmd = pmd_offset(pud, p3_addr);
		pte_t *pte = pte_offset_kernel(pmd, p3_addr);
		pte_t entry;
		unsigned long flags;

		entry = pfn_pte(phys_addr >> PAGE_SHIFT, PAGE_KERNEL);
		/* One mutex per alias slot serialises users of this window. */
		mutex_lock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
		set_pte(pte, entry);
		local_irq_save(flags);
		__flush_tlb_page(get_asid(), p3_addr);
		local_irq_restore(flags);
		update_mmu_cache(NULL, p3_addr, entry);
		/* Copy through the aliased P3 mapping. */
		__copy_user_page((void *)p3_addr, from, to);
		pte_clear(&init_mm, p3_addr, pte);
		mutex_unlock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
	}
}
/*
 * __update_tlb - preload the TLB with the translation for @address.
 * @vma:     the VMA being faulted on (may be NULL)
 * @address: virtual address to map
 * @pte:     page-table entry to load
 *
 * Writes PTEH (VPN | ASID) and PTEL (hardware flags only), then issues
 * "ldtlb" to load the entry.  Runs with local interrupts disabled so the
 * PTEH/PTEL register pair cannot be clobbered mid-sequence.
 */
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	unsigned long flags, pteval, vpn;

	/*
	 * Handle debugger faulting in for debugee.
	 */
	if (vma && current->active_mm != vma->vm_mm)
		return;

	local_irq_save(flags);

	/* Set PTEH register */
	vpn = (address & MMU_VPN_MASK) | get_asid();
	__raw_writel(vpn, MMU_PTEH);

	pteval = pte_val(pte);

	/* Set PTEL register */
	pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
	/* conveniently, we want all the software flags to be 0 anyway */
	__raw_writel(pteval, MMU_PTEL);

	/* Load the TLB */
	asm volatile("ldtlb": /* no output */ : /* no input */ : "memory");
	local_irq_restore(flags);
}
/*
 * set_pte_phys - install a kernel translation @addr -> @phys with @prot.
 *
 * Walks (and where needed allocates) the page-table levels for @addr,
 * refuses to overwrite an existing PTE, then installs the entry and
 * flushes the single TLB slot under the current ASID.
 */
static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		pgd_ERROR(*pgd);
		return;
	}

	pud = pud_alloc(NULL, pgd, addr);
	if (unlikely(!pud)) {
		/*
		 * BUG FIX: the original called pud_ERROR(*pud) here, which
		 * dereferences the very NULL pointer just tested for.
		 */
		printk(KERN_ERR "%s: pud_alloc failed for 0x%08lx\n",
		       __func__, addr);
		return;
	}

	pmd = pmd_alloc(NULL, pud, addr);
	if (unlikely(!pmd)) {
		/* Same NULL-dereference fix as the pud path above. */
		printk(KERN_ERR "%s: pmd_alloc failed for 0x%08lx\n",
		       __func__, addr);
		return;
	}

	pte = pte_offset_kernel(pmd, addr);
	/* Refuse to silently overwrite an existing mapping. */
	if (!pte_none(*pte)) {
		pte_ERROR(*pte);
		return;
	}

	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));

	local_flush_tlb_one(get_asid(), addr);
}
// Record the return point of a fork syscall: the instruction following the
// trap at @pc, tagged with the current address-space ID.
static void vmi_fork_callback(CPUState *env, target_ulong pc){
    // Instruction width at @pc: 4 bytes in ARM state, 2 in Thumb state.
    const uint8_t insn_bytes = (env->thumb == 0) ? 4 : 2;
    // pc + insn_bytes or env->regs[14] ?  (open question kept from original)
    fork_returns.push_back(ReturnPoint(pc + insn_bytes, get_asid(env, pc)));
}
/*
 * clear_pte_phys - tear down the kernel translation installed at @addr.
 *
 * A wired mapping (prot has _PAGE_WIRED) first releases its pinned TLB
 * slot; the PTE is then zeroed and the stale translation flushed.
 */
static void clear_pte_phys(unsigned long addr, pgprot_t prot)
{
	pte_t *ptep = __get_pte_phys(addr);

	if (pgprot_val(prot) & _PAGE_WIRED)
		tlb_unwire_entry();

	set_pte(ptep, pfn_pte(0, __pgprot(0)));
	local_flush_tlb_one(get_asid(), addr);
}
// Record the return point of an execve syscall: the instruction following
// the trap at @pc, tagged with the current address-space ID.
static void vmi_execve_callback(CPUState *env, target_ulong pc, syscalls::string filename,target_ulong argv,target_ulong envp) {
    // Instruction width at @pc: 4 bytes in ARM state, 2 in Thumb state.
    const uint8_t insn_bytes = (env->thumb == 0) ? 4 : 2;
    exec_returns.push_back(ReturnPoint(pc + insn_bytes, get_asid(env, pc)));
    // Alternative kept from the original for reference:
    //exec_returns.push_back(std::make_pair(env->regs[14], get_asid(env, pc)));
}
// Decide whether the current CPU state belongs to a watched address space.
// An empty filter list means "watch everything".
static inline bool is_watched(CPUState *env){
    target_ulong pc;
#if defined(TARGET_ARM)
    pc = env->regs[15];
#elif defined(TARGET_I386)
    pc = env->eip;
#endif
    if (relevant_ASIDs.empty())
        return true;

    for (const auto &watched : relevant_ASIDs) {
        if (watched == get_asid(env, pc))
            return true;
    }
    return false;
}
/*
 * do_debug_priv - handle a debug exception taken in privileged mode.
 *
 * Reads the Debug Status register to classify the trap (single step vs.
 * breakpoint), offers it to notify_die() first, then — for single steps —
 * clears the hardware single-step bit and arranges to regain control via
 * thread flags (and, inside the TLB miss handlers, a hardware breakpoint).
 */
asmlinkage void do_debug_priv(struct pt_regs *regs)
{
	unsigned long dc, ds;
	unsigned long die_val;

	/* Debug Status register tells us why we trapped. */
	ds = __mfdr(DBGREG_DS);

	pr_debug("do_debug_priv: pc = %08lx, ds = %08lx\n", regs->pc, ds);

	if (ds & DS_SSS)
		die_val = DIE_SSTEP;
	else
		die_val = DIE_BREAKPOINT;

	/* Registered debuggers get first refusal. */
	if (notify_die(die_val, regs, 0, SIGTRAP) == NOTIFY_STOP)
		return;

	if (likely(ds & DS_SSS)) {
		extern void itlb_miss(void);
		extern void tlb_miss_common(void);
		struct thread_info *ti;

		/* Clear the hardware single-step enable bit. */
		dc = __mfdr(DBGREG_DC);
		dc &= ~DC_SS;
		__mtdr(DBGREG_DC, dc);

		ti = current_thread_info();
		set_ti_thread_flag(ti, TIF_BREAKPOINT);

		/* The TLB miss handlers don't check thread flags */
		if ((regs->pc >= (unsigned long)&itlb_miss) && (regs->pc <= (unsigned long)&tlb_miss_common)) {
			/* Plant a hardware breakpoint at the exception return
			 * address instead, tagged with the current ASID. */
			__mtdr(DBGREG_BWA2A, sysreg_read(RAR_EX));
			__mtdr(DBGREG_BWC2A, 0x40000001 | (get_asid() << 1));
		}

		/*
		 * If we're running in supervisor mode, the breakpoint
		 * will take us where we want directly, no need to
		 * single step.
		 */
		if ((regs->sr & MODE_MASK) != MODE_SUPERVISOR)
			set_ti_thread_flag(ti, TIF_SINGLE_STEP);
	} else {
		panic("Unable to handle debug trap at pc = %08lx\n", regs->pc);
	}
}
/*
 * set_pte_phys - install a kernel translation @addr -> @phys with @prot.
 *
 * Refuses to overwrite an existing PTE.  After installing the entry and
 * flushing the old translation, a _PAGE_WIRED mapping is additionally
 * pinned into a wired TLB slot.
 */
static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	pte_t *pte;

	pte = __get_pte_phys(addr);

	/* Refuse to silently overwrite an existing mapping. */
	if (!pte_none(*pte)) {
		pte_ERROR(*pte);
		return;
	}

	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
	local_flush_tlb_one(get_asid(), addr);

	/* Wired mappings are pinned into the TLB after installation. */
	if (pgprot_val(prot) & _PAGE_WIRED)
		tlb_wire_entry(NULL, addr, *pte);
}
/* Dump the C0_CONTEXT register, the current ASID, and every TLB entry. */
void tlb_print()
{
	int ctx = mips32_get_c0(C0_CONTEXT);

	kprintf("[tlb] C0_CONTEXT : %p\n", (void *)ctx);
	kprintf("[tlb] C0_CONTEXT >> 1: %p\n", (void *)(ctx >> 1));
	kprintf("[tlb] ASID : %d\n", get_asid());

	unsigned total = mips_tlb_size();
	for (unsigned idx = 0; idx < total; idx++) {
		tlbhi_t hi;
		tlblo_t lo0;
		tlblo_t lo1;

		tlb_read_index(&hi, &lo0, &lo1, idx);
		kprintf("[tlb: %d]: hi: %p lo0:%p lo1: %p \n", idx,
			(void *)hi, (void *)lo0, (void *)lo1);
	}
}
/* Callable from fault.c, so not static */
/*
 * __do_tlb_refill - refill one TLB slot for @address from *@pte.
 * @is_text_not_data selects the ITLB (non-zero) or DTLB (zero).
 *
 * Builds PTEH (sign-extended VPN | ASID | valid) and PTEL (hardware flags
 * only), writes them into the slot at the chosen TLB's round-robin cursor,
 * then advances the cursor.
 */
inline void __do_tlb_refill(unsigned long address, unsigned long long is_text_not_data, pte_t *pte)
{
	unsigned long long ptel;
	unsigned long long pteh=0;
	struct tlb_info *tlbp;
	unsigned long long next;

	/* Get PTEL first */
	ptel = pte_val(*pte);

	/*
	 * Set PTEH register
	 */
	pteh = address & MMU_VPN_MASK;

	/* Sign extend based on neff. */
#if (NEFF == 32)
	/* Faster sign extension */
	pteh = (unsigned long long)(signed long long)(signed long)pteh;
#else
	/* General case */
	pteh = (pteh & NEFF_SIGN) ? (pteh | NEFF_MASK) : pteh;
#endif

	/* Set the ASID. */
	pteh |= get_asid() << PTEH_ASID_SHIFT;
	pteh |= PTEH_VALID;

	/* Set PTEL register, set_pte has performed the sign extension */
	ptel &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */

	tlbp = is_text_not_data ? &(cpu_data->itlb) : &(cpu_data->dtlb);

	/* Evict whatever occupies the slot at the round-robin cursor. */
	next = tlbp->next;
	__flush_tlb_slot(next);

	/* Write PTEL (cfg index 1), then PTEH (cfg index 0) into the slot. */
	asm volatile ("putcfg %0,1,%2\n\n\t"
		      "putcfg %0,0,%1\n"
			: : "r" (next), "r" (pteh), "r" (ptel) );

	/* Advance the cursor, wrapping back to the first slot at the end. */
	next += TLB_STEP;
	if (next > tlbp->last) next = tlbp->first;
	tlbp->next = next;
}
/*
 * update_mmu_cache - preload the TLB with the translation for @address.
 * @vma:     the VMA being mapped (may be NULL)
 * @address: virtual address to map
 * @pte:     page-table entry to load
 *
 * On SH7705 (32KB cache), the first time a page is mapped its cache lines
 * are written back via the P1 identity mapping before the TLB load.
 */
void update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t pte)
{
	unsigned long flags;
	unsigned long pteval;
	unsigned long vpn;

	/* Ptrace may call this routine. */
	if (vma && current->active_mm != vma->vm_mm)
		return;

#if defined(CONFIG_SH7705_CACHE_32KB)
	{
		struct page *page = pte_page(pte);
		unsigned long pfn = pte_pfn(pte);

		/* First mapping of this page: write back its cache lines,
		 * then mark it as mapped. */
		if (pfn_valid(pfn) && !test_bit(PG_mapped, &page->flags)) {
			unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;

			__flush_wback_region((void *)P1SEGADDR(phys), PAGE_SIZE);
			__set_bit(PG_mapped, &page->flags);
		}
	}
#endif

	local_irq_save(flags);

	/* Set PTEH register */
	vpn = (address & MMU_VPN_MASK) | get_asid();
	ctrl_outl(vpn, MMU_PTEH);

	pteval = pte_val(pte);

	/* Set PTEL register */
	pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
	/* conveniently, we want all the software flags to be 0 anyway */
	ctrl_outl(pteval, MMU_PTEL);

	/* Load the TLB */
	asm volatile("ldtlb": /* no output */ : /* no input */ : "memory");
	local_irq_restore(flags);
}
/*
 * tlb_wire_entry - pin the translation @addr -> @pte into a wired DTLB slot.
 *
 * The reserved slot handle is recorded in dtlb_entries[] so a later
 * unwire can release it; BUG if every wired slot is already taken.
 */
void tlb_wire_entry(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
	unsigned long long entry;
	unsigned long paddr, flags;

	/* All wired slots already in use? */
	BUG_ON(dtlb_entry == ARRAY_SIZE(dtlb_entries));

	local_irq_save(flags);

	/* Reserve a wired slot and remember it for tlb_unwire_entry(). */
	entry = sh64_get_wired_dtlb_entry();
	dtlb_entries[dtlb_entry++] = entry;

	paddr = pte_val(pte) & _PAGE_FLAGS_HARDWARE_MASK;
	/* NOTE(review): "&= ~PAGE_MASK" keeps only the low in-page bits,
	 * which looks inverted relative to the usual frame masking —
	 * confirm against sh64_setup_tlb_slot()'s expected argument. */
	paddr &= ~PAGE_MASK;

	sh64_setup_tlb_slot(entry, addr, get_asid(), paddr);

	local_irq_restore(flags);
}
/*
 * flush_tlb_page - drop the TLB entry for one user page of @vma's mm.
 *
 * When the target mm is not the current one, temporarily switch to its
 * ASID so the flush hits the right address space, then switch back.
 * Runs with local interrupts disabled around the ASID juggling.
 */
void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	if (vma->vm_mm && vma->vm_mm->context.id != NO_CONTEXT) {
		unsigned long flags;
		unsigned long asid;
		unsigned long saved_asid = MMU_NO_ASID;

		asid = vma->vm_mm->context.id & MMU_CONTEXT_ASID_MASK;
		page &= PAGE_MASK;

		local_irq_save(flags);
		if (vma->vm_mm != current->mm) {
			saved_asid = get_asid();
			set_asid(asid);
		}
		__flush_tlb_page(asid, page);
		if (saved_asid != MMU_NO_ASID)
			set_asid(saved_asid);
		local_irq_restore(flags);
	}
}
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) { unsigned int cpu = smp_processor_id(); if (vma->vm_mm && cpu_context(cpu, vma->vm_mm) != NO_CONTEXT) { unsigned long flags; unsigned long asid; unsigned long saved_asid = MMU_NO_ASID; asid = cpu_asid(cpu, vma->vm_mm); page &= PAGE_MASK; local_irq_save(flags); if (vma->vm_mm != current->mm) { saved_asid = get_asid(); set_asid(asid); } local_flush_tlb_one(asid, page); if (saved_asid != MMU_NO_ASID) set_asid(saved_asid); local_irq_restore(flags); } }
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { struct mm_struct *mm = vma->vm_mm; unsigned int cpu = smp_processor_id(); if (cpu_context(cpu, mm) != NO_CONTEXT) { unsigned long flags; int size; local_irq_save(flags); size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */ cpu_context(cpu, mm) = NO_CONTEXT; if (mm == current->mm) activate_context(mm, cpu); } else { unsigned long asid; unsigned long saved_asid = MMU_NO_ASID; asid = cpu_asid(cpu, mm); start &= PAGE_MASK; end += (PAGE_SIZE - 1); end &= PAGE_MASK; if (mm != current->mm) { saved_asid = get_asid(); set_asid(asid); } while (start < end) { local_flush_tlb_one(asid, start); start += PAGE_SIZE; } if (saved_asid != MMU_NO_ASID) set_asid(saved_asid); } local_irq_restore(flags); }
// Executed for each translation block: fire PANDA VMI callbacks and internal
// syscall-return callbacks whose recorded return point (retaddr, ASID)
// matches the block about to run.  Matched entries are zeroed during
// iteration and pruned afterwards with remove_if(is_empty).  Returns true
// when an internal callback asked for the current TB to be invalidated.
static bool returned_check_callback(CPUState *env, TranslationBlock* tb){
    // First, check if any of the PANDA VMI callbacks needs to be triggered
#if defined(CONFIG_PANDA_VMI)
    panda_cb_list *plist;
    for(auto& retVal :fork_returns){
        if (retVal.retaddr == tb->pc && retVal.process_id == get_asid(env, tb->pc)){
            // we returned from fork
            for(plist = panda_cbs[PANDA_CB_VMI_AFTER_FORK]; plist != NULL; plist = plist->next) {
                plist->entry.return_from_fork(env);
            }
            // set to 0,0 so we can remove after we finish iterating
            retVal.retaddr = retVal.process_id = 0;
        }
    }
    fork_returns.remove_if(is_empty);
    for(auto& retVal :exec_returns){
        // NOTE(review): unlike fork/clone, the retaddr comparison is
        // commented out — any non-kernel block in the recorded address
        // space triggers the after-exec callback.
        if(retVal.process_id == get_asid(env, tb->pc) && !in_kernelspace(env)){
        //if (retVal.retaddr == tb->pc /*&& retVal.process_id == get_asid(env, tb->pc)*/){
            // we returned from exec
            for(plist = panda_cbs[PANDA_CB_VMI_AFTER_EXEC]; plist != NULL; plist = plist->next) {
                plist->entry.return_from_exec(env);
            }
            // set to 0,0 so we can remove after we finish iterating
            retVal.retaddr = retVal.process_id = 0;
        }
    }
    exec_returns.remove_if(is_empty);
    for(auto& retVal :clone_returns){
        if (retVal.retaddr == tb->pc && retVal.process_id == get_asid(env, tb->pc)){
            // we returned from clone
            for(plist = panda_cbs[PANDA_CB_VMI_AFTER_CLONE]; plist != NULL; plist = plist->next) {
                plist->entry.return_from_clone(env);
            }
            // set to 0,0 so we can remove after we finish iterating
            retVal.retaddr = retVal.process_id = 0;
        }
    }
    clone_returns.remove_if(is_empty);
#else
    // VMI support compiled out: discard any recorded return points.
    fork_returns.clear();
    exec_returns.clear();
    clone_returns.clear();
#endif
    //check if any of the internally tracked syscalls has returned
    //only one should be at its return point for any given basic block
    bool invalidate = false;
    for(auto& retVal : other_returns){
        if(retVal.retaddr == tb->pc && retVal.process_id == get_asid(env, tb->pc)){
            Callback_RC rc = retVal.callback(retVal.opaque.get(), env, retVal.process_id);
            if(Callback_RC::INVALIDATE == rc){
                invalidate = true;
            } else if(Callback_RC::NORMAL == rc){
                // set to 0,0 so we can remove after we finish iterating
                retVal.retaddr = retVal.process_id = 0;
            } else if(Callback_RC::ERROR == rc){
                fprintf(stderr, "Syscalls caused an error\n");
                assert(0);
            } else {
                fprintf(stderr, "Unknown syscalls internal callback return value\n");
                assert(0);
            }
        }
    }
    other_returns.remove_if(is_empty);
    return invalidate;
}
/* Write TLB entry @i.  The current ASID is saved and restored around the
 * low-level write, which appears to disturb it. */
void tlb_write_index(tlbhi_t hi, tlblo_t lo0, tlblo_t lo1, int i)
{
	uint8_t saved_asid = get_asid();

	mips_tlbwi2(hi, lo0, lo1, PAGE_MASK_4KB, i);
	set_asid(saved_asid);
}
/* Probe the TLB for @hi, returning the entry-lo pair; the matched index is
 * discarded.  The current ASID is saved and restored around the probe. */
void tlb_probe2(tlbhi_t hi, tlblo_t *lo0, tlblo_t *lo1)
{
	uint8_t saved_asid = get_asid();
	unsigned ignored_index;

	mips_tlbprobe2(hi, lo0, lo1, &ignored_index);
	set_asid(saved_asid);
}
/* Write a TLB entry at a hardware-chosen slot.  The current ASID is saved
 * and restored around the low-level write. */
void tlb_overwrite_random(tlbhi_t hi, tlblo_t lo0, tlblo_t lo1)
{
	uint8_t saved_asid = get_asid();

	mips_tlbrwr2(hi, lo0, lo1, PAGE_MASK_4KB);
	set_asid(saved_asid);
}
// Record the return point of a clone syscall: the (masked) address in the
// link register, tagged with the current address-space ID.
static void vmi_clone_callback(CPUState* env,target_ulong pc,uint32_t clone_flags,uint32_t newsp, target_ulong parent_tidptr,int32_t tls_val, target_ulong child_tidptr,target_ulong regs)
{
    const target_ulong ret_pc = mask_retaddr_to_pc(env->regs[14]);
    clone_returns.push_back(ReturnPoint(ret_pc, get_asid(env, pc)));
}
// Record the return point of a prctl syscall: the (masked) address in the
// link register, tagged with the current address-space ID.
static void vmi_sys_prctl_callback(CPUState *env, target_ulong pc, uint32_t option,uint32_t arg2,uint32_t arg3,uint32_t arg4,uint32_t arg5)
{
    const target_ulong ret_pc = mask_retaddr_to_pc(env->regs[14]);
    prctl_returns.push_back(ReturnPoint(ret_pc, get_asid(env, pc)));
}
/* Read TLB entry @i into @hi/@lo0/@lo1; the page-mask output is discarded.
 * The current ASID is saved and restored around the read. */
void tlb_read_index(tlbhi_t *hi, tlblo_t *lo0, tlblo_t *lo1, int i)
{
	uint8_t saved_asid = get_asid();
	unsigned ignored_mask;

	mips_tlbri2(hi, lo0, lo1, &ignored_mask, i);
	set_asid(saved_asid);
}
// Record the return point of an mmap2 syscall: the (masked) address in the
// link register, tagged with the current address-space ID.
static void vmi_do_mmap2_callback(CPUState *env, target_ulong pc, uint32_t addr,uint32_t len,uint32_t prot,uint32_t flags,uint32_t fd,uint32_t pgoff)
{
    const target_ulong ret_pc = mask_retaddr_to_pc(env->regs[14]);
    mmap_returns.push_back(ReturnPoint(ret_pc, get_asid(env, pc)));
}
/* Invalidate the TLB entry matching @hi.  The current ASID is saved and
 * restored around the low-level invalidation. */
void tlb_invalidate(tlbhi_t hi)
{
	uint8_t saved_asid = get_asid();

	mips_tlbinval(hi);
	set_asid(saved_asid);
}