Example no. 1
static bool kvm_mips_flush_gpa_pmd(pmd_t *pmd, unsigned long start_gpa,
				   unsigned long end_gpa)
{
	pte_t *pte;
	unsigned long end = ~0ul;
	int i_min = __pmd_offset(start_gpa);
	int i_max = __pmd_offset(end_gpa);
	bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PMD - 1);
	int i;

	for (i = i_min; i <= i_max; ++i, start_gpa = 0) {
		if (!pmd_present(pmd[i]))
			continue;

		pte = pte_offset(pmd + i, 0);
		if (i == i_max)
			end = end_gpa;

		if (kvm_mips_flush_gpa_pte(pte, start_gpa, end)) {
			pmd_clear(pmd + i);
			pte_free_kernel(NULL, pte);
		} else {
			safe_to_remove = false;
		}
	}
	return safe_to_remove;
}
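kvm_mips_flush_gpa_pmd() walks one PMD-level table, flushes the requested GPA range out of each present PTE table, and frees a PTE table only when the flush covered it completely; the return value lets the caller apply the same rule one level up. The following stand-alone sketch mirrors that contract with invented names (MID_ENTRIES, LEAF_ENTRIES, flush_leaf, flush_range) and plain integers instead of guest physical addresses; it illustrates the pattern and is not kernel code.

/*
 * User-space analogue of the two-level walker above: clear the entries in
 * range, free a lower-level table once it is fully covered, and report
 * whether this table is now removable too. All names are made up.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define MID_ENTRIES	4
#define LEAF_ENTRIES	4

struct leaf_table { int present[LEAF_ENTRIES]; };
struct mid_table  { struct leaf_table *leaf[MID_ENTRIES]; };

/* Clear leaf entries [lo, hi]; report whether the whole table was covered. */
static bool flush_leaf(struct leaf_table *lt, int lo, int hi)
{
	bool safe_to_remove = (lo == 0 && hi == LEAF_ENTRIES - 1);
	int i;

	for (i = lo; i <= hi; i++)
		lt->present[i] = 0;
	return safe_to_remove;
}

/* Flush "addresses" [start, end], where an address is 0 .. MID*LEAF - 1. */
static bool flush_range(struct mid_table *mt, int start, int end)
{
	int i_min = start / LEAF_ENTRIES;
	int i_max = end / LEAF_ENTRIES;
	bool safe_to_remove = (i_min == 0 && i_max == MID_ENTRIES - 1);
	int i;

	for (i = i_min; i <= i_max; i++, start = i * LEAF_ENTRIES) {
		int lo = start - i * LEAF_ENTRIES;	/* non-zero only on the first pass */
		int hi = (i == i_max) ? end - i * LEAF_ENTRIES
				      : LEAF_ENTRIES - 1;

		if (!mt->leaf[i])
			continue;

		if (flush_leaf(mt->leaf[i], lo, hi)) {
			free(mt->leaf[i]);		/* whole leaf table emptied */
			mt->leaf[i] = NULL;
		} else {
			safe_to_remove = false;		/* partially covered: keep it */
		}
	}
	return safe_to_remove;
}

int main(void)
{
	struct mid_table mt = { 0 };

	mt.leaf[1] = calloc(1, sizeof(*mt.leaf[1]));
	mt.leaf[1]->present[2] = 1;

	/* [4, 7] exactly covers leaf table 1, so it is freed; the result is 0
	 * because the range does not span the whole mid-level table. */
	printf("whole mid table removable: %d\n", flush_range(&mt, 4, 7));
	return 0;
}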
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmdp;

	flush_cache_vunmap(addr, end);
	pgd = pgd_offset_k(addr);
	pud = pud_offset(pgd, addr);
	pmdp = pmd_offset(pud, addr);
	do {
		pmd_t pmd = *pmdp;

		if (!pmd_none(pmd)) {
			pmd_clear(pmdp);
			init_mm.context.kvm_seq++;

			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}

		addr += PMD_SIZE;
		pmdp += 2;
	} while (addr < end);

	if (current->active_mm->context.kvm_seq != init_mm.context.kvm_seq)
		__check_kvm_seq(current->active_mm);

	flush_tlb_kernel_range(virt, end);
}
Example no. 3
/*
 * Section support is unsafe on SMP - If you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (eg) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area_caller() allocates a guard 4K page, so we need to
 * mask the size back to 4MB aligned or we will overflow in the loop below.
 */
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_4M - 1));
	pgd_t *pgd;

	flush_cache_vunmap(addr, end);
	pgd = pgd_offset_k(addr);
	do {
		pmd_t pmd, *pmdp = pmd_offset((pud_t *)pgd, addr);

		pmd = *pmdp;
		if (!pmd_none(pmd)) {
			/*
			 * Clear the PMD from the page table, and
			 * increment the kvm sequence so others
			 * notice this change.
			 *
			 * Note: this is still racy on SMP machines.
			 */
			pmd_clear(pmdp);

			/*
			 * Free the page table, if there was one.
			 */
			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}

		addr += PGDIR_SIZE;
		pgd++;
	} while (addr < end);

	flush_tlb_kernel_range(virt, end);
}
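The comment above turns on one piece of arithmetic: get_vm_area_caller() hands back a region one guard page larger than requested, so `size & ~(SZ_4M - 1)` rounds the size back down to a whole number of sections before computing `end`; otherwise the do/while loop would step once past the real mapping. A small stand-alone illustration of that masking (the GUARD_PAGE_SIZE and SZ_4M values are assumed for the example, not taken from any particular kernel configuration):

#include <stdio.h>

#define GUARD_PAGE_SIZE	(4UL << 10)		/* assumed 4 KiB guard page */
#define SZ_4M		(4UL << 20)

int main(void)
{
	/* One 4 MiB section mapping plus the guard page added by the vm area. */
	unsigned long virt = 0xc0000000UL;
	unsigned long size = SZ_4M + GUARD_PAGE_SIZE;

	unsigned long end_unmasked = virt + size;
	unsigned long end_masked = virt + (size & ~(SZ_4M - 1));

	/* Without the mask, "addr < end" stays true after the last section,
	 * so the do/while loop would walk one extra step. */
	printf("unmasked end: %#lx\n", end_unmasked);
	printf("masked end:   %#lx\n", end_masked);
	return 0;
}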
Example no. 4
static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
{
	pte_t *pte_table = pte_offset_kernel(pmd, 0);
	pmd_clear(pmd);
	kvm_tlb_flush_vmid_ipa(kvm, addr);
	pte_free_kernel(NULL, pte_table);
	put_page(virt_to_page(pmd));
}
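clear_pmd_entry() unlinks the PTE table, flushes the stage-2 TLB for that IPA, and only then frees the table and drops the reference on the PMD page. Below is a minimal user-space sketch of why that ordering matters, with invented names (teardown, flush_remote_walkers, struct page_table) standing in for the kernel primitives:

/*
 * Illustrative ordering only: clear the slot first, flush anyone who may
 * still see the old entry, and free the table last. Not kernel code.
 */
#include <stdio.h>
#include <stdlib.h>

struct page_table { int entries[4]; };

static void flush_remote_walkers(void)
{
	/* Stand-in for kvm_tlb_flush_vmid_ipa(): make sure nobody still
	 * holds a cached translation through the entry just cleared. */
	printf("flushing stale walkers\n");
}

static void teardown(struct page_table **slot)
{
	struct page_table *pt = *slot;

	*slot = NULL;			/* 1. pmd_clear(): new walks miss           */
	flush_remote_walkers();		/* 2. flush walks that saw the old entry    */
	free(pt);			/* 3. pte_free_kernel(): now safe to free   */
}

int main(void)
{
	struct page_table *table = calloc(1, sizeof(*table));

	teardown(&table);
	return 0;
}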
Example no. 5
/*
 * Section support is unsafe on SMP - If you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (eg) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area_caller() allocates a guard 4K page, so we need to
 * mask the size back to 1MB aligned or we will overflow in the loop below.
 */
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmdp;

	flush_cache_vunmap(addr, end);
	pgd = pgd_offset_k(addr);
	pud = pud_offset(pgd, addr);
	pmdp = pmd_offset(pud, addr);
	do {
		pmd_t pmd = *pmdp;

		if (!pmd_none(pmd)) {
			/*
			 * Clear the PMD from the page table, and
			 * increment the kvm sequence so others
			 * notice this change.
			 *
			 * Note: this is still racy on SMP machines.
			 */
			pmd_clear(pmdp);
			init_mm.context.kvm_seq++;

			/*
			 * Free the page table, if there was one.
			 */
			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}

		addr += PMD_SIZE;
		pmdp += 2;
	} while (addr < end);

	/*
	 * Ensure that the active_mm is up to date - we want to
	 * catch any use-after-iounmap cases.
	 */
	if (current->active_mm->context.kvm_seq != init_mm.context.kvm_seq)
		__check_kvm_seq(current->active_mm);

	flush_tlb_kernel_range(virt, end);
}
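The kvm_seq comments describe a lazy-synchronization pattern: the counter in init_mm is bumped whenever a kernel mapping is torn down, and each mm catches up only when it notices that its cached counter is stale. The following stand-alone sketch mirrors that idea with invented names (global_seq, resync_context); it is not the ARM implementation.

/*
 * Sequence-counter pattern: bump a global counter on change, resync a
 * context lazily when its cached counter no longer matches.
 */
#include <stdio.h>

static unsigned int global_seq;		/* plays the role of init_mm.context.kvm_seq */

struct context {
	unsigned int seq;		/* last global_seq this context synced against */
};

/* Called whenever a shared (kernel) mapping is torn down. */
static void publish_mapping_change(void)
{
	global_seq++;
}

/* Copy whatever shared state is needed, then record that we are up to date. */
static void resync_context(struct context *ctx)
{
	/* ... copy the relevant top-level entries here ... */
	ctx->seq = global_seq;
}

/* Mirror of the __check_kvm_seq() call site at the end of unmap_area_sections(). */
static void check_context(struct context *ctx)
{
	if (ctx->seq != global_seq)
		resync_context(ctx);
}

int main(void)
{
	struct context ctx = { .seq = 0 };

	publish_mapping_change();	/* e.g. an iounmap() of a section mapping */
	check_context(&ctx);		/* lazily catches up before the next use  */
	printf("context seq = %u, global seq = %u\n", ctx.seq, global_seq);
	return 0;
}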
Example no. 6
void destroy_context_skas(struct mm_struct *mm)
{
	struct mmu_context_skas *mmu = &mm->context.skas;

	if(proc_mm)
		os_close_file(mmu->id.u.mm_fd);
	else
		os_kill_ptraced_process(mmu->id.u.pid, 1);

	if(!proc_mm || !ptrace_faultinfo){
		free_page(mmu->id.stack);
		pte_lock_deinit(virt_to_page(mmu->last_page_table));
		pte_free_kernel((pte_t *) mmu->last_page_table);
		dec_zone_page_state(virt_to_page(mmu->last_page_table), NR_PAGETABLE);
#ifdef CONFIG_3_LEVEL_PGTABLES
		pmd_free((pmd_t *) mmu->last_pmd);
#endif
	}
}
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_4M - 1));
	pgd_t *pgd;

	flush_cache_vunmap(addr, end);
	pgd = pgd_offset_k(addr);
	do {
		pmd_t pmd, *pmdp = pmd_offset((pud_t *)pgd, addr);

		pmd = *pmdp;
		if (!pmd_none(pmd)) {
			pmd_clear(pmdp);

			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}

		addr += PGDIR_SIZE;
		pgd++;
	} while (addr < end);

	flush_tlb_kernel_range(virt, end);
}