Code example #1
File: ioremap.c  Project: Dzenik/kernel-source
/*
 * Section support is unsafe on SMP - If you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, if (for example) an interrupt comes in on one of those other
 * CPUs which requires the new ioremap'd region to be referenced, the CPU
 * will reference the _old_ region.
 *
 * Note that get_vm_area_caller() allocates a 4K guard page, so we need to
 * mask the size back down to a 1MB multiple or we will overflow in the
 * loop below.
 */
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmdp;

	flush_cache_vunmap(addr, end);
	pgd = pgd_offset_k(addr);
	pud = pud_offset(pgd, addr);
	pmdp = pmd_offset(pud, addr);
	do {
		pmd_t pmd = *pmdp;

		if (!pmd_none(pmd)) {
			/*
			 * Clear the PMD from the page table, and
			 * increment the kvm sequence so others
			 * notice this change.
			 *
			 * Note: this is still racy on SMP machines.
			 */
			pmd_clear(pmdp);
			init_mm.context.kvm_seq++;

			/*
			 * Free the page table, if there was one.
			 */
			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}

		/*
		 * One Linux PMD covers two 1MB hardware sections
		 * (PMD_SIZE is 2MB), so advance past both entries.
		 */
		addr += PMD_SIZE;
		pmdp += 2;
	} while (addr < end);

	/*
	 * Ensure that the active_mm is up to date - we want to
	 * catch any use-after-iounmap cases.
	 */
	if (current->active_mm->context.kvm_seq != init_mm.context.kvm_seq)
		__check_kvm_seq(current->active_mm);

	flush_tlb_kernel_range(virt, end);
}
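
The header comment packs in two separate constraints, and each can be seen in isolation. First, the guard page: get_vm_area_caller() pads the requested region with an extra 4K page, and size & ~(SZ_1M - 1) rounds the padded size back down to whole 1MB sections so the PMD loop never walks into the guard. A standalone sketch of that arithmetic (plain userspace C; SZ_4K and SZ_1M are reproduced here as local constants rather than taken from kernel headers):

#include <stdio.h>

#define SZ_4K 0x00001000UL
#define SZ_1M 0x00100000UL

int main(void)
{
	/* A 2MB ioremap'd region plus the 4K guard page added by the allocator. */
	unsigned long size = 2 * SZ_1M + SZ_4K;

	/* The mask drops the guard page; without it, the do/while above would
	 * take one extra PMD_SIZE step past the real mapping. */
	unsigned long masked = size & ~(SZ_1M - 1);

	printf("padded size = 0x%08lx\n", size);    /* 0x00201000 */
	printf("masked size = 0x%08lx\n", masked);  /* 0x00200000 */
	return 0;
}

Second, the kvm_seq handshake: after clearing entries the function bumps a global generation counter, and any mm whose cached counter lags re-syncs (via __check_kvm_seq() in the kernel) before the area is touched again through stale page tables. A minimal model of that pattern, with hypothetical names standing in for the kernel's:

#include <stdio.h>

static unsigned int global_seq;	/* plays the role of init_mm.context.kvm_seq */

struct mm_ctx {
	unsigned int seq;	/* plays the role of mm->context.kvm_seq */
};

/* In the kernel, __check_kvm_seq() copies the changed init_mm page-table
 * entries into this mm; here we only model the counter handshake. */
static void resync(struct mm_ctx *mm)
{
	mm->seq = global_seq;
	printf("resynced to generation %u\n", mm->seq);
}

int main(void)
{
	struct mm_ctx task = { .seq = global_seq };

	global_seq++;			/* an unmap bumped the generation */
	if (task.seq != global_seq)	/* the check at the end of the function */
		resync(&task);
	return 0;
}

Neither sketch is kernel code, and the second deliberately ignores the SMP race the in-code comment warns about: on a real multiprocessor, another CPU can keep using the old mapping until its next context switch.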