Example #1
0
/*
 * need to get a 16k page for level 1
 */
pgd_t *get_pgd_slow(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;
	unsigned long flags;

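	/* order-2 allocation: four contiguous 4K pages hold the 16K level-1 table */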
	new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2);
	if (!new_pgd)
		goto no_pgd;

	memset(new_pgd, 0, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));

	/*
	 * Copy over the kernel and IO PGD entries
	 */
	init_pgd = pgd_offset_k(0);
	pgd_list_lock(flags);
	memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
		       (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));
	pgd_list_add(new_pgd);
	pgd_list_unlock(flags);

	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

	if (!vectors_high()) {
#ifdef CONFIG_ARM_FCSE
		/* FCSE does not work without high vectors. */
		BUG();
#endif /* CONFIG_ARM_FCSE */

		/*
		 * On ARM, first page must always be allocated since it
		 * contains the machine vectors.
		 */
		new_pmd = pmd_alloc(mm, new_pgd, 0);
		if (!new_pmd)
			goto no_pmd;

		new_pte = pte_alloc_map(mm, new_pmd, 0);
		if (!new_pte)
			goto no_pte;

		init_pmd = pmd_offset(init_pgd, 0);
		init_pte = pte_offset_map_nested(init_pmd, 0);
		set_pte_ext(new_pte, *init_pte, 0);
		pte_unmap_nested(init_pte);
		pte_unmap(new_pte);
	}

	return new_pgd;

no_pte:
	pmd_free(mm, new_pmd);
no_pmd:
	free_pages((unsigned long)new_pgd, 2);
no_pgd:
	return NULL;
}
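/*
 * Allocate a page directory (i386 Xen).  In the PAE case the user PMDs
 * are allocated up front, and the kernel PMDs are copied from the init
 * page tables when they cannot be shared.
 */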
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	int i;
	pgd_t *pgd = kmem_cache_alloc(pgd_cache, GFP_KERNEL);

	if (!pgd)
		return NULL;

	pgd_test_and_unpin(pgd);

	if (PTRS_PER_PMD == 1)
		return pgd;

	for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
		pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
		if (!pmd)
			goto out_oom;
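		/* the low bit of the PGD entry is the present flag */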
		set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
	}

	if (!HAVE_SHARED_KERNEL_PMD) {
		unsigned long flags;

		for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
			pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
			if (!pmd)
				goto out_oom;
			set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
		}

		spin_lock_irqsave(&pgd_lock, flags);
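		/*
		 * Copy the kernel PMDs from the reference page tables and
		 * hand the new PMD pages to the hypervisor read-only
		 * (unless Xen supports writable page tables).
		 */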
		for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
			unsigned long v = (unsigned long)i << PGDIR_SHIFT;
			pgd_t *kpgd = pgd_offset_k(v);
			pud_t *kpud = pud_offset(kpgd, v);
			pmd_t *kpmd = pmd_offset(kpud, v);
			pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
			memcpy(pmd, kpmd, PAGE_SIZE);
			make_lowmem_page_readonly(
				pmd, XENFEAT_writable_page_tables);
		}
		pgd_list_add(pgd);
		spin_unlock_irqrestore(&pgd_lock, flags);
	}

	return pgd;

out_oom:
	for (i--; i >= 0; i--)
		kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1));
	kmem_cache_free(pgd_cache, pgd);
	return NULL;
}
Example #3
0
/* PAE pgd constructor */
static void pgd_ctor(void *pgd)
{
    /* PAE, kernel PMD may be shared */

    if (SHARED_KERNEL_PMD) {
        clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
                        swapper_pg_dir + USER_PTRS_PER_PGD,
                        KERNEL_PGD_PTRS);
    } else {
        unsigned long flags;

        memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
        spin_lock_irqsave(&pgd_lock, flags);
        pgd_list_add(pgd);
        spin_unlock_irqrestore(&pgd_lock, flags);
    }
}
Example #4
0
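/*
 * pgd constructor: clone the kernel part of swapper_pg_dir into the new
 * pgd; with !PAE the user entries are also cleared and the pgd is added
 * to pgd_list under pgd_lock.
 */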
void pgd_ctor(void *pgd)
{
    unsigned long flags;

    if (PTRS_PER_PMD == 1) {
        /* !PAE: clear the user entries before the pgd becomes visible */
        memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
        spin_lock_irqsave(&pgd_lock, flags);
    }

    memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD,
            swapper_pg_dir + USER_PTRS_PER_PGD,
            (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));

    if (PTRS_PER_PMD > 1)
        return;

    pgd_list_add(pgd);
    spin_unlock_irqrestore(&pgd_lock, flags);
}
Example #5
0
/* Non-PAE pgd constructor */
static void pgd_ctor(void *pgd)
{
    unsigned long flags;

    /* !PAE, no pagetable sharing */
    memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));

    spin_lock_irqsave(&pgd_lock, flags);

    /* must happen under lock */
    clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
                    swapper_pg_dir + USER_PTRS_PER_PGD,
                    KERNEL_PGD_PTRS);
    paravirt_alloc_pd_clone(__pa(pgd) >> PAGE_SHIFT,
                            __pa(swapper_pg_dir) >> PAGE_SHIFT,
                            USER_PTRS_PER_PGD,
                            KERNEL_PGD_PTRS);
    pgd_list_add(pgd);
    spin_unlock_irqrestore(&pgd_lock, flags);
}
Example #6
0
static void pgd_ctor(pgd_t *pgd)
{
	unsigned long flags;

	memset(pgd, 0, KERNEL_PGD_INDEX_START*sizeof(pgd_t));
	spin_lock_irqsave(&pgd_lock, flags);

#ifndef __tilegx__
	/*
	 * Check that the user interrupt vector has no L2.
	 * It never should for the swapper, and new page tables
	 * should always start with an empty user interrupt vector.
	 */
	BUG_ON(((u64 *)swapper_pg_dir)[pgd_index(MEM_USER_INTRPT)] != 0);
#endif

	clone_pgd_range(pgd + KERNEL_PGD_INDEX_START,
			swapper_pg_dir + KERNEL_PGD_INDEX_START,
			KERNEL_PGD_PTRS);

	pgd_list_add(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}
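/* Xen variant of the pgd constructor (old kmem_cache_t ctor signature) */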
void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
{
	unsigned long flags;

	if (PTRS_PER_PMD > 1) {
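		/*
		 * Unless the hypervisor supports PAE page directories above
		 * 4GB, exchange this pgd for machine memory that is
		 * contiguous and below 4GB.
		 */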
		if (!xen_feature(XENFEAT_pae_pgdir_above_4gb)) {
			int rc = xen_create_contiguous_region(
				(unsigned long)pgd, 0, 32);
			BUG_ON(rc);
		}
		if (HAVE_SHARED_KERNEL_PMD)
			clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
					swapper_pg_dir + USER_PTRS_PER_PGD,
					KERNEL_PGD_PTRS);
	} else {
		spin_lock_irqsave(&pgd_lock, flags);
		clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
				swapper_pg_dir + USER_PTRS_PER_PGD,
				KERNEL_PGD_PTRS);
		memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
		pgd_list_add(pgd);
		spin_unlock_irqrestore(&pgd_lock, flags);
	}
}
Example #8
0
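/*
 * pgd constructor with paravirt hooks: clone the kernel mappings from
 * swapper_pg_dir; with !PAE also zero the user entries and publish the
 * pgd on pgd_list under pgd_lock.
 */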
void pgd_ctor(void *pgd, struct kmem_cache *cache, unsigned long unused)
{
    unsigned long flags;

    if (PTRS_PER_PMD == 1) {
        memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
        spin_lock_irqsave(&pgd_lock, flags);
    }

    clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
                    swapper_pg_dir + USER_PTRS_PER_PGD,
                    KERNEL_PGD_PTRS);

    if (PTRS_PER_PMD > 1)
        return;

    /* must happen under lock */
    paravirt_alloc_pd_clone(__pa(pgd) >> PAGE_SHIFT,
                            __pa(swapper_pg_dir) >> PAGE_SHIFT,
                            USER_PTRS_PER_PGD, PTRS_PER_PGD - USER_PTRS_PER_PGD);

    pgd_list_add(pgd);
    spin_unlock_irqrestore(&pgd_lock, flags);
}