Example 1
pgd_t *get_pgd_slow(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;

	new_pgd = alloc_pgd_table(GFP_KERNEL);
	if (!new_pgd)
		goto no_pgd;

	/*
	 * This lock is here just to satisfy pmd_alloc and pte_lock
	 */
	spin_lock(&mm->page_table_lock);

	/*
	 * On ARM, the first page must always be allocated since it contains
	 * the machine vectors.
	 */
	new_pmd = pmd_alloc(mm, new_pgd, 0);
	if (!new_pmd)
		goto no_pmd;

	new_pte = pte_alloc(mm, new_pmd, 0);
	if (!new_pte)
		goto no_pte;

	init_pgd = pgd_offset_k(0);
	init_pmd = pmd_offset(init_pgd, 0);
	init_pte = pte_offset(init_pmd, 0);

	set_pte(new_pte, *init_pte);

	/*
	 * Most of the page table entries are zeroed
	 * when the table is created.
	 */
	memcpy(new_pgd + USER_PTRS_PER_PGD, init_pgd + USER_PTRS_PER_PGD,
		(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));

	spin_unlock(&mm->page_table_lock);

	/* update MEMC tables */
	cpu_memc_update_all(new_pgd);
	return new_pgd;

no_pte:
	spin_unlock(&mm->page_table_lock);
	pmd_free(new_pmd);
	free_pgd_slow(new_pgd);
	return NULL;

no_pmd:
	spin_unlock(&mm->page_table_lock);
	free_pgd_slow(new_pgd);
	return NULL;

no_pgd:
	return NULL;
}
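Note: in the 2.4-era ports that used this function, get_pgd_slow() was the fallback behind a quicklist fast path. A hypothetical, simplified sketch of the usual wrapper follows; it is not the exact code of any particular architecture:

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	/* try the recycled-page cache first */
	pgd_t *pgd = get_pgd_fast();

	if (!pgd)
		pgd = get_pgd_slow(mm);	/* fall back to a real allocation */
	return pgd;
}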
Example 2
pgd_t *get_pgd_slow(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;

	new_pgd = alloc_pgd_table();
	if (!new_pgd)
		goto no_pgd;

	/*
	 * On ARM, the first page must always be allocated since it contains
	 * the machine vectors.
	 */
	new_pmd = pmd_alloc(mm, new_pgd, 0);
	if (!new_pmd)
		goto no_pmd;

	new_pte = pte_alloc_map(mm, new_pmd, 0);
	if (!new_pte)
		goto no_pte;

	init_pgd = pgd_offset(&init_mm, 0);
	init_pmd = pmd_offset(init_pgd, 0);
	init_pte = pte_offset(init_pmd, 0);

	set_pte(new_pte, *init_pte);
	pte_unmap(new_pte);

	/*
	 * The page table entries are zeroed when the table is
	 * created (see the cache_ctor functions below). Now we
	 * need to plonk the kernel (vmalloc) area at the end of
	 * the address space. We copy this from the init thread,
	 * just like the init_pte we copied above...
	 */
	memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
		(PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));

	/* update MEMC tables */
	cpu_memc_update_all(new_pgd);
	return new_pgd;

no_pte:
	pmd_free(new_pmd);
no_pmd:
	free_pgd_slow(new_pgd);
no_pgd:
	return NULL;
}
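Note: the cache_ctor functions mentioned in the comment above are not reproduced here. As a rough illustration only, a slab constructor for this scheme could look like the sketch below; the name, the 2.6-era kmem_cache_t constructor signature, and the use of FIRST_KERNEL_PGD_NR are assumptions, not the kernel's actual code.

static void pgd_cache_ctor(void *ptr, kmem_cache_t *cache, unsigned long flags)
{
	pgd_t *pgd = ptr;

	/* hypothetical: zero the user entries once at construction
	 * time, so get_pgd_slow() only copies the kernel entries */
	memset(pgd, 0, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));
}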
Example 3
int do_check_pgt_cache(int low, int high)
{
	int freed = 0;

	if (pgtable_cache_size > high) {
		do {
			if (pgd_quicklist) {
				free_pgd_slow(get_pgd_fast());
				freed++;
			}
			if (pmd_quicklist) {
				free_pmd_slow(get_pmd_fast());
				freed++;
			}
			if (pte_quicklist) {
				free_pte_slow(get_pte_fast());
				freed++;
			}
		} while (pgtable_cache_size > low);
	}
	}
	return freed;
}
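Note: the quicklists drained here are free lists threaded through the cached pages themselves, with pgtable_cache_size counting the cached entries. The helpers typically looked like the following sketch, modelled on the classic 2.4 pattern rather than on any one port:

static inline pgd_t *get_pgd_fast(void)
{
	unsigned long *ret;

	if ((ret = pgd_quicklist) != NULL) {
		pgd_quicklist = (unsigned long *)*ret;	/* unlink the head page */
		ret[0] = 0;				/* clear the link word */
		pgtable_cache_size--;
	}
	return (pgd_t *)ret;
}

static inline void free_pgd_fast(pgd_t *pgd)
{
	*(unsigned long *)pgd = (unsigned long)pgd_quicklist;	/* link in at head */
	pgd_quicklist = (unsigned long *)pgd;
	pgtable_cache_size++;
}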
Example 4
int do_check_pgt_cache(int low, int high)
{
	int freed = 0;
	if (pgtable_cache_size > high) {
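		/*
		 * Drain down to the low-water mark rather than just
		 * below the high one, so a burst of allocations does
		 * not make every call trim the cache (hysteresis).
		 */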
		do {
			if (pgd_quicklist) {
				free_pgd_slow(get_pgd_fast());
				freed++;
			}
			if (pte_quicklist) {
				pte_free_slow(pte_alloc_one_fast(NULL, 0));
				freed++;
			}
		} while (pgtable_cache_size > low);
	}
	return freed;
}
Example 5
pgd_t *get_pgd_slow(void)
{
	pgd_t *pgd = (pgd_t *)alloc_pgd_table(GFP_KERNEL);
	pmd_t *new_pmd;

	if (pgd) {
		pgd_t *init = pgd_offset(&init_mm, 0);
		
		memzero(pgd, USER_PTRS_PER_PGD * sizeof(pgd_t));
		memcpy(pgd + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
			(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));

		/*
		 * On ARM, the first page must always be allocated
		 */
		if (!pmd_alloc(pgd, 0))
			goto nomem;
		else {
			pmd_t *old_pmd = pmd_offset(init, 0);
			new_pmd = pmd_offset(pgd, 0);

			if (!pte_alloc(new_pmd, 0))
				goto nomem_pmd;
			else {
				pte_t *new_pte = pte_offset(new_pmd, 0);
				pte_t *old_pte = pte_offset(old_pmd, 0);

				set_pte(new_pte, *old_pte);
			}
		}
		/* update MEMC tables */
		cpu_memc_update_all(pgd);
	}
	return pgd;

nomem_pmd:
	pmd_free(new_pmd);
nomem:
	free_pgd_slow(pgd);
	return NULL;
}
Example 6
void check_pgt_cache(void)
{
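	/* the quicklists are accessed without locks: keep this task
	 * on its CPU while we trim them */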
	preempt_disable();
	if (pgtable_cache_size > PGT_CACHE_HIGH) {
		do {
#ifdef CONFIG_SMP
			if (pgd_quicklist)
				free_pgd_slow(get_pgd_fast());
#endif
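			/*
			 * The two pte quicklists hold pages of opposite
			 * cache color; the address passed to
			 * pte_alloc_one_fast() only selects the color.
			 */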
			if (pte_quicklist[0])
				free_pte_slow(pte_alloc_one_fast(NULL, 0));
			if (pte_quicklist[1])
				free_pte_slow(pte_alloc_one_fast(NULL, 1 << (PAGE_SHIFT + 10)));
		} while (pgtable_cache_size > PGT_CACHE_LOW);
	}
#ifndef CONFIG_SMP
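	/*
	 * On UP, each cached page is carved into two pgds, and
	 * page->lru.prev tracks which halves are free: a value of
	 * 3 means both halves are free, so the whole page can go
	 * back to the page allocator.
	 */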
	if (pgd_cache_size > PGT_CACHE_HIGH / 4) {
		struct page *page, *page2;
		for (page2 = NULL, page = (struct page *)pgd_quicklist; page;) {
			if ((unsigned long)page->lru.prev == 3) {
				if (page2)
					page2->lru.next = page->lru.next;
				else
					pgd_quicklist = (void *) page->lru.next;
				pgd_cache_size -= 2;
				__free_page(page);
				if (page2)
					page = (struct page *)page2->lru.next;
				else
					page = (struct page *)pgd_quicklist;
				if (pgd_cache_size <= PGT_CACHE_LOW / 4)
					break;
				continue;
			}
			page2 = page;
			page = (struct page *)page->lru.next;
		}
	}
#endif
	preempt_enable();
}