Example #1
void check_pgt_cache(void)
{
	preempt_disable();
	/*
	 * Shrink the per-cpu page-table quicklists once they grow past the
	 * high-water mark, draining one entry of each kind per pass until
	 * the total falls back to the low-water mark.
	 */
	if (pgtable_cache_size > PGT_CACHE_HIGH) {
		do {
#ifdef CONFIG_SMP
			/* Pop one cached pgd and really free it. */
			if (pgd_quicklist)
				free_pgd_slow(get_pgd_fast());
#endif
			/* Drain one page from each of the two pte quicklists. */
			if (pte_quicklist[0])
				free_pte_slow(pte_alloc_one_fast(NULL, 0));
			if (pte_quicklist[1])
				free_pte_slow(pte_alloc_one_fast(NULL, 1 << (PAGE_SHIFT + 10)));
		} while (pgtable_cache_size > PGT_CACHE_LOW);
	}
#ifndef CONFIG_SMP
	/*
	 * On UP, pgds are handed out two to a page and pgd_cache_size
	 * counts those halves; page->lru.prev holds a bitmask of which
	 * halves are free.  Only a page with both halves free (mask == 3)
	 * can be unlinked from the quicklist and returned to the allocator.
	 */
	if (pgd_cache_size > PGT_CACHE_HIGH / 4) {
		struct page *page, *page2;

		for (page2 = NULL, page = (struct page *)pgd_quicklist; page;) {
			if ((unsigned long)page->lru.prev == 3) {
				/* Unlink the fully free page and release it. */
				if (page2)
					page2->lru.next = page->lru.next;
				else
					pgd_quicklist = (void *) page->lru.next;
				pgd_cache_size -= 2;
				__free_page(page);
				if (page2)
					page = (struct page *)page2->lru.next;
				else
					page = (struct page *)pgd_quicklist;
				if (pgd_cache_size <= PGT_CACHE_LOW / 4)
					break;
				continue;
			}
			page2 = page;
			page = (struct page *)page->lru.next;
		}
	}
#endif
	preempt_enable();
}
Example #2
int do_check_pgt_cache(int low, int high)
{
	int freed = 0;

	/*
	 * If the page-table quicklists have grown past 'high', free one
	 * cached pgd, pmd and pte per pass until the total drops back to
	 * 'low', and report how many entries were released.
	 */
	if (pgtable_cache_size > high) {
		do {
			if (pgd_quicklist)
				free_pgd_slow(get_pgd_fast()), freed++;
			if (pmd_quicklist)
				free_pmd_slow(get_pmd_fast()), freed++;
			if (pte_quicklist)
				free_pte_slow(get_pte_fast()), freed++;
		} while (pgtable_cache_size > low);
	}
	return freed;
}
Example #3
pte_t *get_pte_slow(pmd_t *pmd, unsigned long offset)
{
	pte_t *pte;

	/*
	 * Allocate a fresh pte table up front; it may have to be thrown
	 * away if the pmd turns out to be populated already.
	 */
	pte = (pte_t *)alloc_pte_table(PTRS_PER_PTE * sizeof(pte_t), GFP_KERNEL);
	if (pmd_none(*pmd)) {
		if (pte) {
			/* Zero the new table and install it in the pmd. */
			memzero(pte, PTRS_PER_PTE * sizeof(pte_t));
			set_pmd(pmd, mk_user_pmd(pte));
			return pte + offset;
		}
		/* Out of memory: point the pmd at the shared bad pte table. */
		set_pmd(pmd, mk_user_pmd(get_bad_pte_table()));
		return NULL;
	}
	/*
	 * The pmd is already populated: drop our spare table and return an
	 * entry from the existing one, after sanity-checking the pmd.
	 */
	free_pte_slow(pte);
	if (pmd_bad(*pmd)) {
		__handle_bad_pmd(pmd);
		return NULL;
	}
	return (pte_t *) pmd_page(*pmd) + offset;
}
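
The examples above all follow the same pattern: a "quicklist" of recycled page-table pages tracked by a counter, with a shrink routine that drains the list from a high-water mark down to a low-water mark. The sketch below is a minimal userspace analogue of that pattern, not kernel code; every name in it (quicklist, cache_size, get_fast, free_fast, free_slow, check_cache) is invented for illustration, and the 25/50 water marks are arbitrary.

#include <stdio.h>
#include <stdlib.h>

#define BLOCK_SIZE	4096	/* stand-in for a page-table page */

struct block {
	struct block *next;	/* link while the block sits on the quicklist */
};

static struct block *quicklist;	/* LIFO list of recycled blocks */
static int cache_size;		/* number of blocks currently cached */

/* Fast allocation: reuse a cached block if possible, else really allocate. */
static void *get_fast(void)
{
	struct block *b = quicklist;

	if (b) {
		quicklist = b->next;
		cache_size--;
		return b;
	}
	return malloc(BLOCK_SIZE);
}

/* Fast free: push the block back onto the quicklist instead of freeing it. */
static void free_fast(void *p)
{
	struct block *b = p;

	b->next = quicklist;
	quicklist = b;
	cache_size++;
}

/* Slow free: actually return the memory to the system. */
static void free_slow(void *p)
{
	free(p);
}

/*
 * Analogue of do_check_pgt_cache(): once the cache grows past 'high',
 * pop entries and really free them until it falls back to 'low'.
 */
static int check_cache(int low, int high)
{
	int freed = 0;

	if (cache_size > high) {
		do {
			if (quicklist)
				free_slow(get_fast()), freed++;
		} while (cache_size > low);
	}
	return freed;
}

int main(void)
{
	void *blocks[100];
	int i, cached, drained;

	for (i = 0; i < 100; i++)
		blocks[i] = get_fast();
	for (i = 0; i < 100; i++)
		free_fast(blocks[i]);

	cached = cache_size;
	drained = check_cache(25, 50);
	printf("cached %d blocks, drained %d, %d left cached\n",
	       cached, drained, cache_size);
	return 0;
}

Keeping a small LIFO cache in front of the real allocator makes the common allocate/free path a couple of pointer operations, while the high/low water marks bound how much memory the cache is allowed to pin between shrink passes.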