void pte_free(struct mm_struct *mm, pgtable_t ptepage)
{
#ifdef CONFIG_SMP
	hash_page_sync();
#endif
	pgtable_page_dtor(ptepage);
	__free_page(ptepage);
}
Example #2
File: pgtable_64.c  Project: Endika/linux
void pte_fragment_free(unsigned long *table, int kernel)
{
	struct page *page = virt_to_page(table);
	if (put_page_testzero(page)) {
		if (!kernel)
			pgtable_page_dtor(page);
		free_hot_cold_page(page, 0);
	}
}
Example #3
void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
	ktime_t local_tstart, local_tend;
	s64 local_act_time;

	pgtable_page_dtor(pte);
	paravirt_release_pte(page_to_pfn(pte));

	/*
	 * Added by zhang: when the PTE page cache is enabled, release the
	 * page through free_pte_page() instead of the usual
	 * tlb_remove_page(); optionally time either path and account the
	 * cost under pte_free_cnt_lock.
	 */
	if (cache_on) {
		if (timing_on) {
			local_tstart = ktime_get();
			free_pte_page(pte);
			local_tend = ktime_get();
			local_act_time = ktime_to_ns(ktime_sub(local_tend, local_tstart));

			spin_lock(&pte_free_cnt_lock);
			pte_free_waste += local_act_time;
			pte_free_cnt++;
			spin_unlock(&pte_free_cnt_lock);
		} else {
			free_pte_page(pte);
		}
	} else {
		if (timing_on) {
			local_tstart = ktime_get();
			tlb_remove_page(tlb, pte);
			local_tend = ktime_get();
			local_act_time = ktime_to_ns(ktime_sub(local_tend, local_tstart));

			spin_lock(&pte_free_cnt_lock);
			pte_free_waste += local_act_time;
			pte_free_cnt++;
			spin_unlock(&pte_free_cnt_lock);
		} else {
			tlb_remove_page(tlb, pte);
		}
	}
}
Example #4
static void pte_frag_destroy(void *pte_frag)
{
	int count;
	struct page *page;

	page = virt_to_page(pte_frag);
	/* drop all the pending references */
	count = ((unsigned long)pte_frag & ~PAGE_MASK) >> PTE_FRAG_SIZE_SHIFT;
	/* We allow PTE_FRAG_NR fragments from a PTE page */
	if (atomic_sub_and_test(PTE_FRAG_NR - count, &page->pt_frag_refcount)) {
		pgtable_page_dtor(page);
		__free_page(page);
	}
}
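In this destructor (and in the very similar Example #5 below), the low bits of the fragment pointer encode the index of the next unused fragment, i.e. how many fragments of the page have already been handed out; the remaining PTE_FRAG_NR - count references, held on behalf of the fragments that were never handed out, are then dropped in one step, and the page is freed once the refcount hits zero. A minimal user-space sketch of that arithmetic, using purely illustrative values (a 64 KiB page split into sixteen 4 KiB fragments; the real PAGE_MASK, PTE_FRAG_SIZE_SHIFT and PTE_FRAG_NR are configuration dependent):

#include <stdio.h>

/* Illustrative values only; the kernel derives these from the
 * architecture configuration. */
#define PAGE_SHIFT		16			/* 64 KiB page (assumption) */
#define PAGE_SIZE		(1UL << PAGE_SHIFT)
#define PAGE_MASK		(~(PAGE_SIZE - 1))
#define PTE_FRAG_SIZE_SHIFT	12			/* 4 KiB fragment (assumption) */
#define PTE_FRAG_NR		(PAGE_SIZE >> PTE_FRAG_SIZE_SHIFT)

int main(void)
{
	/* Pretend the per-mm pte_frag pointer refers to the 6th fragment of
	 * a page that starts at 0x10000, i.e. fragments 0..4 are in use. */
	unsigned long pte_frag = 0x10000UL + 5 * (1UL << PTE_FRAG_SIZE_SHIFT);

	/* Same expression as in pte_frag_destroy()/destroy_pagetable_page(). */
	unsigned long count = (pte_frag & ~PAGE_MASK) >> PTE_FRAG_SIZE_SHIFT;

	printf("fragments already handed out: %lu\n", count);
	printf("references left to drop:      %lu\n", PTE_FRAG_NR - count);
	return 0;
}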
Example #5
static void destroy_pagetable_page(struct mm_struct *mm)
{
	int count;
	void *pte_frag;
	struct page *page;

	pte_frag = mm->context.pte_frag;
	if (!pte_frag)
		return;

	page = virt_to_page(pte_frag);
	/* drop all the pending references */
	count = ((unsigned long)pte_frag & ~PAGE_MASK) >> PTE_FRAG_SIZE_SHIFT;
	/* We allow PTE_FRAG_NR fragments from a PTE page */
	if (page_ref_sub_and_test(page, PTE_FRAG_NR - count)) {
		pgtable_page_dtor(page);
		free_unref_page(page);
	}
}
Example #6
void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
		    unsigned long address)
{
	int i;

	pgtable_page_dtor(pte);
	tlb->need_flush = 1;
	if (tlb_fast_mode(tlb)) {
		struct page *pte_pages[L2_USER_PGTABLE_PAGES];
		for (i = 0; i < L2_USER_PGTABLE_PAGES; ++i)
			pte_pages[i] = pte + i;
		free_pages_and_swap_cache(pte_pages, L2_USER_PGTABLE_PAGES);
		return;
	}
	for (i = 0; i < L2_USER_PGTABLE_PAGES; ++i) {
		tlb->pages[tlb->nr++] = pte + i;
		if (tlb->nr >= FREE_PTE_NR)
			tlb_flush_mmu(tlb, 0, 0);
	}
}
Example #7
void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
	pgtable_page_dtor(pte);
	paravirt_release_pte(page_to_pfn(pte));
	tlb_remove_page(tlb, pte);
}
Example #8
/*
 * Free page immediately (used in __pte_alloc if we raced with another
 * process).  We have to correct whatever pte_alloc_one() did before
 * returning the pages to the allocator.
 */
void pte_free(struct mm_struct *mm, struct page *p)
{
	pgtable_page_dtor(p);
	__free_pages(p, L2_USER_PGTABLE_ORDER);
}