void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte) { ktime_t local_tstart, local_tend; s64 local_act_time; pgtable_page_dtor(pte); paravirt_release_pte(page_to_pfn(pte)); //tlb_remove_page(tlb, pte); //added by zhang //free_pte_page((unsigned long)__va(PFN_PHYS(page_to_pfn(pte)))); //free_pte_page((unsigned long)page_address(pte)); if(cache_on) { if(timing_on) { local_tstart=ktime_get(); //free_pte_page(tlb, pte); free_pte_page(pte); local_tend=ktime_get(); local_act_time=ktime_to_ns(ktime_sub(local_tend, local_tstart)); spin_lock(&pte_free_cnt_lock); pte_free_waste += local_act_time; pte_free_cnt++; spin_unlock(&pte_free_cnt_lock); } else //free_pte_page(tlb,pte); free_pte_page(pte); } else { if(timing_on) { local_tstart=ktime_get(); tlb_remove_page(tlb, pte); local_tend=ktime_get(); local_act_time=ktime_to_ns(ktime_sub(local_tend, local_tstart)); spin_lock(&pte_free_cnt_lock); pte_free_waste += local_act_time; pte_free_cnt++; spin_unlock(&pte_free_cnt_lock); } else tlb_remove_page(tlb, pte); } }
/*
 * Free a pmd page at TLB-gather time.
 *
 * Runs the paravirt release hook, then frees the pmd either through the
 * page-table cache (free_pmd_page, when cache_on) or through the normal
 * mmu_gather path (tlb_remove_page).  When timing_on is set, the time
 * spent in the free call is accumulated into the global pmd_free_waste /
 * pmd_free_cnt counters under pmd_free_cnt_lock.
 *
 * NOTE(review): the original duplicated the timing boilerplate in all
 * four cache_on/timing_on branches; this version times a single
 * dispatch point instead.  The calls made under each condition are
 * unchanged.
 */
void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
	ktime_t local_tstart, local_tend;
	s64 local_act_time;

	paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);

	if (!timing_on) {
		/* Fast path: no instrumentation requested. */
		if (cache_on)
			free_pmd_page((unsigned long)pmd);
		else
			tlb_remove_page(tlb, virt_to_page(pmd));
		return;
	}

	local_tstart = ktime_get();
	if (cache_on)
		free_pmd_page((unsigned long)pmd);
	else
		tlb_remove_page(tlb, virt_to_page(pmd));
	local_tend = ktime_get();
	local_act_time = ktime_to_ns(ktime_sub(local_tend, local_tstart));

	/* Accumulate the measured cost into the global counters. */
	spin_lock(&pmd_free_cnt_lock);
	pmd_free_waste += local_act_time;
	pmd_free_cnt++;
	spin_unlock(&pmd_free_cnt_lock);
}
/*
 * Release a pmd page into the mmu_gather: run the paravirt hook and the
 * pgtable destructor, then queue the backing page for freeing.
 */
void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
	struct page *pmd_page = virt_to_page(pmd);

	paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);

	/*
	 * NOTE! For PAE, any changes to the top page-directory-pointer-table
	 * entries need a full cr3 reload to flush.
	 */
#ifdef CONFIG_X86_PAE
	tlb->need_flush_all = 1;
#endif

	pgtable_pmd_page_dtor(pmd_page);
	tlb_remove_page(tlb, pmd_page);
}
/*
 * Tear down the pte entries covering [address, address + size) within a
 * single pmd, queuing present pages into the mmu_gather for freeing and
 * dropping swap references for non-present entries.
 *
 * Returns the number of freed entries that referenced valid,
 * non-reserved pages (used by callers for RSS accounting).
 */
static inline int zap_pte_range(mmu_gather_t *tlb, pmd_t * pmd, unsigned long address, unsigned long size)
{
	unsigned long offset;
	pte_t * ptep;
	int freed = 0;

	/* Nothing mapped at this pmd: nothing to zap. */
	if (pmd_none(*pmd))
		return 0;
	/* Corrupt pmd: report it, clear it, and bail out. */
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		return 0;
	}
	ptep = pte_offset(pmd, address);
	/* Clamp the range so we never walk past the end of this pmd. */
	offset = address & ~PMD_MASK;
	if (offset + size > PMD_SIZE)
		size = PMD_SIZE - offset;
	size &= PAGE_MASK;
	for (offset=0; offset < size; ptep++, offset += PAGE_SIZE) {
		pte_t pte = *ptep;
		if (pte_none(pte))
			continue;
		if (pte_present(pte)) {
			struct page *page = pte_page(pte);
			/* Only real, non-reserved pages count toward RSS. */
			if (VALID_PAGE(page) && !PageReserved(page))
				freed ++;
			/* This will eventually call __free_pte on the pte. */
			tlb_remove_page(tlb, ptep, address + offset);
		} else {
			/* Swap entry: drop the swap/cache reference and clear it. */
			free_swap_and_cache(pte_to_swp_entry(pte));
			pte_clear(ptep);
		}
	}
	return freed;
}
/*
 * Release a pte page into the mmu_gather: run the pgtable destructor and
 * the paravirt release hook, then queue the page for freeing.
 */
void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
	unsigned long pfn = page_to_pfn(pte);

	pgtable_page_dtor(pte);
	paravirt_release_pte(pfn);
	tlb_remove_page(tlb, pte);
}