void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
{
	struct pte_freelist_batch **batchp;

	batchp = &tlb->arch.batch;

	/* A single-user mm has no other threads that could be walking
	 * these tables concurrently, so free the page table directly. */
	if (atomic_read(&tlb->mm->mm_users) < 2) {
		pgtable_free(pgf);
		return;
	}

	/* Otherwise batch it for an RCU-deferred free; fall back to a
	 * synchronous free if no batch page can be allocated. */
	if (*batchp == NULL) {
		*batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
		if (*batchp == NULL) {
			pgtable_free_now(pgf);
			return;
		}
		(*batchp)->index = 0;
	}
	(*batchp)->tables[(*batchp)->index++] = pgf;
	if ((*batchp)->index == PTE_FREELIST_SIZE) {
		pte_free_submit(*batchp);
		*batchp = NULL;
	}
}
void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
{
	/* This is safe as we are holding page_table_lock */
	cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id());
	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);

	if (atomic_read(&tlb->mm->mm_users) < 2 ||
	    cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask)) {
		pgtable_free(pgf);
		return;
	}

	if (*batchp == NULL) {
		*batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
		if (*batchp == NULL) {
			pgtable_free_now(pgf);
			return;
		}
		(*batchp)->index = 0;
	}
	(*batchp)->tables[(*batchp)->index++] = pgf;
	if ((*batchp)->index == PTE_FREELIST_SIZE) {
		pte_free_submit(*batchp);
		*batchp = NULL;
	}
}
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, unsigned shift)
{
	/* This is safe since tlb_gather_mmu has disabled preemption */
	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
	unsigned long pgf;

	if (atomic_read(&tlb->mm->mm_users) < 2 ||
	    cpumask_equal(mm_cpumask(tlb->mm), cpumask_of(smp_processor_id()))) {
		pgtable_free(table, shift);
		return;
	}

	if (*batchp == NULL) {
		*batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
		if (*batchp == NULL) {
			pgtable_free_now(table, shift);
			return;
		}
		(*batchp)->index = 0;
	}
	BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
	pgf = (unsigned long)table | shift;
	(*batchp)->tables[(*batchp)->index++] = pgf;
	if ((*batchp)->index == PTE_FREELIST_SIZE) {
		pte_free_submit(*batchp);
		*batchp = NULL;
	}
}
void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
{
	/* This is safe since tlb_gather_mmu has disabled preemption */
	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);

	if (atomic_read(&tlb->mm->mm_users) < 2 ||
	    cpumask_equal(mm_cpumask(tlb->mm), cpumask_of(smp_processor_id()))) {
		pgtable_free(pgf);
		return;
	}

	if (*batchp == NULL) {
		*batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
		if (*batchp == NULL) {
			pgtable_free_now(pgf);
			return;
		}
		(*batchp)->index = 0;
	}
	(*batchp)->tables[(*batchp)->index++] = pgf;
	if ((*batchp)->index == PTE_FREELIST_SIZE) {
		pte_free_submit(*batchp);
		*batchp = NULL;
	}
}
void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
{
	/*
	 * This is safe since tlb_gather_mmu has disabled preemption.
	 * tlb->cpu is set by tlb_gather_mmu as well.
	 */
	cpumask_t local_cpumask = cpumask_of_cpu(tlb->cpu);
	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);

	if (atomic_read(&tlb->mm->mm_users) < 2 ||
	    cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask)) {
		pgtable_free(pgf);
		return;
	}

	if (*batchp == NULL) {
		*batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
		if (*batchp == NULL) {
			pgtable_free_now(pgf);
			return;
		}
		(*batchp)->index = 0;
	}
	(*batchp)->tables[(*batchp)->index++] = pgf;
	if ((*batchp)->index == PTE_FREELIST_SIZE) {
		pte_free_submit(*batchp);
		*batchp = NULL;
	}
}
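Every pgtable_free_tlb() variant above fills the same one-page batch and hands it to RCU once it is full. For reference, a minimal sketch of that structure and of the PTE_FREELIST_SIZE constant, assuming the layout used by the arch/powerpc pgtable code (in the older variants the tables[] elements are pgtable_free_t rather than packed unsigned longs):

struct pte_freelist_batch {
	struct rcu_head	rcu;		/* queued via call_rcu() when the batch is full */
	unsigned int	index;		/* number of tables[] slots in use */
	unsigned long	tables[0];	/* page tables waiting out a grace period */
};

/* One page holds the header plus as many table entries as fit after it. */
#define PTE_FREELIST_SIZE \
	((PAGE_SIZE - sizeof(struct pte_freelist_batch)) / sizeof(unsigned long))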
/* This is only called when we are critically out of memory
 * (and fail to get a page in pte_free_tlb).
 */
static void pgtable_free_now(pgtable_free_t pgf)
{
	pte_freelist_forced_free++;

	smp_call_function(pte_free_smp_sync, NULL, 0, 1);

	pgtable_free(pgf);
}
/* This is only called when we are critically out of memory
 * (and fail to get a page in pte_free_tlb).
 */
static void pgtable_free_now(void *table, unsigned shift)
{
	pte_freelist_forced_free++;

	smp_call_function(pte_free_smp_sync, NULL, 1);

	pgtable_free(table, shift);
}
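The two pgtable_free_now() versions differ only in the smp_call_function() call, whose nonatomic argument was later dropped from that API. The handler they pass is, in the kernel tree, essentially an empty function; a sketch under that assumption is below. Delivering the IPI is itself the synchronization: every other CPU must take the interrupt, and so must have left any in-progress lockless page-table walk, before pgtable_free() runs.

static void pte_free_smp_sync(void *arg)
{
	/* Simply deliver the interrupt: taking it forces the CPU out of
	 * whatever page-table walk it may have been performing. */
}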
static void pte_free_rcu_callback(struct rcu_head *head)
{
	struct pte_freelist_batch *batch =
		container_of(head, struct pte_freelist_batch, rcu);
	unsigned int i;

	/* Runs after a grace period: no CPU can still be referencing
	 * these page tables, so they can finally be freed. */
	for (i = 0; i < batch->index; i++)
		pgtable_free(batch->tables[i]);

	free_page((unsigned long)batch);
}
static void pte_free_rcu_callback(struct rcu_head *head)
{
	struct pte_freelist_batch *batch =
		container_of(head, struct pte_freelist_batch, rcu);
	unsigned int i;

	for (i = 0; i < batch->index; i++) {
		/* Each entry packs the table pointer and its index size:
		 * the shift lives in the low bits covered by
		 * MAX_PGTABLE_INDEX_SIZE. */
		void *table = (void *)(batch->tables[i] & ~MAX_PGTABLE_INDEX_SIZE);
		unsigned shift = batch->tables[i] & MAX_PGTABLE_INDEX_SIZE;

		pgtable_free(table, shift);
	}

	free_page((unsigned long)batch);
}
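Both callback versions are queued the same way: when a batch fills up, pgtable_free_tlb() calls pte_free_submit(), which hands the batch's rcu_head to call_rcu() so the callback above only runs after a grace period. A minimal sketch of that submission path, assuming the helpers named above (older kernels also initialize the rcu_head explicitly before queueing it):

static void pte_free_submit(struct pte_freelist_batch *batch)
{
	/* The batch page itself is freed by pte_free_rcu_callback(). */
	call_rcu(&batch->rcu, pte_free_rcu_callback);
}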