Example no. 1
void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
{
	struct pte_freelist_batch **batchp;

	batchp = &tlb->arch.batch;

	/* Fast path: with at most one user of the mm, free the table
	 * immediately instead of batching it. */
	if (atomic_read(&tlb->mm->mm_users) < 2) {
		pgtable_free(pgf);
		return;
	}

	/* Otherwise queue the table in the per-gather batch; the batch
	 * page is allocated lazily, and an allocation failure falls back
	 * to the synchronous pgtable_free_now() path. */
	if (*batchp == NULL) {
		*batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
		if (*batchp == NULL) {
			pgtable_free_now(pgf);
			return;
		}
		(*batchp)->index = 0;
	}
	(*batchp)->tables[(*batchp)->index++] = pgf;
	/* A full batch is handed to pte_free_submit() for deferred
	 * freeing; a new one will be allocated on the next call. */
	if ((*batchp)->index == PTE_FREELIST_SIZE) {
		pte_free_submit(*batchp);
		*batchp = NULL;
	}
}
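
Example no. 1 and the variants that follow all share the same batching scheme: entries are queued in a page-sized pte_freelist_batch until it fills up or the gather finishes. For reference, the structure they rely on looks roughly like the sketch below, modelled on the historical arch/powerpc page-table freeing code; the element type of tables[] differs between the versions shown (pgtable_free_t, struct page *, or a packed unsigned long), so treat the exact layout as an assumption rather than a definitive definition.

/* Assumed layout of the batch the examples queue into (sketch based on
 * the historical powerpc implementation; element type varies by version). */
struct pte_freelist_batch
{
	struct rcu_head	rcu;		/* used to defer the actual free */
	unsigned int	index;		/* number of entries queued so far */
	unsigned long	tables[0];	/* pgtable_free_t or struct page *
					 * in the older variants */
};

/* As many entries as fit in the remainder of the batch page. */
#define PTE_FREELIST_SIZE \
	((PAGE_SIZE - sizeof(struct pte_freelist_batch)) \
	  / sizeof(unsigned long))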
Example no. 2
void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
{
	/* This is safe as we are holding page_table_lock */
	cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id());
	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);

	if (atomic_read(&tlb->mm->mm_users) < 2 ||
	    cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask)) {
		pgtable_free(pgf);
		return;
	}

	if (*batchp == NULL) {
		*batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
		if (*batchp == NULL) {
			pgtable_free_now(pgf);
			return;
		}
		(*batchp)->index = 0;
	}
	(*batchp)->tables[(*batchp)->index++] = pgf;
	if ((*batchp)->index == PTE_FREELIST_SIZE) {
		pte_free_submit(*batchp);
		*batchp = NULL;
	}
}
Example no. 3
void pgtable_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
	cpumask_t local_cpumask = cpumask_of_cpu(tlb->cpu);
	struct pte_freelist_batch **batchp = &per_cpu(pte_freelist_cur, tlb->cpu);

	if (atomic_read(&tlb->mm->mm_users) < 2 ||
	    cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask)) {
		pte_free(pte);
		return;
	}

	if (*batchp == NULL) {
		*batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
		if (*batchp == NULL) {
			pgtable_free_now(pte);
			return;
		}
		(*batchp)->index = 0;
	}
	(*batchp)->tables[(*batchp)->index++] = pte;
	if ((*batchp)->index == PTE_FREELIST_SIZE) {
		pte_free_submit(*batchp);
		*batchp = NULL;
	}
}
Example no. 4
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, unsigned shift)
{
	/* This is safe since tlb_gather_mmu has disabled preemption */
	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
	unsigned long pgf;

	if (atomic_read(&tlb->mm->mm_users) < 2 ||
	    cpumask_equal(mm_cpumask(tlb->mm), cpumask_of(smp_processor_id()))){
		pgtable_free(table, shift);
		return;
	}

	if (*batchp == NULL) {
		*batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
		if (*batchp == NULL) {
			pgtable_free_now(table, shift);
			return;
		}
		(*batchp)->index = 0;
	}
	BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
	pgf = (unsigned long)table | shift;
	(*batchp)->tables[(*batchp)->index++] = pgf;
	if ((*batchp)->index == PTE_FREELIST_SIZE) {
		pte_free_submit(*batchp);
		*batchp = NULL;
	}
}
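
Example no. 4 differs from the earlier variants in that it packs the page-table index size (shift) into the low bits of the table pointer before queuing it, relying on the pointer's alignment so that the two can be separated again later. A sketch of the unpacking step on the consumer side is shown below; pgtable_free_entry() is a hypothetical helper name used here only for illustration (in the real code the unpacking happens inside the batch's free callback), and it assumes MAX_PGTABLE_INDEX_SIZE is a small mask that fits entirely in the alignment bits, as the example's BUG_ON() suggests.

/* Hypothetical helper showing how a packed entry from example no. 4 is
 * split back into a table pointer and index-size shift before freeing. */
static void pgtable_free_entry(unsigned long pgf)
{
	void *table = (void *)(pgf & ~(unsigned long)MAX_PGTABLE_INDEX_SIZE);
	unsigned shift = pgf & MAX_PGTABLE_INDEX_SIZE;

	pgtable_free(table, shift);
}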
Example no. 5
void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
{
	/* This is safe since tlb_gather_mmu has disabled preemption */
	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);

	if (atomic_read(&tlb->mm->mm_users) < 2 ||
	    cpumask_equal(mm_cpumask(tlb->mm), cpumask_of(smp_processor_id()))){
		pgtable_free(pgf);
		return;
	}

	if (*batchp == NULL) {
		*batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
		if (*batchp == NULL) {
			pgtable_free_now(pgf);
			return;
		}
		(*batchp)->index = 0;
	}
	(*batchp)->tables[(*batchp)->index++] = pgf;
	if ((*batchp)->index == PTE_FREELIST_SIZE) {
		pte_free_submit(*batchp);
		*batchp = NULL;
	}
}
Example no. 6
void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
{
	/*
	 * This is safe since tlb_gather_mmu has disabled preemption.
	 * tlb->cpu is set by tlb_gather_mmu as well.
	 */
	cpumask_t local_cpumask = cpumask_of_cpu(tlb->cpu);
	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);

	if (atomic_read(&tlb->mm->mm_users) < 2 ||
	    cpus_equal(tlb->mm->cpu_vm_mask, local_cpumask)) {
		pgtable_free(pgf);
		return;
	}

	if (*batchp == NULL) {
		*batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
		if (*batchp == NULL) {
			pgtable_free_now(pgf);
			return;
		}
		(*batchp)->index = 0;
	}
	(*batchp)->tables[(*batchp)->index++] = pgf;
	if ((*batchp)->index == PTE_FREELIST_SIZE) {
		pte_free_submit(*batchp);
		*batchp = NULL;
	}
}
Example no. 7
void pte_free_finish(void)
{
	/* This is safe as we are holding page_table_lock */
	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);

	if (*batchp == NULL)
		return;
	pte_free_submit(*batchp);
	*batchp = NULL;
}
Example no. 8
void pte_free_finish(void)
{
	/* This is safe since tlb_gather_mmu has disabled preemption */
	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);

	if (*batchp == NULL)
		return;
	pte_free_submit(*batchp);
	*batchp = NULL;
}
Example no. 9
void pte_free_finish(struct mmu_gather *tlb)
{
	struct pte_freelist_batch **batchp;

	batchp = &tlb->arch.batch;

	if (*batchp) {
		pte_free_submit(*batchp);
		*batchp = NULL;
	}
}
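
In every variant, both pgtable_free_tlb() and pte_free_finish() hand the accumulated batch to pte_free_submit(), which defers the actual freeing until no other CPU can still be traversing the queued tables. In the historical powerpc code this deferral is done with RCU; the sketch below shows the rough shape of that path (the exact signatures of pgtable_free() and the callback body vary across the versions above, so treat this as an illustration rather than the definitive implementation).

/* Sketch of the deferred-free path, modelled on the historical powerpc
 * code: the batch is handed to RCU and released in the callback once a
 * grace period guarantees no CPU is still using the queued tables. */
static void pte_free_rcu_callback(struct rcu_head *head)
{
	struct pte_freelist_batch *batch =
		container_of(head, struct pte_freelist_batch, rcu);
	unsigned int i;

	for (i = 0; i < batch->index; i++)
		pgtable_free(batch->tables[i]);	/* argument form varies by version */

	free_page((unsigned long)batch);
}

static void pte_free_submit(struct pte_freelist_batch *batch)
{
	call_rcu(&batch->rcu, pte_free_rcu_callback);
}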