Example #1

int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	int index;
	int err;

again:
	if (!idr_pre_get(&mmu_context_idr, GFP_KERNEL))
		return -ENOMEM;

	spin_lock(&mmu_context_lock);
	err = idr_get_new_above(&mmu_context_idr, NULL, 1, &index);
	spin_unlock(&mmu_context_lock);

	if (err == -EAGAIN)
		goto again;
	else if (err)
		return err;

	if (index > MAX_CONTEXT) {
		spin_lock(&mmu_context_lock);
		idr_remove(&mmu_context_idr, index);
		spin_unlock(&mmu_context_lock);
		return -ENOMEM;
	}

	/* The old code would re-promote on fork; we don't do that
	 * when using slices, as it could cause problems promoting slices
	 * that have been forced down to 4K.
	 */
	if (slice_mm_new_context(mm))
		slice_set_user_psize(mm, mmu_virtual_psize);
	mm->context.id = index;

	return 0;
}
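
The allocation above is one half of a pair: every context ID handed out by the IDR must be returned when the address space dies. A minimal sketch of the matching teardown, assuming the same lock and IDR guard the ID space, and reusing the MMU_NO_CONTEXT sentinel that the later examples on this page use:

void destroy_context(struct mm_struct *mm)
{
	/* Return the ID to the allocator under the same lock that
	 * serializes idr_get_new_above() above. */
	spin_lock(&mmu_context_lock);
	idr_remove(&mmu_context_idr, mm->context.id);
	spin_unlock(&mmu_context_lock);

	/* Mark the mm as owning no hardware context. */
	mm->context.id = MMU_NO_CONTEXT;
}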
Example #3

int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	int index;

	index = __init_new_context();
	if (index < 0)
		return index;

	/* The old code would re-promote on fork; we don't do that
	 * when using slices, as it could cause problems promoting slices
	 * that have been forced down to 4K.
	 */
	if (slice_mm_new_context(mm))
		slice_set_user_psize(mm, mmu_virtual_psize);
	subpage_prot_init_new_context(mm);
	mm->context.id = index;
#ifdef CONFIG_PPC_ICSWX
	mm->context.cop_lockp = kmalloc(sizeof(spinlock_t), GFP_KERNEL);
	if (!mm->context.cop_lockp) {
		__destroy_context(index);
		subpage_prot_free(mm);
		mm->context.id = MMU_NO_CONTEXT;
		return -ENOMEM;
	}
	spin_lock_init(mm->context.cop_lockp);
#endif /* CONFIG_PPC_ICSWX */

	return 0;
}
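
This variant (and the ones below) lean on two helpers that this page does not show: __init_new_context() and __destroy_context(). A minimal sketch, assuming they simply factor the IDR allocation loop from Example #1 into a reusable pair that returns the new index (or a negative errno) and releases it again:

void __destroy_context(int context_id)
{
	/* Give a previously allocated context ID back to the allocator. */
	spin_lock(&mmu_context_lock);
	idr_remove(&mmu_context_idr, context_id);
	spin_unlock(&mmu_context_lock);
}

int __init_new_context(void)
{
	int index;
	int err;

again:
	if (!idr_pre_get(&mmu_context_idr, GFP_KERNEL))
		return -ENOMEM;

	spin_lock(&mmu_context_lock);
	err = idr_get_new_above(&mmu_context_idr, NULL, 1, &index);
	spin_unlock(&mmu_context_lock);

	if (err == -EAGAIN)
		goto again;
	else if (err)
		return err;

	if (index > MAX_CONTEXT) {
		/* Out of hardware context numbers; undo the allocation. */
		__destroy_context(index);
		return -ENOMEM;
	}

	return index;
}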
Example #4

/*
 * Set up the context for a new address space.
 */
int init_new_context(struct task_struct *t, struct mm_struct *mm)
{
	pr_hard("initing context for mm @%p\n", mm);

	mm->context.id = MMU_NO_CONTEXT;
	mm->context.active = 0;

#ifdef CONFIG_PPC_MM_SLICES
	slice_set_user_psize(mm, mmu_virtual_psize);
#endif

	return 0;
}
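
This nohash variant defers allocation entirely: the mm is created owning no hardware context (MMU_NO_CONTEXT) with an active count of zero, and a real ID is only handed out later, at context-switch time. Purely as illustration (the helper name below is invented, not from the kernel source), code on this path has to treat the sentinel as "no ID yet":

/* Hypothetical helper, not from the kernel source: true once this mm
 * has been bound to a real hardware context ID by the lazy allocator. */
static inline bool mm_has_hw_context(struct mm_struct *mm)
{
	return mm->context.id != MMU_NO_CONTEXT;
}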
Example #5

int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	int index;

	index = __init_new_context();
	if (index < 0)
		return index;

	if (radix_enabled()) {
		radix__init_new_context(mm, index);
	} else {
		/* The old code would re-promote on fork; we don't do that
		 * when using slices, as it could cause problems promoting slices
		 * that have been forced down to 4K.
		 *
		 * For book3s we have MMU_NO_CONTEXT set to ~0. Hence check
		 * explicitly against context.id == 0. This ensures that we
		 * properly initialize context slice details for newly allocated
		 * mm's (which will have id == 0) and don't alter context slice
		 * inherited via fork (which will have id != 0).
		 *
		 * We should not be calling init_new_context() on init_mm. Hence a
		 * check against 0 is OK.
		 */
		if (mm->context.id == 0)
			slice_set_user_psize(mm, mmu_virtual_psize);
		subpage_prot_init_new_context(mm);
	}
	mm->context.id = index;
#ifdef CONFIG_PPC_ICSWX
	mm->context.cop_lockp = kmalloc(sizeof(spinlock_t), GFP_KERNEL);
	if (!mm->context.cop_lockp) {
		__destroy_context(index);
		subpage_prot_free(mm);
		mm->context.id = MMU_NO_CONTEXT;
		return -ENOMEM;
	}
	spin_lock_init(mm->context.cop_lockp);
#endif /* CONFIG_PPC_ICSWX */

#ifdef CONFIG_PPC_64K_PAGES
	mm->context.pte_frag = NULL;
#endif
#ifdef CONFIG_SPAPR_TCE_IOMMU
	mm_iommu_init(&mm->context);
#endif
	return 0;
}
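
The radix branch above calls radix__init_new_context() without showing it. A hedged sketch of what it plausibly does, based on the upstream radix code: on the radix MMU, "allocating a context" amounts to publishing the mm's page-table root in the process-table slot for the new PID. The names process_tb, prtb0, radix__get_tree_size() and RADIX_PGD_INDEX_SIZE are assumptions drawn from that upstream code, not taken from this snippet:

static void radix__init_new_context(struct mm_struct *mm, int index)
{
	unsigned long rts_field;

	/* Encode the radix tree size and point this PID's process-table
	 * entry at the mm's top-level page table. */
	rts_field = radix__get_tree_size();
	process_tb[index].prtb0 = cpu_to_be64(rts_field | __pa(mm->pgd) |
					      RADIX_PGD_INDEX_SIZE);
}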
Example #6

int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	int index;
	int err;
	int new_context = (mm->context.id == 0);

again:
	if (!idr_pre_get(&mmu_context_idr, GFP_KERNEL))
		return -ENOMEM;

	spin_lock(&mmu_context_lock);
	err = idr_get_new_above(&mmu_context_idr, NULL, 1, &index);
	spin_unlock(&mmu_context_lock);

	if (err == -EAGAIN)
		goto again;
	else if (err)
		return err;

	if (index > MAX_CONTEXT) {
		spin_lock(&mmu_context_lock);
		idr_remove(&mmu_context_idr, index);
		spin_unlock(&mmu_context_lock);
		return -ENOMEM;
	}

	mm->context.id = index;
#ifdef CONFIG_PPC_MM_SLICES
	/* The old code would re-promote on fork; we don't do that
	 * when using slices, as it could cause problems promoting slices
	 * that have been forced down to 4K.
	 */
	if (new_context)
		slice_set_user_psize(mm, mmu_virtual_psize);
#else
	mm->context.user_psize = mmu_virtual_psize;
	mm->context.sllp = SLB_VSID_USER |
		mmu_psize_defs[mmu_virtual_psize].sllp;
#endif

	return 0;
}
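
In the non-slice branch above, context.sllp caches the flag word for user segments: SLB_VSID_USER OR'd with the segment page-size encoding for the base page size. A sketch of how such a cached value would be consumed when forming the VSID doubleword of an SLB entry; mk_user_vsid_data() is an illustrative name, not a kernel function, and real SLB code also folds in segment-size bits:

/* Illustrative only: combine a VSID with the per-mm flags cached in
 * context.sllp to form the VSID word of a user SLB entry. */
static inline unsigned long mk_user_vsid_data(unsigned long vsid,
					      struct mm_struct *mm)
{
	return (vsid << SLB_VSID_SHIFT) | mm->context.sllp;
}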