Example #1
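Here the context ID is allocated by the MMU-specific backend (radix__init_new_context() or hash__init_new_context()); the common code then records the ID and initializes the remaining per-mm state.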
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	int index;

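	/* Let the active MMU backend (radix or hash) allocate the context ID. */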
	if (radix_enabled())
		index = radix__init_new_context(mm);
	else
		index = hash__init_new_context(mm);

	if (index < 0)
		return index;

	mm->context.id = index;

#ifdef CONFIG_PPC_64K_PAGES
	mm->context.pte_frag = NULL;
#endif
#ifdef CONFIG_SPAPR_TCE_IOMMU
	mm_iommu_init(mm);
#endif
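	/* A fresh context starts with no active CPUs and no coprocessor users. */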
	atomic_set(&mm->context.active_cpus, 0);
	atomic_set(&mm->context.copros, 0);

	return 0;
}
Example #2
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
    int index;

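    /* Allocate a context ID before any MMU-specific setup. */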
    index = __init_new_context();
    if (index < 0)
        return index;

    if (radix_enabled()) {
        radix__init_new_context(mm, index);
    } else {

        /* The old code would re-promote on fork; we don't do that
         * when using slices, as it could cause problems promoting slices
         * that have been forced down to 4K.
         *
         * For book3s we have MMU_NO_CONTEXT set to be ~0. Hence check
         * explicitly against context.id == 0. This ensures that we
         * properly initialize context slice details for newly allocated
         * mm's (which will have id == 0) and don't alter context slice
         * inherited via fork (which will have id != 0).
         *
         * We should not be calling init_new_context() on init_mm. Hence a
         * check against 0 is ok.
         */
        if (mm->context.id == 0)
            slice_set_user_psize(mm, mmu_virtual_psize);
        subpage_prot_init_new_context(mm);
    }
    mm->context.id = index;
#ifdef CONFIG_PPC_ICSWX
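    /* ICSWX coprocessors need a per-mm lock; on allocation failure, back
     * out the context we just set up. */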
    mm->context.cop_lockp = kmalloc(sizeof(spinlock_t), GFP_KERNEL);
    if (!mm->context.cop_lockp) {
        __destroy_context(index);
        subpage_prot_free(mm);
        mm->context.id = MMU_NO_CONTEXT;
        return -ENOMEM;
    }
    spin_lock_init(mm->context.cop_lockp);
#endif /* CONFIG_PPC_ICSWX */

#ifdef CONFIG_PPC_64K_PAGES
    mm->context.pte_frag = NULL;
#endif
#ifdef CONFIG_SPAPR_TCE_IOMMU
    mm_iommu_init(&mm->context);
#endif
    return 0;
}