int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
        int index;

        index = __init_new_context();
        if (index < 0)
                return index;

        if (slice_mm_new_context(mm))
                slice_set_user_psize(mm, mmu_virtual_psize);
        subpage_prot_init_new_context(mm);
        mm->context.id = index;

#ifdef CONFIG_PPC_ICSWX
        mm->context.cop_lockp = kmalloc(sizeof(spinlock_t), GFP_KERNEL);
        if (!mm->context.cop_lockp) {
                __destroy_context(index);
                subpage_prot_free(mm);
                mm->context.id = MMU_NO_CONTEXT;
                return -ENOMEM;
        }
        spin_lock_init(mm->context.cop_lockp);
#endif

        return 0;
}
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
        int index;

        index = __init_new_context();
        if (index < 0)
                return index;

        /* The old code would re-promote on fork; we don't do that when
         * using slices, as it could cause problems promoting slices that
         * have been forced down to 4K.
         */
        if (slice_mm_new_context(mm))
                slice_set_user_psize(mm, mmu_virtual_psize);
        subpage_prot_init_new_context(mm);
        mm->context.id = index;

#ifdef CONFIG_PPC_ICSWX
        mm->context.cop_lockp = kmalloc(sizeof(spinlock_t), GFP_KERNEL);
        if (!mm->context.cop_lockp) {
                __destroy_context(index);
                subpage_prot_free(mm);
                mm->context.id = MMU_NO_CONTEXT;
                return -ENOMEM;
        }
        spin_lock_init(mm->context.cop_lockp);
#endif /* CONFIG_PPC_ICSWX */

        return 0;
}
void destroy_context(struct mm_struct *mm)
{
#ifdef CONFIG_PPC_ICSWX
        drop_cop(mm->context.acop, mm);
        kfree(mm->context.cop_lockp);
        mm->context.cop_lockp = NULL;
#endif

        __destroy_context(mm->context.id);
        subpage_prot_free(mm);
        mm->context.id = MMU_NO_CONTEXT;
}
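Taken together, the pair above follows the classic acquire/release discipline: init_new_context() unwinds every earlier acquisition when the ICSWX lock allocation fails mid-function, and destroy_context() releases state in roughly the reverse order of acquisition before resetting the id to the MMU_NO_CONTEXT sentinel. The same shape in a minimal, self-contained user-space sketch; the ctx_* names and the -1 sentinel are hypothetical stand-ins for the kernel helpers, not kernel API:

#include <stdlib.h>
#include <pthread.h>

#define CTX_NO_ID (-1)                  /* hypothetical stand-in for MMU_NO_CONTEXT */

struct ctx {
        int id;                         /* analog of mm->context.id */
        pthread_mutex_t *lockp;         /* analog of mm->context.cop_lockp */
};

static int next_id = 1;                 /* toy id allocator */
static int alloc_id(void) { return next_id++; }  /* analog of __init_new_context() */
static void free_id(int id) { (void)id; }        /* analog of __destroy_context() */

/* Analog of init_new_context(): if the lock allocation fails, undo the
 * earlier id allocation and restore the sentinel before reporting failure. */
static int ctx_init(struct ctx *c)
{
        int id = alloc_id();

        if (id < 0)
                return id;
        c->id = id;

        c->lockp = malloc(sizeof(*c->lockp));
        if (!c->lockp) {
                free_id(id);            /* unwind the id allocation... */
                c->id = CTX_NO_ID;      /* ...and restore the sentinel */
                return -1;              /* stands in for -ENOMEM */
        }
        pthread_mutex_init(c->lockp, NULL);
        return 0;
}

/* Analog of destroy_context(): release per-feature state first, give back
 * the id, then poison the id field so use of a dead ctx is detectable. */
static void ctx_destroy(struct ctx *c)
{
        pthread_mutex_destroy(c->lockp);
        free(c->lockp);
        c->lockp = NULL;
        free_id(c->id);
        c->id = CTX_NO_ID;
}

int main(void)
{
        struct ctx c;

        if (ctx_init(&c) != 0)
                return 1;
        ctx_destroy(&c);
        return 0;
}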
void destroy_context(struct mm_struct *mm)
{
#ifdef CONFIG_SPAPR_TCE_IOMMU
        WARN_ON_ONCE(!list_empty(&mm->context.iommu_group_mem_list));
#endif
        if (radix_enabled())
                WARN_ON(process_tb[mm->context.id].prtb0 != 0);
        else
                subpage_prot_free(mm);
        destroy_contexts(&mm->context);
        mm->context.id = MMU_NO_CONTEXT;
}
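Both checks in this variant are non-fatal sanity checks: WARN_ON_ONCE() complains (at most once) if IOMMU group memory is still registered, and WARN_ON() complains if the radix process-table entry was not already cleared, but neither stops the teardown. A rough user-space stand-in for that behaviour (this is not the kernel macro, which also dumps a full backtrace):

#include <stdio.h>

/* Rough stand-in for the kernel's WARN_ON(): report the failed condition
 * and its location on stderr, then let execution continue. Like the kernel
 * macro, it evaluates to whether the condition was true. */
#define WARN_ON(cond) \
        ((cond) ? (fprintf(stderr, "WARNING: %s at %s:%d\n", \
                           #cond, __FILE__, __LINE__), 1) : 0)

int main(void)
{
        int prtb0 = 42;          /* pretend a stale process-table entry survived */

        WARN_ON(prtb0 != 0);     /* warns, but teardown would keep going */
        return 0;
}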
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
        int index;

        index = __init_new_context();
        if (index < 0)
                return index;

        if (radix_enabled()) {
                radix__init_new_context(mm, index);
        } else {
                /* The old code would re-promote on fork; we don't do that
                 * when using slices, as it could cause problems promoting
                 * slices that have been forced down to 4K.
                 *
                 * For book3s we have MMU_NO_CONTEXT set to be ~0. Hence check
                 * explicitly against context.id == 0. This ensures that we
                 * properly initialize context slice details for newly allocated
                 * mm's (which will have id == 0) and don't alter context slice
                 * inherited via fork (which will have id != 0).
                 *
                 * We should not be calling init_new_context() on init_mm.
                 * Hence a check against 0 is ok.
                 */
                if (mm->context.id == 0)
                        slice_set_user_psize(mm, mmu_virtual_psize);
                subpage_prot_init_new_context(mm);
        }
        mm->context.id = index;

#ifdef CONFIG_PPC_ICSWX
        mm->context.cop_lockp = kmalloc(sizeof(spinlock_t), GFP_KERNEL);
        if (!mm->context.cop_lockp) {
                __destroy_context(index);
                subpage_prot_free(mm);
                mm->context.id = MMU_NO_CONTEXT;
                return -ENOMEM;
        }
        spin_lock_init(mm->context.cop_lockp);
#endif /* CONFIG_PPC_ICSWX */

#ifdef CONFIG_PPC_64K_PAGES
        mm->context.pte_frag = NULL;
#endif
#ifdef CONFIG_SPAPR_TCE_IOMMU
        mm_iommu_init(&mm->context);
#endif
        return 0;
}
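The in-code comment is the key subtlety here: a freshly allocated mm arrives zero-initialised, so context.id == 0 reliably means "never set up", while a forked mm inherits a non-zero id from its parent; with MMU_NO_CONTEXT being ~0 on book3s, zero is free to serve as that marker. A toy illustration of the convention (hypothetical struct, outside the kernel):

#include <assert.h>
#include <string.h>

struct toy_ctx {
        unsigned long id;        /* 0 means "never initialised" */
};

int main(void)
{
        struct toy_ctx fresh, child;

        memset(&fresh, 0, sizeof(fresh));   /* new mm: zeroed, id == 0 */
        child.id = 7;                       /* forked mm: inherits parent's id */

        assert(fresh.id == 0);              /* would get fresh slice setup */
        assert(child.id != 0);              /* would keep inherited slice setup */
        return 0;
}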
void destroy_context(struct mm_struct *mm)
{
#ifdef CONFIG_SPAPR_TCE_IOMMU
        mm_iommu_cleanup(&mm->context);
#endif

#ifdef CONFIG_PPC_ICSWX
        drop_cop(mm->context.acop, mm);
        kfree(mm->context.cop_lockp);
        mm->context.cop_lockp = NULL;
#endif /* CONFIG_PPC_ICSWX */

        destroy_pagetable_page(mm);
        __destroy_context(mm->context.id);
        subpage_prot_free(mm);
        mm->context.id = MMU_NO_CONTEXT;
}
void destroy_context(struct mm_struct *mm)
{
#ifdef CONFIG_SPAPR_TCE_IOMMU
        mm_iommu_cleanup(&mm->context);
#endif

#ifdef CONFIG_PPC_ICSWX
        drop_cop(mm->context.acop, mm);
        kfree(mm->context.cop_lockp);
        mm->context.cop_lockp = NULL;
#endif /* CONFIG_PPC_ICSWX */

        if (radix_enabled())
                process_tb[mm->context.id].prtb1 = 0;
        else
                subpage_prot_free(mm);
        destroy_pagetable_page(mm);
        __destroy_context(mm->context.id);
        mm->context.id = MMU_NO_CONTEXT;
}