void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
{
	unsigned int id, cpu = smp_processor_id();
	unsigned long *map;

	/* No lockless fast path .. yet */
	atomic_spin_lock(&context_lock);

#ifndef DEBUG_STEAL_ONLY
	pr_devel("[%d] activating context for mm @%p, active=%d, id=%d\n",
		 cpu, next, next->context.active, next->context.id);
#endif

#ifdef CONFIG_SMP
	/* Mark us active and the previous one not anymore */
	next->context.active++;
	if (prev) {
#ifndef DEBUG_STEAL_ONLY
		pr_devel(" old context %p active was: %d\n",
			 prev, prev->context.active);
#endif
		WARN_ON(prev->context.active < 1);
		prev->context.active--;
	}

 again:
#endif /* CONFIG_SMP */

	/* If we already have a valid assigned context, skip all that */
	id = next->context.id;
	if (likely(id != MMU_NO_CONTEXT))
		goto ctxt_ok;

	/* We really don't have a context, let's try to acquire one */
	id = next_context;
	if (id > last_context)
		id = first_context;
	map = context_map;

	/* No more free contexts, let's try to steal one */
	if (nr_free_contexts == 0) {
#ifdef CONFIG_SMP
		if (num_online_cpus() > 1) {
			id = steal_context_smp(id);
			if (id == MMU_NO_CONTEXT)
				goto again;
			goto stolen;
		}
#endif /* CONFIG_SMP */
		id = steal_context_up(id);
		goto stolen;
	}
	nr_free_contexts--;

	/* We know there's at least one free context, try to find it */
	while (__test_and_set_bit(id, map)) {
		id = find_next_zero_bit(map, last_context+1, id);
		if (id > last_context)
			id = first_context;
	}
 stolen:
	next_context = id + 1;
	context_mm[id] = next;
	next->context.id = id;

#ifndef DEBUG_STEAL_ONLY
	pr_devel("[%d] picked up new id %d, nrf is now %d\n",
		 cpu, id, nr_free_contexts);
#endif

	context_check_map();
 ctxt_ok:

	/* If that context got marked stale on this CPU, then flush the
	 * local TLB for it and unmark it before we use it
	 */
	if (test_bit(id, stale_map[cpu])) {
		pr_devel("[%d] flushing stale context %d for mm @%p !\n",
			 cpu, id, next);
		local_flush_tlb_mm(next);

		/* XXX This clear should ultimately be part of local_flush_tlb_mm */
		__clear_bit(id, stale_map[cpu]);
	}

	/* Flick the MMU and release lock */
	set_context(id, next->pgd);
	atomic_spin_unlock(&context_lock);
}
void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
			struct task_struct *tsk)
{
	unsigned int i, id, cpu = smp_processor_id();
	unsigned long *map;

	/* No lockless fast path .. yet */
	raw_spin_lock(&context_lock);

	pr_hard("[%d] activating context for mm @%p, active=%d, id=%d",
		cpu, next, next->context.active, next->context.id);

#ifdef CONFIG_SMP
	/* Mark us active and the previous one not anymore */
	next->context.active++;
	if (prev) {
		pr_hardcont(" (old=0x%p a=%d)", prev, prev->context.active);
		WARN_ON(prev->context.active < 1);
		prev->context.active--;
	}

 again:
#endif /* CONFIG_SMP */

	/* If we already have a valid assigned context, skip all that */
	id = next->context.id;
	if (likely(id != MMU_NO_CONTEXT)) {
#ifdef DEBUG_MAP_CONSISTENCY
		if (context_mm[id] != next)
			pr_err("MMU: mm 0x%p has id %d but context_mm[%d] says 0x%p\n",
			       next, id, id, context_mm[id]);
#endif
		goto ctxt_ok;
	}

	/* We really don't have a context, let's try to acquire one */
	id = next_context;
	if (id > last_context)
		id = first_context;
	map = context_map;

	/* No more free contexts, let's try to steal one */
	if (nr_free_contexts == 0) {
#ifdef CONFIG_SMP
		if (num_online_cpus() > 1) {
			id = steal_context_smp(id);
			if (id == MMU_NO_CONTEXT)
				goto again;
			goto stolen;
		}
#endif /* CONFIG_SMP */
		if (no_selective_tlbil)
			id = steal_all_contexts();
		else
			id = steal_context_up(id);
		goto stolen;
	}
	nr_free_contexts--;

	/* We know there's at least one free context, try to find it */
	while (__test_and_set_bit(id, map)) {
		id = find_next_zero_bit(map, last_context+1, id);
		if (id > last_context)
			id = first_context;
	}
 stolen:
	next_context = id + 1;
	context_mm[id] = next;
	next->context.id = id;
	pr_hardcont(" | new id=%d,nrf=%d", id, nr_free_contexts);

	context_check_map();
 ctxt_ok:

	/* If that context got marked stale on this CPU, then flush the
	 * local TLB for it and unmark it before we use it
	 */
	if (test_bit(id, stale_map[cpu])) {
		pr_hardcont(" | stale flush %d [%d..%d]",
			    id, cpu_first_thread_sibling(cpu),
			    cpu_last_thread_sibling(cpu));

		local_flush_tlb_mm(next);

		/* XXX This clear should ultimately be part of local_flush_tlb_mm */
		for (i = cpu_first_thread_sibling(cpu);
		     i <= cpu_last_thread_sibling(cpu); i++) {
			if (stale_map[i])
				__clear_bit(id, stale_map[i]);
		}
	}

	/* Flick the MMU and release lock */
	pr_hardcont(" -> %d\n", id);
	set_context(id, next->pgd);
	raw_spin_unlock(&context_lock);
}
void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
{
	unsigned int i, id, cpu = smp_processor_id();
	unsigned long *map;

	/* No lockless fast path .. yet */
	raw_spin_lock(&context_lock);

	pr_hard("[%d] activating context for mm @%p, active=%d, id=%d",
		cpu, next, next->context.active, next->context.id);

#ifdef CONFIG_SMP
	/* Mark us active and the previous one not anymore */
	next->context.active++;
	if (prev) {
		pr_hardcont(" (old=0x%p a=%d)", prev, prev->context.active);
		WARN_ON(prev->context.active < 1);
		prev->context.active--;
	}

 again:
#endif /* CONFIG_SMP */

	/* If we already have a valid assigned context, skip all that */
	id = next->context.id;
	if (likely(id != MMU_NO_CONTEXT)) {
#ifdef DEBUG_MAP_CONSISTENCY
		if (context_mm[id] != next)
			pr_err("MMU: mm 0x%p has id %d but context_mm[%d] says 0x%p\n",
			       next, id, id, context_mm[id]);
#endif
		goto ctxt_ok;
	}

	/* We really don't have a context, let's try to acquire one */
	id = next_context;
	if (id > last_context)
		id = first_context;
	map = context_map;

	/* No more free contexts, let's try to steal one */
	if (nr_free_contexts == 0) {
#ifdef CONFIG_SMP
		if (num_online_cpus() > 1) {
			id = steal_context_smp(id);
			if (id == MMU_NO_CONTEXT)
				goto again;
			goto stolen;
		}
#endif /* CONFIG_SMP */
		id = steal_context_up(id);
		goto stolen;
	}
	nr_free_contexts--;

	/* We know there's at least one free context, try to find it */
	while (__test_and_set_bit(id, map)) {
		id = find_next_zero_bit(map, last_context+1, id);
		if (id > last_context)
			id = first_context;
	}
 stolen:
	next_context = id + 1;
	context_mm[id] = next;
	next->context.id = id;
	pr_hardcont(" | new id=%d,nrf=%d", id, nr_free_contexts);

	context_check_map();
 ctxt_ok:

	/* If that context got marked stale on this CPU, then flush the
	 * local TLB for it and clear the stale bit on every hardware thread
	 * of this core before we use it
	 */
	if (test_bit(id, stale_map[cpu])) {
		pr_hardcont(" | stale flush %d [%d..%d]",
			    id, cpu_first_thread_sibling(cpu),
			    cpu_last_thread_sibling(cpu));

		local_flush_tlb_mm(next);

		for (i = cpu_first_thread_sibling(cpu);
		     i <= cpu_last_thread_sibling(cpu); i++) {
			__clear_bit(id, stale_map[i]);
		}
	}

	/* Flick the MMU and release lock */
	pr_hardcont(" -> %d\n", id);
	set_context(id, next->pgd);
	raw_spin_unlock(&context_lock);
}
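/*
 * A minimal user-space sketch (not kernel code) of the round-robin context-ID
 * allocation performed above by the __test_and_set_bit()/find_next_zero_bit()
 * loop over context_map. Names such as alloc_context_id(), FIRST_CONTEXT and
 * LAST_CONTEXT are illustrative stand-ins for the kernel's first_context,
 * last_context, next_context and nr_free_contexts state; plain C bit fiddling
 * replaces the kernel bitops, and the steal path is reduced to an assert().
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define FIRST_CONTEXT	1
#define LAST_CONTEXT	255
#define BITS_PER_WORD	(8 * sizeof(unsigned long))

static unsigned long context_map[LAST_CONTEXT / BITS_PER_WORD + 1];
static unsigned int next_context = FIRST_CONTEXT;
static unsigned int nr_free_contexts = LAST_CONTEXT - FIRST_CONTEXT + 1;

/* Non-atomic stand-in for __test_and_set_bit() */
static bool test_and_set(unsigned int id)
{
	unsigned long mask = 1UL << (id % BITS_PER_WORD);
	unsigned long *word = &context_map[id / BITS_PER_WORD];
	bool was_set = *word & mask;

	*word |= mask;
	return was_set;
}

/* Search for a free ID, wrapping from LAST_CONTEXT back to FIRST_CONTEXT */
static unsigned int alloc_context_id(void)
{
	unsigned int id = next_context;

	if (id > LAST_CONTEXT)
		id = FIRST_CONTEXT;

	assert(nr_free_contexts > 0);	/* the real code steals a context here */
	nr_free_contexts--;

	/* Walk the bitmap one bit at a time instead of find_next_zero_bit() */
	while (test_and_set(id)) {
		id++;
		if (id > LAST_CONTEXT)
			id = FIRST_CONTEXT;
	}

	next_context = id + 1;
	return id;
}

int main(void)
{
	for (int i = 0; i < 5; i++)
		printf("allocated context id %u, %u free\n",
		       alloc_context_id(), nr_free_contexts);
	return 0;
}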