static unsigned int steal_all_contexts(void)
{
	struct mm_struct *mm;
	int cpu = smp_processor_id();
	unsigned int id;

	for (id = first_context; id <= last_context; id++) {
		/* Pick up the victim mm */
		mm = context_mm[id];

		pr_hardcont(" | steal %d from 0x%p", id, mm);

		/* Mark this mm as having no context anymore */
		mm->context.id = MMU_NO_CONTEXT;
		if (id != first_context) {
			context_mm[id] = NULL;
			__clear_bit(id, context_map);
#ifdef DEBUG_MAP_CONSISTENCY
			mm->context.active = 0;
#endif
		}
		__clear_bit(id, stale_map[cpu]);
	}

	/* Flush the TLB for all contexts (not to be used on SMP) */
	_tlbil_all();

	nr_free_contexts = last_context - first_context;

	return first_context;
}
static unsigned int steal_context_smp(unsigned int id)
{
	struct mm_struct *mm;
	unsigned int cpu, max, i;

	max = last_context - first_context;

	/* Attempt to free next_context first and then loop until we manage */
	while (max--) {
		/* Pick up the victim mm */
		mm = context_mm[id];

		/* We have a candidate victim, check if it's active, on SMP
		 * we cannot steal active contexts
		 */
		if (mm->context.active) {
			id++;
			if (id > last_context)
				id = first_context;
			continue;
		}
		pr_hardcont(" | steal %d from 0x%p", id, mm);

		/* Mark this mm as having no context anymore */
		mm->context.id = MMU_NO_CONTEXT;

		/* Mark it stale on all CPUs that used this mm. For threaded
		 * implementations, we set it on all threads on each core
		 * represented in the mask. A future implementation will use
		 * a core map instead but this will do for now.
		 */
		for_each_cpu(cpu, mm_cpumask(mm)) {
			for (i = cpu_first_thread_sibling(cpu);
			     i <= cpu_last_thread_sibling(cpu); i++) {
				if (stale_map[i])
					__set_bit(id, stale_map[i]);
			}
			cpu = i - 1;
		}
		return id;
	}

	/* This will happen if you have more CPUs than available contexts,
	 * all we can do here is wait a bit and try again
	 */
	raw_spin_unlock(&context_lock);
	cpu_relax();
	raw_spin_lock(&context_lock);

	/* This will cause the caller to try again */
	return MMU_NO_CONTEXT;
}
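To make the sibling-thread bookkeeping above concrete, here is a minimal userspace sketch of the same idea: when a context is stolen, it is marked stale on every hardware thread of every core present in the victim's CPU mask, so each sibling flushes its TLB before reusing the id. The names (mark_context_stale, NR_CPUS_SIM, THREADS_PER_CORE) and the fixed topology are hypothetical stand-ins for the kernel's stale_map, mm_cpumask() and cpu_first/last_thread_sibling() helpers, not kernel symbols.

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS_SIM		8	/* stands in for the online CPU count */
#define THREADS_PER_CORE	2	/* fixed SMT width for the sketch */
#define NR_CTX_SIM		16	/* stands in for last_context + 1 */

/* stale[cpu][id] models test_bit(id, stale_map[cpu]) */
static bool stale[NR_CPUS_SIM][NR_CTX_SIM];

/* Mark @id stale on all threads of every core that has a bit set in @mask,
 * mirroring the for_each_cpu()/sibling loop in steal_context_smp(). */
static void mark_context_stale(const bool mask[NR_CPUS_SIM], unsigned int id)
{
	for (unsigned int cpu = 0; cpu < NR_CPUS_SIM; cpu++) {
		if (!mask[cpu])
			continue;
		unsigned int first = cpu - (cpu % THREADS_PER_CORE);
		for (unsigned int i = first; i < first + THREADS_PER_CORE; i++)
			stale[i][id] = true;
	}
}

int main(void)
{
	bool mask[NR_CPUS_SIM] = { [3] = true };	/* victim mm last ran on CPU 3 */

	mark_context_stale(mask, 5);
	for (unsigned int cpu = 0; cpu < NR_CPUS_SIM; cpu++)
		printf("cpu %u: context 5 %s\n", cpu,
		       stale[cpu][5] ? "stale" : "clean");
	return 0;
}

With THREADS_PER_CORE set to 2, marking CPU 3 also marks its sibling CPU 2, which is the behaviour the kernel comment describes for threaded cores.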
/* Note that this will also be called on SMP if all other CPUs are
 * offlined, which means that it may be called for cpu != 0. For
 * this to work, we somewhat assume that CPUs that are onlined
 * come up with a fully clean TLB (or are cleaned when offlined)
 */
static unsigned int steal_context_up(unsigned int id)
{
	struct mm_struct *mm;
	int cpu = smp_processor_id();

	/* Pick up the victim mm */
	mm = context_mm[id];

	pr_hardcont(" | steal %d from 0x%p", id, mm);

	/* Flush the TLB for that context */
	local_flush_tlb_mm(mm);

	/* Mark this mm as having no context anymore */
	mm->context.id = MMU_NO_CONTEXT;

	/* XXX This clear should ultimately be part of local_flush_tlb_mm */
	__clear_bit(id, stale_map[cpu]);

	return id;
}
void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
			struct task_struct *tsk)
{
	unsigned int i, id, cpu = smp_processor_id();
	unsigned long *map;

	/* No lockless fast path .. yet */
	raw_spin_lock(&context_lock);

	pr_hard("[%d] activating context for mm @%p, active=%d, id=%d",
		cpu, next, next->context.active, next->context.id);

#ifdef CONFIG_SMP
	/* Mark us active and the previous one not anymore */
	next->context.active++;
	if (prev) {
		pr_hardcont(" (old=0x%p a=%d)", prev, prev->context.active);
		WARN_ON(prev->context.active < 1);
		prev->context.active--;
	}

 again:
#endif /* CONFIG_SMP */

	/* If we already have a valid assigned context, skip all that */
	id = next->context.id;
	if (likely(id != MMU_NO_CONTEXT)) {
#ifdef DEBUG_MAP_CONSISTENCY
		if (context_mm[id] != next)
			pr_err("MMU: mm 0x%p has id %d but context_mm[%d] says 0x%p\n",
			       next, id, id, context_mm[id]);
#endif
		goto ctxt_ok;
	}

	/* We really don't have a context, let's try to acquire one */
	id = next_context;
	if (id > last_context)
		id = first_context;
	map = context_map;

	/* No more free contexts, let's try to steal one */
	if (nr_free_contexts == 0) {
#ifdef CONFIG_SMP
		if (num_online_cpus() > 1) {
			id = steal_context_smp(id);
			if (id == MMU_NO_CONTEXT)
				goto again;
			goto stolen;
		}
#endif /* CONFIG_SMP */
		if (no_selective_tlbil)
			id = steal_all_contexts();
		else
			id = steal_context_up(id);
		goto stolen;
	}
	nr_free_contexts--;

	/* We know there's at least one free context, try to find it */
	while (__test_and_set_bit(id, map)) {
		id = find_next_zero_bit(map, last_context+1, id);
		if (id > last_context)
			id = first_context;
	}
 stolen:
	next_context = id + 1;
	context_mm[id] = next;
	next->context.id = id;
	pr_hardcont(" | new id=%d,nrf=%d", id, nr_free_contexts);

	context_check_map();
 ctxt_ok:

	/* If that context got marked stale on this CPU, then flush the
	 * local TLB for it and unmark it before we use it
	 */
	if (test_bit(id, stale_map[cpu])) {
		pr_hardcont(" | stale flush %d [%d..%d]",
			    id, cpu_first_thread_sibling(cpu),
			    cpu_last_thread_sibling(cpu));

		local_flush_tlb_mm(next);

		/* XXX This clear should ultimately be part of local_flush_tlb_mm */
		for (i = cpu_first_thread_sibling(cpu);
		     i <= cpu_last_thread_sibling(cpu); i++) {
			if (stale_map[i])
				__clear_bit(id, stale_map[i]);
		}
	}

	/* Flick the MMU and release lock */
	pr_hardcont(" -> %d\n", id);
	set_context(id, next->pgd);
	raw_spin_unlock(&context_lock);
}
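The allocation fast path in switch_mmu_context() (the __test_and_set_bit()/find_next_zero_bit() loop driven by next_context) is easiest to see in isolation. Below is a minimal userspace sketch of that round-robin bitmap allocator, assuming a fixed id range and leaving out locking, the steal fallbacks and all TLB handling; ctx_alloc(), FIRST_CTX, LAST_CTX and NO_CTX are made-up names, not kernel symbols.

#include <stdbool.h>
#include <stdio.h>

#define FIRST_CTX	1
#define LAST_CTX	255	/* made-up range; the real bounds are per-MMU family */
#define NO_CTX		0	/* sentinel, plays the role of MMU_NO_CONTEXT here */

static bool ctx_map[LAST_CTX + 1];			/* models context_map */
static unsigned int next_ctx = FIRST_CTX;		/* models next_context */
static unsigned int nr_free = LAST_CTX - FIRST_CTX + 1;	/* models nr_free_contexts */

/* Hand out the next free id after the last one allocated, wrapping around. */
static unsigned int ctx_alloc(void)
{
	unsigned int id;

	if (nr_free == 0)
		return NO_CTX;	/* the kernel would fall back to a steal_*() path here */

	id = next_ctx;
	if (id > LAST_CTX)	/* wrap, like the check on next_context */
		id = FIRST_CTX;

	/* Linear scan standing in for __test_and_set_bit()/find_next_zero_bit() */
	while (ctx_map[id]) {
		if (++id > LAST_CTX)
			id = FIRST_CTX;
	}
	ctx_map[id] = true;
	nr_free--;
	next_ctx = id + 1;	/* round-robin: resume the search after this id */
	return id;
}

int main(void)
{
	for (int i = 0; i < 4; i++)
		printf("allocated context id %u\n", ctx_alloc());
	return 0;
}

Starting the search at next_ctx rather than at FIRST_CTX is what spreads ids across the whole range and delays the point at which a recently freed (and possibly still TLB-resident) id is handed out again.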