void flush_tlb_pending(void)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
	struct mm_struct *mm = tb->mm;

	if (!tb->tlb_nr)
		goto out;

	flush_tsb_user(tb);

	if (CTX_VALID(mm->context)) {
		if (tb->tlb_nr == 1) {
			global_flush_tlb_page(mm, tb->vaddrs[0]);
		} else {
#ifdef CONFIG_SMP
			smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
					      &tb->vaddrs[0]);
#else
			__flush_tlb_pending(CTX_HWBITS(tb->mm->context),
					    tb->tlb_nr, &tb->vaddrs[0]);
#endif
		}
	}

	tb->tlb_nr = 0;

out:
	put_cpu_var(tlb_batch);
}
void flush_tlb_pending(void)
{
	struct mmu_gather *mp = &get_cpu_var(mmu_gathers);

	if (mp->tlb_nr) {
		flush_tsb_user(mp);

		if (CTX_VALID(mp->mm->context)) {
#ifdef CONFIG_SMP
			smp_flush_tlb_pending(mp->mm, mp->tlb_nr,
					      &mp->vaddrs[0]);
#else
			__flush_tlb_pending(CTX_HWBITS(mp->mm->context),
					    mp->tlb_nr, &mp->vaddrs[0]);
#endif
		}
		mp->tlb_nr = 0;
	}

	put_cpu_var(mmu_gathers);
}
void flush_tlb_pending(void)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);

	if (tb->tlb_nr) {
		flush_tsb_user(tb);

		if (CTX_VALID(tb->mm->context)) {
#ifdef CONFIG_SMP
			smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
					      &tb->vaddrs[0]);
#else
			__flush_tlb_pending(CTX_HWBITS(tb->mm->context),
					    tb->tlb_nr, &tb->vaddrs[0]);
#endif
		}
		tb->tlb_nr = 0;
	}

	put_cpu_var(tlb_batch);
}
/* Pretty sick eh? */
int prom_callback(long *args)
{
	struct console *cons, *saved_console = NULL;
	unsigned long flags;
	char *cmd;
	extern spinlock_t prom_entry_lock;

	if (!args)
		return -1;
	if (!(cmd = (char *)args[0]))
		return -1;

	/*
	 * The callback can be invoked on the cpu that first dropped
	 * into prom_cmdline after taking the serial interrupt, or on
	 * a slave processor that was smp_captured() if the
	 * administrator has done a switch-cpu inside obp. In either
	 * case, the cpu is marked as in-interrupt. Drop IRQ locks.
	 */
	irq_exit();

	/* XXX Revisit the locking here someday.  This is a debugging
	 * XXX feature so it isnt all that critical.  -DaveM
	 */
	local_irq_save(flags);

	spin_unlock(&prom_entry_lock);
	cons = console_drivers;
	while (cons) {
		unregister_console(cons);
		cons->flags &= ~(CON_PRINTBUFFER);
		cons->next = saved_console;
		saved_console = cons;
		cons = console_drivers;
	}
	register_console(&prom_console);
	if (!strcmp(cmd, "sync")) {
		prom_printf("PROM `%s' command...\n", cmd);
		show_free_areas();
		if (current->pid != 0) {
			local_irq_enable();
			sys_sync();
			local_irq_disable();
		}
		args[2] = 0;
		args[args[1] + 3] = -1;
		prom_printf("Returning to PROM\n");
	} else if (!strcmp(cmd, "va>tte-data")) {
		unsigned long ctx, va;
		unsigned long tte = 0;
		long res = PROM_FALSE;

		ctx = args[3];
		va = args[4];
		if (ctx) {
			/*
			 * Find process owning ctx, lookup mapping.
			 */
			struct task_struct *p;
			struct mm_struct *mm = NULL;
			pgd_t *pgdp;
			pmd_t *pmdp;
			pte_t *ptep;

			for_each_process(p) {
				mm = p->mm;
				if (CTX_HWBITS(mm->context) == ctx)
					break;
			}
			if (!mm ||
			    CTX_HWBITS(mm->context) != ctx)
				goto done;

			pgdp = pgd_offset(mm, va);
			if (pgd_none(*pgdp))
				goto done;
			pmdp = pmd_offset(pgdp, va);
			if (pmd_none(*pmdp))
				goto done;

			/* Preemption implicitly disabled by virtue of
			 * being called from inside OBP.
			 */
			ptep = pte_offset_map(pmdp, va);
			if (pte_present(*ptep)) {
				tte = pte_val(*ptep);
				res = PROM_TRUE;
			}
			pte_unmap(ptep);
			goto done;
		}

		if ((va >= KERNBASE) && (va < (KERNBASE + (4 * 1024 * 1024)))) {
			/* Spitfire Errata #32 workaround */
			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
					     "flush	%%g6"
					     : /* No outputs */
					     : "r" (0),
					       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			/*
			 * Locked down tlb entry.
			 */
			if (tlb_type == spitfire)
				tte = spitfire_get_dtlb_data(SPITFIRE_HIGHEST_LOCKED_TLBENT);
			else if (tlb_type == cheetah || tlb_type == cheetah_plus)
				tte = cheetah_get_ldtlb_data(CHEETAH_HIGHEST_LOCKED_TLBENT);

			res = PROM_TRUE;
			goto done;
		}
/* Pretty sick eh? */
int prom_callback(long *args)
{
	struct console *cons, *saved_console = NULL;
	unsigned long flags;
	char *cmd;

	if (!args)
		return -1;
	if (!(cmd = (char *)args[0]))
		return -1;

	save_and_cli(flags);
	cons = console_drivers;
	while (cons) {
		unregister_console(cons);
		cons->flags &= ~(CON_PRINTBUFFER);
		cons->next = saved_console;
		saved_console = cons;
		cons = console_drivers;
	}
	register_console(&prom_console);
	if (!strcmp(cmd, "sync")) {
		prom_printf("PROM `%s' command...\n", cmd);
		show_free_areas();
		if (current->pid != 0) {
			sti();
			sys_sync();
			cli();
		}
		args[2] = 0;
		args[args[1] + 3] = -1;
		prom_printf("Returning to PROM\n");
	} else if (!strcmp(cmd, "va>tte-data")) {
		unsigned long ctx, va;
		unsigned long tte = 0;
		long res = PROM_FALSE;

		ctx = args[3];
		va = args[4];
		if (ctx) {
			/*
			 * Find process owning ctx, lookup mapping.
			 */
			struct task_struct *p;
			struct mm_struct *mm = NULL;
			pgd_t *pgdp;
			pmd_t *pmdp;
			pte_t *ptep;

			for_each_task(p) {
				mm = p->mm;
				if (CTX_HWBITS(mm->context) == ctx)
					break;
			}
			if (!mm ||
			    CTX_HWBITS(mm->context) != ctx)
				goto done;

			pgdp = pgd_offset(mm, va);
			if (pgd_none(*pgdp))
				goto done;
			pmdp = pmd_offset(pgdp, va);
			if (pmd_none(*pmdp))
				goto done;
			ptep = pte_offset(pmdp, va);
			if (!pte_present(*ptep))
				goto done;
			tte = pte_val(*ptep);
			res = PROM_TRUE;
			goto done;
		}

		if ((va >= KERNBASE) && (va < (KERNBASE + (4 * 1024 * 1024)))) {
			/* Spitfire Errata #32 workaround */
			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
					     "flush	%%g6"
					     : /* No outputs */
					     : "r" (0),
					       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			/*
			 * Locked down tlb entry 63.
			 */
			tte = spitfire_get_dtlb_data(63);
			res = PROM_TRUE;
			goto done;
		}