void __init allocate_pacas(void)
{
	u64 limit;
	int cpu;

#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * We access pacas in real mode, and cannot take SLB faults
	 * on them when in virtual mode, so allocate them accordingly.
	 */
	limit = min(ppc64_bolted_size(), ppc64_rma_size);
#else
	limit = ppc64_rma_size;
#endif

	paca_size = PAGE_ALIGN(sizeof(struct paca_struct) * nr_cpu_ids);

	paca = __va(memblock_alloc_base(paca_size, PAGE_SIZE, limit));
	memset(paca, 0, paca_size);

	printk(KERN_DEBUG "Allocated %u bytes for %u pacas at %p\n",
	       paca_size, nr_cpu_ids, paca);

	allocate_lppacas(nr_cpu_ids, limit);
	allocate_slb_shadows(nr_cpu_ids, limit);

	/* Can't use for_each_*_cpu, as they aren't functional yet */
	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
		initialise_paca(&paca[cpu], cpu);
}
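/*
 * For reference, initialise_paca() lives in arch/powerpc/kernel/paca.c.
 * A minimal sketch of the per-CPU seeding it performs; the fields shown
 * are an illustrative subset, not the full list the real function sets:
 */
void __init initialise_paca(struct paca_struct *new_paca, int cpu)
{
	new_paca->paca_index = cpu;
	new_paca->kernelbase = (unsigned long)_stext;
	/* Keep MSR_IR/MSR_DR clear until the MMU is initialized */
	new_paca->kernel_msr = MSR_KERNEL & ~(MSR_IR | MSR_DR);
	new_paca->hw_cpu_id = 0xffff;	/* filled in once the CPU is known */
	new_paca->__current = &init_task;
}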
void __init irqstack_early_init(void)
{
	u64 limit = ppc64_bolted_size();
	unsigned int i;

	/*
	 * Interrupt stacks must be in the first segment since we
	 * cannot afford to take SLB misses on them. They are not
	 * accessed in realmode.
	 */
	for_each_possible_cpu(i) {
		softirq_ctx[i] = alloc_stack(limit, i);
		hardirq_ctx[i] = alloc_stack(limit, i);
	}
}
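/*
 * alloc_stack() is the helper both irqstack_early_init() and
 * emergency_stack_init() rely on. A minimal sketch, assuming the
 * memblock_alloc_base() interface used above; the real helper also
 * tries a node-local allocation first, and the exact memblock API
 * varies between kernel versions:
 */
static void *__init alloc_stack(unsigned long limit, int cpu)
{
	unsigned long pa;

	/* THREAD_SIZE-aligned so stack-pointer masking works */
	pa = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);

	return __va(pa);
}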
/*
 * Stack space used when we detect a bad kernel stack pointer, and
 * early in SMP boots before relocation is enabled. Exclusive emergency
 * stack for machine checks.
 */
void __init emergency_stack_init(void)
{
	u64 limit;
	unsigned int i;

	/*
	 * Emergency stacks must be under 256MB, we cannot afford to take
	 * SLB misses on them. The ABI also requires them to be 128-byte
	 * aligned.
	 *
	 * Since we use these as temporary stacks during secondary CPU
	 * bringup, machine check, system reset, and HMI, we need to get
	 * at them in real mode. This means they must also be within the
	 * RMO region.
	 *
	 * The IRQ stacks allocated elsewhere in this file are zeroed and
	 * initialized in kernel/irq.c. These are initialized here in order
	 * to have emergency stacks available as early as possible.
	 */
	limit = min(ppc64_bolted_size(), ppc64_rma_size);

	for_each_possible_cpu(i) {
		struct thread_info *ti;

		ti = alloc_stack(limit, i);
		memset(ti, 0, THREAD_SIZE);
		emerg_stack_init_thread_info(ti, i);
		paca_ptrs[i]->emergency_sp = (void *)ti + THREAD_SIZE;

#ifdef CONFIG_PPC_BOOK3S_64
		/* emergency stack for NMI exception handling. */
		ti = alloc_stack(limit, i);
		memset(ti, 0, THREAD_SIZE);
		emerg_stack_init_thread_info(ti, i);
		paca_ptrs[i]->nmi_emergency_sp = (void *)ti + THREAD_SIZE;

		/* emergency stack for machine check exception handling. */
		ti = alloc_stack(limit, i);
		memset(ti, 0, THREAD_SIZE);
		emerg_stack_init_thread_info(ti, i);
		paca_ptrs[i]->mc_emergency_sp = (void *)ti + THREAD_SIZE;
#endif
	}
}
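/*
 * emerg_stack_init_thread_info() gives each emergency stack a usable
 * thread_info at its base. A minimal sketch with an illustrative set of
 * fields; treat the exact field list as an assumption, since it depends
 * on the kernel version and on CONFIG_LIVEPATCH:
 */
static void __init emerg_stack_init_thread_info(struct thread_info *ti,
						int cpu)
{
	ti->cpu = cpu;
	ti->preempt_count = 0;	/* fresh stack, no nested context yet */
	ti->local_flags = 0;
	ti->flags = 0;
	klp_init_thread_info(ti);	/* no-op unless CONFIG_LIVEPATCH */
}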
static void __ref init_fallback_flush(void)
{
	u64 l1d_size, limit;
	int cpu;

	/* Only allocate the fallback flush area once (at boot time). */
	if (l1d_flush_fallback_area)
		return;

	l1d_size = ppc64_caches.l1d.size;

	/*
	 * If there is no d-cache-size property in the device tree, l1d_size
	 * could be zero. That leads to the loop in the asm wrapping around to
	 * 2^64-1, and then walking off the end of the fallback area and
	 * eventually causing a page fault which is fatal. Just default to
	 * something vaguely sane.
	 */
	if (!l1d_size)
		l1d_size = (64 * 1024);

	limit = min(ppc64_bolted_size(), ppc64_rma_size);

	/*
	 * Align to L1d size, and size it at 2x L1d size, to catch possible
	 * hardware prefetch runoff. We don't have a recipe for load patterns
	 * to reliably avoid the prefetcher.
	 */
	l1d_flush_fallback_area = __va(memblock_alloc_base(l1d_size * 2,
							   l1d_size, limit));
	memset(l1d_flush_fallback_area, 0, l1d_size * 2);

	for_each_possible_cpu(cpu) {
		struct paca_struct *paca = paca_ptrs[cpu];

		paca->rfi_flush_fallback_area = l1d_flush_fallback_area;
		paca->l1d_flush_size = l1d_size;
	}
}
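/*
 * For context, the rfi_flush_fallback assembly consumes the two paca
 * fields set above by load-walking the area to displace the whole L1d.
 * A rough C equivalent of that loop, purely illustrative: the real code
 * is assembly in exceptions-64s.S, and the 128-byte line size here is
 * an assumption for recent POWER cores.
 */
static inline void l1d_displacement_flush(struct paca_struct *paca)
{
	char *p = paca->rfi_flush_fallback_area;
	char *end = p + 2 * paca->l1d_flush_size;

	/* One load per cache line, across 2x the L1d size */
	for (; p < end; p += 128)
		(void)*(volatile char *)p;
}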