void __init allocate_pacas(void)
{
	int cpu, limit;

	/*
	 * We can't take SLB misses on the paca, and we want to access them
	 * in real mode, so allocate them within the RMA and also within
	 * the first segment.
	 */
	limit = min(0x10000000ULL, ppc64_rma_size);

	paca_size = PAGE_ALIGN(sizeof(struct paca_struct) * nr_cpu_ids);

	paca = __va(memblock_alloc_base(paca_size, PAGE_SIZE, limit));
	memset(paca, 0, paca_size);

	printk(KERN_DEBUG "Allocated %u bytes for %d pacas at %p\n",
		paca_size, nr_cpu_ids, paca);

	allocate_lppacas(nr_cpu_ids, limit);

	/* Can't use for_each_*_cpu, as they aren't functional yet */
	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
		initialise_paca(&paca[cpu], cpu);
}
void * __init iSeries_early_setup(void)
{
	unsigned long phys_mem_size;

	/* Identify CPU type. This is done again by the common code later
	 * on but calling this function multiple times is fine.
	 */
	identify_cpu(0, mfspr(SPRN_PVR));
	initialise_paca(&boot_paca, 0);

	powerpc_firmware_features |= FW_FEATURE_ISERIES;
	powerpc_firmware_features |= FW_FEATURE_LPAR;

#ifdef CONFIG_SMP
	/* On iSeries we know we can never have more than 64 cpus */
	nr_cpu_ids = min(nr_cpu_ids, 64);
#endif

	iSeries_fixup_klimit();

	/*
	 * Initialize the table which translates Linux physical addresses to
	 * AS/400 absolute addresses
	 */
	phys_mem_size = build_iSeries_Memory_Map();

	iSeries_get_cmdline();

	return (void *) __pa(build_flat_dt(phys_mem_size));
}
void __init allocate_pacas(void)
{
	int nr_cpus, cpu, limit;

	/*
	 * We can't take SLB misses on the paca, and we want to access them
	 * in real mode, so allocate them within the RMA and also within
	 * the first segment. On iSeries they must be within the area mapped
	 * by the HV, which is HvPagesToMap * HVPAGESIZE bytes.
	 */
	limit = min(0x10000000ULL, lmb.rmo_size);
	if (firmware_has_feature(FW_FEATURE_ISERIES))
		limit = min(limit, HvPagesToMap * HVPAGESIZE);

	nr_cpus = NR_CPUS;
	/* On iSeries we know we can never have more than 64 cpus */
	if (firmware_has_feature(FW_FEATURE_ISERIES))
		nr_cpus = min(64, nr_cpus);

	paca_size = PAGE_ALIGN(sizeof(struct paca_struct) * nr_cpus);

	paca = __va(lmb_alloc_base(paca_size, PAGE_SIZE, limit));
	memset(paca, 0, paca_size);

	printk(KERN_DEBUG "Allocated %u bytes for %d pacas at %p\n",
		paca_size, nr_cpus, paca);

	/* Can't use for_each_*_cpu, as they aren't functional yet */
	for (cpu = 0; cpu < nr_cpus; cpu++)
		initialise_paca(&paca[cpu], cpu);
}
void __init allocate_pacas(void)
{
	u64 limit;
	int cpu;

#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * We access pacas in real mode, and cannot take SLB faults
	 * on them when in virtual mode, so allocate them accordingly.
	 */
	limit = min(ppc64_bolted_size(), ppc64_rma_size);
#else
	limit = ppc64_rma_size;
#endif

	paca_size = PAGE_ALIGN(sizeof(struct paca_struct) * nr_cpu_ids);

	paca = __va(memblock_alloc_base(paca_size, PAGE_SIZE, limit));
	memset(paca, 0, paca_size);

	printk(KERN_DEBUG "Allocated %u bytes for %u pacas at %p\n",
		paca_size, nr_cpu_ids, paca);

	allocate_lppacas(nr_cpu_ids, limit);
	allocate_slb_shadows(nr_cpu_ids, limit);

	/* Can't use for_each_*_cpu, as they aren't functional yet */
	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
		initialise_paca(&paca[cpu], cpu);
}
void __init early_setup(unsigned long dt_ptr)
{
	/* -------- printk is _NOT_ safe to use here ! ------- */

	/* Identify CPU type */
	identify_cpu(0, mfspr(SPRN_PVR));

	/* Assume we're on cpu 0 for now. Don't write to the paca yet! */
	initialise_paca(&boot_paca, 0);
	setup_paca(&boot_paca);

	/* Initialize lockdep early or else spinlocks will blow */
	lockdep_init();

	/* -------- printk is now safe to use ------- */

	/* Enable early debugging if any specified (see udbg.h) */
	udbg_early_init();

	DBG(" -> early_setup(), dt_ptr: 0x%lx\n", dt_ptr);

	/*
	 * Do early initialization using the flattened device
	 * tree, such as retrieving the physical memory map or
	 * calculating/retrieving the hash table size.
	 */
	early_init_devtree(__va(dt_ptr));

	/* Now we know the logical id of our boot cpu, setup the paca. */
	setup_paca(&paca[boot_cpuid]);

	/* Fix up paca fields required for the boot cpu */
	get_paca()->cpu_start = 1;

	/* Probe the machine type */
	probe_machine();

	setup_kdump_trampoline();

	DBG("Found, Initializing memory management...\n");

	/* Initialize the hash table or TLB handling */
	early_init_mmu();

	DBG(" <- early_setup()\n");
}
void __init allocate_pacas(void)
{
	u64 limit;
	int cpu;
	int nr_cpus;

	limit = ppc64_rma_size;

#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * We can't take SLB misses on the paca, and we want to access them
	 * in real mode, so allocate them within the RMA and also within
	 * the first segment.
	 */
	limit = min(0x10000000ULL, limit);
#endif

	/*
	 * Always align up the nr_cpu_ids to SMT threads and allocate
	 * the paca. This will help us to prepare for a situation where
	 * boot cpu id > nr_cpu_ids. We will use the last nthreads
	 * slots (nthreads == threads per core) to accommodate a core
	 * that contains boot cpu thread.
	 *
	 * Do not change nr_cpu_ids value here. Let us do that in
	 * early_init_dt_scan_cpus() where we know exact value
	 * of threads per core.
	 */
	nr_cpus = _ALIGN_UP(nr_cpu_ids, MAX_SMT);
	paca_size = PAGE_ALIGN(sizeof(struct paca_struct) * nr_cpus);

	paca = __va(memblock_alloc_base(paca_size, PAGE_SIZE, limit));
	memset(paca, 0, paca_size);

	printk(KERN_DEBUG "Allocated %u bytes for %d pacas at %p\n",
		paca_size, nr_cpus, paca);

	allocate_lppacas(nr_cpus, limit);
	allocate_slb_shadows(nr_cpus, limit);

	/* Can't use for_each_*_cpu, as they aren't functional yet */
	for (cpu = 0; cpu < nr_cpus; cpu++)
		initialise_paca(&paca[cpu], cpu);
}
void __init early_setup(unsigned long dt_ptr)
{
	static __initdata struct paca_struct boot_paca;

	/* -------- printk is _NOT_ safe to use here ! ------- */

	/* Try new device tree based feature discovery ... */
	if (!dt_cpu_ftrs_init(__va(dt_ptr)))
		/* Otherwise use the old style CPU table */
		identify_cpu(0, mfspr(SPRN_PVR));

	/* Assume we're on cpu 0 for now. Don't write to the paca yet! */
	initialise_paca(&boot_paca, 0);
	setup_paca(&boot_paca);
	fixup_boot_paca();

	/* -------- printk is now safe to use ------- */

	/* Enable early debugging if any specified (see udbg.h) */
	udbg_early_init();

	DBG(" -> early_setup(), dt_ptr: 0x%lx\n", dt_ptr);

	/*
	 * Do early initialization using the flattened device
	 * tree, such as retrieving the physical memory map or
	 * calculating/retrieving the hash table size.
	 */
	early_init_devtree(__va(dt_ptr));

	/* Now we know the logical id of our boot cpu, setup the paca. */
	if (boot_cpuid != 0) {
		/* Poison paca_ptrs[0] again if it's not the boot cpu */
		memset(&paca_ptrs[0], 0x88, sizeof(paca_ptrs[0]));
	}
	setup_paca(paca_ptrs[boot_cpuid]);
	fixup_boot_paca();

	/*
	 * Configure exception handlers. This includes setting up trampolines
	 * if needed, setting exception endian mode, etc...
	 */
	configure_exceptions();

	/* Apply all the dynamic patching */
	apply_feature_fixups();
	setup_feature_keys();

	/* Initialize the hash table or TLB handling */
	early_init_mmu();

	/*
	 * After firmware and early platform setup code has set things up,
	 * we note the SPR values for configurable control/performance
	 * registers, and use those as initial defaults.
	 */
	record_spr_defaults();

	/*
	 * At this point, we can let interrupts switch to virtual mode
	 * (the MMU has been setup), so adjust the MSR in the PACA to
	 * have IR and DR set and enable AIL if it exists
	 */
	cpu_ready_for_interrupts();

	/*
	 * We enable ftrace here, but since we only support DYNAMIC_FTRACE, it
	 * will only actually get enabled on the boot cpu much later once
	 * ftrace itself has been initialized.
	 */
	this_cpu_enable_ftrace();

	DBG(" <- early_setup()\n");

#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
	/*
	 * This needs to be done *last* (after the above DBG() even)
	 *
	 * Right after we return from this function, we turn on the MMU
	 * which means the real-mode access trick that btext does will
	 * no longer work, it needs to switch to using a real MMU
	 * mapping. This call will ensure that it does
	 */
	btext_map();
#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
}