/*
 * early_alloc_pgtable - grab a naturally aligned (align == size), zeroed
 * chunk for an early page table.
 *
 * Once the bootmem allocator is up (init_bootmem_done) it is used
 * directly; before that the request is satisfied from memblock and
 * translated to a virtual address.  The chunk is cleared before return.
 */
static void *early_alloc_pgtable(unsigned long size)
{
	void *table;

	if (init_bootmem_done)
		table = __alloc_bootmem(size, size, __pa(MAX_DMA_ADDRESS));
	else
		table = __va(memblock_alloc_base(size, size,
						 __pa(MAX_DMA_ADDRESS)));

	memset(table, 0, size);
	return table;
}
/*
 * prealloc - reserve a boot-time memory region described by @p.
 *
 * A zero-sized entry is silently skipped.  On success p->address holds
 * the virtual address of the reservation and an info message is logged;
 * on failure an error is logged and p->address stays NULL.
 *
 * Fix: use the standard __func__ identifier instead of the deprecated
 * GCC-specific __FUNCTION__ extension.
 */
static void prealloc(struct ps3_prealloc *p)
{
	if (!p->size)
		return;

	p->address = __alloc_bootmem(p->size, p->align,
				     __pa(MAX_DMA_ADDRESS));
	if (!p->address) {
		printk(KERN_ERR "%s: Cannot allocate %s\n", __func__,
		       p->name);
		return;
	}

	printk(KERN_INFO "%s: %lu bytes at %p\n", p->name, p->size,
	       p->address);
}
/*
 * Setup new PSW and allocate stack for PSW restart interrupt
 */
static void __init setup_restart_psw(void)
{
	psw_t psw;

	/*
	 * Point restart_stack past the end of the allocation; the other
	 * stack setups in this code use the same top-of-area convention.
	 */
	restart_stack = __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0);
	restart_stack += ASYNC_SIZE;

	/*
	 * Setup restart PSW for absolute zero lowcore. This is necessary
	 * if PSW restart is done on an offline CPU that has lowcore zero
	 */
	psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
	psw.addr = PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler;
	/* Publish the PSW into the absolute-zero lowcore copy. */
	copy_to_absolute_zero(&S390_lowcore.restart_psw, &psw, sizeof(psw));
}
/*
 * move_initrd - relocate the initial ramdisk below @mem_limit.
 *
 * Allocates a page-aligned bootmem buffer, copies the initrd into it
 * and updates initrd_start/initrd_end.  If the allocation fails, or the
 * copy would still end above @mem_limit, the initrd is disabled
 * (initrd_start = initrd_end = 0) and NULL is returned.
 *
 * Fix: the success message was issued without a printk log level; tag
 * it KERN_INFO.
 */
void * __init
move_initrd(unsigned long mem_limit)
{
	void *start;
	unsigned long size;

	size = initrd_end - initrd_start;
	start = __alloc_bootmem(PAGE_ALIGN(size), PAGE_SIZE, 0);
	if (!start || __pa(start) + size > mem_limit) {
		initrd_start = initrd_end = 0;
		return NULL;
	}
	memmove(start, (void *)initrd_start, size);
	initrd_start = (unsigned long)start;
	initrd_end = initrd_start + size;
	printk(KERN_INFO "initrd moved to %p\n", start);
	return start;
}
void mapu_reserve_bootmem(void) { struct mapu_media_device *mdev; int i, nr_devs; nr_devs = sizeof(media_devs) / sizeof(media_devs[0]); for (i = 0; i < nr_devs; i++) { mdev = &media_devs[i]; if (mdev->memsize <= 0) continue; mdev->paddr = virt_to_phys(__alloc_bootmem(mdev->memsize, PAGE_SIZE, meminfo.bank[mdev->bank].start)); printk(KERN_INFO "MAPU: %lu bytes system memory reserved " "for %s at 0x%08x\n", (unsigned long) mdev->memsize, mdev->name, mdev->paddr); } }
void __init tf_allocate_workspace(void) { struct tf_device *dev = tf_get_device(); tf_clock_timer_init(); if (tf_ctrl_check_omap_type() <= 0) return; dev->workspace_size = smc_mem; if (dev->workspace_size < 3*SZ_1M) dev->workspace_size = 3*SZ_1M; if (smc_address == 0) #if 0 dev->workspace_addr = (u32) __pa(__alloc_bootmem( dev->workspace_size, SZ_1M, __pa(MAX_DMA_ADDRESS))); #else dev->workspace_addr = (u32) 0xBFD00000; #endif else
/*
 * setup_lowcore - initialize the boot CPU's lowcore (prefix area).
 *
 * Allocates a fresh lowcore from low bootmem, fills in the new-PSW
 * entries for every interruption class, sets up the kernel/async/panic
 * stacks, and carries machine flags, facility lists and CPU timer state
 * over from the current lowcore before set_prefix() makes the new area
 * active for CPU 0.
 */
static void __init setup_lowcore(void)
{
	struct _lowcore *lc;

	/*
	 * Setup lowcore for boot cpu
	 */
	BUILD_BUG_ON(sizeof(struct _lowcore) != LC_PAGES * 4096);
	lc = __alloc_bootmem_low(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0);
	lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
	lc->restart_psw.addr =
		PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
	if (user_mode != HOME_SPACE_MODE)
		lc->restart_psw.mask |= PSW_ASC_HOME;
	lc->external_new_psw.mask = psw_kernel_bits;
	lc->external_new_psw.addr =
		PSW_ADDR_AMODE | (unsigned long) ext_int_handler;
	lc->svc_new_psw.mask = psw_kernel_bits | PSW_MASK_IO | PSW_MASK_EXT;
	lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call;
	lc->program_new_psw.mask = psw_kernel_bits;
	lc->program_new_psw.addr =
		PSW_ADDR_AMODE | (unsigned long)pgm_check_handler;
	lc->mcck_new_psw.mask = psw_kernel_bits & ~PSW_MASK_MCHECK & ~PSW_MASK_DAT;
	lc->mcck_new_psw.addr =
		PSW_ADDR_AMODE | (unsigned long) mcck_int_handler;
	lc->io_new_psw.mask = psw_kernel_bits;
	lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler;
	lc->clock_comparator = -1ULL;
	/* Stack fields point at the top of each stack area. */
	lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE;
	lc->async_stack = (unsigned long)
		__alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0) + ASYNC_SIZE;
	lc->panic_stack = (unsigned long)
		__alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0) + PAGE_SIZE;
	lc->current_task = (unsigned long) init_thread_union.thread_info.task;
	lc->thread_info = (unsigned long) &init_thread_union;
	lc->machine_flags = S390_lowcore.machine_flags;
	lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
	memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
	       MAX_FACILITY_BIT/8);
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE) {
		lc->extended_save_area_addr = (__u32)
			__alloc_bootmem_low(PAGE_SIZE, PAGE_SIZE, 0);
		/* enable extended save area */
		__ctl_set_bit(14, 29);
	}
#else
	lc->cmf_hpp = -1ULL;
	lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0];
#endif
	/* Carry the accumulated CPU timer state over from the old lowcore. */
	lc->sync_enter_timer = S390_lowcore.sync_enter_timer;
	lc->async_enter_timer = S390_lowcore.async_enter_timer;
	lc->exit_timer = S390_lowcore.exit_timer;
	lc->user_timer = S390_lowcore.user_timer;
	lc->system_timer = S390_lowcore.system_timer;
	lc->steal_timer = S390_lowcore.steal_timer;
	lc->last_update_timer = S390_lowcore.last_update_timer;
	lc->last_update_clock = S390_lowcore.last_update_clock;
	lc->ftrace_func = S390_lowcore.ftrace_func;
	/* Make the new lowcore active and remember it for CPU 0. */
	set_prefix((u32)(unsigned long) lc);
	lowcore_ptr[0] = lc;
}
/*
 * Create an L2 page table.  One L2-sized chunk is taken from bootmem
 * with hypervisor page-table alignment.
 */
static pte_t * __init alloc_pte(void)
{
	return __alloc_bootmem(L2_KERNEL_PGTABLE_SIZE, HV_PAGE_TABLE_ALIGN, 0);
}
/*
 * Create an L1 (pmd-level) page table; the counterpart of alloc_pte()
 * above, sized for an L1 table with the same alignment requirement.
 */
static inline pmd_t *alloc_pmd(void)
{
	return __alloc_bootmem(L1_KERNEL_PGTABLE_SIZE, HV_PAGE_TABLE_ALIGN, 0);
}
/*
 * setup_lowcore - initialize the boot CPU's lowcore (prefix area).
 *
 * Allocates a fresh lowcore from low bootmem, programs the new-PSW
 * entries for every interruption class, sets up the kernel/async/panic
 * stacks, carries machine flags, facility lists and CPU timer state
 * over from the current lowcore, wires up the PSW-restart machinery in
 * the absolute-zero lowcore, and finally activates the new area via
 * set_prefix().
 */
static void __init setup_lowcore(void)
{
	struct lowcore *lc;

	/*
	 * Setup lowcore for boot cpu
	 */
	BUILD_BUG_ON(sizeof(struct lowcore) != LC_PAGES * 4096);
	lc = __alloc_bootmem_low(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0);
	lc->restart_psw.mask = PSW_KERNEL_BITS;
	lc->restart_psw.addr =
		PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
	lc->external_new_psw.mask = PSW_KERNEL_BITS |
		PSW_MASK_DAT | PSW_MASK_MCHECK;
	lc->external_new_psw.addr =
		PSW_ADDR_AMODE | (unsigned long) ext_int_handler;
	lc->svc_new_psw.mask = PSW_KERNEL_BITS |
		PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
	lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call;
	lc->program_new_psw.mask = PSW_KERNEL_BITS |
		PSW_MASK_DAT | PSW_MASK_MCHECK;
	lc->program_new_psw.addr =
		PSW_ADDR_AMODE | (unsigned long) pgm_check_handler;
	lc->mcck_new_psw.mask = PSW_KERNEL_BITS;
	lc->mcck_new_psw.addr =
		PSW_ADDR_AMODE | (unsigned long) mcck_int_handler;
	lc->io_new_psw.mask = PSW_KERNEL_BITS |
		PSW_MASK_DAT | PSW_MASK_MCHECK;
	lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler;
	lc->clock_comparator = -1ULL;
	/*
	 * Stack fields point below the top of each area, leaving room for
	 * a stack frame plus a saved pt_regs.
	 */
	lc->kernel_stack = ((unsigned long) &init_thread_union)
		+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
	lc->async_stack = (unsigned long)
		__alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0)
		+ ASYNC_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
	lc->panic_stack = (unsigned long)
		__alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0)
		+ PAGE_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
	lc->current_task = (unsigned long) init_thread_union.thread_info.task;
	lc->thread_info = (unsigned long) &init_thread_union;
	lc->machine_flags = S390_lowcore.machine_flags;
	lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
	memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
	       MAX_FACILITY_BIT/8);
	if (MACHINE_HAS_VX)
		lc->vector_save_area_addr =
			(unsigned long) &lc->vector_save_area;
	lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0];
	/* Carry the accumulated CPU timer state over from the old lowcore. */
	lc->sync_enter_timer = S390_lowcore.sync_enter_timer;
	lc->async_enter_timer = S390_lowcore.async_enter_timer;
	lc->exit_timer = S390_lowcore.exit_timer;
	lc->user_timer = S390_lowcore.user_timer;
	lc->system_timer = S390_lowcore.system_timer;
	lc->steal_timer = S390_lowcore.steal_timer;
	lc->last_update_timer = S390_lowcore.last_update_timer;
	lc->last_update_clock = S390_lowcore.last_update_clock;

	/* restart_stack points at the top of its area. */
	restart_stack = __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0);
	restart_stack += ASYNC_SIZE;

	/*
	 * Set up PSW restart to call ipl.c:do_restart(). Copy the relevant
	 * restart data to the absolute zero lowcore. This is necessary if
	 * PSW restart is done on an offline CPU that has lowcore zero.
	 */
	lc->restart_stack = (unsigned long) restart_stack;
	lc->restart_fn = (unsigned long) do_restart;
	lc->restart_data = 0;
	lc->restart_source = -1UL;

	/* Setup absolute zero lowcore */
	mem_assign_absolute(S390_lowcore.restart_stack, lc->restart_stack);
	mem_assign_absolute(S390_lowcore.restart_fn, lc->restart_fn);
	mem_assign_absolute(S390_lowcore.restart_data, lc->restart_data);
	mem_assign_absolute(S390_lowcore.restart_source, lc->restart_source);
	mem_assign_absolute(S390_lowcore.restart_psw, lc->restart_psw);

#ifdef CONFIG_SMP
	lc->spinlock_lockval = arch_spin_lockval(0);
#endif
	/* Make the new lowcore active and remember it for CPU 0. */
	set_prefix((u32)(unsigned long) lc);
	lowcore_ptr[0] = lc;
}
/*
 * Serve early flattened-device-tree allocations from bootmem, using
 * __pa(MAX_DMA_ADDRESS) as the allocation goal hint.
 */
void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
{
	return __alloc_bootmem(size, align, __pa(MAX_DMA_ADDRESS));
}
/*
 * Serve early flattened-device-tree allocations from bootmem with no
 * placement goal (goal = 0).
 */
void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
{
	return __alloc_bootmem(size, align, 0);
}
/*
 * spc_memory_init - carve out the boot-time memory areas used by the
 * FusedOS SPCs (monitor, per-SPC contexts, config block, SPC memory)
 * and pin the firmware mapping into the TLB.
 *
 * Returns 0 on success, -2 if the monitor block cannot be reserved,
 * -3 if the SPC memory does not land at or above SPC_MEMORY_PADDR.
 */
int __init spc_memory_init(void)
{
#if 0
    void* bedram;
#endif
    // Quick sanity checks
    // Is SPC context area large enough for all SPC contexts?
    BUILD_BUG_ON((sizeof(spc_context_t) * NR_CPUS) > SPC_CONTEXT_SIZE);
    // Does struct fusedos_config_t fit into memory area for FusedOS config?
    BUILD_BUG_ON(sizeof(fusedos_config_t) > FUSEDOS_CONFIG_SIZE);

    // Put the SPC monitor, context, and config just below 1 GB
    spc_monitor = (void*)memblock_alloc_base(SPC_MONITOR_SIZE + SPC_CONTEXT_SIZE + FUSEDOS_CONFIG_SIZE,
                                             (phys_addr_t)(1ul << 24), // align to 16 MB
                                             (phys_addr_t)(1ul << 30)); // below 1 GB
    if (!spc_monitor) {
        printk(KERN_ERR "FUSEDOS spc_memory_init: Cannot allocate spc_monitor.\n");
        return -2;
    }
    // NOTE(review): memblock_alloc_base() returns a physical address, so
    // spc_monitor appears to hold a physical address and the arithmetic
    // below happens before the __va() translation -- confirm intended.
    spc_context = (spc_context_t*)(__va(spc_monitor + SPC_MONITOR_SIZE));
    fusedos_config = (fusedos_config_t*)(__va(spc_monitor + SPC_MONITOR_SIZE + SPC_CONTEXT_SIZE));
    fusedos_config_init();

    if( fusedos_config->nr_spcs > 0 ) {
        // One SPC_MEMORY_SIZE slab per configured SPC, preferably at
        // SPC_MEMORY_PADDR (the bootmem "goal").
        spc_memory = __alloc_bootmem( ((unsigned long)SPC_MEMORY_SIZE) * (fusedos_config->nr_spcs), PAGE_SIZE, SPC_MEMORY_PADDR);
        if (__pa(spc_memory) < SPC_MEMORY_PADDR) {
            printk(KERN_ERR "FUSEDOS spc_memory_init: Cannot allocate spc_memory at 0x%x, 0x%lx\n", SPC_MEMORY_PADDR, __pa(spc_memory));
            return -3;
        }
    }
    printk("FUSEDOS spc_memory_init: spc_monitor 0x%p, spc_context 0x%p, fusedos_config 0x%p\n", spc_monitor, spc_context, fusedos_config);
    printk("FUSEDOS spc_memory_init: spc_memory 0x%p, __pa(spc_memory) 0x%lx\n", spc_memory, __pa(spc_memory));
    printk("FUSEDOS spc_memory_init: _fw %p\n", _fw);

    // From firmware/src/fw_mmu.c, tlbwe_slot parameters calculated
    // with tests/fusedos/tlbwe_slot_defines
    //
    // NOTE: we force this into way 3 of the TLB set in order to avoid an A2 defect
    // that does not properly honor IPROT (Linux relies on IPROT to keep the
    // firmware TLB resident).
    //
    // tlbwe_slot(
    //     3,
    //     MAS1_V(1) | MAS1_TID(0) | MAS1_TS(0) | MAS1_TSIZE_1GB | MAS1_IPROT(1),
    //     MAS2_EPN((PHYMAP_MINADDR_MMIO | PHYMAP_PRIVILEGEDOFFSET) >> 12) | MAS2_W(0) | MAS2_I(1) | MAS2_M(1) |
    //     MAS2_G(1) | MAS2_E(0),
    //     MAS7_3_RPN((PHYMAP_MINADDR_MMIO | PHYMAP_PRIVILEGEDOFFSET) >> 12) | MAS3_SR(1) | MAS3_SW(1) | MAS3_SX(1) |
    //     MAS3_UR(0) | MAS3_UW(0) | MAS3_UX(0) | MAS3_U1(1),
    //     MAS8_TGS(0) | MAS8_VF(0) | MAS8_TLPID(0),
    //     MMUCR3_X(0) | MMUCR3_R(1) | MMUCR3_C(1) | MMUCR3_ECL(0) | MMUCR3_CLASS(1) |MMUCR3_ThdID(0xF)
    // );
    //
    // #define SPRN_MMUCR3 (1023) // Memory Management Unit Control Register 3

    // Open-coded equivalent of the tlbwe_slot() call above: load the MAS
    // registers with the precomputed constants, then write the TLB entry.
    asm volatile ("mtspr %0,%1": : "i" (SPRN_MAS0), "r" (0x30000));
    asm volatile ("mtspr %0,%1": : "i" (SPRN_MAS1), "r" (0xc0000a00));
    asm volatile ("mtspr %0,%1": : "i" (SPRN_MAS2), "r" (0x3ffc000000e));
    asm volatile ("mtspr %0,%1": : "i" (SPRN_MAS7_MAS3), "r" (0x3ffc0000115));
    asm volatile ("mtspr %0,%1": : "i" (SPRN_MAS8), "r" (0x0));
    asm volatile ("mtspr %0,%1": : "i" (SPRN_MMUCR3), "r" (0x310f));
    asm volatile ("isync;" : : : "memory" );
    asm volatile ("tlbwe;" : : : "memory" );

    spc_context_init();
    return 0;
}
/*
 * pnv_pci_init_p5ioc2_hub - probe a p5ioc2 IO hub, hand it a block of
 * TCE memory via OPAL, and initialize each child PHB with its share.
 *
 * Fixes over the original:
 *  - count child PHBs (and bail out) BEFORE dividing by phb_count,
 *    which avoided a division by zero on a hub with no PHB children
 *    and also avoids committing TCE memory that nothing would use;
 *  - the error message after opal_pci_set_hub_tce_memory() claimed an
 *    allocation failure; it now says the assignment failed.
 */
void __init pnv_pci_init_p5ioc2_hub(struct device_node *np)
{
	struct device_node *phbn;
	const __be64 *prop64;
	u64 hub_id;
	void *tce_mem;
	uint64_t tce_per_phb;
	int64_t rc;
	int phb_count = 0;

	pr_info("Probing p5ioc2 IO-Hub %s\n", np->full_name);

	prop64 = of_get_property(np, "ibm,opal-hubid", NULL);
	if (!prop64) {
		pr_err(" Missing \"ibm,opal-hubid\" property !\n");
		return;
	}
	hub_id = be64_to_cpup(prop64);
	pr_info(" HUB-ID : 0x%016llx\n", hub_id);

	/* Count child PHBs (guards the division below) */
	for_each_child_of_node(np, phbn) {
		if (of_device_is_compatible(phbn, "ibm,p5ioc2-pcix") ||
		    of_device_is_compatible(phbn, "ibm,p5ioc2-pciex"))
			phb_count++;
	}
	if (phb_count == 0) {
		pr_err(" No PHB children found on IO-Hub !\n");
		return;
	}

	/* Currently allocate 16M of TCE memory for every Hub
	 *
	 * XXX TODO: Make it chip local if possible
	 */
	tce_mem = __alloc_bootmem(P5IOC2_TCE_MEMORY, P5IOC2_TCE_MEMORY,
				  __pa(MAX_DMA_ADDRESS));
	if (!tce_mem) {
		pr_err(" Failed to allocate TCE Memory !\n");
		return;
	}
	pr_debug(" TCE : 0x%016lx..0x%016lx\n",
		 __pa(tce_mem), __pa(tce_mem) + P5IOC2_TCE_MEMORY - 1);
	rc = opal_pci_set_hub_tce_memory(hub_id, __pa(tce_mem),
					 P5IOC2_TCE_MEMORY);
	if (rc != OPAL_SUCCESS) {
		pr_err(" Failed to assign TCE memory, OPAL error %lld\n", rc);
		return;
	}

	/* Calculate how much TCE space we can give per PHB */
	tce_per_phb = __rounddown_pow_of_two(P5IOC2_TCE_MEMORY / phb_count);
	pr_info(" Allocating %lld MB of TCE memory per PHB\n",
		tce_per_phb >> 20);

	/* Initialize PHBs */
	for_each_child_of_node(np, phbn) {
		if (of_device_is_compatible(phbn, "ibm,p5ioc2-pcix") ||
		    of_device_is_compatible(phbn, "ibm,p5ioc2-pciex")) {
			pnv_pci_init_p5ioc2_phb(phbn, hub_id,
						tce_mem, tce_per_phb);
			tce_mem += tce_per_phb;
		}
	}
}
/*
 * setup_lowcore - initialize the boot CPU's lowcore (prefix area).
 *
 * Allocates a zeroed lowcore (two pages when pointers are 8 bytes, one
 * otherwise), programs the new-PSW entries for every interruption
 * class, sets up the kernel/async/panic stacks, copies machine flags
 * and CPU timer state from the current lowcore, and finally switches
 * the prefix register to the new area via set_prefix().
 */
static void __init setup_lowcore(void)
{
	struct _lowcore *lc;
	int lc_pages;

	/*
	 * Setup lowcore for boot cpu
	 */
	lc_pages = sizeof(void *) == 8 ? 2 : 1;
	lc = (struct _lowcore *)
		__alloc_bootmem(lc_pages * PAGE_SIZE, lc_pages * PAGE_SIZE, 0);
	memset(lc, 0, lc_pages * PAGE_SIZE);
	lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
	lc->restart_psw.addr =
		PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
	if (switch_amode)
		lc->restart_psw.mask |= PSW_ASC_HOME;
	lc->external_new_psw.mask = psw_kernel_bits;
	lc->external_new_psw.addr =
		PSW_ADDR_AMODE | (unsigned long) ext_int_handler;
	lc->svc_new_psw.mask = psw_kernel_bits | PSW_MASK_IO | PSW_MASK_EXT;
	lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call;
	lc->program_new_psw.mask = psw_kernel_bits;
	lc->program_new_psw.addr =
		PSW_ADDR_AMODE | (unsigned long)pgm_check_handler;
	lc->mcck_new_psw.mask = psw_kernel_bits & ~PSW_MASK_MCHECK & ~PSW_MASK_DAT;
	lc->mcck_new_psw.addr =
		PSW_ADDR_AMODE | (unsigned long) mcck_int_handler;
	lc->io_new_psw.mask = psw_kernel_bits;
	lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler;
	lc->clock_comparator = -1ULL;
	/* Stack fields point at the top of each stack area. */
	lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE;
	lc->async_stack = (unsigned long)
		__alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0) + ASYNC_SIZE;
	lc->panic_stack = (unsigned long)
		__alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0) + PAGE_SIZE;
	lc->current_task = (unsigned long) init_thread_union.thread_info.task;
	lc->thread_info = (unsigned long) &init_thread_union;
	lc->machine_flags = S390_lowcore.machine_flags;
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE) {
		lc->extended_save_area_addr = (__u32)
			__alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0);
		/* enable extended save area */
		__ctl_set_bit(14, 29);
	}
#else
	lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0];
#endif
	/* Carry the accumulated CPU timer state over from the old lowcore. */
	lc->sync_enter_timer = S390_lowcore.sync_enter_timer;
	lc->async_enter_timer = S390_lowcore.async_enter_timer;
	lc->exit_timer = S390_lowcore.exit_timer;
	lc->user_timer = S390_lowcore.user_timer;
	lc->system_timer = S390_lowcore.system_timer;
	lc->steal_timer = S390_lowcore.steal_timer;
	lc->last_update_timer = S390_lowcore.last_update_timer;
	lc->last_update_clock = S390_lowcore.last_update_clock;
	lc->ftrace_func = S390_lowcore.ftrace_func;
	/* Make the new lowcore active and remember it for CPU 0. */
	set_prefix((u32)(unsigned long) lc);
	lowcore_ptr[0] = lc;
}
/*
 * Serve early flattened-device-tree allocations from the bootmem
 * allocator (no placement goal).
 */
void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
{
	return __alloc_bootmem(size, align, 0);
}