static int __init ip27_setup(void)
{
	hubreg_t p, e, n_mode;
	nasid_t nid;

	ip27_setup_console();
	ip27_reboot_setup();

	/*
	 * hub_rtc init and cpu clock intr enabled for later calibrate_delay.
	 */
	nid = get_nasid();
	printk("IP27: Running on node %d.\n", nid);

	p = LOCAL_HUB_L(PI_CPU_PRESENT_A) & 1;
	e = LOCAL_HUB_L(PI_CPU_ENABLE_A) & 1;
	printk("Node %d has %s primary CPU%s.\n", nid,
	       p ? "a" : "no", e ? ", CPU is running" : "");

	p = LOCAL_HUB_L(PI_CPU_PRESENT_B) & 1;
	e = LOCAL_HUB_L(PI_CPU_ENABLE_B) & 1;
	printk("Node %d has %s secondary CPU%s.\n", nid,
	       p ? "a" : "no", e ? ", CPU is running" : "");

	/*
	 * Try to catch kernel misconfigurations and give user an
	 * indication what option to select.
	 */
	n_mode = LOCAL_HUB_L(NI_STATUS_REV_ID) & NSRI_MORENODES_MASK;
	printk("Machine is in %c mode.\n", n_mode ? 'N' : 'M');
#ifdef CONFIG_SGI_SN0_N_MODE
	if (!n_mode)
		panic("Kernel compiled for M mode.");
#else
	if (n_mode)
		panic("Kernel compiled for N mode.");
#endif

	ioc3_sio_init();
	ioc3_eth_init();
	per_cpu_init();

	set_io_port_base(IO_BASE);

	board_time_init = ip27_time_init;

	return 0;
}
void __init plat_mem_setup(void)
{
	hubreg_t p, e, n_mode;
	nasid_t nid;

	ip27_reboot_setup();

	/*
	 * hub_rtc init and cpu clock intr enabled for later calibrate_delay.
	 */
	nid = get_nasid();
	printk("IP27: Running on node %d.\n", nid);

	p = LOCAL_HUB_L(PI_CPU_PRESENT_A) & 1;
	e = LOCAL_HUB_L(PI_CPU_ENABLE_A) & 1;
	printk("Node %d has %s primary CPU%s.\n", nid,
	       p ? "a" : "no", e ? ", CPU is running" : "");

	p = LOCAL_HUB_L(PI_CPU_PRESENT_B) & 1;
	e = LOCAL_HUB_L(PI_CPU_ENABLE_B) & 1;
	printk("Node %d has %s secondary CPU%s.\n", nid,
	       p ? "a" : "no", e ? ", CPU is running" : "");

	/*
	 * Try to catch kernel misconfigurations and give user an
	 * indication what option to select.
	 */
	n_mode = LOCAL_HUB_L(NI_STATUS_REV_ID) & NSRI_MORENODES_MASK;
	printk("Machine is in %c mode.\n", n_mode ? 'N' : 'M');
#ifdef CONFIG_SGI_SN_N_MODE
	if (!n_mode)
		panic("Kernel compiled for M mode.");
#else
	if (n_mode)
		panic("Kernel compiled for N mode.");
#endif

	ioc3_eth_init();
	per_cpu_init();

	set_io_port_base(IO_BASE);
}
void __init ip27_setup(void)
{
	nasid_t nid;
	hubreg_t p, e;

	ip27_setup_console();
	ip27_reboot_setup();

	num_bridges = 0;

	/*
	 * hub_rtc init and cpu clock intr enabled for later calibrate_delay.
	 */
	DBG("ip27_setup(): Entered.\n");
	nid = get_nasid();
	printk("IP27: Running on node %d.\n", nid);

	p = LOCAL_HUB_L(PI_CPU_PRESENT_A) & 1;
	e = LOCAL_HUB_L(PI_CPU_ENABLE_A) & 1;
	printk("Node %d has %s primary CPU%s.\n", nid,
	       p ? "a" : "no", e ? ", CPU is running" : "");

	p = LOCAL_HUB_L(PI_CPU_PRESENT_B) & 1;
	e = LOCAL_HUB_L(PI_CPU_ENABLE_B) & 1;
	printk("Node %d has %s secondary CPU%s.\n", nid,
	       p ? "a" : "no", e ? ", CPU is running" : "");

	verify_mode();
	ioc3_sio_init();
	ioc3_eth_init();
	per_cpu_init();

	set_io_port_base(IO_BASE);

	board_be_init = ip27_be_init;
	board_time_init = ip27_time_init;
}
void prom_init_secondary(void)
{
	per_cpu_init();
	local_irq_enable();
}
/*
 * cpu_init() initializes state that is per-CPU.  This function acts
 * as a 'CPU state barrier', nothing should get across.
 */
void
cpu_init (void)
{
	extern void __devinit ia64_mmu_init (void *);
	unsigned long num_phys_stacked;
	pal_vm_info_2_u_t vmi;
	unsigned int max_ctx;
	struct cpuinfo_ia64 *cpu_info;
	void *cpu_data;

	cpu_data = per_cpu_init();

	get_max_cacheline_size();

	/*
	 * We can't pass "local_cpu_data" to identify_cpu() because we haven't called
	 * ia64_mmu_init() yet.  And we can't call ia64_mmu_init() first because it
	 * depends on the data returned by identify_cpu().  We break the dependency by
	 * accessing cpu_data() through the canonical per-CPU address.
	 */
	cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(cpu_info) - __per_cpu_start);
	identify_cpu(cpu_info);

#ifdef CONFIG_MCKINLEY
	{
#		define FEATURE_SET 16
		struct ia64_pal_retval iprv;

		if (cpu_info->family == 0x1f) {
			PAL_CALL_PHYS(iprv, PAL_PROC_GET_FEATURES, 0, FEATURE_SET, 0);
			if ((iprv.status == 0) && (iprv.v0 & 0x80) && (iprv.v2 & 0x80))
				PAL_CALL_PHYS(iprv, PAL_PROC_SET_FEATURES,
				              (iprv.v1 | 0x80), FEATURE_SET, 0);
		}
	}
#endif

	/* Clear the stack memory reserved for pt_regs: */
	memset(ia64_task_regs(current), 0, sizeof(struct pt_regs));

	ia64_set_kr(IA64_KR_FPU_OWNER, 0);

	/*
	 * Initialize default control register to defer all speculative faults.  The
	 * kernel MUST NOT depend on a particular setting of these bits (in other words,
	 * the kernel must have recovery code for all speculative accesses).  Turn on
	 * dcr.lc as per recommendation by the architecture team.  Most IA-32 apps
	 * shouldn't be affected by this (moral: keep your ia32 locks aligned and you'll
	 * be fine).
	 */
	ia64_setreg(_IA64_REG_CR_DCR, (IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR
				       | IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC));
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	if (current->mm)
		BUG();

	ia64_mmu_init(ia64_imva(cpu_data));

#ifdef CONFIG_IA32_SUPPORT
	ia32_cpu_init();
#endif

	/* Clear ITC to eliminate sched_clock() overflows in human time. */
	ia64_set_itc(0);

	/* disable all local interrupt sources: */
	ia64_set_itv(1 << 16);
	ia64_set_lrr0(1 << 16);
	ia64_set_lrr1(1 << 16);
	ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
	ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);

	/* clear TPR & XTP to enable all interrupt classes: */
	ia64_setreg(_IA64_REG_CR_TPR, 0);
#ifdef CONFIG_SMP
	normal_xtp();
#endif

	/* set ia64_ctx.max_rid to the maximum RID that is supported by all CPUs: */
	if (ia64_pal_vm_summary(NULL, &vmi) == 0)
		max_ctx = (1U << (vmi.pal_vm_info_2_s.rid_size - 3)) - 1;
	else {
		printk(KERN_WARNING "cpu_init: PAL VM summary failed, assuming 18 RID bits\n");
		max_ctx = (1U << 15) - 1;	/* use architected minimum */
	}
	while (max_ctx < ia64_ctx.max_ctx) {
		unsigned int old = ia64_ctx.max_ctx;
		if (cmpxchg(&ia64_ctx.max_ctx, old, max_ctx) == old)
			break;
	}

	if (ia64_pal_rse_info(&num_phys_stacked, NULL) != 0) {
		printk(KERN_WARNING "cpu_init: PAL RSE info failed; assuming 96 physical "
		       "stacked regs\n");
		num_phys_stacked = 96;
	}
	/* size of physical stacked register partition plus 8 bytes: */
	__get_cpu_var(ia64_phys_stacked_size_p8) = num_phys_stacked*8 + 8;

	platform_cpu_init();
}
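/*
 * A minimal, self-contained sketch (not kernel code) of the lock-free
 * "lower a shared maximum" pattern that cpu_init() applies to
 * ia64_ctx.max_ctx above: each CPU proposes its own limit and only ever
 * shrinks the shared value, retrying the compare-and-swap if another CPU
 * raced in between.  It uses C11 atomics instead of the kernel's cmpxchg();
 * the names shared_max_ctx and lower_max_ctx are illustrative only.
 */
#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned int shared_max_ctx = 0xffffffffu;

static void lower_max_ctx(unsigned int local_max)
{
	unsigned int old = atomic_load(&shared_max_ctx);

	/* Only ever shrink; if someone already lowered it further, do nothing. */
	while (local_max < old) {
		if (atomic_compare_exchange_weak(&shared_max_ctx, &old, local_max))
			break;
		/* On failure 'old' was reloaded; re-check the condition and retry. */
	}
}

int main(void)
{
	lower_max_ctx((1U << 18) - 1);	/* CPU supporting 18 usable RID bits */
	lower_max_ctx((1U << 15) - 1);	/* CPU with only the architected minimum */
	printf("max_ctx = %u\n", atomic_load(&shared_max_ctx));
	return 0;
}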
static void __cpuinit ip27_init_secondary(void)
{
	per_cpu_init();
}