static void ia64_machine_kexec(struct unw_frame_info *info, void *arg)
{
	xen_kexec_image_t *image = arg;
	int ii;

	/* Interrupts aren't acceptable while we reboot */
	local_irq_disable();

	/* Mask CMC and Performance Monitor interrupts */
	ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
	ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);

	/* Mask ITV and Local Redirect Registers */
	ia64_set_itv(1 << 16);
	ia64_set_lrr0(1 << 16);
	ia64_set_lrr1(1 << 16);

	/* terminate possible nested in-service interrupts */
	for (ii = 0; ii < 16; ii++)
		ia64_eoi();

	/* unmask TPR and clear any pending interrupts */
	ia64_setreg(_IA64_REG_CR_TPR, 0);
	ia64_srlz_d();
	while (ia64_get_ivr() != IA64_SPURIOUS_INT_VECTOR)
		ia64_eoi();

	platform_kernel_launch_event();
	relocate_new_kernel(image->indirection_page, image->start_address,
			    __pa(ia64_boot_param), image->reboot_code_buffer);
	BUG();
}
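/*
 * For reference, a declaration sketch for the assembly relocation stub
 * called at the end of the Xen variant above.  This prototype is
 * inferred from the call site only (indirection page, entry point,
 * physical address of ia64_boot_param, reboot code buffer); the actual
 * declaration lives in the Xen ia64 headers and its parameter names and
 * types may differ, so treat this as an assumption.
 */
extern void relocate_new_kernel(unsigned long indirection_page,
				unsigned long start_address,
				unsigned long boot_param_pa,
				unsigned long reboot_code_buffer);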
static void ia64_machine_kexec(struct unw_frame_info *info, void *arg)
{
	struct kimage *image = arg;
	relocate_new_kernel_t rnk;
	void *pal_addr = efi_get_pal_addr();
	unsigned long code_addr =
		(unsigned long)page_address(image->control_code_page);
	unsigned long vector;
	int ii;
	u64 fp, gp;
	ia64_fptr_t *init_handler = (ia64_fptr_t *)ia64_os_init_on_kdump;

	BUG_ON(!image);

	if (image->type == KEXEC_TYPE_CRASH) {
		crash_save_this_cpu();
		current->thread.ksp = (__u64)info->sw - 16;

		/* Register noop init handler */
		fp = ia64_tpa(init_handler->fp);
		gp = ia64_tpa(ia64_getreg(_IA64_REG_GP));
		ia64_sal_set_vectors(SAL_VECTOR_OS_INIT, fp, gp, 0, fp, gp, 0);
	} else {
		/* Unregister init handlers of current kernel */
		ia64_sal_set_vectors(SAL_VECTOR_OS_INIT, 0, 0, 0, 0, 0, 0);
	}

	/* Unregister mca handler - No more recovery on current kernel */
	ia64_sal_set_vectors(SAL_VECTOR_OS_MCA, 0, 0, 0, 0, 0, 0);

	/* Interrupts aren't acceptable while we reboot */
	local_irq_disable();

	/* Mask CMC and Performance Monitor interrupts */
	ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
	ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);

	/* Mask ITV and Local Redirect Registers */
	ia64_set_itv(1 << 16);
	ia64_set_lrr0(1 << 16);
	ia64_set_lrr1(1 << 16);

	/* terminate possible nested in-service interrupts */
	for (ii = 0; ii < 16; ii++)
		ia64_eoi();

	/* unmask TPR and clear any pending interrupts */
	ia64_setreg(_IA64_REG_CR_TPR, 0);
	ia64_srlz_d();
	vector = ia64_get_ivr();
	while (vector != IA64_SPURIOUS_INT_VECTOR) {
		ia64_eoi();
		vector = ia64_get_ivr();
	}

	platform_kernel_launch_event();
	rnk = (relocate_new_kernel_t)&code_addr;
	(*rnk)(image->head, image->start, ia64_boot_param,
	       GRANULEROUNDDOWN((unsigned long) pal_addr));
	BUG();
}
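/*
 * Sketch of the function-pointer type used above, modelled on the
 * kernel's <asm/kexec.h>; the parameter names here are illustrative.
 * The cast "rnk = (relocate_new_kernel_t)&code_addr;" relies on the
 * ia64 convention that a C function pointer refers to a function
 * descriptor {entry, gp}: taking the address of code_addr yields a
 * makeshift descriptor whose entry word is the copied control code, and
 * the gp slot is never used by the relocation stub, so the indirect
 * call lands in the control-code page.
 */
typedef void (*relocate_new_kernel_t)(unsigned long indirection_page,
				      unsigned long start_address,
				      struct ia64_boot_param *boot_param,
				      unsigned long pal_addr);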
/*
 * Do not allocate memory (or fail in any way) in machine_kexec().
 * We are past the point of no return, committed to rebooting now.
 */
static void ia64_machine_kexec(struct unw_frame_info *info, void *arg)
{
	struct kimage *image = arg;
	relocate_new_kernel_t rnk;
	void *pal_addr = efi_get_pal_addr();
	unsigned long code_addr =
		(unsigned long)page_address(image->control_code_page);
	int ii;

	BUG_ON(!image);

	if (image->type == KEXEC_TYPE_CRASH) {
		crash_save_this_cpu();
		current->thread.ksp = (__u64)info->sw - 16;
	}

	/* Interrupts aren't acceptable while we reboot */
	local_irq_disable();

	/* Mask CMC and Performance Monitor interrupts */
	ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
	ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);

	/* Mask ITV and Local Redirect Registers */
	ia64_set_itv(1 << 16);
	ia64_set_lrr0(1 << 16);
	ia64_set_lrr1(1 << 16);

	/* terminate possible nested in-service interrupts */
	for (ii = 0; ii < 16; ii++)
		ia64_eoi();

	/* unmask TPR and clear any pending interrupts */
	ia64_setreg(_IA64_REG_CR_TPR, 0);
	ia64_srlz_d();
	while (ia64_get_ivr() != IA64_SPURIOUS_INT_VECTOR)
		ia64_eoi();

	platform_kernel_launch_event();
	rnk = (relocate_new_kernel_t)&code_addr;
	(*rnk)(image->head, image->start, ia64_boot_param,
	       GRANULEROUNDDOWN((unsigned long) pal_addr));
	BUG();
}
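/*
 * ia64_machine_kexec() expects a valid unwind frame (the crash path
 * reads info->sw), so it is not called directly.  A minimal sketch of
 * the machine_kexec() entry point, assuming the usual arch/ia64 layout
 * in which unw_init_running() builds the frame info and then invokes
 * the callback with the kimage as the opaque argument.
 */
void machine_kexec(struct kimage *image)
{
	BUG_ON(!image);
	unw_init_running(ia64_machine_kexec, image);
	for (;;)
		;	/* ia64_machine_kexec() never returns */
}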
/*
 * cpu_init() initializes state that is per-CPU.  This function acts
 * as a 'CPU state barrier', nothing should get across.
 */
void
cpu_init (void)
{
	extern void __devinit ia64_mmu_init (void *);
	unsigned long num_phys_stacked;
	pal_vm_info_2_u_t vmi;
	unsigned int max_ctx;
	struct cpuinfo_ia64 *cpu_info;
	void *cpu_data;

	cpu_data = per_cpu_init();

	get_max_cacheline_size();

	/*
	 * We can't pass "local_cpu_data" to identify_cpu() because we haven't called
	 * ia64_mmu_init() yet.  And we can't call ia64_mmu_init() first because it
	 * depends on the data returned by identify_cpu().  We break the dependency by
	 * accessing cpu_data() through the canonical per-CPU address.
	 */
	cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(cpu_info) - __per_cpu_start);
	identify_cpu(cpu_info);

#ifdef CONFIG_MCKINLEY
	{
#		define FEATURE_SET 16
		struct ia64_pal_retval iprv;

		if (cpu_info->family == 0x1f) {
			PAL_CALL_PHYS(iprv, PAL_PROC_GET_FEATURES, 0, FEATURE_SET, 0);
			if ((iprv.status == 0) && (iprv.v0 & 0x80) && (iprv.v2 & 0x80))
				PAL_CALL_PHYS(iprv, PAL_PROC_SET_FEATURES,
				              (iprv.v1 | 0x80), FEATURE_SET, 0);
		}
	}
#endif

	/* Clear the stack memory reserved for pt_regs: */
	memset(ia64_task_regs(current), 0, sizeof(struct pt_regs));

	ia64_set_kr(IA64_KR_FPU_OWNER, 0);

	/*
	 * Initialize default control register to defer all speculative faults.  The
	 * kernel MUST NOT depend on a particular setting of these bits (in other words,
	 * the kernel must have recovery code for all speculative accesses).  Turn on
	 * dcr.lc as per recommendation by the architecture team.  Most IA-32 apps
	 * shouldn't be affected by this (moral: keep your ia32 locks aligned and you'll
	 * be fine).
	 */
	ia64_setreg(_IA64_REG_CR_DCR,  (  IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR
					| IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC));
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	if (current->mm)
		BUG();

	ia64_mmu_init(ia64_imva(cpu_data));

#ifdef CONFIG_IA32_SUPPORT
	ia32_cpu_init();
#endif

	/* Clear ITC to eliminate sched_clock() overflows in human time.  */
	ia64_set_itc(0);

	/* disable all local interrupt sources: */
	ia64_set_itv(1 << 16);
	ia64_set_lrr0(1 << 16);
	ia64_set_lrr1(1 << 16);
	ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
	ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);

	/* clear TPR & XTP to enable all interrupt classes: */
	ia64_setreg(_IA64_REG_CR_TPR, 0);
#ifdef CONFIG_SMP
	normal_xtp();
#endif

	/* set ia64_ctx.max_rid to the maximum RID that is supported by all CPUs: */
	if (ia64_pal_vm_summary(NULL, &vmi) == 0)
		max_ctx = (1U << (vmi.pal_vm_info_2_s.rid_size - 3)) - 1;
	else {
		printk(KERN_WARNING "cpu_init: PAL VM summary failed, assuming 18 RID bits\n");
		max_ctx = (1U << 15) - 1;	/* use architected minimum */
	}
	while (max_ctx < ia64_ctx.max_ctx) {
		unsigned int old = ia64_ctx.max_ctx;
		if (cmpxchg(&ia64_ctx.max_ctx, old, max_ctx) == old)
			break;
	}

	if (ia64_pal_rse_info(&num_phys_stacked, NULL) != 0) {
		printk(KERN_WARNING "cpu_init: PAL RSE info failed; assuming 96 physical "
		       "stacked regs\n");
		num_phys_stacked = 96;
	}
	/* size of physical stacked register partition plus 8 bytes: */
	__get_cpu_var(ia64_phys_stacked_size_p8) = num_phys_stacked*8 + 8;

	platform_cpu_init();
}
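/*
 * Hypothetical helper (not in the source) spelling out the pointer
 * arithmetic used for cpu_info in cpu_init() above: each CPU's copy of
 * the per-CPU data returned by per_cpu_init() mirrors the canonical
 * image that begins at __per_cpu_start, so a variable's offset within
 * the canonical image also locates it in the local copy before the
 * per-CPU mapping set up by ia64_mmu_init() is usable.
 */
static void *local_per_cpu_ptr(void *cpu_data, void *canonical_var)
{
	return (char *)cpu_data + ((char *)canonical_var - __per_cpu_start);
}

/*
 * Usage sketch: local_per_cpu_ptr(cpu_data, &__ia64_per_cpu_var(cpu_info))
 * would be equivalent to the open-coded expression assigned to cpu_info
 * in cpu_init().
 */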