static void ia64_machine_kexec(struct unw_frame_info *info, void *arg)
{
        xen_kexec_image_t *image = arg;
        int ii;

        /* Interrupts aren't acceptable while we reboot */
        local_irq_disable();

        /* Mask CMC and Performance Monitor interrupts */
        ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
        ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);

        /* Mask ITV and Local Redirect Registers */
        ia64_set_itv(1 << 16);
        ia64_set_lrr0(1 << 16);
        ia64_set_lrr1(1 << 16);

        /* terminate possible nested in-service interrupts */
        for (ii = 0; ii < 16; ii++)
                ia64_eoi();

        /* unmask TPR and clear any pending interrupts */
        ia64_setreg(_IA64_REG_CR_TPR, 0);
        ia64_srlz_d();
        while (ia64_get_ivr() != IA64_SPURIOUS_INT_VECTOR)
                ia64_eoi();

        platform_kernel_launch_event();
        relocate_new_kernel(image->indirection_page, image->start_address,
                            __pa(ia64_boot_param), image->reboot_code_buffer);
        BUG();
}
static u_int
ia64_ih_clock(struct thread *td, u_int xiv, struct trapframe *tf)
{
        struct eventtimer *et;
        uint64_t itc, load;
        uint32_t mode;

        PCPU_INC(md.stats.pcs_nclks);
        intrcnt[INTRCNT_CLOCK]++;

        itc = ia64_get_itc();
        PCPU_SET(md.clock, itc);

        /* Re-arm the ITM in periodic mode; otherwise mask the ITV (one-shot). */
        mode = PCPU_GET(md.clock_mode);
        if (mode == CLOCK_ET_PERIODIC) {
                load = PCPU_GET(md.clock_load);
                ia64_set_itm(itc + load);
        } else
                ia64_set_itv((1 << 16) | xiv);

        ia64_set_eoi(0);
        ia64_srlz_d();

        /* Hand the tick to the eventtimer framework. */
        et = &ia64_clock_et;
        if (et->et_active)
                et->et_event_cb(et, et->et_arg);
        return (1);
}
static void ia64_machine_kexec(struct unw_frame_info *info, void *arg)
{
        struct kimage *image = arg;
        relocate_new_kernel_t rnk;
        void *pal_addr = efi_get_pal_addr();
        unsigned long code_addr =
                (unsigned long)page_address(image->control_code_page);
        unsigned long vector;
        int ii;
        u64 fp, gp;
        ia64_fptr_t *init_handler = (ia64_fptr_t *)ia64_os_init_on_kdump;

        BUG_ON(!image);

        if (image->type == KEXEC_TYPE_CRASH) {
                crash_save_this_cpu();
                current->thread.ksp = (__u64)info->sw - 16;

                /* Register noop init handler */
                fp = ia64_tpa(init_handler->fp);
                gp = ia64_tpa(ia64_getreg(_IA64_REG_GP));
                ia64_sal_set_vectors(SAL_VECTOR_OS_INIT, fp, gp, 0, fp, gp, 0);
        } else {
                /* Unregister init handlers of current kernel */
                ia64_sal_set_vectors(SAL_VECTOR_OS_INIT, 0, 0, 0, 0, 0, 0);
        }

        /* Unregister mca handler - No more recovery on current kernel */
        ia64_sal_set_vectors(SAL_VECTOR_OS_MCA, 0, 0, 0, 0, 0, 0);

        /* Interrupts aren't acceptable while we reboot */
        local_irq_disable();

        /* Mask CMC and Performance Monitor interrupts */
        ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
        ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);

        /* Mask ITV and Local Redirect Registers */
        ia64_set_itv(1 << 16);
        ia64_set_lrr0(1 << 16);
        ia64_set_lrr1(1 << 16);

        /* terminate possible nested in-service interrupts */
        for (ii = 0; ii < 16; ii++)
                ia64_eoi();

        /* unmask TPR and clear any pending interrupts */
        ia64_setreg(_IA64_REG_CR_TPR, 0);
        ia64_srlz_d();
        vector = ia64_get_ivr();
        while (vector != IA64_SPURIOUS_INT_VECTOR) {
                ia64_eoi();
                vector = ia64_get_ivr();
        }

        platform_kernel_launch_event();
        rnk = (relocate_new_kernel_t)&code_addr;
        (*rnk)(image->head, image->start, ia64_boot_param,
               GRANULEROUNDDOWN((unsigned long) pal_addr));
        BUG();
}
/*
 * Initialize the per-CPU clock: record the current ITC, program the first
 * ITM match and unmask the ITV.
 */
void
pcpu_initclock(void)
{

        PCPU_SET(clockadj, 0);
        PCPU_SET(clock, ia64_get_itc());
        ia64_set_itm(PCPU_GET(clock) + ia64_clock_reload);
        ia64_set_itv(CLOCK_VECTOR);     /* highest priority class */
        ia64_srlz_d();
}
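/*
 * Aside on the "highest priority class" note above: this is a minimal
 * standalone sketch, assuming the IA-64 rule that external interrupt
 * vectors are grouped into priority classes of 16 vectors each, with
 * higher classes preempting lower ones.  xiv_prio_class() and the example
 * vector values are hypothetical illustrations, not kernel definitions;
 * the real CLOCK_VECTOR value comes from the FreeBSD ia64 headers.
 */
#include <stdio.h>

static unsigned int xiv_prio_class(unsigned int xiv)
{
        return xiv >> 4;        /* 16 vectors per priority class */
}

int main(void)
{
        unsigned int examples[] = { 0x20, 0x80, 0xfe };
        unsigned int i;

        for (i = 0; i < sizeof(examples) / sizeof(examples[0]); i++)
                printf("vector 0x%02x -> priority class %u\n",
                       examples[i], xiv_prio_class(examples[i]));
        return 0;
}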
void fixup_irqs(void)
{
        unsigned int irq;
        extern void ia64_process_pending_intr(void);
        extern volatile int time_keeper_id;

        /* Mask ITV to disable timer */
        ia64_set_itv(1 << 16);

        /*
         * Find a new timesync master
         */
        if (smp_processor_id() == time_keeper_id) {
                time_keeper_id = cpumask_first(cpu_online_mask);
                printk("CPU %d is now promoted to time-keeper master\n",
                       time_keeper_id);
        }

        /*
         * Phase 1: Locate IRQs bound to this cpu and
         * relocate them for cpu removal.
         */
        migrate_irqs();

        /*
         * Phase 2: Perform interrupt processing for all entries reported in
         * local APIC.
         */
        ia64_process_pending_intr();

        /*
         * Phase 3: Now handle any interrupts not captured in local APIC.
         * This accounts for cases where a device interrupted while its RTE
         * was being disabled and re-programmed.
         */
        for (irq = 0; irq < NR_IRQS; irq++) {
                if (vectors_in_migration[irq]) {
                        struct pt_regs *old_regs = set_irq_regs(NULL);

                        vectors_in_migration[irq] = 0;
                        generic_handle_irq(irq);
                        set_irq_regs(old_regs);
                }
        }

        /*
         * Now let the processor die.  We disable interrupts and call max_xtp()
         * to ensure no more interrupts are routed to this processor.
         * The local timer interrupt may still have one delivery pending,
         * which timer_interrupt() takes care of.
         */
        max_xtp();
        local_irq_disable();
}
/*
 * Do not allocate memory (or fail in any way) in machine_kexec().
 * We are past the point of no return, committed to rebooting now.
 */
static void ia64_machine_kexec(struct unw_frame_info *info, void *arg)
{
        struct kimage *image = arg;
        relocate_new_kernel_t rnk;
        void *pal_addr = efi_get_pal_addr();
        unsigned long code_addr =
                (unsigned long)page_address(image->control_code_page);
        int ii;

        BUG_ON(!image);

        if (image->type == KEXEC_TYPE_CRASH) {
                crash_save_this_cpu();
                current->thread.ksp = (__u64)info->sw - 16;
        }

        /* Interrupts aren't acceptable while we reboot */
        local_irq_disable();

        /* Mask CMC and Performance Monitor interrupts */
        ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
        ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);

        /* Mask ITV and Local Redirect Registers */
        ia64_set_itv(1 << 16);
        ia64_set_lrr0(1 << 16);
        ia64_set_lrr1(1 << 16);

        /* terminate possible nested in-service interrupts */
        for (ii = 0; ii < 16; ii++)
                ia64_eoi();

        /* unmask TPR and clear any pending interrupts */
        ia64_setreg(_IA64_REG_CR_TPR, 0);
        ia64_srlz_d();
        while (ia64_get_ivr() != IA64_SPURIOUS_INT_VECTOR)
                ia64_eoi();

        platform_kernel_launch_event();
        rnk = (relocate_new_kernel_t)&code_addr;
        (*rnk)(image->head, image->start, ia64_boot_param,
               GRANULEROUNDDOWN((unsigned long) pal_addr));
        BUG();
}
/*
 * Encapsulate access to the itm structure for SMP.
 */
void
ia64_cpu_local_tick (void)
{
        int cpu = smp_processor_id();
        unsigned long shift = 0, delta;

        /* arrange for the cycle counter to generate a timer interrupt: */
        ia64_set_itv(IA64_TIMER_VECTOR);

        delta = local_cpu_data->itm_delta;
        /*
         * Stagger the timer tick for each CPU so they don't occur all at
         * (almost) the same time:
         */
        if (cpu) {
                unsigned long hi = 1UL << ia64_fls(cpu);
                shift = (2*(cpu - hi) + 1) * delta/hi/2;
        }
        local_cpu_data->itm_next = ia64_get_itc() + delta + shift;
        ia64_set_itm(local_cpu_data->itm_next);
}
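/*
 * Illustrative standalone sketch of the stagger arithmetic used in
 * ia64_cpu_local_tick() above: it spreads the ticks of CPUs 1..7 evenly
 * across one tick period (1/2, 1/4, 3/4, 1/8, 3/8, ... of delta).  The
 * delta value and fls_approx() are stand-ins, not taken from the kernel
 * headers; fls_approx() mimics ia64_fls() (index of the highest set bit).
 */
#include <stdio.h>

static unsigned long fls_approx(unsigned long x)
{
        unsigned long i = 0;

        while (x >>= 1)
                i++;
        return i;
}

int main(void)
{
        unsigned long delta = 2500000;  /* hypothetical itm_delta, cycles/tick */
        unsigned long cpu, hi, shift;

        for (cpu = 0; cpu < 8; cpu++) {
                shift = 0;
                if (cpu) {
                        hi = 1UL << fls_approx(cpu);
                        shift = (2 * (cpu - hi) + 1) * delta / hi / 2;
                }
                printf("cpu %lu: shift = %lu cycles\n", cpu, shift);
        }
        return 0;
}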
static void __init
check_sal_cache_flush (void)
{
        unsigned long flags;
        int cpu;
        u64 vector;

        cpu = get_cpu();
        local_irq_save(flags);

        /*
         * Schedule a timer interrupt, wait until it's reported, and see if
         * SAL_CACHE_FLUSH drops it.
         */
        ia64_set_itv(IA64_TIMER_VECTOR);
        ia64_set_itm(ia64_get_itc() + 1000);

        while (!ia64_get_irr(IA64_TIMER_VECTOR))
                cpu_relax();

        ia64_sal_cache_flush(3);

        if (ia64_get_irr(IA64_TIMER_VECTOR)) {
                vector = ia64_get_ivr();
                ia64_eoi();
                WARN_ON(vector != IA64_TIMER_VECTOR);
        } else {
                sal_cache_flush_drops_interrupts = 1;
                printk(KERN_ERR "SAL: SAL_CACHE_FLUSH drops interrupts; "
                       "PAL_CACHE_FLUSH will be used instead\n");
                ia64_eoi();
        }

        local_irq_restore(flags);
        put_cpu();
}
/*
 * cpu_init() initializes state that is per-CPU.  This function acts
 * as a 'CPU state barrier', nothing should get across.
 */
void
cpu_init (void)
{
        extern void __devinit ia64_mmu_init (void *);
        unsigned long num_phys_stacked;
        pal_vm_info_2_u_t vmi;
        unsigned int max_ctx;
        struct cpuinfo_ia64 *cpu_info;
        void *cpu_data;

        cpu_data = per_cpu_init();

        get_max_cacheline_size();

        /*
         * We can't pass "local_cpu_data" to identify_cpu() because we haven't called
         * ia64_mmu_init() yet.  And we can't call ia64_mmu_init() first because it
         * depends on the data returned by identify_cpu().  We break the dependency by
         * accessing cpu_data() through the canonical per-CPU address.
         */
        cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(cpu_info) - __per_cpu_start);
        identify_cpu(cpu_info);

#ifdef CONFIG_MCKINLEY
        {
#               define FEATURE_SET 16
                struct ia64_pal_retval iprv;

                if (cpu_info->family == 0x1f) {
                        PAL_CALL_PHYS(iprv, PAL_PROC_GET_FEATURES, 0, FEATURE_SET, 0);
                        if ((iprv.status == 0) && (iprv.v0 & 0x80) && (iprv.v2 & 0x80))
                                PAL_CALL_PHYS(iprv, PAL_PROC_SET_FEATURES,
                                              (iprv.v1 | 0x80), FEATURE_SET, 0);
                }
        }
#endif

        /* Clear the stack memory reserved for pt_regs: */
        memset(ia64_task_regs(current), 0, sizeof(struct pt_regs));

        ia64_set_kr(IA64_KR_FPU_OWNER, 0);

        /*
         * Initialize default control register to defer all speculative faults.  The
         * kernel MUST NOT depend on a particular setting of these bits (in other words,
         * the kernel must have recovery code for all speculative accesses).  Turn on
         * dcr.lc as per recommendation by the architecture team.  Most IA-32 apps
         * shouldn't be affected by this (moral: keep your ia32 locks aligned and you'll
         * be fine).
         */
        ia64_setreg(_IA64_REG_CR_DCR, (IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR
                                       | IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC));
        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;
        if (current->mm)
                BUG();

        ia64_mmu_init(ia64_imva(cpu_data));

#ifdef CONFIG_IA32_SUPPORT
        ia32_cpu_init();
#endif

        /* Clear ITC to eliminate sched_clock() overflows in human time. */
        ia64_set_itc(0);

        /* disable all local interrupt sources: */
        ia64_set_itv(1 << 16);
        ia64_set_lrr0(1 << 16);
        ia64_set_lrr1(1 << 16);
        ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
        ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);

        /* clear TPR & XTP to enable all interrupt classes: */
        ia64_setreg(_IA64_REG_CR_TPR, 0);
#ifdef CONFIG_SMP
        normal_xtp();
#endif

        /* set ia64_ctx.max_ctx to the maximum context ID supported by all CPUs: */
        if (ia64_pal_vm_summary(NULL, &vmi) == 0)
                max_ctx = (1U << (vmi.pal_vm_info_2_s.rid_size - 3)) - 1;
        else {
                printk(KERN_WARNING "cpu_init: PAL VM summary failed, assuming 18 RID bits\n");
                max_ctx = (1U << 15) - 1;       /* use architected minimum */
        }
        while (max_ctx < ia64_ctx.max_ctx) {
                unsigned int old = ia64_ctx.max_ctx;
                if (cmpxchg(&ia64_ctx.max_ctx, old, max_ctx) == old)
                        break;
        }

        if (ia64_pal_rse_info(&num_phys_stacked, NULL) != 0) {
                printk(KERN_WARNING "cpu_init: PAL RSE info failed; assuming 96 physical "
                       "stacked regs\n");
                num_phys_stacked = 96;
        }
        /* size of physical stacked register partition plus 8 bytes: */
        __get_cpu_var(ia64_phys_stacked_size_p8) = num_phys_stacked*8 + 8;

        platform_cpu_init();
}
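/*
 * Standalone sketch of the max_ctx arithmetic used in cpu_init() above: a
 * region id is rid_size bits wide and the low 3 bits are reserved for
 * encoding the region number, leaving rid_size - 3 bits for the mmu
 * context.  The architected minimum of 18 RID bits therefore yields the
 * (1U << 15) - 1 fallback.  The larger rid_size values below are purely
 * illustrative; only the 18-bit case is implied by the code.
 */
#include <stdio.h>

int main(void)
{
        unsigned int rid_size;

        for (rid_size = 18; rid_size <= 24; rid_size += 3)
                printf("rid_size %u -> max_ctx %u\n",
                       rid_size, (1U << (rid_size - 3)) - 1);
        return 0;
}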
void __devinit
ia64_disable_timer(void)
{
        /* Mask the ITV so the CPU timer no longer raises interrupts. */
        ia64_set_itv(1 << 16);
}
static void rthal_set_itv(void)
{
        /*
         * Record the current ITC as the baseline for the next tick and
         * route the timer interrupt to the vector backing rthal_tick_irq.
         */
        rthal_itm_next[rthal_processor_id()] = ia64_get_itc();
        ia64_set_itv(irq_to_vector(rthal_tick_irq));
}