/*
 * hub_eint_handler - error interrupt handler for a SHub or TIO.
 * @irq: interrupt number (unused)
 * @arg: pointer to the hubdev_info for the device that raised the error
 *
 * Asks SAL (SN_SAL_HUB_ERROR_INTERRUPT) to evaluate the error for the
 * device's nasid.  An odd nasid identifies a TIO, an even nasid a HUBII.
 * A nonzero v0 from SAL is treated as fatal and panics; otherwise CRB
 * errors (SHub1, non-TIO) or BTE errors (SHub2, non-TIO) are recovered.
 */
static irqreturn_t hub_eint_handler(int irq, void *arg)
{
	struct hubdev_info *hubdev_info;
	struct ia64_sal_retval ret_stuff;
	nasid_t nasid;

	ret_stuff.status = 0;
	ret_stuff.v0 = 0;
	hubdev_info = (struct hubdev_info *)arg;
	nasid = hubdev_info->hdi_nasid;

	if (is_shub1()) {
		/* Let SAL classify the error; v0 != 0 means unrecoverable. */
		SAL_CALL_NOLOCK(ret_stuff, SN_SAL_HUB_ERROR_INTERRUPT,
				(u64) nasid, 0, 0, 0, 0, 0, 0);

		if ((int)ret_stuff.v0)
			panic("%s: Fatal %s Error", __func__,
			      ((nasid & 1) ? "TIO" : "HUBII"));

		if (!(nasid & 1)) /* Not a TIO, handle CRB errors */
			(void)hubiio_crb_error_handler(hubdev_info);
	} else if (nasid & 1) {	/* TIO errors */
		SAL_CALL_NOLOCK(ret_stuff, SN_SAL_HUB_ERROR_INTERRUPT,
				(u64) nasid, 0, 0, 0, 0, 0, 0);

		if ((int)ret_stuff.v0)
			panic("%s: Fatal TIO Error", __func__);
	} else
		/* SHub2, non-TIO: recover outstanding BTE transfers. */
		bte_error_handler((unsigned long)NODEPDA(nasid_to_cnodeid(nasid)));

	return IRQ_HANDLED;
}
static enum xp_retval xp_register_nofault_code_sn2(void) { int ret; u64 func_addr; u64 err_func_addr; func_addr = *(u64 *)xp_nofault_PIOR; err_func_addr = *(u64 *)xp_error_PIOR; ret = sn_register_nofault_code(func_addr, err_func_addr, err_func_addr, 1, 1); if (ret != 0) { dev_err(xp, "can't register nofault code, error=%d\n", ret); return xpSalError; } /* * Setup the nofault PIO read target. (There is no special reason why * SH_IPI_ACCESS was selected.) */ if (is_shub1()) xp_nofault_PIOR_target = SH1_IPI_ACCESS; else if (is_shub2()) xp_nofault_PIOR_target = SH2_IPI_ACCESS0; return xpSuccess; }
/*
 * xp_register_nofault_code_sn2 - register nofault PIO code with SAL.
 *
 * Registers the nofault function and its error handler (read from the
 * xp_nofault_PIOR / xp_error_PIOR code labels) with SAL, then picks the
 * per-SHub-revision target MMR for nofault PIO reads.
 */
static enum xp_retval xp_register_nofault_code_sn2(void)
{
	int ret;
	u64 func_addr;
	u64 err_func_addr;

	func_addr = *(u64 *)xp_nofault_PIOR;
	err_func_addr = *(u64 *)xp_error_PIOR;
	ret = sn_register_nofault_code(func_addr, err_func_addr, err_func_addr,
				       1, 1);
	if (ret != 0) {
		dev_err(xp, "can't register nofault code, error=%d\n", ret);
		return xpSalError;
	}

	/*
	 * Setup the nofault PIO read target. (There is no special reason why
	 * SH_IPI_ACCESS was selected.)
	 */
	if (is_shub1())
		xp_nofault_PIOR_target = SH1_IPI_ACCESS;
	else if (is_shub2())
		xp_nofault_PIOR_target = SH2_IPI_ACCESS0;

	return xpSuccess;
}
/*
 * sn_flush_all_caches - flush a range of addresses from all caches.
 * @flush_addr: identity mapped region 7 address to start flushing
 * @bytes: number of bytes to flush
 */
void sn_flush_all_caches(long flush_addr, long bytes)
{
	unsigned long addr = flush_addr;

	/* SHub1 requires a cached address: remap uncached to kernel region */
	if (is_shub1() && (addr & RGN_BITS) == RGN_BASE(RGN_UNCACHED))
		addr = (addr - RGN_BASE(RGN_UNCACHED)) + RGN_BASE(RGN_KERNEL);

	flush_icache_range(addr, addr + bytes);
	/*
	 * The first call may return before the caches are actually
	 * flushed, so flush again to make sure.
	 */
	flush_icache_range(addr, addr + bytes);
	mb();
}
/**
 * sn_flush_all_caches - flush a range of address from all caches (incl. L4)
 * @flush_addr: identity mapped region 7 address to start flushing
 * @bytes: number of bytes to flush
 *
 * Flushes every cache line fully or partially covered by
 * [@flush_addr, @flush_addr + @bytes) from all caches including L4.
 * On SHub1 the uncached alias is first rewritten to its cached
 * (kernel-region) equivalent, since SHub1 needs a cached address.
 */
void sn_flush_all_caches(long flush_addr, long bytes)
{
	unsigned long va = (unsigned long)flush_addr;

	if (is_shub1() && (va & RGN_BITS) == RGN_BASE(RGN_UNCACHED))
		va += RGN_BASE(RGN_KERNEL) - RGN_BASE(RGN_UNCACHED);

	/*
	 * Flush twice: the first call may return before the caches were
	 * actually flushed, so repeat it to be sure, then fence.
	 */
	flush_icache_range(va, va + bytes);
	flush_icache_range(va, va + bytes);
	mb();
}
/*
 * sn_timer_interrupt - SN platform per-tick timer hook.
 * @irq: interrupt number (unused)
 * @dev_id: device cookie (unused)
 *
 * Toggles the per-cpu heartbeat LED every HZ/2 ticks and, on SHub1,
 * services two hardware workarounds: the SHUB 1.1 PIO WAR (rewrites the
 * CAM control MMR each tick) and the local-block interrupt WAR (runs
 * sn_lb_int_war_check() once every SN_LB_INT_WAR_INTERVAL ticks).
 */
void sn_timer_interrupt(int irq, void *dev_id)
{
	/* LED blinking */
	if (!pda->hb_count--) {
		pda->hb_count = HZ / 2;
		set_led_bits(pda->hb_state ^=
			     LED_CPU_HEARTBEAT, LED_CPU_HEARTBEAT);
	}

	if (is_shub1()) {
		if (enable_shub_wars_1_1()) {
			/* Bugfix code for SHUB 1.1 */
			/*
			 * NOTE(review): magic re-arm value written to the PI
			 * CAM control register — confirm against SHUB 1.1
			 * errata before changing.
			 */
			if (pda->pio_shub_war_cam_addr)
				*pda->pio_shub_war_cam_addr = 0x8000000000000010UL;
		}
		if (pda->sn_lb_int_war_ticks == 0)
			sn_lb_int_war_check();
		pda->sn_lb_int_war_ticks++;
		if (pda->sn_lb_int_war_ticks >= SN_LB_INT_WAR_INTERVAL)
			pda->sn_lb_int_war_ticks = 0;
	}
}
/*
 * bte_error_handler - recover the Block Transfer Engines on a node.
 * @_nodepda: nodepda of the node whose BTEs need recovery (cast to
 *            unsigned long so this can be used as a timer/IRQ argument)
 *
 * Wait until all BTE related CRBs are completed
 * and then reset the interfaces.
 *
 * Locking: takes the node's bte_recovery_lock (irqsave) for the whole
 * operation and each interface's spinlock individually; interfaces
 * already marked cleanup_active are assumed locked by a prior pass and
 * are skipped.  If the SHub-specific handler reports it could not finish
 * (nonzero return), the per-interface locks are intentionally left held
 * for a later retry and only the recovery lock is dropped.
 */
void bte_error_handler(unsigned long _nodepda)
{
	struct nodepda_s *err_nodepda = (struct nodepda_s *)_nodepda;
	spinlock_t *recovery_lock = &err_nodepda->bte_recovery_lock;
	int i;
	unsigned long irq_flags;
	volatile u64 *notify;
	bte_result_t bh_error;

	BTE_PRINTK(("bte_error_handler(%p) - %d\n", err_nodepda,
		    smp_processor_id()));

	spin_lock_irqsave(recovery_lock, irq_flags);

	/*
	 * Lock all interfaces on this node to prevent new transfers
	 * from being queued.
	 */
	for (i = 0; i < BTES_PER_NODE; i++) {
		if (err_nodepda->bte_if[i].cleanup_active) {
			continue;
		}
		spin_lock(&err_nodepda->bte_if[i].spinlock);
		BTE_PRINTK(("eh:%p:%d locked %d\n", err_nodepda,
			    smp_processor_id(), i));
		err_nodepda->bte_if[i].cleanup_active = 1;
	}

	/* Hardware-revision specific recovery; nonzero means "retry later". */
	if (is_shub1()) {
		if (shub1_bte_error_handler(_nodepda)) {
			spin_unlock_irqrestore(recovery_lock, irq_flags);
			return;
		}
	} else {
		if (shub2_bte_error_handler(_nodepda)) {
			spin_unlock_irqrestore(recovery_lock, irq_flags);
			return;
		}
	}

	/* Recovery done: notify waiters of any error, then unlock each bte. */
	for (i = 0; i < BTES_PER_NODE; i++) {
		bh_error = err_nodepda->bte_if[i].bh_error;
		if (bh_error != BTE_SUCCESS) {
			/* There is an error which needs to be notified */
			notify = err_nodepda->bte_if[i].most_rcnt_na;
			BTE_PRINTK(("cnode %d bte %d error=0x%lx\n",
				    err_nodepda->bte_if[i].bte_cnode,
				    err_nodepda->bte_if[i].bte_num,
				    IBLS_ERROR | (u64) bh_error));
			*notify = IBLS_ERROR | bh_error;
			err_nodepda->bte_if[i].bh_error = BTE_SUCCESS;
		}

		err_nodepda->bte_if[i].cleanup_active = 0;
		BTE_PRINTK(("eh:%p:%d Unlocked %d\n", err_nodepda,
			    smp_processor_id(), i));
		spin_unlock(&err_nodepda->bte_if[i].spinlock);
	}

	spin_unlock_irqrestore(recovery_lock, irq_flags);
}
/**
 * sn_cpu_init - initialize per-cpu data areas
 * @cpuid: cpuid of the caller
 *
 * Called during cpu initialization on each cpu as it starts.
 * Currently, initializes the per-cpu data area for SNIA.
 * Also sets up a few fields in the nodepda.  Also known as
 * platform_cpu_init() by the ia64 machvec code.
 */
void __cpuinit sn_cpu_init(void)
{
	int cpuid;
	int cpuphyid;
	int nasid;
	int subnode;
	int slice;
	int cnode;
	int i;
	static int wars_have_been_checked;

	cpuid = smp_processor_id();

	/* Detect real vs. fake PROM when running under the medusa simulator. */
	if (cpuid == 0 && IS_MEDUSA()) {
		if (ia64_sn_is_fake_prom())
			sn_prom_type = 2;
		else
			sn_prom_type = 1;
		printk(KERN_INFO "Running on medusa with %s PROM\n",
		       (sn_prom_type == 1) ? "real" : "fake");
	}

	memset(pda, 0, sizeof(pda));
	if (ia64_sn_get_sn_info(0, &sn_hub_info->shub2,
				&sn_hub_info->nasid_bitmask,
				&sn_hub_info->nasid_shift,
				&sn_system_size, &sn_sharing_domain_size,
				&sn_partition_id, &sn_coherency_id,
				&sn_region_size))
		BUG();
	sn_hub_info->as_shift = sn_hub_info->nasid_shift - 2;

	/*
	 * Don't check status. The SAL call is not supported on all PROMs
	 * but a failure is harmless.
	 */
	(void) ia64_sn_set_cpu_number(cpuid);

	/*
	 * The boot cpu makes this call again after platform initialization is
	 * complete.
	 */
	if (nodepdaindr[0] == NULL)
		return;

	for (i = 0; i < MAX_PROM_FEATURE_SETS; i++)
		if (ia64_sn_get_prom_feature_set(i, &sn_prom_features[i]) != 0)
			break;

	cpuphyid = get_sapicid();

	if (ia64_sn_get_sapic_info(cpuphyid, &nasid, &subnode, &slice))
		BUG();

	/* Record this cpu's physical location in every node's nodepda. */
	for (i=0; i < MAX_NUMNODES; i++) {
		if (nodepdaindr[i]) {
			nodepdaindr[i]->phys_cpuid[cpuid].nasid = nasid;
			nodepdaindr[i]->phys_cpuid[cpuid].slice = slice;
			nodepdaindr[i]->phys_cpuid[cpuid].subnode = subnode;
		}
	}

	cnode = nasid_to_cnodeid(nasid);

	sn_nodepda = nodepdaindr[cnode];

	pda->led_address =
	    (typeof(pda->led_address)) (LED0 + (slice << LED_CPU_SHIFT));
	pda->led_state = LED_ALWAYS_SET;
	pda->hb_count = HZ / 2;
	pda->hb_state = 0;
	pda->idle_flag = 0;

	if (cpuid != 0) {
		/* copy cpu 0's sn_cnodeid_to_nasid table to this cpu's */
		memcpy(sn_cnodeid_to_nasid,
		       (&per_cpu(__sn_cnodeid_to_nasid, 0)),
		       sizeof(__ia64_per_cpu_var(__sn_cnodeid_to_nasid)));
	}

	/*
	 * Check for WARs.
	 * Only needs to be done once, on BSP.
	 * Has to be done after loop above, because it uses this cpu's
	 * sn_cnodeid_to_nasid table which was just initialized if this
	 * isn't cpu 0.
	 * Has to be done before assignment below.
	 */
	if (!wars_have_been_checked) {
		sn_check_for_wars();
		wars_have_been_checked = 1;
	}
	sn_hub_info->shub_1_1_found = shub_1_1_found;

	/*
	 * Set up addresses of PIO/MEM write status registers.
	 * Note the SHub2 slice-to-register order: slices 0..3 use write
	 * status registers 0, 2, 1, 3 respectively.
	 */
	{
		u64 pio1[] = {SH1_PIO_WRITE_STATUS_0, 0,
			SH1_PIO_WRITE_STATUS_1, 0};
		u64 pio2[] = {SH2_PIO_WRITE_STATUS_0, SH2_PIO_WRITE_STATUS_2,
			SH2_PIO_WRITE_STATUS_1, SH2_PIO_WRITE_STATUS_3};
		u64 *pio;
		pio = is_shub1() ? pio1 : pio2;
		pda->pio_write_status_addr =
		    (volatile unsigned long *)GLOBAL_MMR_ADDR(nasid, pio[slice]);
		pda->pio_write_status_val =
		    is_shub1() ? SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK : 0;
	}

	/*
	 * WAR addresses for SHUB 1.x.
	 */
	if (local_node_data->active_cpu_count++ == 0 && is_shub1()) {
		int buddy_nasid;
		buddy_nasid =
		    cnodeid_to_nasid(numa_node_id() ==
				     num_online_nodes() - 1 ? 0 : numa_node_id() + 1);
		pda->pio_shub_war_cam_addr =
		    (volatile unsigned long *)GLOBAL_MMR_ADDR(nasid,
							      SH1_PI_CAM_CONTROL);
	}
}
/**
 * sn_setup - SN platform setup routine
 * @cmdline_p: kernel command line
 *
 * Handles platform setup for SN machines.  This includes determining
 * the RTC frequency (via a SAL call), initializing secondary CPUs, and
 * setting up per-node data areas.  The console is also initialized here.
 */
void __init sn_setup(char **cmdline_p)
{
	long status, ticks_per_sec, drift;
	u32 version = sn_sal_rev();
	extern void sn_cpu_init(void);

	sn2_rtc_initial = rtc_time();
	ia64_sn_plat_set_error_handling_features();	// obsolete
	ia64_sn_set_os_feature(OSF_MCA_SLV_TO_OS_INIT_SLV);
	ia64_sn_set_os_feature(OSF_FEAT_LOG_SBES);

#if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE)
	/*
	 * Handle SN vga console.
	 *
	 * SN systems do not have enough ACPI table information
	 * being passed from prom to identify VGA adapters and the legacy
	 * addresses to access them.  Until that is done, SN systems rely
	 * on the PCDP table to identify the primary VGA console if one
	 * exists.
	 *
	 * However, kernel PCDP support is optional, and even if it is built
	 * into the kernel, it will not be used if the boot cmdline contains
	 * console= directives.
	 *
	 * So, to work around this mess, we duplicate some of the PCDP code
	 * here so that the primary VGA console (as defined by PCDP) will
	 * work on SN systems even if a different console (e.g. serial) is
	 * selected on the boot line (or CONFIG_EFI_PCDP is off).
	 */
	if (! vga_console_membase)
		sn_scan_pcdp();

	if (vga_console_membase) {
		/* usable vga ... make tty0 the preferred default console */
		if (!strstr(*cmdline_p, "console="))
			add_preferred_console("tty", 0, NULL);
	} else {
		printk(KERN_DEBUG "SGI: Disabling VGA console\n");
		if (!strstr(*cmdline_p, "console="))
			add_preferred_console("ttySG", 0, NULL);
#ifdef CONFIG_DUMMY_CONSOLE
		conswitchp = &dummy_con;
#else
		conswitchp = NULL;
#endif				/* CONFIG_DUMMY_CONSOLE */
	}
#endif				/* def(CONFIG_VT) && def(CONFIG_VGA_CONSOLE) */

	MAX_DMA_ADDRESS = PAGE_OFFSET + MAX_PHYS_MEMORY;

	/*
	 * Build the tables for managing cnodes.
	 */
	build_cnode_tables();

	status =
	    ia64_sal_freq_base(SAL_FREQ_BASE_REALTIME_CLOCK, &ticks_per_sec,
			       &drift);
	if (status != 0 || ticks_per_sec < 100000) {
		printk(KERN_WARNING
		       "unable to determine platform RTC clock frequency, guessing.\n");
		/* PROM gives wrong value for clock freq. so guess */
		sn_rtc_cycles_per_second = 1000000000000UL / 30000UL;
	} else
		sn_rtc_cycles_per_second = ticks_per_sec;

	platform_intr_list[ACPI_INTERRUPT_CPEI] = IA64_CPE_VECTOR;

	ia64_printk_clock = ia64_sn2_printk_clock;

	/*
	 * Old PROMs do not provide an ACPI FADT.  Disable legacy keyboard
	 * support here so we don't have to listen to failed keyboard probe
	 * messages.
	 */
	if (is_shub1() && version <= 0x0209 && acpi_kbd_controller_present) {
		printk(KERN_INFO "Disabling legacy keyboard support as prom "
		       "is too old and doesn't provide FADT\n");
		acpi_kbd_controller_present = 0;
	}

	printk("SGI SAL version %x.%02x\n", version >> 8, version & 0x00FF);

	/*
	 * we set the default root device to /dev/hda
	 * to make simulation easy
	 */
	ROOT_DEV = Root_HDA1;

	/*
	 * Create the PDAs and NODEPDAs for all the cpus.
	 */
	sn_init_pdas(cmdline_p);

	ia64_mark_idle = &snidle;

	/*
	 * For the bootcpu, we do this here.  All other cpus will make the
	 * call as part of cpu_init in slave cpu initialization.
	 */
	sn_cpu_init();

#ifdef CONFIG_SMP
	init_smp_config();
#endif
	screen_info = sn_screen_info;

	sn_timer_init();

	/*
	 * set pm_power_off to a SAL call to allow
	 * sn machines to power off.  The SAL call can be replaced
	 * by an ACPI interface call when ACPI is fully implemented
	 * for sn.
	 */
	pm_power_off = ia64_sn_power_down;
	current->thread.flags |= IA64_THREAD_MIGRATION;
}
/**
 * sn_cpu_init - initialize per-cpu data areas
 * @cpuid: cpuid of the caller
 *
 * Called during cpu initialization on each cpu as it starts.
 * Currently, initializes the per-cpu data area for SNIA.
 * Also sets up a few fields in the nodepda.  Also known as
 * platform_cpu_init() by the ia64 machvec code.
 *
 * Fix: the SHub2 pio2[] table indexed by cpu slice must map slices
 * 0..3 to PIO write status registers 0, 2, 1, 3 — registers 1 and 2
 * were previously listed in numerical order, making slices 1 and 2
 * poll the wrong write status MMR.
 */
void __init sn_cpu_init(void)
{
	int cpuid;
	int cpuphyid;
	int nasid;
	int subnode;
	int slice;
	int cnode;
	int i;
	static int wars_have_been_checked;

	memset(pda, 0, sizeof(pda));
	if (ia64_sn_get_sn_info(0, &sn_hub_info->shub2,
				&sn_hub_info->nasid_bitmask,
				&sn_hub_info->nasid_shift,
				&sn_system_size, &sn_sharing_domain_size,
				&sn_partition_id, &sn_coherency_id,
				&sn_region_size))
		BUG();
	sn_hub_info->as_shift = sn_hub_info->nasid_shift - 2;

	/*
	 * The boot cpu makes this call again after platform initialization is
	 * complete.
	 */
	if (nodepdaindr[0] == NULL)
		return;

	cpuid = smp_processor_id();
	cpuphyid = get_sapicid();

	if (ia64_sn_get_sapic_info(cpuphyid, &nasid, &subnode, &slice))
		BUG();

	/* Record this cpu's physical location in every node's nodepda. */
	for (i=0; i < MAX_NUMNODES; i++) {
		if (nodepdaindr[i]) {
			nodepdaindr[i]->phys_cpuid[cpuid].nasid = nasid;
			nodepdaindr[i]->phys_cpuid[cpuid].slice = slice;
			nodepdaindr[i]->phys_cpuid[cpuid].subnode = subnode;
		}
	}

	cnode = nasid_to_cnodeid(nasid);

	pda->p_nodepda = nodepdaindr[cnode];
	pda->led_address =
	    (typeof(pda->led_address)) (LED0 + (slice << LED_CPU_SHIFT));
	pda->led_state = LED_ALWAYS_SET;
	pda->hb_count = HZ / 2;
	pda->hb_state = 0;
	pda->idle_flag = 0;

	if (cpuid != 0) {
		/* copy cpu 0's cnodeid_to_nasid table to this cpu's */
		memcpy(pda->cnodeid_to_nasid_table,
		       pdacpu(0)->cnodeid_to_nasid_table,
		       sizeof(pda->cnodeid_to_nasid_table));
	}

	/*
	 * Check for WARs.
	 * Only needs to be done once, on BSP.
	 * Has to be done after loop above, because it uses
	 * pda.cnodeid_to_nasid_table[i].
	 * Has to be done before assignment below.
	 */
	if (!wars_have_been_checked) {
		sn_check_for_wars();
		wars_have_been_checked = 1;
	}
	sn_hub_info->shub_1_1_found = shub_1_1_found;

	/*
	 * Set up addresses of PIO/MEM write status registers.
	 */
	{
		u64 pio1[] = {SH1_PIO_WRITE_STATUS_0, 0,
			SH1_PIO_WRITE_STATUS_1, 0};
		/*
		 * SHub2 cpu slices map to PIO write status registers in
		 * the order 0, 2, 1, 3: slice 1 uses WRITE_STATUS_2 and
		 * slice 2 uses WRITE_STATUS_1.
		 */
		u64 pio2[] = {SH2_PIO_WRITE_STATUS_0, SH2_PIO_WRITE_STATUS_2,
			SH2_PIO_WRITE_STATUS_1, SH2_PIO_WRITE_STATUS_3};
		u64 *pio;
		pio = is_shub1() ? pio1 : pio2;
		pda->pio_write_status_addr =
		    (volatile unsigned long *) LOCAL_MMR_ADDR(pio[slice]);
		pda->pio_write_status_val =
		    is_shub1() ? SH_PIO_WRITE_STATUS_PENDING_WRITE_COUNT_MASK : 0;
	}

	/*
	 * WAR addresses for SHUB 1.x.
	 */
	if (local_node_data->active_cpu_count++ == 0 && is_shub1()) {
		int buddy_nasid;
		buddy_nasid =
		    cnodeid_to_nasid(numa_node_id() ==
				     num_online_nodes() - 1 ? 0 : numa_node_id() + 1);
		pda->pio_shub_war_cam_addr =
		    (volatile unsigned long *)GLOBAL_MMR_ADDR(nasid,
							      SH1_PI_CAM_CONTROL);
	}
}