static enum xp_retval xp_register_nofault_code_sn2(void) { int ret; u64 func_addr; u64 err_func_addr; func_addr = *(u64 *)xp_nofault_PIOR; err_func_addr = *(u64 *)xp_error_PIOR; ret = sn_register_nofault_code(func_addr, err_func_addr, err_func_addr, 1, 1); if (ret != 0) { dev_err(xp, "can't register nofault code, error=%d\n", ret); return xpSalError; } /* * Setup the nofault PIO read target. (There is no special reason why * SH_IPI_ACCESS was selected.) */ if (is_shub1()) xp_nofault_PIOR_target = SH1_IPI_ACCESS; else if (is_shub2()) xp_nofault_PIOR_target = SH2_IPI_ACCESS0; return xpSuccess; }
/*
 * Register with SAL the nofault code region used for cross-partition PIO
 * reads, then select the PIO read target for those probes.
 *
 * Returns xpSuccess, or xpSalError if SAL refuses the registration.
 */
static enum xp_retval
xp_register_nofault_code_sn2(void)
{
	int ret;
	u64 func_addr;
	u64 err_func_addr;

	/* addresses of the nofault read and error-recovery code stubs */
	func_addr = *(u64 *)xp_nofault_PIOR;
	err_func_addr = *(u64 *)xp_error_PIOR;
	ret = sn_register_nofault_code(func_addr, err_func_addr,
				       err_func_addr, 1, 1);
	if (ret != 0) {
		dev_err(xp, "can't register nofault code, error=%d\n", ret);
		return xpSalError;
	}

	/*
	 * Setup the nofault PIO read target. (There is no special reason why
	 * SH_IPI_ACCESS was selected.)
	 */
	if (is_shub1())
		xp_nofault_PIOR_target = SH1_IPI_ACCESS;
	else if (is_shub2())
		xp_nofault_PIOR_target = SH2_IPI_ACCESS0;

	return xpSuccess;
}
/*
 * Zero a block of uncached memory.
 *
 * On SN2 hardware the block is cleared with a BTE zero-fill transfer;
 * elsewhere a plain memset() is used.  Returns the bte_copy() status on
 * SN2, otherwise 0.
 */
static inline int
mspec_zero_block(unsigned long addr, int len)
{
	int status;

	if (is_sn2) {
		if (is_shub2()) {
			int nid;
			void *p;
			int i;

			/*
			 * Touch every SHub2 AMO cache entry on the node
			 * owning this page before the zero-fill — presumably
			 * this flushes stale AMO state for the region;
			 * NOTE(review): confirm against SHub2 AMO errata.
			 */
			nid = nasid_to_cnodeid(get_node_number(__pa(addr)));
			p = (void *)TO_AMO(scratch_page[nid]);

			for (i=0; i < SH2_AMO_CACHE_ENTRIES; i++) {
				FETCHOP_LOAD_OP(p, FETCHOP_LOAD);
				p += FETCHOP_VAR_SIZE;
			}
		}
		/* strip the uncached offset to get the physical address */
		status = bte_copy(0, addr & ~__IA64_UNCACHED_OFFSET, len,
				  BTE_WACQUIRE | BTE_ZERO_FILL, NULL);
	} else {
		memset((char *) addr, 0, len);
		status = 0;
	}
	return status;
}
void sn_set_err_irq_affinity(unsigned int irq)
{
	/*
	 * On systems which support CPU disabling (SHub2), all error
	 * interrupts are targetted at the boot CPU.
	 */
	if (!is_shub2())
		return;
	if (!sn_prom_feature_available(PRF_CPU_DISABLE_SUPPORT))
		return;

	set_irq_affinity_info(irq, cpu_physical_id(0), 0);
}
/*
 * Return nonzero if the hub at 'nasid' is SHub 1.1 silicon or earlier
 * (revision <= 2).  SHub2 parts are never 1.1 and always report 0.
 */
static inline int __init
is_shub_1_1(int nasid)
{
	unsigned long shub_id;
	int revision;

	if (is_shub2())
		return 0;

	shub_id = REMOTE_HUB_L(nasid, SH1_SHUB_ID);
	revision = (shub_id & SH1_SHUB_ID_REVISION_MASK) >>
		   SH1_SHUB_ID_REVISION_SHFT;
	return revision <= 2;
}
/*
 * Scan the online nodes for hub revisions that need software
 * workarounds; records the result in shub_1_1_found.
 */
static void __init sn_check_for_wars(void)
{
	int cnode;

	if (is_shub2()) {
		/* none yet */
		return;
	}

	for_each_online_node(cnode) {
		if (is_shub_1_1(cnodeid_to_nasid(cnode)))
			shub_1_1_found = 1;
	}
}
/*
 * Module init: register the cross-partition nofault PIO read code, pick
 * the PIO read target, and initialize the channel registration mutexes.
 *
 * Returns -ENODEV on non-sn2 platforms, otherwise 0.  A failure to
 * register the nofault code is logged but is not treated as fatal.
 */
int __init
xp_init(void)
{
	int err, ch;
	u64 nofault_addr = *(u64 *)xp_nofault_PIOR;
	u64 nofault_err_addr = *(u64 *)xp_error_PIOR;

	if (!ia64_platform_is("sn2"))
		return -ENODEV;

	/*
	 * Register a nofault code region which performs a cross-partition
	 * PIO read. If the PIO read times out, the MCA handler will consume
	 * the error and return to a kernel-provided instruction to indicate
	 * an error. This PIO read exists because it is guaranteed to timeout
	 * if the destination is down (AMO operations do not timeout on at
	 * least some CPUs on Shubs <= v1.2, which unfortunately we have to
	 * work around).
	 */
	err = sn_register_nofault_code(nofault_addr, nofault_err_addr,
				       nofault_err_addr, 1, 1);
	if (err != 0)
		printk(KERN_ERR "XP: can't register nofault code, error=%d\n",
		       err);

	/*
	 * Setup the nofault PIO read target. (There is no special reason why
	 * SH_IPI_ACCESS was selected.)
	 */
	xp_nofault_PIOR_target = is_shub2() ? SH2_IPI_ACCESS0 : SH1_IPI_ACCESS;

	/* initialize the connection registration mutex */
	for (ch = 0; ch < XPC_NCHANNELS; ch++)
		mutex_init(&xpc_registrations[ch].mutex);

	return 0;
}
/*
 * Wrapper for bte_copy().
 *
 * dst_pa - physical address of the destination of the transfer.
 * src_pa - physical address of the source of the transfer.
 * len - number of bytes to transfer from source to destination.
 *
 * Returns xpSuccess, or xpBteCopyError if the BTE transfer failed.
 *
 * Note: xp_remote_memcpy_sn2() should never be called while holding a
 * spinlock.
 */
static enum xp_retval
xp_remote_memcpy_sn2(unsigned long dst_pa, const unsigned long src_pa,
		     size_t len)
{
	bte_result_t ret;

	ret = bte_copy(src_pa, dst_pa, len, (BTE_NOTIFY | BTE_WACQUIRE), NULL);
	if (ret == BTE_SUCCESS)
		return xpSuccess;

	/*
	 * Fix: the format strings previously ended in "\\n", which printed
	 * a literal backslash-n instead of terminating the log line with a
	 * real newline.
	 */
	if (is_shub2()) {
		dev_err(xp, "bte_copy() on shub2 failed, error=0x%x dst_pa="
			"0x%016lx src_pa=0x%016lx len=%ld\n", ret, dst_pa,
			src_pa, len);
	} else {
		dev_err(xp, "bte_copy() failed, error=%d dst_pa=0x%016lx "
			"src_pa=0x%016lx len=%ld\n", ret, dst_pa, src_pa, len);
	}

	return xpBteCopyError;
}
/*
 * SAL has provided a partition and machine mask.  The partition mask
 * contains a bit for each even nasid in our partition.  The machine
 * mask contains a bit for each even nasid in the entire machine.
 *
 * Using those two bit arrays, we can determine which nasids are
 * known in the machine.  Each should also have a reserved page
 * initialized if they are available for partitioning.
 */
void
xpc_discovery(void)
{
	void *remote_rp_base;
	struct xpc_rsvd_page *remote_rp;
	unsigned long remote_rp_pa;
	int region;
	int region_size;
	int max_regions;
	int nasid;
	struct xpc_rsvd_page *rp;
	unsigned long *discovered_nasids;
	enum xp_retval ret;

	/* buffer to pull a remote reserved page header + nasid mask into */
	remote_rp = xpc_kmalloc_cacheline_aligned(XPC_RP_HEADER_SIZE +
						  xpc_nasid_mask_nbytes,
						  GFP_KERNEL, &remote_rp_base);
	if (remote_rp == NULL)
		return;

	/* bitmap of nasids whose partitions we've already discovered */
	discovered_nasids = kzalloc(sizeof(long) * xpc_nasid_mask_nlongs,
				    GFP_KERNEL);
	if (discovered_nasids == NULL) {
		kfree(remote_rp_base);
		return;
	}

	rp = (struct xpc_rsvd_page *)xpc_rsvd_page;

	/*
	 * The term 'region' in this context refers to the minimum number of
	 * nodes that can comprise an access protection grouping. The access
	 * protection is in regards to memory, IOI and IPI.
	 */
	max_regions = 64;
	region_size = xp_region_size;

	/*
	 * Larger region sizes mean proportionally more regions to scan;
	 * the cascading fallthroughs below are intentional.
	 */
	switch (region_size) {
	case 128:
		max_regions *= 2;
		/* fallthrough */
	case 64:
		max_regions *= 2;
		/* fallthrough */
	case 32:
		max_regions *= 2;
		region_size = 16;
		DBUG_ON(!is_shub2());
	}

	for (region = 0; region < max_regions; region++) {

		if (xpc_exiting)
			break;

		dev_dbg(xpc_part, "searching region %d\n", region);

		/* only even nasids exist, hence the stride of 2 */
		for (nasid = (region * region_size * 2);
		     nasid < ((region + 1) * region_size * 2); nasid += 2) {

			if (xpc_exiting)
				break;

			dev_dbg(xpc_part, "checking nasid %d\n", nasid);

			if (test_bit(nasid / 2, xpc_part_nasids)) {
				dev_dbg(xpc_part, "PROM indicates Nasid %d is "
					"part of the local partition; skipping "
					"region\n", nasid);
				break;
			}

			if (!(test_bit(nasid / 2, xpc_mach_nasids))) {
				dev_dbg(xpc_part, "PROM indicates Nasid %d was "
					"not on Numa-Link network at reset\n",
					nasid);
				continue;
			}

			if (test_bit(nasid / 2, discovered_nasids)) {
				dev_dbg(xpc_part, "Nasid %d is part of a "
					"partition which was previously "
					"discovered\n", nasid);
				continue;
			}

			/* pull over the rsvd page header & part_nasids mask */
			ret = xpc_get_remote_rp(nasid, discovered_nasids,
						remote_rp, &remote_rp_pa);
			if (ret != xpSuccess) {
				dev_dbg(xpc_part, "unable to get reserved page "
					"from nasid %d, reason=%d\n", nasid,
					ret);

				if (ret == xpLocalPartid)
					break;

				continue;
			}

			xpc_request_partition_activation(remote_rp,
							 remote_rp_pa, nasid);
		}
	}

	kfree(discovered_nasids);
	kfree(remote_rp_base);
}