void __init uv_setup(char **cmdline_p)
{
	union uvh_si_addr_map_config_u m_n_config;
	union uvh_node_id_u node_id;
	unsigned long gnode_upper;
	int nid, cpu, m_val, n_val;
	unsigned long mmr_base, lowmem_redir_base, lowmem_redir_size;

	if (IS_MEDUSA()) {
		lowmem_redir_base = 0;
		lowmem_redir_size = 0;
		node_id.v = 0;
		m_n_config.s.m_skt = 37;
		m_n_config.s.n_skt = 0;
		mmr_base = 0;
#if 0
		/* Need BIOS calls - TBD */
		if (!ia64_sn_is_fake_prom())
			sn_prom_type = 1;
		else
#endif
			sn_prom_type = 2;
		printk(KERN_INFO "Running on medusa with %s PROM\n",
		       (sn_prom_type == 1) ? "real" : "fake");
	} else {
		get_lowmem_redirect(&lowmem_redir_base, &lowmem_redir_size);
		node_id.v = uv_read_local_mmr(UVH_NODE_ID);
		m_n_config.v = uv_read_local_mmr(UVH_SI_ADDR_MAP_CONFIG);
		mmr_base = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR) &
				~UV_MMR_ENABLE;
	}

	m_val = m_n_config.s.m_skt;
	n_val = m_n_config.s.n_skt;
	printk(KERN_DEBUG "UV: global MMR base 0x%lx\n", mmr_base);

	gnode_upper = (((unsigned long)node_id.s.node_id) &
		       ~((1 << n_val) - 1)) << m_val;

	for_each_present_cpu(cpu) {
		nid = cpu_to_node(cpu);
		uv_cpu_hub_info(cpu)->lowmem_remap_base = lowmem_redir_base;
		uv_cpu_hub_info(cpu)->lowmem_remap_top =
				lowmem_redir_base + lowmem_redir_size;
		uv_cpu_hub_info(cpu)->m_val = m_val;
		uv_cpu_hub_info(cpu)->n_val = n_val;
		uv_cpu_hub_info(cpu)->pnode_mask = (1 << n_val) - 1;
		uv_cpu_hub_info(cpu)->gpa_mask = (1UL << (m_val + n_val)) - 1;
		uv_cpu_hub_info(cpu)->gnode_upper = gnode_upper;
		uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base;
		uv_cpu_hub_info(cpu)->coherency_domain_number = 0;	/* ZZZ */
		printk(KERN_DEBUG "UV cpu %d, nid %d\n", cpu, nid);
	}
}
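/*
 * Illustrative sketch (an assumed helper, not part of this file): how the
 * per-cpu fields initialized above are typically combined to turn a
 * socket-local physical address into a UV global address.  Addresses below
 * lowmem_remap_top are first redirected into the remapped low-memory
 * window, then the node bits (gnode_upper) are OR'ed in above the m_val
 * offset bits.  The function name is hypothetical.
 */
static inline unsigned long example_soc_phys_ram_to_gpa(int cpu,
							unsigned long paddr)
{
	struct uv_hub_info_s *hub = uv_cpu_hub_info(cpu);

	if (paddr < hub->lowmem_remap_top)
		paddr |= hub->lowmem_remap_base;
	return paddr | hub->gnode_upper;
}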
static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size)
{
	union uvh_si_alias0_overlay_config_u alias;
	union uvh_rh_gam_alias210_redirect_config_2_mmr_u redirect;
	int i;

	for (i = 0; i < ARRAY_SIZE(redir_addrs); i++) {
		alias.v = uv_read_local_mmr(redir_addrs[i].alias);
		if (alias.s.base == 0) {
			*size = (1UL << alias.s.m_alias);
			redirect.v = uv_read_local_mmr(redir_addrs[i].redirect);
			*base = (unsigned long)redirect.s.dest_base << DEST_SHIFT;
			return;
		}
	}
	BUG();
}
/*
 * Process the receipt of a RETRY message
 */
static inline void uv_bau_process_retry_msg(struct msg_desc *mdp,
					    struct bau_control *bcp)
{
	int i;
	int cancel_count = 0;
	int slot2;
	unsigned long msg_res;
	unsigned long mmr = 0;
	struct bau_payload_queue_entry *msg;
	struct bau_payload_queue_entry *msg2;
	struct ptc_stats *stat;

	msg = mdp->msg;
	stat = bcp->statp;
	stat->d_retries++;
	/*
	 * cancel any message from msg+1 to the retry itself
	 */
	for (msg2 = msg+1, i = 0; i < DEST_Q_SIZE; msg2++, i++) {
		if (msg2 > mdp->va_queue_last)
			msg2 = mdp->va_queue_first;
		if (msg2 == msg)
			break;

		/* same conditions for cancellation as uv_do_reset */
		if ((msg2->replied_to == 0) && (msg2->canceled == 0) &&
		    (msg2->sw_ack_vector) &&
		    ((msg2->sw_ack_vector & msg->sw_ack_vector) == 0) &&
		    (msg2->sending_cpu == msg->sending_cpu) &&
		    (msg2->msg_type != MSG_NOOP)) {
			slot2 = msg2 - mdp->va_queue_first;
			mmr = uv_read_local_mmr
				(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
			msg_res = msg2->sw_ack_vector;
			/*
			 * This is a message retry; clear the resources held
			 * by the previous message only if they timed out.
			 * If it has not timed out we have an unexpected
			 * situation to report.
			 */
			if (mmr & (msg_res << UV_SW_ACK_NPENDING)) {
				/*
				 * is the resource timed out?
				 * make everyone ignore the cancelled message.
				 */
				msg2->canceled = 1;
				stat->d_canceled++;
				cancel_count++;
				uv_write_local_mmr(
				    UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS,
				    (msg_res << UV_SW_ACK_NPENDING) | msg_res);
			}
		}
	}
	if (!cancel_count)
		stat->d_nocanceled++;
}
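/*
 * Illustrative sketch (an assumed helper, not in the original driver) of the
 * timeout test used above: the low UV_SW_ACK_NPENDING bits of the
 * software-acknowledge MMR report resources still pending, while the same
 * bits shifted up by UV_SW_ACK_NPENDING report resources whose timeout has
 * expired; writing those bits to the _ALIAS register releases the resources.
 */
static inline int example_sw_ack_timed_out(unsigned long mmr,
					   unsigned long msg_res)
{
	return (mmr & (msg_res << UV_SW_ACK_NPENDING)) != 0;
}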
static unsigned long uv1_read_status(unsigned long mmr_offset, int right_shift)
{
	unsigned long descriptor_status;

	descriptor_status = uv_read_local_mmr(mmr_offset);
	descriptor_status >>= right_shift;
	descriptor_status &= UV_ACT_STATUS_MASK;
	return descriptor_status;
}
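/*
 * Illustrative usage sketch (a hypothetical caller): each activation
 * descriptor owns a small status field packed into the activation status
 * MMR, so a poller passes the MMR offset and a right shift derived from the
 * descriptor index.  The MMR name and size macro here are assumptions about
 * the surrounding headers.
 */
static unsigned long example_poll_desc_status(int desc_index)
{
	return uv1_read_status(UVH_LB_BAU_SB_ACTIVATION_STATUS_0,
			       desc_index * UV_ACT_STATUS_SIZE);
}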
/*
 * Last resort when we get a large number of destination timeouts is
 * to clear resources held by a given cpu.
 * Do this with IPI so that all messages in the BAU message queue
 * can be identified by their nonzero sw_ack_vector field.
 *
 * This is entered for a single cpu on the uvhub.
 * The sender wants this uvhub to free a specific message's
 * sw_ack resources.
 */
static void uv_do_reset(void *ptr)
{
	int i;
	int slot;
	int count = 0;
	unsigned long mmr;
	unsigned long msg_res;
	struct bau_control *bcp;
	struct reset_args *rap;
	struct bau_payload_queue_entry *msg;
	struct ptc_stats *stat;

	bcp = &per_cpu(bau_control, smp_processor_id());
	rap = (struct reset_args *)ptr;
	stat = bcp->statp;
	stat->d_resets++;

	/*
	 * We're looking for the given sender, and
	 * will free its sw_ack resource.
	 * If all cpus finally responded after the timeout, its
	 * message 'replied_to' was set.
	 */
	for (msg = bcp->va_queue_first, i = 0; i < DEST_Q_SIZE; msg++, i++) {
		/* uv_do_reset: same conditions for cancellation as
		   uv_bau_process_retry_msg() */
		if ((msg->replied_to == 0) &&
		    (msg->canceled == 0) &&
		    (msg->sending_cpu == rap->sender) &&
		    (msg->sw_ack_vector) &&
		    (msg->msg_type != MSG_NOOP)) {
			/*
			 * make everyone else ignore this message
			 */
			msg->canceled = 1;
			slot = msg - bcp->va_queue_first;
			count++;
			/*
			 * only reset the resource if it is still pending
			 */
			mmr = uv_read_local_mmr
				(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
			msg_res = msg->sw_ack_vector;
			if (mmr & msg_res) {
				stat->d_rcanceled++;
				uv_write_local_mmr(
				    UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS,
				    (msg_res << UV_SW_ACK_NPENDING) | msg_res);
			}
		}
	}
}
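/*
 * Illustrative sketch (a hypothetical caller; the driver builds the cpumask
 * from its destination distribution map): the sending side runs
 * uv_do_reset() on one cpu of each destination uvhub via IPI, passing its
 * own cpu number so that only its sw_ack resources are released.
 */
static void example_reset_remote_hubs(const struct cpumask *hub_cpus,
				      int sender)
{
	struct reset_args reset_args = { .sender = sender };

	/* run uv_do_reset() on the selected cpus and wait for completion */
	smp_call_function_many(hub_cpus, uv_do_reset, &reset_args, 1);
}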
/* Setup which NMI support is present in system */
static void uv_nmi_setup_mmrs(void)
{
	if (uv_read_local_mmr(UVH_NMI_MMRX_SUPPORTED)) {
		uv_write_local_mmr(UVH_NMI_MMRX_REQ,
					1UL << UVH_NMI_MMRX_REQ_SHIFT);
		nmi_mmr = UVH_NMI_MMRX;
		nmi_mmr_clear = UVH_NMI_MMRX_CLEAR;
		nmi_mmr_pending = 1UL << UVH_NMI_MMRX_SHIFT;
		pr_info("UV: SMI NMI support: %s\n", UVH_NMI_MMRX_TYPE);
	} else {
		nmi_mmr = UVH_NMI_MMR;
		nmi_mmr_clear = UVH_NMI_MMR_CLEAR;
		nmi_mmr_pending = 1UL << UVH_NMI_MMR_SHIFT;
		pr_info("UV: SMI NMI support: %s\n", UVH_NMI_MMR_TYPE);
	}
}
/* Read NMI MMR and check if NMI flag was set by BMC. */
static inline int uv_nmi_test_mmr(struct uv_hub_nmi_s *hub_nmi)
{
	hub_nmi->nmi_value = uv_read_local_mmr(nmi_mmr);
	atomic_inc(&hub_nmi->read_mmr_count);
	return !!(hub_nmi->nmi_value & nmi_mmr_pending);
}
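/*
 * Illustrative sketch (an assumed helper, mirroring the clear path implied
 * by uv_nmi_setup_mmrs() above): once the pending flag has been observed,
 * it is dismissed by writing the same bit to the clear MMR selected at
 * setup time.
 */
static inline void example_clear_nmi_mmr(void)
{
	uv_write_local_mmr(nmi_mmr_clear, nmi_mmr_pending);
}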