/* * Process the receipt of a RETRY message */ static inline void uv_bau_process_retry_msg(struct msg_desc *mdp, struct bau_control *bcp) { int i; int cancel_count = 0; int slot2; unsigned long msg_res; unsigned long mmr = 0; struct bau_payload_queue_entry *msg; struct bau_payload_queue_entry *msg2; struct ptc_stats *stat; msg = mdp->msg; stat = bcp->statp; stat->d_retries++; /* * cancel any message from msg+1 to the retry itself */ for (msg2 = msg+1, i = 0; i < DEST_Q_SIZE; msg2++, i++) { if (msg2 > mdp->va_queue_last) msg2 = mdp->va_queue_first; if (msg2 == msg) break; /* same conditions for cancellation as uv_do_reset */ if ((msg2->replied_to == 0) && (msg2->canceled == 0) && (msg2->sw_ack_vector) && ((msg2->sw_ack_vector & msg->sw_ack_vector) == 0) && (msg2->sending_cpu == msg->sending_cpu) && (msg2->msg_type != MSG_NOOP)) { slot2 = msg2 - mdp->va_queue_first; mmr = uv_read_local_mmr (UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE); msg_res = msg2->sw_ack_vector; /* * This is a message retry; clear the resources held * by the previous message only if they timed out. * If it has not timed out we have an unexpected * situation to report. */ if (mmr & (msg_res << UV_SW_ACK_NPENDING)) { /* * is the resource timed out? * make everyone ignore the cancelled message. */ msg2->canceled = 1; stat->d_canceled++; cancel_count++; uv_write_local_mmr( UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, (msg_res << UV_SW_ACK_NPENDING) | msg_res); } } } if (!cancel_count) stat->d_nocanceled++; }
/* * Last resort when we get a large number of destination timeouts is * to clear resources held by a given cpu. * Do this with IPI so that all messages in the BAU message queue * can be identified by their nonzero sw_ack_vector field. * * This is entered for a single cpu on the uvhub. * The sender want's this uvhub to free a specific message's * sw_ack resources. */ static void uv_do_reset(void *ptr) { int i; int slot; int count = 0; unsigned long mmr; unsigned long msg_res; struct bau_control *bcp; struct reset_args *rap; struct bau_payload_queue_entry *msg; struct ptc_stats *stat; bcp = &per_cpu(bau_control, smp_processor_id()); rap = (struct reset_args *)ptr; stat = bcp->statp; stat->d_resets++; /* * We're looking for the given sender, and * will free its sw_ack resource. * If all cpu's finally responded after the timeout, its * message 'replied_to' was set. */ for (msg = bcp->va_queue_first, i = 0; i < DEST_Q_SIZE; msg++, i++) { /* uv_do_reset: same conditions for cancellation as uv_bau_process_retry_msg() */ if ((msg->replied_to == 0) && (msg->canceled == 0) && (msg->sending_cpu == rap->sender) && (msg->sw_ack_vector) && (msg->msg_type != MSG_NOOP)) { /* * make everyone else ignore this message */ msg->canceled = 1; slot = msg - bcp->va_queue_first; count++; /* * only reset the resource if it is still pending */ mmr = uv_read_local_mmr (UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE); msg_res = msg->sw_ack_vector; if (mmr & msg_res) { stat->d_rcanceled++; uv_write_local_mmr( UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, (msg_res << UV_SW_ACK_NPENDING) | msg_res); } } } return; }
/* Setup which NMI support is present in system */ static void uv_nmi_setup_mmrs(void) { if (uv_read_local_mmr(UVH_NMI_MMRX_SUPPORTED)) { uv_write_local_mmr(UVH_NMI_MMRX_REQ, 1UL << UVH_NMI_MMRX_REQ_SHIFT); nmi_mmr = UVH_NMI_MMRX; nmi_mmr_clear = UVH_NMI_MMRX_CLEAR; nmi_mmr_pending = 1UL << UVH_NMI_MMRX_SHIFT; pr_info("UV: SMI NMI support: %s\n", UVH_NMI_MMRX_TYPE); } else { nmi_mmr = UVH_NMI_MMR; nmi_mmr_clear = UVH_NMI_MMR_CLEAR; nmi_mmr_pending = 1UL << UVH_NMI_MMR_SHIFT; pr_info("UV: SMI NMI support: %s\n", UVH_NMI_MMR_TYPE); } }
/* * Free a software acknowledge hardware resource by clearing its Pending * bit. This will return a reply to the sender. * If the message has timed out, a reply has already been sent by the * hardware but the resource has not been released. In that case our * clear of the Timeout bit (as well) will free the resource. No reply will * be sent (the hardware will only do one reply per message). */ static inline void uv_reply_to_message(struct msg_desc *mdp, struct bau_control *bcp) { unsigned long dw; struct bau_payload_queue_entry *msg; msg = mdp->msg; if (!msg->canceled) { dw = (msg->sw_ack_vector << UV_SW_ACK_NPENDING) | msg->sw_ack_vector; uv_write_local_mmr( UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, dw); } msg->replied_to = 1; msg->sw_ack_vector = 0; }
/*
 * Acknowledge the local NMI by clearing its pending bit in whichever
 * clear MMR was selected by uv_nmi_setup_mmrs().
 */
static inline void uv_local_mmr_clear_nmi(void)
{
	uv_write_local_mmr(nmi_mmr_clear, nmi_mmr_pending);
}