/*
 * XPC's heartbeat code calls this function to inform XPC that a partition is
 * going down.  XPC responds by tearing down the XPartition Communication
 * infrastructure used for the just downed partition.
 *
 * XPC's heartbeat code will never call this function and xpc_partition_up()
 * at the same time.  Nor will it ever make multiple calls to either function
 * at the same time.
 */
void
xpc_partition_going_down(struct xpc_partition *part, enum xp_retval reason)
{
	unsigned long irq_flags;
	int ch_number;
	struct xpc_channel *ch;

	dev_dbg(xpc_chan, "deactivating partition %d, reason=%d\n",
		XPC_PARTID(part), reason);

	if (!xpc_part_ref(part)) {
		/* infrastructure for this partition isn't currently set up */
		return;
	}

	/* disconnect channels associated with the partition going down */

	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
		ch = &part->channels[ch_number];

		xpc_msgqueue_ref(ch);
		spin_lock_irqsave(&ch->lock, irq_flags);

		XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags);

		spin_unlock_irqrestore(&ch->lock, irq_flags);
		xpc_msgqueue_deref(ch);
	}

	xpc_wakeup_channel_mgr(part);

	xpc_part_deref(part);
}
static void
xpc_send_chctl_local_msgrequest_uv(struct xpc_partition *part, int ch_number)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&part->chctl_lock, irq_flags);
	part->chctl.flags[ch_number] |= XPC_CHCTL_MSGREQUEST;
	spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

	xpc_wakeup_channel_mgr(part);
}
void
xpc_disconnect_wait(int ch_number)
{
	unsigned long irq_flags;
	short partid;
	struct xpc_partition *part;
	struct xpc_channel *ch;
	int wakeup_channel_mgr;

	/* now wait for all callouts to the caller's function to cease */
	for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
		part = &xpc_partitions[partid];

		if (!xpc_part_ref(part))
			continue;

		ch = &part->channels[ch_number];

		if (!(ch->flags & XPC_C_WDISCONNECT)) {
			xpc_part_deref(part);
			continue;
		}

		wait_for_completion(&ch->wdisconnect_wait);

		spin_lock_irqsave(&ch->lock, irq_flags);
		DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED));
		wakeup_channel_mgr = 0;

		if (ch->delayed_IPI_flags) {
			if (part->act_state != XPC_P_DEACTIVATING) {
				spin_lock(&part->IPI_lock);
				XPC_SET_IPI_FLAGS(part->local_IPI_amo,
						  ch->number,
						  ch->delayed_IPI_flags);
				spin_unlock(&part->IPI_lock);
				wakeup_channel_mgr = 1;
			}
			ch->delayed_IPI_flags = 0;
		}

		ch->flags &= ~XPC_C_WDISCONNECT;
		spin_unlock_irqrestore(&ch->lock, irq_flags);

		if (wakeup_channel_mgr)
			xpc_wakeup_channel_mgr(part);

		xpc_part_deref(part);
	}
}
void
xpc_disconnect_wait(int ch_number)
{
	unsigned long irq_flags;
	short partid;
	struct xpc_partition *part;
	struct xpc_channel *ch;
	int wakeup_channel_mgr;

	/* now wait for all callouts to the caller's function to cease */
	for (partid = 0; partid < xp_max_npartitions; partid++) {
		part = &xpc_partitions[partid];

		if (!xpc_part_ref(part))
			continue;

		ch = &part->channels[ch_number];

		if (!(ch->flags & XPC_C_WDISCONNECT)) {
			xpc_part_deref(part);
			continue;
		}

		wait_for_completion(&ch->wdisconnect_wait);

		spin_lock_irqsave(&ch->lock, irq_flags);
		DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED));
		wakeup_channel_mgr = 0;

		if (ch->delayed_chctl_flags) {
			if (part->act_state != XPC_P_AS_DEACTIVATING) {
				spin_lock(&part->chctl_lock);
				part->chctl.flags[ch->number] |=
				    ch->delayed_chctl_flags;
				spin_unlock(&part->chctl_lock);
				wakeup_channel_mgr = 1;
			}
			ch->delayed_chctl_flags = 0;
		}

		ch->flags &= ~XPC_C_WDISCONNECT;
		spin_unlock_irqrestore(&ch->lock, irq_flags);

		if (wakeup_channel_mgr)
			xpc_wakeup_channel_mgr(part);

		xpc_part_deref(part);
	}
}
/*
 * See if the other side has responded to a partition deactivate request
 * from us.  Though we requested the remote partition to deactivate with regard
 * to us, we really only need to wait for the other side to disengage from us.
 */
int
xpc_partition_disengaged(struct xpc_partition *part)
{
	short partid = XPC_PARTID(part);
	int disengaged;

	disengaged = !xpc_partition_engaged(partid);
	if (part->disengage_timeout) {
		if (!disengaged) {
			if (time_is_after_jiffies(part->disengage_timeout)) {
				/* timelimit hasn't been reached yet */
				return 0;
			}

			/*
			 * Other side hasn't responded to our deactivate
			 * request in a timely fashion, so assume it's dead.
			 */
			dev_info(xpc_part, "deactivate request to remote "
				 "partition %d timed out\n", partid);
			xpc_disengage_timedout = 1;
			xpc_assume_partition_disengaged(partid);
			disengaged = 1;
		}
		part->disengage_timeout = 0;

		/* cancel the timer function, provided it's not us */
		if (!in_interrupt())
			del_singleshot_timer_sync(&part->disengage_timer);

		DBUG_ON(part->act_state != XPC_P_AS_DEACTIVATING &&
			part->act_state != XPC_P_AS_INACTIVE);
		if (part->act_state != XPC_P_AS_INACTIVE)
			xpc_wakeup_channel_mgr(part);

		xpc_cancel_partition_deactivation_request(part);
	}
	return disengaged;
}
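For context, a teardown path would typically keep polling xpc_partition_disengaged() for every partition until each one reports itself disengaged and inactive. The loop below is a hypothetical sketch, not the actual kernel shutdown code: the function name, the still_engaged bookkeeping, and the msleep() interval are illustrative, and it assumes deactivation has already been requested for all remote partitions.

/*
 * Hypothetical polling loop (illustrative only): wait until every remote
 * partition has disengaged and gone inactive.  Assumes deactivation has
 * already been requested for all of them.
 */
static void
example_wait_for_disengage(void)
{
	short partid;
	struct xpc_partition *part;
	int still_engaged;

	do {
		still_engaged = 0;

		for (partid = 0; partid < xp_max_npartitions; partid++) {
			part = &xpc_partitions[partid];

			if (!xpc_partition_disengaged(part) ||
			    part->act_state != XPC_P_AS_INACTIVE)
				still_engaged = 1;	/* keep waiting */
		}

		if (still_engaged)
			msleep(300);	/* illustrative poll interval */
	} while (still_engaged);
}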
/*
 * Called by XP at the time of channel connection registration to cause
 * XPC to establish connections to all currently active partitions.
 */
void
xpc_initiate_connect(int ch_number)
{
	short partid;
	struct xpc_partition *part;
	struct xpc_channel *ch;

	DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS);

	for (partid = 0; partid < xp_max_npartitions; partid++) {
		part = &xpc_partitions[partid];

		if (xpc_part_ref(part)) {
			ch = &part->channels[ch_number];

			/*
			 * Initiate the establishment of a connection on the
			 * newly registered channel to the remote partition.
			 */
			xpc_wakeup_channel_mgr(part);
			xpc_part_deref(part);
		}
	}
}
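The registration itself goes through the xp layer's xpc_connect() wrapper, which is what ends up invoking xpc_initiate_connect(). The fragment below is a hypothetical client module: the channel number, payload size, queue depth, and kthread limits are made-up illustrative values, and the xpc_connect() parameter list and callback signature are assumed from the xp.h of the same era (they have varied between kernel versions).

/* Hypothetical channel registration (all values illustrative). */
#define EXAMPLE_CHANNEL		5	/* assumed-free channel number */
#define EXAMPLE_MSG_SIZE	128	/* payload size in bytes */
#define EXAMPLE_NENTRIES	32	/* local message queue entries */

static void
example_channel_activity(enum xp_retval reason, short partid, int ch_number,
			 void *data, void *key)
{
	/* handle connect/disconnect/message-received notifications here */
}

static int __init
example_init(void)
{
	enum xp_retval ret;

	/*
	 * Registering the channel causes xp to call xpc_initiate_connect(),
	 * which wakes the channel manager of every active partition.
	 */
	ret = xpc_connect(EXAMPLE_CHANNEL, example_channel_activity, NULL,
			  EXAMPLE_MSG_SIZE, EXAMPLE_NENTRIES,
			  1 /* assigned kthread limit */,
			  1 /* idle kthread limit */);

	return (ret == xpSuccess) ? 0 : -ENODEV;
}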
static void
xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
			      struct xpc_activate_mq_msghdr_uv *msg_hdr,
			      int part_setup,
			      int *wakeup_hb_checker)
{
	unsigned long irq_flags;
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	struct xpc_openclose_args *args;

	part_uv->remote_act_state = msg_hdr->act_state;

	switch (msg_hdr->type) {
	case XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV:
		/* syncing of remote_act_state was just done above */
		break;

	case XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV: {
		struct xpc_activate_mq_msg_activate_req_uv *msg;

		/*
		 * ??? Do we deal here with ts_jiffies being different
		 * ??? if act_state != XPC_P_AS_INACTIVE instead of
		 * ??? below?
		 */
		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_activate_req_uv, hdr);

		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_ACTIVATE_UV;
		part->remote_rp_pa = msg->rp_gpa; /* !!! _pa is _gpa */
		part->remote_rp_ts_jiffies = msg_hdr->rp_ts_jiffies;
		part_uv->heartbeat_gpa = msg->heartbeat_gpa;

		if (msg->activate_gru_mq_desc_gpa !=
		    part_uv->activate_gru_mq_desc_gpa) {
			spin_lock(&part_uv->flags_lock);
			part_uv->flags &= ~XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
			spin_unlock(&part_uv->flags_lock);
			part_uv->activate_gru_mq_desc_gpa =
			    msg->activate_gru_mq_desc_gpa;
		}
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV: {
		struct xpc_activate_mq_msg_deactivate_req_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_deactivate_req_uv, hdr);

		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
		part_uv->reason = msg->reason;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
		return;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV: {
		struct xpc_activate_mq_msg_chctl_closerequest_uv *msg;

		if (!part_setup)
			break;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_closerequest_uv,
				   hdr);
		args = &part->remote_openclose_args[msg->ch_number];
		args->reason = msg->reason;

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_CLOSEREQUEST;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV: {
		struct xpc_activate_mq_msg_chctl_closereply_uv *msg;

		if (!part_setup)
			break;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_closereply_uv,
				   hdr);

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_CLOSEREPLY;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV: {
		struct xpc_activate_mq_msg_chctl_openrequest_uv *msg;

		if (!part_setup)
			break;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_openrequest_uv,
				   hdr);
		args = &part->remote_openclose_args[msg->ch_number];
		args->entry_size = msg->entry_size;
		args->local_nentries = msg->local_nentries;

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREQUEST;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV: {
		struct xpc_activate_mq_msg_chctl_openreply_uv *msg;

		if (!part_setup)
			break;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_openreply_uv, hdr);
		args = &part->remote_openclose_args[msg->ch_number];
		args->remote_nentries = msg->remote_nentries;
		args->local_nentries = msg->local_nentries;
		args->local_msgqueue_pa = msg->notify_gru_mq_desc_gpa;

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREPLY;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV: {
		struct xpc_activate_mq_msg_chctl_opencomplete_uv *msg;

		if (!part_setup)
			break;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_opencomplete_uv,
				   hdr);

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENCOMPLETE;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
	}
		/* no break here: execution falls through into the next case */
	case XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV:
		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
		part_uv->flags |= XPC_P_ENGAGED_UV;
		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
		break;

	case XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV:
		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
		part_uv->flags &= ~XPC_P_ENGAGED_UV;
		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
		break;

	default:
		dev_err(xpc_part, "received unknown activate_mq msg type=%d "
			"from partition=%d\n", msg_hdr->type, XPC_PARTID(part));

		/* get hb checker to deactivate from the remote partition */
		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
		part_uv->reason = xpBadMsgType;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
		return;
	}

	if (msg_hdr->rp_ts_jiffies != part->remote_rp_ts_jiffies &&
	    part->remote_rp_ts_jiffies != 0) {
		/*
		 * ??? Does what we do here need to be sensitive to
		 * ??? act_state or remote_act_state?
		 */
		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_REACTIVATE_UV;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
	}
}
static void
xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
			      struct xpc_activate_mq_msghdr_uv *msg_hdr,
			      int *wakeup_hb_checker)
{
	unsigned long irq_flags;
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	struct xpc_openclose_args *args;

	part_uv->remote_act_state = msg_hdr->act_state;

	switch (msg_hdr->type) {
	case XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV:
		break;

	case XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV: {
		struct xpc_activate_mq_msg_activate_req_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_activate_req_uv, hdr);

		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_ACTIVATE_UV;
		part->remote_rp_pa = msg->rp_gpa;
		part->remote_rp_ts_jiffies = msg_hdr->rp_ts_jiffies;
		part_uv->heartbeat_gpa = msg->heartbeat_gpa;

		if (msg->activate_gru_mq_desc_gpa !=
		    part_uv->activate_gru_mq_desc_gpa) {
			/*
			 * Interrupts are already disabled (we hold
			 * xpc_activate_IRQ_rcvd_lock via irqsave), so take the
			 * nested lock without saving into irq_flags again;
			 * re-using irq_flags here would leave interrupts
			 * disabled after the outer unlock.
			 */
			spin_lock(&part_uv->flags_lock);
			part_uv->flags &= ~XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
			spin_unlock(&part_uv->flags_lock);
			part_uv->activate_gru_mq_desc_gpa =
			    msg->activate_gru_mq_desc_gpa;
		}
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV: {
		struct xpc_activate_mq_msg_deactivate_req_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_deactivate_req_uv, hdr);

		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
		part_uv->reason = msg->reason;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
		return;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV: {
		struct xpc_activate_mq_msg_chctl_closerequest_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_closerequest_uv,
				   hdr);
		args = &part->remote_openclose_args[msg->ch_number];
		args->reason = msg->reason;

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_CLOSEREQUEST;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV: {
		struct xpc_activate_mq_msg_chctl_closereply_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_closereply_uv,
				   hdr);

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_CLOSEREPLY;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV: {
		struct xpc_activate_mq_msg_chctl_openrequest_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_openrequest_uv,
				   hdr);
		args = &part->remote_openclose_args[msg->ch_number];
		args->entry_size = msg->entry_size;
		args->local_nentries = msg->local_nentries;

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREQUEST;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV: {
		struct xpc_activate_mq_msg_chctl_openreply_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_openreply_uv, hdr);
		args = &part->remote_openclose_args[msg->ch_number];
		args->remote_nentries = msg->remote_nentries;
		args->local_nentries = msg->local_nentries;
		args->local_msgqueue_pa = msg->notify_gru_mq_desc_gpa;

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREPLY;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV: {
		struct xpc_activate_mq_msg_chctl_opencomplete_uv *msg;

		msg = container_of(msg_hdr, struct
				   xpc_activate_mq_msg_chctl_opencomplete_uv,
				   hdr);

		spin_lock_irqsave(&part->chctl_lock, irq_flags);
		part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENCOMPLETE;
		spin_unlock_irqrestore(&part->chctl_lock, irq_flags);

		xpc_wakeup_channel_mgr(part);
	}
		/* no break here: execution falls through into the next case */
	case XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV:
		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
		part_uv->flags |= XPC_P_ENGAGED_UV;
		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
		break;

	case XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV:
		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
		part_uv->flags &= ~XPC_P_ENGAGED_UV;
		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
		break;

	default:
		dev_err(xpc_part, "received unknown activate_mq msg type=%d "
			"from partition=%d\n", msg_hdr->type, XPC_PARTID(part));

		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
		part_uv->reason = xpBadMsgType;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
		return;
	}

	if (msg_hdr->rp_ts_jiffies != part->remote_rp_ts_jiffies &&
	    part->remote_rp_ts_jiffies != 0) {
		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (part_uv->act_state_req == 0)
			xpc_activate_IRQ_rcvd++;
		part_uv->act_state_req = XPC_P_ASR_REACTIVATE_UV;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		(*wakeup_hb_checker)++;
	}
}
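For orientation, both variants of xpc_handle_activate_mq_msg_uv() are meant to be driven from the activate message-queue receive path: each incoming message header is dispatched to the handler, and wakeup_hb_checker accumulates how many of those messages require the heartbeat checker to be woken. The sketch below is hypothetical: example_next_activate_msg(), example_free_activate_msg(), and example_activate_wq stand in for the real GRU message-queue plumbing, msg_hdr->partid is assumed to identify the sending partition, and passing the result of xpc_part_ref() as part_setup is an assumption about how the four-argument variant is wired up.

/*
 * Hypothetical dispatch loop (illustrative only).  The example_* helpers and
 * waitqueue are stand-ins, not actual kernel APIs.
 */
static void
example_handle_activate_mq_msgs(void)
{
	struct xpc_activate_mq_msghdr_uv *msg_hdr;
	struct xpc_partition *part;
	int part_referenced;
	int wakeup_hb_checker = 0;

	while ((msg_hdr = example_next_activate_msg()) != NULL) {

		part = &xpc_partitions[msg_hdr->partid];

		/*
		 * Only let the handler touch per-channel structures when the
		 * partition's infrastructure is set up (assumed to be what
		 * the part_setup argument conveys).
		 */
		part_referenced = xpc_part_ref(part);
		xpc_handle_activate_mq_msg_uv(part, msg_hdr, part_referenced,
					      &wakeup_hb_checker);
		if (part_referenced)
			xpc_part_deref(part);

		example_free_activate_msg(msg_hdr);
	}

	if (wakeup_hb_checker)
		wake_up_interruptible(&example_activate_wq);
}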