/*
 * XPC's heartbeat code calls this function to inform XPC that a partition is
 * going down.  XPC responds by tearing down the XPartition Communication
 * infrastructure used for the just downed partition.
 *
 * XPC's heartbeat code will never call this function and xpc_partition_up()
 * at the same time.  Nor will it ever make multiple calls to either function
 * at the same time.
 */
void
xpc_partition_going_down(struct xpc_partition *part, enum xp_retval reason)
{
        unsigned long irq_flags;
        int ch_number;
        struct xpc_channel *ch;

        dev_dbg(xpc_chan, "deactivating partition %d, reason=%d\n",
                XPC_PARTID(part), reason);

        if (!xpc_part_ref(part)) {
                /* infrastructure for this partition isn't currently set up */
                return;
        }

        /* disconnect channels associated with the partition going down */

        for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
                ch = &part->channels[ch_number];

                xpc_msgqueue_ref(ch);
                spin_lock_irqsave(&ch->lock, irq_flags);

                XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags);

                spin_unlock_irqrestore(&ch->lock, irq_flags);
                xpc_msgqueue_deref(ch);
        }

        xpc_wakeup_channel_mgr(part);

        xpc_part_deref(part);
}
void
xpc_partition_going_down(struct xpc_partition *part, enum xp_retval reason)
{
        unsigned long irq_flags;
        int ch_number;
        struct xpc_channel *ch;

        dev_dbg(xpc_chan, "deactivating partition %d, reason=%d\n",
                XPC_PARTID(part), reason);

        if (!xpc_part_ref(part))
                return;

        for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
                ch = &part->channels[ch_number];

                xpc_msgqueue_ref(ch);
                spin_lock_irqsave(&ch->lock, irq_flags);

                XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags);

                spin_unlock_irqrestore(&ch->lock, irq_flags);
                xpc_msgqueue_deref(ch);
        }

        xpc_wakeup_channel_mgr(part);

        xpc_part_deref(part);
}
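/*
 * A minimal sketch, assuming the usual XPC helpers, of the partition
 * reference counting that the teardown above relies on.  This approximates
 * the xpc_part_ref()/xpc_part_deref() helpers (the real definitions live in
 * the XPC headers/partition code); field and state names here are from
 * memory and should be treated as assumptions.  The idea: a reference can
 * only be taken while the partition's channel infrastructure is set up, and
 * dropping the last reference wakes whoever is waiting to tear it down.
 */
static inline int
xpc_part_ref(struct xpc_partition *part)
{
        int setup;

        atomic_inc(&part->references);
        setup = (part->setup_state == XPC_P_SS_SETUP);
        if (!setup)
                xpc_part_deref(part);

        return setup;
}

static inline void
xpc_part_deref(struct xpc_partition *part)
{
        s32 refs = atomic_dec_return(&part->references);

        DBUG_ON(refs < 0);
        if (refs == 0 && part->setup_state == XPC_P_SS_WTEARDOWN)
                wake_up(&part->teardown_wq);
}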
/*
 * Acknowledge receipt of a delivered message's payload.
 *
 * This function, although called by users, does not call xpc_part_ref() to
 * ensure that the partition infrastructure is in place.  It relies on the
 * fact that we called xpc_msgqueue_ref() in xpc_deliver_payload().
 *
 * Arguments:
 *
 *      partid - ID of partition to which the channel is connected.
 *      ch_number - channel # message received on.
 *      payload - pointer to the payload area allocated via
 *                      xpc_initiate_send() or xpc_initiate_send_notify().
 */
void
xpc_initiate_received(short partid, int ch_number, void *payload)
{
        struct xpc_partition *part = &xpc_partitions[partid];
        struct xpc_channel *ch;

        DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
        DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);

        ch = &part->channels[ch_number];
        xpc_received_payload(ch, payload);

        /* the call to xpc_msgqueue_ref() was done by xpc_deliver_payload() */
        xpc_msgqueue_deref(ch);
}
void
xpc_initiate_received(short partid, int ch_number, void *payload)
{
        struct xpc_partition *part = &xpc_partitions[partid];
        struct xpc_channel *ch;

        DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
        DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);

        ch = &part->channels[ch_number];
        xpc_arch_ops.received_payload(ch, payload);

        xpc_msgqueue_deref(ch);
}
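/*
 * For reference, a sketch of the channel message-queue reference counting
 * that the comment above refers to, approximating the inline helpers in
 * xpc.h (treat the exact bodies as an assumption): xpc_deliver_payload()
 * takes the reference before calling out to the user, and
 * xpc_initiate_received() drops it once the payload has been acknowledged.
 * Dropping the last reference nudges the channel manager, which may be
 * waiting to finish a disconnect.
 */
static inline void
xpc_msgqueue_ref(struct xpc_channel *ch)
{
        atomic_inc(&ch->references);
}

static inline void
xpc_msgqueue_deref(struct xpc_channel *ch)
{
        s32 refs = atomic_dec_return(&ch->references);

        DBUG_ON(refs < 0);
        if (refs == 0)
                xpc_wakeup_channel_mgr(&xpc_partitions[ch->partid]);
}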
static void
xpc_process_msg_chctl_flags_uv(struct xpc_partition *part, int ch_number)
{
        struct xpc_channel *ch = &part->channels[ch_number];
        int ndeliverable_payloads;

        xpc_msgqueue_ref(ch);

        ndeliverable_payloads = xpc_n_of_deliverable_payloads_uv(ch);

        if (ndeliverable_payloads > 0 &&
            (ch->flags & XPC_C_CONNECTED) &&
            (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE)) {

                xpc_activate_kthreads(ch, ndeliverable_payloads);
        }

        xpc_msgqueue_deref(ch);
}
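/*
 * Rough sketch of what xpc_activate_kthreads() does with the count passed
 * in above, as remembered from xpc_main.c (details, including the dropped
 * dev_dbg() calls, are an assumption): wake already-idle kthreads first,
 * then fall back to creating new ones via xpc_create_kthreads(), capped by
 * the channel's kthreads_assigned_limit.
 */
void
xpc_activate_kthreads(struct xpc_channel *ch, int needed)
{
        int idle = atomic_read(&ch->kthreads_idle);
        int assigned = atomic_read(&ch->kthreads_assigned);
        int wakeup;

        DBUG_ON(needed <= 0);

        if (idle > 0) {
                wakeup = (needed > idle) ? idle : needed;
                needed -= wakeup;

                /* only wake up the requested number of idle kthreads */
                wake_up_nr(&ch->idle_wq, wakeup);
        }

        if (needed <= 0)
                return;

        if (needed + assigned > ch->kthreads_assigned_limit) {
                needed = ch->kthreads_assigned_limit - assigned;
                if (needed <= 0)
                        return;
        }

        xpc_create_kthreads(ch, needed, 0);
}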
/*
 * Called by XP at the time of channel connection unregistration to cause
 * XPC to tear down all current connections for the specified channel.
 *
 * Before returning, xpc_initiate_disconnect() will wait until all connections
 * on the specified channel have been closed/torn down.  So the caller can be
 * assured that they will not be receiving any more callouts from XPC to the
 * function they registered via xpc_connect().
 *
 * Arguments:
 *
 *      ch_number - channel # to unregister.
 */
void
xpc_initiate_disconnect(int ch_number)
{
        unsigned long irq_flags;
        short partid;
        struct xpc_partition *part;
        struct xpc_channel *ch;

        DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS);

        /* initiate the channel disconnect for every active partition */
        for (partid = 0; partid < xp_max_npartitions; partid++) {
                part = &xpc_partitions[partid];

                if (xpc_part_ref(part)) {
                        ch = &part->channels[ch_number];
                        xpc_msgqueue_ref(ch);

                        spin_lock_irqsave(&ch->lock, irq_flags);

                        if (!(ch->flags & XPC_C_DISCONNECTED)) {
                                ch->flags |= XPC_C_WDISCONNECT;

                                XPC_DISCONNECT_CHANNEL(ch, xpUnregistering,
                                                       &irq_flags);
                        }

                        spin_unlock_irqrestore(&ch->lock, irq_flags);

                        xpc_msgqueue_deref(ch);
                        xpc_part_deref(part);
                }
        }

        xpc_disconnect_wait(ch_number);
}
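/*
 * Hedged usage sketch (modeled loosely on how xpnet registers): a kernel
 * client connects through xp's xpc_connect() wrapper, and unregistering
 * later funnels into xpc_initiate_disconnect() above via the xpc_interface
 * hooks.  The channel constant, payload size, entry count and kthread
 * limits below are illustrative only; a real client reserves its own
 * channel number.
 */
static void
example_channel_func(enum xp_retval reason, short partid, int ch_number,
                     void *data, void *key)
{
        /* receives connect/disconnect events and delivered payloads */
}

static enum xp_retval
example_register(void)
{
        return xpc_connect(XPC_NET_CHANNEL, example_channel_func, NULL,
                           128,         /* max payload size        */
                           16,          /* #of message entries     */
                           4,           /* kthreads assigned limit */
                           2);          /* kthreads idle limit     */
}

static void
example_unregister(void)
{
        xpc_disconnect(XPC_NET_CHANNEL);  /* -> xpc_initiate_disconnect() */
}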
/*
 * For each partition that XPC has established communications with, there is
 * a minimum of one kernel thread assigned to perform any operation that
 * may potentially sleep or block (basically the callouts to the asynchronous
 * functions registered via xpc_connect()).
 *
 * Additional kthreads are created and destroyed by XPC as the workload
 * demands.
 *
 * A kthread is assigned to one of the active channels that exists for a given
 * partition.
 */
void
xpc_create_kthreads(struct xpc_channel *ch, int needed,
                    int ignore_disconnecting)
{
        unsigned long irq_flags;
        u64 args = XPC_PACK_ARGS(ch->partid, ch->number);
        struct xpc_partition *part = &xpc_partitions[ch->partid];
        struct task_struct *kthread;

        while (needed-- > 0) {

                /*
                 * The following is done on behalf of the newly created
                 * kthread. That kthread is responsible for doing the
                 * counterpart to the following before it exits.
                 */
                if (ignore_disconnecting) {
                        if (!atomic_inc_not_zero(&ch->kthreads_assigned)) {
                                /* kthreads assigned had gone to zero */
                                BUG_ON(!(ch->flags &
                                         XPC_C_DISCONNECTINGCALLOUT_MADE));
                                break;
                        }

                } else if (ch->flags & XPC_C_DISCONNECTING) {
                        break;

                } else if (atomic_inc_return(&ch->kthreads_assigned) == 1 &&
                           atomic_inc_return(&part->nchannels_engaged) == 1) {
                        xpc_indicate_partition_engaged(part);
                }

                (void)xpc_part_ref(part);
                xpc_msgqueue_ref(ch);

                kthread = kthread_run(xpc_kthread_start, (void *)args,
                                      "xpc%02dc%d", ch->partid, ch->number);
                if (IS_ERR(kthread)) {
                        /* the fork failed */

                        /*
                         * NOTE: if (ignore_disconnecting &&
                         * !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) is true,
                         * then we'll deadlock if all other kthreads assigned
                         * to this channel are blocked in the channel's
                         * registerer, because the only thing that will unblock
                         * them is the xpDisconnecting callout that this
                         * failed kthread_run() would have made.
                         */

                        if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
                            atomic_dec_return(&part->nchannels_engaged) == 0) {
                                xpc_indicate_partition_disengaged(part);
                        }
                        xpc_msgqueue_deref(ch);
                        xpc_part_deref(part);

                        if (atomic_read(&ch->kthreads_assigned) <
                            ch->kthreads_idle_limit) {
                                /*
                                 * Flag this as an error only if we have an
                                 * insufficient #of kthreads for the channel
                                 * to function.
                                 */
                                spin_lock_irqsave(&ch->lock, irq_flags);
                                XPC_DISCONNECT_CHANNEL(ch, xpLackOfResources,
                                                       &irq_flags);
                                spin_unlock_irqrestore(&ch->lock, irq_flags);
                        }
                        break;
                }
        }
}
static int
xpc_kthread_start(void *args)
{
        short partid = XPC_UNPACK_ARG1(args);
        u16 ch_number = XPC_UNPACK_ARG2(args);
        struct xpc_partition *part = &xpc_partitions[partid];
        struct xpc_channel *ch;
        int n_needed;
        unsigned long irq_flags;

        dev_dbg(xpc_chan, "kthread starting, partid=%d, channel=%d\n",
                partid, ch_number);

        ch = &part->channels[ch_number];

        if (!(ch->flags & XPC_C_DISCONNECTING)) {

                /* let registerer know that connection has been established */

                spin_lock_irqsave(&ch->lock, irq_flags);
                if (!(ch->flags & XPC_C_CONNECTEDCALLOUT)) {
                        ch->flags |= XPC_C_CONNECTEDCALLOUT;
                        spin_unlock_irqrestore(&ch->lock, irq_flags);

                        xpc_connected_callout(ch);

                        spin_lock_irqsave(&ch->lock, irq_flags);
                        ch->flags |= XPC_C_CONNECTEDCALLOUT_MADE;
                        spin_unlock_irqrestore(&ch->lock, irq_flags);

                        /*
                         * It is possible that while the callout was being
                         * made the remote partition sent some messages.
                         * If that is the case, we may need to activate
                         * additional kthreads to help deliver them. We only
                         * need one less than the total #of messages to
                         * deliver.
                         */
                        n_needed = xpc_n_of_deliverable_payloads(ch) - 1;
                        if (n_needed > 0 && !(ch->flags & XPC_C_DISCONNECTING))
                                xpc_activate_kthreads(ch, n_needed);

                } else {
                        spin_unlock_irqrestore(&ch->lock, irq_flags);
                }

                xpc_kthread_waitmsgs(part, ch);
        }

        /* let registerer know that connection is disconnecting */

        spin_lock_irqsave(&ch->lock, irq_flags);
        if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
            !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
                ch->flags |= XPC_C_DISCONNECTINGCALLOUT;
                spin_unlock_irqrestore(&ch->lock, irq_flags);

                xpc_disconnect_callout(ch, xpDisconnecting);

                spin_lock_irqsave(&ch->lock, irq_flags);
                ch->flags |= XPC_C_DISCONNECTINGCALLOUT_MADE;
        }
        spin_unlock_irqrestore(&ch->lock, irq_flags);

        if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
            atomic_dec_return(&part->nchannels_engaged) == 0) {
                xpc_indicate_partition_disengaged(part);
        }

        xpc_msgqueue_deref(ch);

        dev_dbg(xpc_chan, "kthread exiting, partid=%d, channel=%d\n",
                partid, ch_number);

        xpc_part_deref(part);
        return 0;
}
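/*
 * Sketch of the argument packing used between xpc_create_kthreads() and
 * xpc_kthread_start(), approximating the XPC_PACK_ARGS()/XPC_UNPACK_ARG*()
 * macros in xpc.h (treat the exact definitions as an assumption): the
 * partid and channel number are squeezed into the single pointer-sized
 * argument that kthread_run() hands to the new thread.
 */
#define XPC_PACK_ARGS(_arg1, _arg2)                     \
                ((((u64)_arg1) & 0xffffffff) |          \
                ((((u64)_arg2) & 0xffffffff) << 32))

#define XPC_UNPACK_ARG1(_args)  (((u64)_args) & 0xffffffff)
#define XPC_UNPACK_ARG2(_args)  ((((u64)_args) >> 32) & 0xffffffff)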
void
xpc_create_kthreads(struct xpc_channel *ch, int needed,
                    int ignore_disconnecting)
{
        unsigned long irq_flags;
        u64 args = XPC_PACK_ARGS(ch->partid, ch->number);
        struct xpc_partition *part = &xpc_partitions[ch->partid];
        struct task_struct *kthread;
        void (*indicate_partition_disengaged) (struct xpc_partition *) =
                xpc_arch_ops.indicate_partition_disengaged;

        while (needed-- > 0) {

                /*
                 * The following is done on behalf of the newly created
                 * kthread. That kthread is responsible for doing the
                 * counterpart to the following before it exits.
                 */
                if (ignore_disconnecting) {
                        if (!atomic_inc_not_zero(&ch->kthreads_assigned)) {
                                /* kthreads assigned had gone to zero */
                                BUG_ON(!(ch->flags &
                                         XPC_C_DISCONNECTINGCALLOUT_MADE));
                                break;
                        }

                } else if (ch->flags & XPC_C_DISCONNECTING) {
                        break;

                } else if (atomic_inc_return(&ch->kthreads_assigned) == 1 &&
                           atomic_inc_return(&part->nchannels_engaged) == 1) {
                        xpc_arch_ops.indicate_partition_engaged(part);
                }

                (void)xpc_part_ref(part);
                xpc_msgqueue_ref(ch);

                kthread = kthread_run(xpc_kthread_start, (void *)args,
                                      "xpc%02dc%d", ch->partid, ch->number);
                if (IS_ERR(kthread)) {
                        /* the fork failed */

                        /*
                         * NOTE: if (ignore_disconnecting &&
                         * !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) is true,
                         * then we'll deadlock if all other kthreads assigned
                         * to this channel are blocked in the channel's
                         * registerer, because the only thing that will unblock
                         * them is the xpDisconnecting callout that this
                         * failed kthread_run() would have made.
                         */

                        if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
                            atomic_dec_return(&part->nchannels_engaged) == 0) {
                                indicate_partition_disengaged(part);
                        }
                        xpc_msgqueue_deref(ch);
                        xpc_part_deref(part);

                        if (atomic_read(&ch->kthreads_assigned) <
                            ch->kthreads_idle_limit) {
                                /*
                                 * Flag this as an error only if we have an
                                 * insufficient #of kthreads for the channel
                                 * to function.
                                 */
                                spin_lock_irqsave(&ch->lock, irq_flags);
                                XPC_DISCONNECT_CHANNEL(ch, xpLackOfResources,
                                                       &irq_flags);
                                spin_unlock_irqrestore(&ch->lock, irq_flags);
                        }
                        break;
                }
        }
}
static int
xpc_kthread_start(void *args)
{
        short partid = XPC_UNPACK_ARG1(args);
        u16 ch_number = XPC_UNPACK_ARG2(args);
        struct xpc_partition *part = &xpc_partitions[partid];
        struct xpc_channel *ch;
        int n_needed;
        unsigned long irq_flags;
        int (*n_of_deliverable_payloads) (struct xpc_channel *) =
                xpc_arch_ops.n_of_deliverable_payloads;

        dev_dbg(xpc_chan, "kthread starting, partid=%d, channel=%d\n",
                partid, ch_number);

        ch = &part->channels[ch_number];

        if (!(ch->flags & XPC_C_DISCONNECTING)) {

                /* let registerer know that connection has been established */

                spin_lock_irqsave(&ch->lock, irq_flags);
                if (!(ch->flags & XPC_C_CONNECTEDCALLOUT)) {
                        ch->flags |= XPC_C_CONNECTEDCALLOUT;
                        spin_unlock_irqrestore(&ch->lock, irq_flags);

                        xpc_connected_callout(ch);

                        spin_lock_irqsave(&ch->lock, irq_flags);
                        ch->flags |= XPC_C_CONNECTEDCALLOUT_MADE;
                        spin_unlock_irqrestore(&ch->lock, irq_flags);

                        /*
                         * It is possible that while the callout was being
                         * made the remote partition sent some messages.
                         * If that is the case, we may need to activate
                         * additional kthreads to help deliver them. We only
                         * need one less than the total #of messages to
                         * deliver.
                         */
                        n_needed = n_of_deliverable_payloads(ch) - 1;
                        if (n_needed > 0 && !(ch->flags & XPC_C_DISCONNECTING))
                                xpc_activate_kthreads(ch, n_needed);

                } else {
                        spin_unlock_irqrestore(&ch->lock, irq_flags);
                }

                xpc_kthread_waitmsgs(part, ch);
        }

        /* let registerer know that connection is disconnecting */

        spin_lock_irqsave(&ch->lock, irq_flags);
        if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
            !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
                ch->flags |= XPC_C_DISCONNECTINGCALLOUT;
                spin_unlock_irqrestore(&ch->lock, irq_flags);

                xpc_disconnect_callout(ch, xpDisconnecting);

                spin_lock_irqsave(&ch->lock, irq_flags);
                ch->flags |= XPC_C_DISCONNECTINGCALLOUT_MADE;
        }
        spin_unlock_irqrestore(&ch->lock, irq_flags);

        if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
            atomic_dec_return(&part->nchannels_engaged) == 0) {
                xpc_arch_ops.indicate_partition_disengaged(part);
        }

        xpc_msgqueue_deref(ch);

        dev_dbg(xpc_chan, "kthread exiting, partid=%d, channel=%d\n",
                partid, ch_number);

        xpc_part_deref(part);
        return 0;
}
static enum xp_retval
xpc_send_payload_uv(struct xpc_channel *ch, u32 flags, void *payload,
                    u16 payload_size, u8 notify_type, xpc_notify_func func,
                    void *key)
{
        enum xp_retval ret = xpSuccess;
        struct xpc_send_msg_slot_uv *msg_slot = NULL;
        struct xpc_notify_mq_msg_uv *msg;
        u8 msg_buffer[XPC_NOTIFY_MSG_SIZE_UV];
        size_t msg_size;

        DBUG_ON(notify_type != XPC_N_CALL);

        msg_size = sizeof(struct xpc_notify_mq_msghdr_uv) + payload_size;
        if (msg_size > ch->entry_size)
                return xpPayloadTooBig;

        xpc_msgqueue_ref(ch);

        if (ch->flags & XPC_C_DISCONNECTING) {
                ret = ch->reason;
                goto out_1;
        }
        if (!(ch->flags & XPC_C_CONNECTED)) {
                ret = xpNotConnected;
                goto out_1;
        }

        ret = xpc_allocate_msg_slot_uv(ch, flags, &msg_slot);
        if (ret != xpSuccess)
                goto out_1;

        if (func != NULL) {
                atomic_inc(&ch->n_to_notify);

                msg_slot->key = key;
                smp_wmb(); /* a non-NULL func must hit memory after the key */
                msg_slot->func = func;

                if (ch->flags & XPC_C_DISCONNECTING) {
                        ret = ch->reason;
                        goto out_2;
                }
        }

        msg = (struct xpc_notify_mq_msg_uv *)&msg_buffer;
        msg->hdr.partid = xp_partition_id;
        msg->hdr.ch_number = ch->number;
        msg->hdr.size = msg_size;
        msg->hdr.msg_slot_number = msg_slot->msg_slot_number;
        memcpy(&msg->payload, payload, payload_size);

        ret = xpc_send_gru_msg(ch->sn.uv.cached_notify_gru_mq_desc, msg,
                               msg_size);
        if (ret == xpSuccess)
                goto out_1;

        XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
out_2:
        if (func != NULL) {
                /*
                 * Try to NULL the msg_slot's func field. If we fail, then
                 * xpc_notify_senders_of_disconnect_uv() beat us to it, in
                 * which case we need to pretend that we succeeded in sending
                 * the message, since the user will get a callout with the
                 * disconnect error from xpc_notify_senders_of_disconnect_uv();
                 * also getting an error returned here would confuse them.
                 * Additionally, since in this case the channel is being
                 * disconnected, we don't need to put the msg_slot back on
                 * the free list.
                 */
                if (cmpxchg(&msg_slot->func, func, NULL) != func) {
                        ret = xpSuccess;
                        goto out_1;
                }

                msg_slot->key = NULL;
                atomic_dec(&ch->n_to_notify);
        }
        xpc_free_msg_slot_uv(ch, msg_slot);
out_1:
        xpc_msgqueue_deref(ch);
        return ret;
}
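/*
 * Approximate sketch of the notify-mq message layout assumed by
 * xpc_send_payload_uv() and xpc_handle_notify_mq_msg_uv().  The field set
 * is grounded in how the code above uses msg->hdr; the exact field widths
 * and the GRU union member are from memory and should be treated as
 * assumptions rather than the verbatim xpc.h definitions.
 */
struct xpc_notify_mq_msghdr_uv {
        union {
                unsigned int gru_msg_hdr;       /* FOR GRU INTERNAL USE ONLY */
                struct xpc_fifo_entry_uv next;  /* FOR XPC INTERNAL USE ONLY */
        } u;
        short partid;                   /* id of the sending partition */
        u8 ch_number;                   /* channel the message was sent on */
        u8 size;                        /* header + payload size; 0 == ACK */
        unsigned int msg_slot_number;   /* sender's msg slot, echoed in ACKs */
};

struct xpc_notify_mq_msg_uv {
        struct xpc_notify_mq_msghdr_uv hdr;
        unsigned long payload;          /* user payload starts here */
};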
static void
xpc_handle_notify_mq_msg_uv(struct xpc_partition *part,
                            struct xpc_notify_mq_msg_uv *msg)
{
        struct xpc_partition_uv *part_uv = &part->sn.uv;
        struct xpc_channel *ch;
        struct xpc_channel_uv *ch_uv;
        struct xpc_notify_mq_msg_uv *msg_slot;
        unsigned long irq_flags;
        int ch_number = msg->hdr.ch_number;

        if (unlikely(ch_number >= part->nchannels)) {
                dev_err(xpc_part, "xpc_handle_notify_IRQ_uv() received invalid "
                        "channel number=0x%x in message from partid=%d\n",
                        ch_number, XPC_PARTID(part));

                /* get hb checker to deactivate from the remote partition */
                spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
                if (part_uv->act_state_req == 0)
                        xpc_activate_IRQ_rcvd++;
                part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
                part_uv->reason = xpBadChannelNumber;
                spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

                wake_up_interruptible(&xpc_activate_IRQ_wq);
                return;
        }

        ch = &part->channels[ch_number];
        xpc_msgqueue_ref(ch);

        if (!(ch->flags & XPC_C_CONNECTED)) {
                xpc_msgqueue_deref(ch);
                return;
        }

        /* see if we're really dealing with an ACK for a previously sent msg */
        if (msg->hdr.size == 0) {
                xpc_handle_notify_mq_ack_uv(ch, msg);
                xpc_msgqueue_deref(ch);
                return;
        }

        /* we're dealing with a normal message sent via the notify_mq */
        ch_uv = &ch->sn.uv;

        msg_slot = ch_uv->recv_msg_slots +
            (msg->hdr.msg_slot_number % ch->remote_nentries) * ch->entry_size;

        BUG_ON(msg_slot->hdr.size != 0);

        memcpy(msg_slot, msg, msg->hdr.size);

        xpc_put_fifo_entry_uv(&ch_uv->recv_msg_list, &msg_slot->hdr.u.next);

        if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) {
                /*
                 * If there is an existing idle kthread get it to deliver
                 * the payload, otherwise we'll have to get the channel mgr
                 * for this partition to create a kthread to do the delivery.
                 */
                if (atomic_read(&ch->kthreads_idle) > 0)
                        wake_up_nr(&ch->idle_wq, 1);
                else
                        xpc_send_chctl_local_msgrequest_uv(part, ch->number);
        }
        xpc_msgqueue_deref(ch);
}
static enum xp_retval
xpc_send_payload_uv(struct xpc_channel *ch, u32 flags, void *payload,
                    u16 payload_size, u8 notify_type, xpc_notify_func func,
                    void *key)
{
        enum xp_retval ret = xpSuccess;
        struct xpc_send_msg_slot_uv *msg_slot = NULL;
        struct xpc_notify_mq_msg_uv *msg;
        u8 msg_buffer[XPC_NOTIFY_MSG_SIZE_UV];
        size_t msg_size;

        DBUG_ON(notify_type != XPC_N_CALL);

        msg_size = sizeof(struct xpc_notify_mq_msghdr_uv) + payload_size;
        if (msg_size > ch->entry_size)
                return xpPayloadTooBig;

        xpc_msgqueue_ref(ch);

        if (ch->flags & XPC_C_DISCONNECTING) {
                ret = ch->reason;
                goto out_1;
        }
        if (!(ch->flags & XPC_C_CONNECTED)) {
                ret = xpNotConnected;
                goto out_1;
        }

        ret = xpc_allocate_msg_slot_uv(ch, flags, &msg_slot);
        if (ret != xpSuccess)
                goto out_1;

        if (func != NULL) {
                atomic_inc(&ch->n_to_notify);

                msg_slot->key = key;
                smp_wmb();
                msg_slot->func = func;

                if (ch->flags & XPC_C_DISCONNECTING) {
                        ret = ch->reason;
                        goto out_2;
                }
        }

        msg = (struct xpc_notify_mq_msg_uv *)&msg_buffer;
        msg->hdr.partid = xp_partition_id;
        msg->hdr.ch_number = ch->number;
        msg->hdr.size = msg_size;
        msg->hdr.msg_slot_number = msg_slot->msg_slot_number;
        memcpy(&msg->payload, payload, payload_size);

        ret = xpc_send_gru_msg(ch->sn.uv.cached_notify_gru_mq_desc, msg,
                               msg_size);
        if (ret == xpSuccess)
                goto out_1;

        XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
out_2:
        if (func != NULL) {
                if (cmpxchg(&msg_slot->func, func, NULL) != func) {
                        ret = xpSuccess;
                        goto out_1;
                }

                msg_slot->key = NULL;
                atomic_dec(&ch->n_to_notify);
        }
        xpc_free_msg_slot_uv(ch, msg_slot);
out_1:
        xpc_msgqueue_deref(ch);
        return ret;
}
static void
xpc_handle_notify_mq_msg_uv(struct xpc_partition *part,
                            struct xpc_notify_mq_msg_uv *msg)
{
        struct xpc_partition_uv *part_uv = &part->sn.uv;
        struct xpc_channel *ch;
        struct xpc_channel_uv *ch_uv;
        struct xpc_notify_mq_msg_uv *msg_slot;
        unsigned long irq_flags;
        int ch_number = msg->hdr.ch_number;

        if (unlikely(ch_number >= part->nchannels)) {
                dev_err(xpc_part, "xpc_handle_notify_IRQ_uv() received invalid "
                        "channel number=0x%x in message from partid=%d\n",
                        ch_number, XPC_PARTID(part));

                spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
                if (part_uv->act_state_req == 0)
                        xpc_activate_IRQ_rcvd++;
                part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV;
                part_uv->reason = xpBadChannelNumber;
                spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

                wake_up_interruptible(&xpc_activate_IRQ_wq);
                return;
        }

        ch = &part->channels[ch_number];
        xpc_msgqueue_ref(ch);

        if (!(ch->flags & XPC_C_CONNECTED)) {
                xpc_msgqueue_deref(ch);
                return;
        }

        if (msg->hdr.size == 0) {
                xpc_handle_notify_mq_ack_uv(ch, msg);
                xpc_msgqueue_deref(ch);
                return;
        }

        ch_uv = &ch->sn.uv;

        msg_slot = ch_uv->recv_msg_slots +
            (msg->hdr.msg_slot_number % ch->remote_nentries) * ch->entry_size;

        BUG_ON(msg->hdr.msg_slot_number != msg_slot->hdr.msg_slot_number);
        BUG_ON(msg_slot->hdr.size != 0);

        memcpy(msg_slot, msg, msg->hdr.size);

        xpc_put_fifo_entry_uv(&ch_uv->recv_msg_list, &msg_slot->hdr.u.next);

        if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) {
                if (atomic_read(&ch->kthreads_idle) > 0)
                        wake_up_nr(&ch->idle_wq, 1);
                else
                        xpc_send_chctl_local_msgrequest_uv(part, ch->number);
        }
        xpc_msgqueue_deref(ch);
}
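/*
 * For context, an approximate sketch (not verbatim) of the ACK path taken
 * above when msg->hdr.size == 0: the sender's message slot is looked up by
 * the echoed slot number, retired, and any registered notify func is called
 * with xpMsgDelivered.  The xpc_notify_sender_uv() helper and the exact
 * field names are as remembered from xpc_uv.c and should be treated as an
 * assumption.
 */
static void
xpc_handle_notify_mq_ack_uv(struct xpc_channel *ch,
                            struct xpc_notify_mq_msg_uv *msg)
{
        struct xpc_send_msg_slot_uv *msg_slot;
        int entry = msg->hdr.msg_slot_number % ch->local_nentries;

        msg_slot = &ch->sn.uv.send_msg_slots[entry];

        BUG_ON(msg_slot->msg_slot_number != msg->hdr.msg_slot_number);
        msg_slot->msg_slot_number += ch->local_nentries;

        if (msg_slot->func != NULL)
                xpc_notify_sender_uv(ch, msg_slot, xpMsgDelivered);

        xpc_free_msg_slot_uv(ch, msg_slot);
}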