static void
xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch)
{
	int (*n_of_deliverable_payloads) (struct xpc_channel *) =
		xpc_arch_ops.n_of_deliverable_payloads;

	do {
		/* deliver messages to their intended recipients */

		while (n_of_deliverable_payloads(ch) > 0 &&
		       !(ch->flags & XPC_C_DISCONNECTING)) {
			xpc_deliver_payload(ch);
		}

		if (atomic_inc_return(&ch->kthreads_idle) >
		    ch->kthreads_idle_limit) {
			/* too many idle kthreads on this channel */
			atomic_dec(&ch->kthreads_idle);
			break;
		}

		dev_dbg(xpc_chan, "idle kthread calling "
			"wait_event_interruptible_exclusive()\n");

		(void)wait_event_interruptible_exclusive(ch->idle_wq,
				(n_of_deliverable_payloads(ch) > 0 ||
				 (ch->flags & XPC_C_DISCONNECTING)));

		atomic_dec(&ch->kthreads_idle);

	} while (!(ch->flags & XPC_C_DISCONNECTING));
}
static int
xpc_kthread_start(void *args)
{
	short partid = XPC_UNPACK_ARG1(args);
	u16 ch_number = XPC_UNPACK_ARG2(args);
	struct xpc_partition *part = &xpc_partitions[partid];
	struct xpc_channel *ch;
	int n_needed;
	unsigned long irq_flags;
	int (*n_of_deliverable_payloads) (struct xpc_channel *) =
		xpc_arch_ops.n_of_deliverable_payloads;

	dev_dbg(xpc_chan, "kthread starting, partid=%d, channel=%d\n",
		partid, ch_number);

	ch = &part->channels[ch_number];

	if (!(ch->flags & XPC_C_DISCONNECTING)) {

		/* let registerer know that connection has been established */

		spin_lock_irqsave(&ch->lock, irq_flags);
		if (!(ch->flags & XPC_C_CONNECTEDCALLOUT)) {
			ch->flags |= XPC_C_CONNECTEDCALLOUT;
			spin_unlock_irqrestore(&ch->lock, irq_flags);

			xpc_connected_callout(ch);

			spin_lock_irqsave(&ch->lock, irq_flags);
			ch->flags |= XPC_C_CONNECTEDCALLOUT_MADE;
			spin_unlock_irqrestore(&ch->lock, irq_flags);

			/*
			 * It is possible that while the callout was being
			 * made that the remote partition sent some messages.
			 * If that is the case, we may need to activate
			 * additional kthreads to help deliver them. We only
			 * need one less than total #of messages to deliver.
			 */
			n_needed = n_of_deliverable_payloads(ch) - 1;
			if (n_needed > 0 && !(ch->flags & XPC_C_DISCONNECTING))
				xpc_activate_kthreads(ch, n_needed);

		} else {
			spin_unlock_irqrestore(&ch->lock, irq_flags);
		}

		xpc_kthread_waitmsgs(part, ch);
	}

	/* let registerer know that connection is disconnecting */

	spin_lock_irqsave(&ch->lock, irq_flags);
	if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
	    !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
		ch->flags |= XPC_C_DISCONNECTINGCALLOUT;
		spin_unlock_irqrestore(&ch->lock, irq_flags);

		xpc_disconnect_callout(ch, xpDisconnecting);

		spin_lock_irqsave(&ch->lock, irq_flags);
		ch->flags |= XPC_C_DISCONNECTINGCALLOUT_MADE;
	}
	spin_unlock_irqrestore(&ch->lock, irq_flags);

	if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
	    atomic_dec_return(&part->nchannels_engaged) == 0) {
		xpc_arch_ops.indicate_partition_disengaged(part);
	}

	xpc_msgqueue_deref(ch);

	dev_dbg(xpc_chan, "kthread exiting, partid=%d, channel=%d\n",
		partid, ch_number);

	xpc_part_deref(part);
	return 0;
}
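/*
 * Illustrative sketch (not part of the original source): xpc_kthread_start()
 * expects the partition and channel numbers to arrive packed into the opaque
 * 'args' cookie, as the XPC_UNPACK_ARG1()/XPC_UNPACK_ARG2() calls above show.
 * Assuming a matching XPC_PACK_ARGS() macro and the ch->partid / ch->number
 * fields, a caller might spawn one of these kthreads roughly like this:
 *
 *	u64 args = XPC_PACK_ARGS(ch->partid, ch->number);
 *	struct task_struct *kthread;
 *
 *	kthread = kthread_run(xpc_kthread_start, (void *)args,
 *			      "xpc%02dc%d", ch->partid, ch->number);
 *	if (IS_ERR(kthread)) {
 *		.. spawn failed; unwind the kthreads_assigned and
 *		   nchannels_engaged accounting taken before the spawn ..
 *	}
 */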