Example #1
0
static void
xpc_teardown_ch_structures(struct xpc_partition *part)
{
	DBUG_ON(atomic_read(&part->nchannels_engaged) != 0);
	DBUG_ON(atomic_read(&part->nchannels_active) != 0);

	/*
	 * Make this partition inaccessible to local processes by marking it
	 * as no longer setup. Then wait before proceeding with the teardown
	 * until all existing references cease.
	 */
	DBUG_ON(part->setup_state != XPC_P_SS_SETUP);
	part->setup_state = XPC_P_SS_WTEARDOWN;

	wait_event(part->teardown_wq, (atomic_read(&part->references) == 0));

	/* now we can begin tearing down the infrastructure */

	xpc_arch_ops.teardown_ch_structures(part);

	kfree(part->remote_openclose_args_base);
	part->remote_openclose_args = NULL;
	kfree(part->channels);
	part->channels = NULL;

	part->setup_state = XPC_P_SS_TORNDOWN;
}
Example #2
0
/*
 * Wait for a message entry to become available for the specified channel,
 * but don't wait any longer than 1 jiffy.
 */
enum xp_retval
xpc_allocate_msg_wait(struct xpc_channel *ch)
{
	enum xp_retval ret;

	if (ch->flags & XPC_C_DISCONNECTING) {
		DBUG_ON(ch->reason == xpInterrupted);
		return ch->reason;
	}

	atomic_inc(&ch->n_on_msg_allocate_wq);
	ret = interruptible_sleep_on_timeout(&ch->msg_allocate_wq, 1);
	atomic_dec(&ch->n_on_msg_allocate_wq);

	if (ch->flags & XPC_C_DISCONNECTING) {
		ret = ch->reason;
		DBUG_ON(ch->reason == xpInterrupted);
	} else if (ret == 0) {
		ret = xpTimeout;
	} else {
		ret = xpInterrupted;
	}

	return ret;
}
Example #3
0
/*
 * Teardown the channel structures necessary to support XPartition Communication
 * between the specified remote partition and the local one.
 */
static void
xpc_teardown_ch_structures(struct xpc_partition *part)
{
	DBUG_ON(atomic_read(&part->nchannels_engaged) != 0);
	DBUG_ON(atomic_read(&part->nchannels_active) != 0);

	/*
	 * Make this partition inaccessible to local processes by marking it
	 * as no longer setup. Then wait before proceeding with the teardown
	 * until all existing references cease.
	 */
	DBUG_ON(part->setup_state != XPC_P_SS_SETUP);
	part->setup_state = XPC_P_SS_WTEARDOWN;

	wait_event(part->teardown_wq, (atomic_read(&part->references) == 0));

	/* now we can begin tearing down the infrastructure */

	xpc_teardown_ch_structures_sn(part);

	kfree(part->remote_openclose_args_base);
	part->remote_openclose_args = NULL;
	kfree(part->channels);
	part->channels = NULL;

	part->setup_state = XPC_P_SS_TORNDOWN;
}
Example #4
0
/*
 * Wait for a message entry to become available for the specified channel,
 * but don't wait any longer than 1 jiffy.
 */
enum xp_retval
xpc_allocate_msg_wait(struct xpc_channel *ch)
{
	enum xp_retval ret;
	DEFINE_WAIT(wait);

	if (ch->flags & XPC_C_DISCONNECTING) {
		DBUG_ON(ch->reason == xpInterrupted);
		return ch->reason;
	}

	atomic_inc(&ch->n_on_msg_allocate_wq);
	prepare_to_wait(&ch->msg_allocate_wq, &wait, TASK_INTERRUPTIBLE);
	ret = schedule_timeout(1);
	finish_wait(&ch->msg_allocate_wq, &wait);
	atomic_dec(&ch->n_on_msg_allocate_wq);

	if (ch->flags & XPC_C_DISCONNECTING) {
		ret = ch->reason;
		DBUG_ON(ch->reason == xpInterrupted);
	} else if (ret == 0) {
		ret = xpTimeout;
	} else {
		ret = xpInterrupted;
	}

	return ret;
}
Example #5
0
/*
 * Timer function to enforce the timelimit on the partition disengage.
 */
static void
xpc_timeout_partition_disengage(unsigned long data)
{
	struct xpc_partition *part = (struct xpc_partition *)data;

	DBUG_ON(time_is_after_jiffies(part->disengage_timeout));

	(void)xpc_partition_disengaged(part);

	DBUG_ON(part->disengage_timeout != 0);
	DBUG_ON(xpc_partition_engaged(XPC_PARTID(part)));
}
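
The callback above runs when the disengage timelimit expires. Below is a minimal, hypothetical sketch of how such a timer might be armed and later cancelled, assuming the legacy kernel timer API (setup_timer()/mod_timer() with an unsigned long data argument, as the callback signature implies) and the disengage_timer/disengage_timeout fields of struct xpc_partition; the helper functions themselves are not part of XPC.

/*
 * Illustrative helpers (not from the kernel): arm the per-partition disengage
 * timer so xpc_timeout_partition_disengage() fires if the remote partition
 * has not disengaged within timelimit seconds, then cancel it again.
 */
static void
example_start_disengage_timer(struct xpc_partition *part, int timelimit)
{
	part->disengage_timeout = jiffies + timelimit * HZ;

	setup_timer(&part->disengage_timer, xpc_timeout_partition_disengage,
		    (unsigned long)part);
	mod_timer(&part->disengage_timer, part->disengage_timeout);
}

static void
example_stop_disengage_timer(struct xpc_partition *part)
{
	/* make sure the handler is no longer running before clearing state */
	del_timer_sync(&part->disengage_timer);
	part->disengage_timeout = 0;
}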
Example #6
0
/*
 * Timer function to enforce the timelimit on the partition disengage request.
 */
static void
xpc_timeout_partition_disengage_request(unsigned long data)
{
    struct xpc_partition *part = (struct xpc_partition *)data;

    DBUG_ON(time_before(jiffies, part->disengage_request_timeout));

    (void)xpc_partition_disengaged(part);

    DBUG_ON(part->disengage_request_timeout != 0);
    DBUG_ON(xpc_partition_engaged(1UL << XPC_PARTID(part)) != 0);
}
Example #7
0
/*
 * When XPC HB determines that a partition has come up, it will create a new
 * kthread and that kthread will call this function to attempt to set up the
 * basic infrastructure used for Cross Partition Communication with the newly
 * upped partition.
 *
 * The kthread that was created by XPC HB and which set up the XPC
 * infrastructure will remain assigned to the partition, becoming the channel
 * manager for that partition, until the partition is deactivating, at which
 * time the kthread will tear down the XPC infrastructure and then exit.
 */
static int
xpc_activating(void *__partid)
{
	short partid = (u64)__partid;
	struct xpc_partition *part = &xpc_partitions[partid];
	unsigned long irq_flags;

	DBUG_ON(partid < 0 || partid >= xp_max_npartitions);

	spin_lock_irqsave(&part->act_lock, irq_flags);

	if (part->act_state == XPC_P_AS_DEACTIVATING) {
		part->act_state = XPC_P_AS_INACTIVE;
		spin_unlock_irqrestore(&part->act_lock, irq_flags);
		part->remote_rp_pa = 0;
		return 0;
	}

	/* indicate the thread is activating */
	DBUG_ON(part->act_state != XPC_P_AS_ACTIVATION_REQ);
	part->act_state = XPC_P_AS_ACTIVATING;

	XPC_SET_REASON(part, 0, 0);
	spin_unlock_irqrestore(&part->act_lock, irq_flags);

	dev_dbg(xpc_part, "activating partition %d\n", partid);

	xpc_allow_hb(partid);

	if (xpc_setup_ch_structures(part) == xpSuccess) {
		(void)xpc_part_ref(part);	/* this will always succeed */

		if (xpc_make_first_contact(part) == xpSuccess) {
			xpc_mark_partition_active(part);
			xpc_channel_mgr(part);
			/* won't return until partition is deactivating */
		}

		xpc_part_deref(part);
		xpc_teardown_ch_structures(part);
	}

	xpc_disallow_hb(partid);
	xpc_mark_partition_inactive(part);

	if (part->reason == xpReactivating) {
		/* interrupting ourselves results in activating partition */
		xpc_request_partition_reactivation(part);
	}

	return 0;
}
Example #8
0
static void
xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
{
	enum xp_retval ret;

	DBUG_ON(!spin_is_locked(&ch->lock));

	if (!(ch->flags & XPC_C_OPENREQUEST) ||
	    !(ch->flags & XPC_C_ROPENREQUEST)) {
		/* nothing more to do for now */
		return;
	}
	DBUG_ON(!(ch->flags & XPC_C_CONNECTING));

	if (!(ch->flags & XPC_C_SETUP)) {
		spin_unlock_irqrestore(&ch->lock, *irq_flags);
		ret = xpc_arch_ops.setup_msg_structures(ch);
		spin_lock_irqsave(&ch->lock, *irq_flags);

		if (ret != xpSuccess)
			XPC_DISCONNECT_CHANNEL(ch, ret, irq_flags);
		else
			ch->flags |= XPC_C_SETUP;

		if (ch->flags & XPC_C_DISCONNECTING)
			return;
	}

	if (!(ch->flags & XPC_C_OPENREPLY)) {
		ch->flags |= XPC_C_OPENREPLY;
		xpc_arch_ops.send_chctl_openreply(ch, irq_flags);
	}

	if (!(ch->flags & XPC_C_ROPENREPLY))
		return;

	if (!(ch->flags & XPC_C_OPENCOMPLETE)) {
		ch->flags |= (XPC_C_OPENCOMPLETE | XPC_C_CONNECTED);
		xpc_arch_ops.send_chctl_opencomplete(ch, irq_flags);
	}

	if (!(ch->flags & XPC_C_ROPENCOMPLETE))
		return;

	dev_info(xpc_chan, "channel %d to partition %d connected\n",
		 ch->number, ch->partid);

	ch->flags = (XPC_C_CONNECTED | XPC_C_SETUP);	/* clear all else */
}
Example #9
0
void
xpc_initiate_received(short partid, int ch_number, void *payload)
{
	struct xpc_partition *part = &xpc_partitions[partid];
	struct xpc_channel *ch;

	DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
	DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);

	ch = &part->channels[ch_number];
	xpc_arch_ops.received_payload(ch, payload);

	/* the call to xpc_msgqueue_ref() was done by xpc_deliver_payload()  */
	xpc_msgqueue_deref(ch);
}
Example #10
0
/*
 * Acknowledge receipt of a delivered message's payload.
 *
 * This function, although called by users, does not call xpc_part_ref() to
 * ensure that the partition infrastructure is in place. It relies on the
 * fact that we called xpc_msgqueue_ref() in xpc_deliver_payload().
 *
 * Arguments:
 *
 *	partid - ID of partition to which the channel is connected.
 *	ch_number - channel # message received on.
 *	payload - pointer to the payload area allocated via
 *			xpc_initiate_send() or xpc_initiate_send_notify().
 */
void
xpc_initiate_received(short partid, int ch_number, void *payload)
{
	struct xpc_partition *part = &xpc_partitions[partid];
	struct xpc_channel *ch;

	DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
	DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);

	ch = &part->channels[ch_number];
	xpc_received_payload(ch, payload);

	/* the call to xpc_msgqueue_ref() was done by xpc_deliver_payload()  */
	xpc_msgqueue_deref(ch);
}
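
To show where xpc_initiate_received() fits from the consumer side, here is a hypothetical callout sketch: a channel function (signature per xpc_channel_func in xp.h) receives a delivered payload with reason xpMsgReceived and, once finished with it, acknowledges it so XPC can recycle the message entry. The callout name, payload layout, and debug message are illustrative only.

/* hypothetical user-defined payload layout */
struct example_msg {
	u64 type;
	u64 value;
};

static void
example_channel_callout(enum xp_retval reason, short partid, int ch_number,
			void *data, void *key)
{
	struct example_msg *msg = data;

	if (reason != xpMsgReceived)
		return;		/* connect/disconnect callouts not shown */

	pr_debug("example: partid=%d channel=%d type=%llu value=%llu\n",
		 partid, ch_number, (unsigned long long)msg->type,
		 (unsigned long long)msg->value);

	/* done with the payload; let XPC reclaim the message entry */
	xpc_initiate_received(partid, ch_number, data);
}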
Example #11
0
/*
 * Allocate msg_slots associated with the channel.
 */
static enum xp_retval
xpc_setup_msg_structures_uv(struct xpc_channel *ch)
{
	static enum xp_retval ret;
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;

	DBUG_ON(ch->flags & XPC_C_SETUP);

	ch_uv->cached_notify_gru_mq_desc = kmalloc(sizeof(struct
						   gru_message_queue_desc),
						   GFP_KERNEL);
	if (ch_uv->cached_notify_gru_mq_desc == NULL)
		return xpNoMemory;

	ret = xpc_allocate_send_msg_slot_uv(ch);
	if (ret == xpSuccess) {

		ret = xpc_allocate_recv_msg_slot_uv(ch);
		if (ret != xpSuccess) {
			kfree(ch_uv->send_msg_slots);
			xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
		}
	}
	return ret;
}
Example #12
0
/*
 * When XPC HB determines that a partition has come up, it will create a new
 * kthread and that kthread will call this function to attempt to set up the
 * basic infrastructure used for Cross Partition Communication with the newly
 * upped partition.
 *
 * The kthread that was created by XPC HB and which set up the XPC
 * infrastructure will remain assigned to the partition until the partition
 * goes down, at which time the kthread will tear down the XPC infrastructure
 * and then exit.
 *
 * XPC HB will put the remote partition's XPC per partition specific variables
 * physical address into xpc_partitions[partid].remote_vars_part_pa prior to
 * calling xpc_partition_up().
 */
static void
xpc_partition_up(struct xpc_partition *part)
{
    DBUG_ON(part->channels != NULL);

    dev_dbg(xpc_chan, "activating partition %d\n", XPC_PARTID(part));

    if (xpc_setup_infrastructure(part) != xpSuccess)
        return;

    /*
     * The kthread that XPC HB called us with will become the
     * channel manager for this partition. It will not return
     * back to XPC HB until the partition's XPC infrastructure
     * has been dismantled.
     */

    (void)xpc_part_ref(part);    /* this will always succeed */

    if (xpc_make_first_contact(part) == xpSuccess)
        xpc_channel_mgr(part);

    xpc_part_deref(part);

    xpc_teardown_infrastructure(part);
}
Example #13
0
void
xpc_disconnect(int ch_number)
{
	struct xpc_registration *registration;

	DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS);

	registration = &xpc_registrations[ch_number];

	/* block (uninterruptibly) until we acquire the registration mutex */
	mutex_lock(&registration->mutex);

	/* if !XPC_CHANNEL_REGISTERED(ch_number) */
	if (registration->func == NULL) {
		mutex_unlock(&registration->mutex);
		return;
	}

	/* remove the registration for the specified channel */
	registration->func = NULL;
	registration->key = NULL;
	registration->nentries = 0;
	registration->entry_size = 0;
	registration->assigned_limit = 0;
	registration->idle_limit = 0;

	xpc_interface.disconnect(ch_number);

	mutex_unlock(&registration->mutex);

	return;
}
Example #14
0
void
xpc_activate_kthreads(struct xpc_channel *ch, int needed)
{
	int idle = atomic_read(&ch->kthreads_idle);
	int assigned = atomic_read(&ch->kthreads_assigned);
	int wakeup;

	DBUG_ON(needed <= 0);

	if (idle > 0) {
		wakeup = (needed > idle) ? idle : needed;
		needed -= wakeup;

		dev_dbg(xpc_chan, "wakeup %d idle kthreads, partid=%d, "
			"channel=%d\n", wakeup, ch->partid, ch->number);

		/* only wakeup the requested number of kthreads */
		wake_up_nr(&ch->idle_wq, wakeup);
	}

	if (needed <= 0)
		return;

	if (needed + assigned > ch->kthreads_assigned_limit) {
		needed = ch->kthreads_assigned_limit - assigned;
		if (needed <= 0)
			return;
	}

	dev_dbg(xpc_chan, "create %d new kthreads, partid=%d, channel=%d\n",
		needed, ch->partid, ch->number);

	xpc_create_kthreads(ch, needed, 0);
}
Example #15
0
void
xpc_activate_partition(struct xpc_partition *part)
{
	short partid = XPC_PARTID(part);
	unsigned long irq_flags;
	struct task_struct *kthread;

	spin_lock_irqsave(&part->act_lock, irq_flags);

	DBUG_ON(part->act_state != XPC_P_AS_INACTIVE);

	part->act_state = XPC_P_AS_ACTIVATION_REQ;
	XPC_SET_REASON(part, xpCloneKThread, __LINE__);

	spin_unlock_irqrestore(&part->act_lock, irq_flags);

	kthread = kthread_run(xpc_activating, (void *)((u64)partid), "xpc%02d",
			      partid);
	if (IS_ERR(kthread)) {
		spin_lock_irqsave(&part->act_lock, irq_flags);
		part->act_state = XPC_P_AS_INACTIVE;
		XPC_SET_REASON(part, xpCloneKThreadFailed, __LINE__);
		spin_unlock_irqrestore(&part->act_lock, irq_flags);
	}
}
Example #16
0
/*
 * To disconnect a channel, and reflect it back to all who may be waiting.
 *
 * An OPEN is not allowed until XPC_C_DISCONNECTING is cleared by
 * xpc_process_disconnect(), and if set, XPC_C_WDISCONNECT is cleared by
 * xpc_disconnect_wait().
 *
 * THE CHANNEL IS TO BE LOCKED BY THE CALLER AND WILL REMAIN LOCKED UPON RETURN.
 */
void
xpc_disconnect_channel(const int line, struct xpc_channel *ch,
		       enum xp_retval reason, unsigned long *irq_flags)
{
	u32 channel_was_connected = (ch->flags & XPC_C_CONNECTED);

	DBUG_ON(!spin_is_locked(&ch->lock));

	if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED))
		return;

	DBUG_ON(!(ch->flags & (XPC_C_CONNECTING | XPC_C_CONNECTED)));

	dev_dbg(xpc_chan, "reason=%d, line=%d, partid=%d, channel=%d\n",
		reason, line, ch->partid, ch->number);

	XPC_SET_REASON(ch, reason, line);

	ch->flags |= (XPC_C_CLOSEREQUEST | XPC_C_DISCONNECTING);
	/* some of these may not have been set */
	ch->flags &= ~(XPC_C_OPENREQUEST | XPC_C_OPENREPLY |
		       XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
		       XPC_C_CONNECTING | XPC_C_CONNECTED);

	xpc_send_chctl_closerequest(ch, irq_flags);

	if (channel_was_connected)
		ch->flags |= XPC_C_WASCONNECTED;

	spin_unlock_irqrestore(&ch->lock, *irq_flags);

	/* wake all idle kthreads so they can exit */
	if (atomic_read(&ch->kthreads_idle) > 0) {
		wake_up_all(&ch->idle_wq);

	} else if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
		   !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
		/* start a kthread that will do the xpDisconnecting callout */
		xpc_create_kthreads(ch, 1, 1);
	}

	/* wake those waiting to allocate an entry from the local msg queue */
	if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
		wake_up(&ch->msg_allocate_wq);

	spin_lock_irqsave(&ch->lock, *irq_flags);
}
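
The locking contract spelled out above (channel locked by the caller and still locked on return, even though the lock is dropped internally while the close request is sent) can be illustrated with a hypothetical caller that goes through the XPC_DISCONNECT_CHANNEL() wrapper seen in the other examples:

/*
 * Hypothetical caller sketch (not from the kernel): acquire ch->lock, request
 * the disconnect via XPC_DISCONNECT_CHANNEL() (which supplies __LINE__), and
 * release the lock afterwards. xpc_disconnect_channel() may drop and retake
 * the lock while notifying the remote side, but it returns with it held.
 */
static void
example_drop_channel(struct xpc_channel *ch, enum xp_retval reason)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&ch->lock, irq_flags);
	XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags);
	spin_unlock_irqrestore(&ch->lock, irq_flags);
}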
Example #17
0
void
xpc_disconnect_channel(const int line, struct xpc_channel *ch,
		       enum xp_retval reason, unsigned long *irq_flags)
{
	u32 channel_was_connected = (ch->flags & XPC_C_CONNECTED);

	DBUG_ON(!spin_is_locked(&ch->lock));

	if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED))
		return;

	DBUG_ON(!(ch->flags & (XPC_C_CONNECTING | XPC_C_CONNECTED)));

	dev_dbg(xpc_chan, "reason=%d, line=%d, partid=%d, channel=%d\n",
		reason, line, ch->partid, ch->number);

	XPC_SET_REASON(ch, reason, line);

	ch->flags |= (XPC_C_CLOSEREQUEST | XPC_C_DISCONNECTING);
	/* some of these may not have been set */
	ch->flags &= ~(XPC_C_OPENREQUEST | XPC_C_OPENREPLY |
		       XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY |
		       XPC_C_CONNECTING | XPC_C_CONNECTED);

	xpc_arch_ops.send_chctl_closerequest(ch, irq_flags);

	if (channel_was_connected)
		ch->flags |= XPC_C_WASCONNECTED;

	spin_unlock_irqrestore(&ch->lock, *irq_flags);

	/* wake all idle kthreads so they can exit */
	if (atomic_read(&ch->kthreads_idle) > 0) {
		wake_up_all(&ch->idle_wq);

	} else if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
		   !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
		/* start a kthread that will do the xpDisconnecting callout */
		xpc_create_kthreads(ch, 1, 1);
	}

	/* wake those waiting to allocate an entry from the local msg queue */
	if (atomic_read(&ch->n_on_msg_allocate_wq) > 0)
		wake_up(&ch->msg_allocate_wq);

	spin_lock_irqsave(&ch->lock, *irq_flags);
}
Example #18
0
/*
 * Process a connect message from a remote partition.
 *
 * Note: xpc_process_connect() is expecting to be called with the
 * spin_lock_irqsave held and will leave it locked upon return.
 */
static void
xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
{
	enum xp_retval ret;

	DBUG_ON(!spin_is_locked(&ch->lock));

	if (!(ch->flags & XPC_C_OPENREQUEST) ||
	    !(ch->flags & XPC_C_ROPENREQUEST)) {
		/* nothing more to do for now */
		return;
	}
	DBUG_ON(!(ch->flags & XPC_C_CONNECTING));

	if (!(ch->flags & XPC_C_SETUP)) {
		spin_unlock_irqrestore(&ch->lock, *irq_flags);
		ret = xpc_setup_msg_structures(ch);
		spin_lock_irqsave(&ch->lock, *irq_flags);

		if (ret != xpSuccess)
			XPC_DISCONNECT_CHANNEL(ch, ret, irq_flags);

		ch->flags |= XPC_C_SETUP;

		if (ch->flags & (XPC_C_CONNECTED | XPC_C_DISCONNECTING))
			return;
	}

	if (!(ch->flags & XPC_C_OPENREPLY)) {
		ch->flags |= XPC_C_OPENREPLY;
		xpc_send_chctl_openreply(ch, irq_flags);
	}

	if (!(ch->flags & XPC_C_ROPENREPLY))
		return;

	ch->flags = (XPC_C_CONNECTED | XPC_C_SETUP);	/* clear all else */

	dev_info(xpc_chan, "channel %d to partition %d connected\n",
		 ch->number, ch->partid);

	spin_unlock_irqrestore(&ch->lock, *irq_flags);
	xpc_create_kthreads(ch, 1, 0);
	spin_lock_irqsave(&ch->lock, *irq_flags);
}
Example #19
0
static enum xp_retval
xpc_save_remote_msgqueue_pa_uv(struct xpc_channel *ch,
			       unsigned long gru_mq_desc_gpa)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;

	DBUG_ON(ch_uv->cached_notify_gru_mq_desc == NULL);
	return xpc_cache_remote_gru_mq_desc_uv(ch_uv->cached_notify_gru_mq_desc,
					       gru_mq_desc_gpa);
}
Example #20
0
static enum xp_retval
xpc_send_activate_IRQ_uv(struct xpc_partition *part, void *msg, size_t msg_size,
			 int msg_type)
{
	struct xpc_activate_mq_msghdr_uv *msg_hdr = msg;
	struct xpc_partition_uv *part_uv = &part->sn.uv;
	struct gru_message_queue_desc *gru_mq_desc;
	unsigned long irq_flags;
	enum xp_retval ret;

	DBUG_ON(msg_size > XPC_ACTIVATE_MSG_SIZE_UV);

	msg_hdr->type = msg_type;
	msg_hdr->partid = xp_partition_id;
	msg_hdr->act_state = part->act_state;
	msg_hdr->rp_ts_jiffies = xpc_rsvd_page->ts_jiffies;

	mutex_lock(&part_uv->cached_activate_gru_mq_desc_mutex);
again:
	if (!(part_uv->flags & XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV)) {
		gru_mq_desc = part_uv->cached_activate_gru_mq_desc;
		if (gru_mq_desc == NULL) {
			gru_mq_desc = kmalloc(sizeof(struct
					      gru_message_queue_desc),
					      GFP_KERNEL);
			if (gru_mq_desc == NULL) {
				ret = xpNoMemory;
				goto done;
			}
			part_uv->cached_activate_gru_mq_desc = gru_mq_desc;
		}

		ret = xpc_cache_remote_gru_mq_desc_uv(gru_mq_desc,
						      part_uv->
						      activate_gru_mq_desc_gpa);
		if (ret != xpSuccess)
			goto done;

		spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
		part_uv->flags |= XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
		spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
	}

	/* ??? Is holding a spin_lock (ch->lock) during this call a bad idea? */
	ret = xpc_send_gru_msg(part_uv->cached_activate_gru_mq_desc, msg,
			       msg_size);
	if (ret != xpSuccess) {
		smp_rmb();	/* ensure a fresh copy of part_uv->flags */
		if (!(part_uv->flags & XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV))
			goto again;
	}
done:
	mutex_unlock(&part_uv->cached_activate_gru_mq_desc_mutex);
	return ret;
}
Example #21
0
/*
 * Send a message that contains the user's payload on the specified channel
 * connected to the specified partition.
 *
 * NOTE that this routine can sleep waiting for a message entry to become
 * available. To not sleep, pass in the XPC_NOWAIT flag.
 *
 * Once sent, this routine will not wait for the message to be received, nor
 * will notification be given when it does happen.
 *
 * Arguments:
 *
 *	partid - ID of partition to which the channel is connected.
 *	ch_number - channel # to send message on.
 *	flags - see xp.h for valid flags.
 *	payload - pointer to the payload which is to be sent.
 *	payload_size - size of the payload in bytes.
 */
enum xp_retval
xpc_initiate_send(short partid, int ch_number, u32 flags, void *payload,
		  u16 payload_size)
{
	struct xpc_partition *part = &xpc_partitions[partid];
	enum xp_retval ret = xpUnknownReason;

	dev_dbg(xpc_chan, "payload=0x%p, partid=%d, channel=%d\n", payload,
		partid, ch_number);

	DBUG_ON(partid < 0 || partid >= xp_max_npartitions);
	DBUG_ON(ch_number < 0 || ch_number >= part->nchannels);
	DBUG_ON(payload == NULL);

	if (xpc_part_ref(part)) {
		ret = xpc_send_payload(&part->channels[ch_number], flags,
				       payload, payload_size, 0, NULL, NULL);
		xpc_part_deref(part);
	}

	return ret;
}
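
As a sender-side sketch, a hypothetical caller might wrap xpc_initiate_send() like this; XPC_NOWAIT is the flag mentioned in the comment above for contexts that must not sleep waiting for a free message entry, and the channel number and payload are made up.

/* illustrative helper (not from the kernel) */
static enum xp_retval
example_send_value(short partid, int ch_number, u64 value)
{
	/*
	 * assumption: the payload is copied before xpc_initiate_send()
	 * returns, so passing a stack variable is fine here
	 */
	return xpc_initiate_send(partid, ch_number, XPC_NOWAIT, &value,
				 sizeof(value));
}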
Example #22
0
enum xp_retval
xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
	    u16 nentries, u32 assigned_limit, u32 idle_limit)
{
	struct xpc_registration *registration;

	DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS);
	DBUG_ON(payload_size == 0 || nentries == 0);
	DBUG_ON(func == NULL);
	DBUG_ON(assigned_limit == 0 || idle_limit > assigned_limit);

	if (XPC_MSG_SIZE(payload_size) > XPC_MSG_MAX_SIZE)
		return xpPayloadTooBig;

	registration = &xpc_registrations[ch_number];

	if (mutex_lock_interruptible(&registration->mutex) != 0)
		return xpInterrupted;

	/* if XPC_CHANNEL_REGISTERED(ch_number) */
	if (registration->func != NULL) {
		mutex_unlock(&registration->mutex);
		return xpAlreadyRegistered;
	}

	/* register the channel for connection */
	registration->entry_size = XPC_MSG_SIZE(payload_size);
	registration->nentries = nentries;
	registration->assigned_limit = assigned_limit;
	registration->idle_limit = idle_limit;
	registration->key = key;
	registration->func = func;

	mutex_unlock(&registration->mutex);

	xpc_interface.connect(ch_number);

	return xpSuccess;
}
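
To tie xpc_connect() and xpc_disconnect() together, here is a hypothetical consumer that registers a callout for one channel at module init and removes it at exit. The channel number, queue limits, and the empty callout are illustrative; the callout signature follows xpc_channel_func from xp.h.

#define EXAMPLE_XPC_CHANNEL	3	/* hypothetical channel number */

static void
example_xpc_callout(enum xp_retval reason, short partid, int ch_number,
		    void *data, void *key)
{
	/* handle connection, disconnection and received-message callouts */
}

static int __init
example_xpc_init(void)
{
	enum xp_retval ret;

	ret = xpc_connect(EXAMPLE_XPC_CHANNEL, example_xpc_callout, NULL,
			  128 /* payload_size */, 64 /* nentries */,
			  1 /* assigned_limit */, 1 /* idle_limit */);
	return (ret == xpSuccess) ? 0 : -EBUSY;
}

static void __exit
example_xpc_exit(void)
{
	xpc_disconnect(EXAMPLE_XPC_CHANNEL);
}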
Example #23
0
/*
 * Register for automatic establishment of a channel connection whenever
 * a partition comes up.
 *
 * Arguments:
 *
 *	ch_number - channel # to register for connection.
 *	func - function to call for asynchronous notification of channel
 *	       state changes (i.e., connection, disconnection, error) and
 *	       the arrival of incoming messages.
 *      key - pointer to optional user-defined value that gets passed back
 *	      to the user on any callouts made to func.
 *	payload_size - size in bytes of the XPC message's payload area which
 *		       contains a user-defined message. The user should make
 *		       this large enough to hold their largest message.
 *	nentries - max #of XPC message entries a message queue can contain.
 *		   The actual number, which is determined when a connection
 * 		   is established and may be less than requested, will be
 *		   passed to the user via the xpcConnected callout.
 *	assigned_limit - max number of kthreads allowed to be processing
 * 			 messages (per connection) at any given instant.
 *	idle_limit - max number of kthreads allowed to be idle at any given
 * 		     instant.
 */
enum xpc_retval
xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size,
		u16 nentries, u32 assigned_limit, u32 idle_limit)
{
	struct xpc_registration *registration;

	DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS);
	DBUG_ON(payload_size == 0 || nentries == 0);
	DBUG_ON(func == NULL);
	DBUG_ON(assigned_limit == 0 || idle_limit > assigned_limit);

	registration = &xpc_registrations[ch_number];

	if (mutex_lock_interruptible(&registration->mutex) != 0) {
		return xpcInterrupted;
	}

	/* if XPC_CHANNEL_REGISTERED(ch_number) */
	if (registration->func != NULL) {
		mutex_unlock(&registration->mutex);
		return xpcAlreadyRegistered;
	}

	/* register the channel for connection */
	registration->msg_size = XPC_MSG_SIZE(payload_size);
	registration->nentries = nentries;
	registration->assigned_limit = assigned_limit;
	registration->idle_limit = idle_limit;
	registration->key = key;
	registration->func = func;

	mutex_unlock(&registration->mutex);

	xpc_interface.connect(ch_number);

	return xpcSuccess;
}
Example #24
0
void
xpc_disconnect_wait(int ch_number)
{
    unsigned long irq_flags;
    short partid;
    struct xpc_partition *part;
    struct xpc_channel *ch;
    int wakeup_channel_mgr;

    /* now wait for all callouts to the caller's function to cease */
    for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
        part = &xpc_partitions[partid];

        if (!xpc_part_ref(part))
            continue;

        ch = &part->channels[ch_number];

        if (!(ch->flags & XPC_C_WDISCONNECT)) {
            xpc_part_deref(part);
            continue;
        }

        wait_for_completion(&ch->wdisconnect_wait);

        spin_lock_irqsave(&ch->lock, irq_flags);
        DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED));
        wakeup_channel_mgr = 0;

        if (ch->delayed_IPI_flags) {
            if (part->act_state != XPC_P_DEACTIVATING) {
                spin_lock(&part->IPI_lock);
                XPC_SET_IPI_FLAGS(part->local_IPI_amo,
                                  ch->number,
                                  ch->delayed_IPI_flags);
                spin_unlock(&part->IPI_lock);
                wakeup_channel_mgr = 1;
            }
            ch->delayed_IPI_flags = 0;
        }

        ch->flags &= ~XPC_C_WDISCONNECT;
        spin_unlock_irqrestore(&ch->lock, irq_flags);

        if (wakeup_channel_mgr)
            xpc_wakeup_channel_mgr(part);

        xpc_part_deref(part);
    }
}
Example #25
0
static void
xpc_process_activate_IRQ_rcvd_uv(void)
{
	unsigned long irq_flags;
	short partid;
	struct xpc_partition *part;
	u8 act_state_req;

	DBUG_ON(xpc_activate_IRQ_rcvd == 0);

	spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
	for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) {
		part = &xpc_partitions[partid];

		if (part->sn.uv.act_state_req == 0)
			continue;

		xpc_activate_IRQ_rcvd--;
		BUG_ON(xpc_activate_IRQ_rcvd < 0);

		act_state_req = part->sn.uv.act_state_req;
		part->sn.uv.act_state_req = 0;
		spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);

		if (act_state_req == XPC_P_ASR_ACTIVATE_UV) {
			if (part->act_state == XPC_P_AS_INACTIVE)
				xpc_activate_partition(part);
			else if (part->act_state == XPC_P_AS_DEACTIVATING)
				XPC_DEACTIVATE_PARTITION(part, xpReactivating);

		} else if (act_state_req == XPC_P_ASR_REACTIVATE_UV) {
			if (part->act_state == XPC_P_AS_INACTIVE)
				xpc_activate_partition(part);
			else
				XPC_DEACTIVATE_PARTITION(part, xpReactivating);

		} else if (act_state_req == XPC_P_ASR_DEACTIVATE_UV) {
			XPC_DEACTIVATE_PARTITION(part, part->sn.uv.reason);

		} else {
			BUG();
		}

		spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags);
		if (xpc_activate_IRQ_rcvd == 0)
			break;
	}
	spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags);
}
Example #26
0
void
xpc_disconnect_wait(int ch_number)
{
	unsigned long irq_flags;
	short partid;
	struct xpc_partition *part;
	struct xpc_channel *ch;
	int wakeup_channel_mgr;

	/* now wait for all callouts to the caller's function to cease */
	for (partid = 0; partid < xp_max_npartitions; partid++) {
		part = &xpc_partitions[partid];

		if (!xpc_part_ref(part))
			continue;

		ch = &part->channels[ch_number];

		if (!(ch->flags & XPC_C_WDISCONNECT)) {
			xpc_part_deref(part);
			continue;
		}

		wait_for_completion(&ch->wdisconnect_wait);

		spin_lock_irqsave(&ch->lock, irq_flags);
		DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED));
		wakeup_channel_mgr = 0;

		if (ch->delayed_chctl_flags) {
			if (part->act_state != XPC_P_AS_DEACTIVATING) {
				spin_lock(&part->chctl_lock);
				part->chctl.flags[ch->number] |=
				    ch->delayed_chctl_flags;
				spin_unlock(&part->chctl_lock);
				wakeup_channel_mgr = 1;
			}
			ch->delayed_chctl_flags = 0;
		}

		ch->flags &= ~XPC_C_WDISCONNECT;
		spin_unlock_irqrestore(&ch->lock, irq_flags);

		if (wakeup_channel_mgr)
			xpc_wakeup_channel_mgr(part);

		xpc_part_deref(part);
	}
}
Example #27
0
void
xpc_process_sent_chctl_flags(struct xpc_partition *part)
{
	unsigned long irq_flags;
	union xpc_channel_ctl_flags chctl;
	struct xpc_channel *ch;
	int ch_number;
	u32 ch_flags;

	chctl.all_flags = xpc_arch_ops.get_chctl_all_flags(part);

	/*
	 * Initiate channel connections for registered channels and process
	 * any open/close or message related chctl flags on a per-channel
	 * basis.
	 */

	for (ch_number = 0; ch_number < part->nchannels; ch_number++) {
		ch = &part->channels[ch_number];

		/*
		 * Process any open or close related chctl flags, and then
		 * deal with connecting or disconnecting the channel as
		 * required.
		 */

		if (chctl.flags[ch_number] & XPC_OPENCLOSE_CHCTL_FLAGS) {
			xpc_process_openclose_chctl_flags(part, ch_number,
							chctl.flags[ch_number]);
		}

		ch_flags = ch->flags;	/* need an atomic snapshot of flags */

		if (ch_flags & XPC_C_DISCONNECTING) {
			spin_lock_irqsave(&ch->lock, irq_flags);
			xpc_process_disconnect(ch, &irq_flags);
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			continue;
		}

		if (part->act_state == XPC_P_AS_DEACTIVATING)
			continue;

		if (!(ch_flags & XPC_C_CONNECTED)) {
			if (!(ch_flags & XPC_C_OPENREQUEST)) {
				DBUG_ON(ch_flags & XPC_C_SETUP);
				(void)xpc_connect_channel(ch);
			}
			continue;
		}

		/*
		 * Process any message related chctl flags; this may involve
		 * activating kthreads to deliver any pending messages sent
		 * from the other partition.
		 */

		if (chctl.flags[ch_number] & XPC_MSG_CHCTL_FLAGS)
			xpc_arch_ops.process_msg_chctl_flags(part, ch_number);
	}
}
Example #28
0
/*
 * Handle the receipt of a SGI_XPC_NOTIFY IRQ by seeing whether the specified
 * partition actually sent it. Since SGI_XPC_NOTIFY IRQs may be shared by more
 * than one partition, we use an AMO_t structure per partition to indicate
 * whether a partition has sent an IPI or not.  If it has, then wake up the
 * associated kthread to handle it.
 *
 * All SGI_XPC_NOTIFY IRQs received by XPC are the result of IPIs sent by XPC
 * running on other partitions.
 *
 * Noteworthy Arguments:
 *
 *    irq - Interrupt ReQuest number. NOT USED.
 *
 *    dev_id - partid of IPI's potential sender.
 */
irqreturn_t
xpc_notify_IRQ_handler(int irq, void *dev_id)
{
    short partid = (short)(u64)dev_id;
    struct xpc_partition *part = &xpc_partitions[partid];

    DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);

    if (xpc_part_ref(part)) {
        xpc_check_for_channel_activity(part);

        xpc_part_deref(part);
    }
    return IRQ_HANDLED;
}
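
A hypothetical sketch of how this handler might be installed: one registration per potential sending partition, passing the partid through dev_id so the (short)(u64)dev_id cast above can recover it. SGI_XPC_NOTIFY stands in for whatever IRQ number the platform actually assigns, and the helper function and name string are not part of XPC.

static int
example_register_notify_irq(short partid)
{
	int ret;

	/* shared IRQ: dev_id (the partid) also identifies us at free_irq() time */
	ret = request_irq(SGI_XPC_NOTIFY, xpc_notify_IRQ_handler, IRQF_SHARED,
			  "xpc notify", (void *)(u64)partid);
	if (ret != 0)
		pr_err("request_irq() for partition %d failed, ret=%d\n",
		       partid, ret);
	return ret;
}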
Example #29
0
/*
 * Free up msg_slots and clear other stuff that was set up for the specified
 * channel.
 */
static void
xpc_teardown_msg_structures_uv(struct xpc_channel *ch)
{
	struct xpc_channel_uv *ch_uv = &ch->sn.uv;

	DBUG_ON(!spin_is_locked(&ch->lock));

	kfree(ch_uv->cached_notify_gru_mq_desc);
	ch_uv->cached_notify_gru_mq_desc = NULL;

	if (ch->flags & XPC_C_SETUP) {
		xpc_init_fifo_uv(&ch_uv->msg_slot_free_list);
		kfree(ch_uv->send_msg_slots);
		xpc_init_fifo_uv(&ch_uv->recv_msg_list);
		kfree(ch_uv->recv_msg_slots);
	}
}
Example #30
0
/*
 * Tell the callers of xpc_send_notify() that the status of their payloads
 * is unknown because the channel is now disconnecting.
 *
 * We don't worry about putting these msg_slots on the free list since the
 * msg_slots themselves are about to be kfree'd.
 */
static void
xpc_notify_senders_of_disconnect_uv(struct xpc_channel *ch)
{
	struct xpc_send_msg_slot_uv *msg_slot;
	int entry;

	DBUG_ON(!(ch->flags & XPC_C_DISCONNECTING));

	for (entry = 0; entry < ch->local_nentries; entry++) {

		if (atomic_read(&ch->n_to_notify) == 0)
			break;

		msg_slot = &ch->sn.uv.send_msg_slots[entry];
		if (msg_slot->func != NULL)
			xpc_notify_sender_uv(ch, msg_slot, ch->reason);
	}
}