/* Example 1 */
/*
 * Queue a message for transmission to the BMC.
 *
 * @send_info: opaque handle registered with the upper IPMI layer;
 *             really a struct kcs_info *.
 * @msg:       the message to transmit.
 * @priority:  > 0 selects the high-priority transmit queue.
 *
 * In run-to-completion mode the message goes on the normal queue and
 * the KCS state machine is polled inline until it returns to idle.
 * Otherwise the message is queued (by priority) and, if the interface
 * is idle, the next message is started and the short timer re-armed.
 */
static void sender(void                *send_info,
		   struct ipmi_smi_msg *msg,
		   int                 priority)
{
	struct kcs_info *kcs_info = (struct kcs_info *) send_info;
	enum kcs_result result;
	unsigned long   flags;
#ifdef DEBUG_TIMING
	struct timeval t;
#endif

	spin_lock_irqsave(&(kcs_info->msg_lock), flags);
#ifdef DEBUG_TIMING
	do_gettimeofday(&t);
	/* tv_sec/tv_usec are longs; %ld matches the argument types
	   (the old "%d" was a format-specifier mismatch). */
	printk("**Enqueue: %ld.%9.9ld\n", t.tv_sec, t.tv_usec);
#endif

	if (kcs_info->run_to_completion) {
		/* If we are running to completion, then throw it in
		   the list and run transactions until everything is
		   clear.  Priority doesn't matter here. */
		list_add_tail(&(msg->link), &(kcs_info->xmit_msgs));

		/* We have to release the msg lock and claim the kcs
		   lock in this case, because of race conditions. */
		spin_unlock_irqrestore(&(kcs_info->msg_lock), flags);

		/* Poll the state machine to completion. */
		spin_lock_irqsave(&(kcs_info->kcs_lock), flags);
		result = kcs_event_handler(kcs_info, 0);
		while (result != KCS_SM_IDLE) {
			udelay(KCS_SHORT_TIMEOUT_USEC);
			result = kcs_event_handler(kcs_info,
						   KCS_SHORT_TIMEOUT_USEC);
		}
		spin_unlock_irqrestore(&(kcs_info->kcs_lock), flags);
		return;
	} else {
		if (priority > 0) {
			list_add_tail(&(msg->link), &(kcs_info->hp_xmit_msgs));
		} else {
			list_add_tail(&(msg->link), &(kcs_info->xmit_msgs));
		}
	}
	spin_unlock_irqrestore(&(kcs_info->msg_lock), flags);

	/* If the interface is idle, kick off the message we just
	   queued and restart the short timer. */
	spin_lock_irqsave(&(kcs_info->kcs_lock), flags);
	if ((kcs_info->kcs_state == KCS_NORMAL)
	    && (kcs_info->curr_msg == NULL))
	{
		start_next_msg(kcs_info);
		kcs_restart_short_timer(kcs_info);
	}
	spin_unlock_irqrestore(&(kcs_info->kcs_lock), flags);
}
/* Example 2 */
/*
 * If the interface is idle (SI_NORMAL and no message in progress),
 * re-arm the SI timeout, wake the polling kthread if one exists, and
 * try to start the next queued message.
 *
 * NOTE(review): presumably called with si_lock held — confirm at the
 * call sites.
 */
static void check_start_timer_thread(struct smi_info *smi_info)
{
	if (smi_info->si_state != SI_NORMAL || smi_info->curr_msg != NULL)
		return;

	smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);

	if (smi_info->thread)
		wake_up_process(smi_info->thread);

	start_next_msg(smi_info);
	smi_event_handler(smi_info, 0);
}
/* Example 3 */
/*
 * Called on timeouts and events.  Timeouts should pass the elapsed
 * time, interrupts should pass in zero.  Must be called with
 * si_lock held and interrupts disabled.
 */
static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
					   int time)
{
	enum si_sm_result si_sm_result;

 restart:
	/*
	 * There used to be a loop here that waited a little while
	 * (around 25us) before giving up.  That turned out to be
	 * pointless, the minimum delays I was seeing were in the 300us
	 * range, which is far too long to wait in an interrupt.  So
	 * we just run until the state machine tells us something
	 * happened or it needs a delay.
	 */
	si_sm_result = smi_info->handlers->event(smi_info->si_sm, time);
	/* Only charge the elapsed time once; re-runs below pass 0. */
	time = 0;
	while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY)
		si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);

	if (si_sm_result == SI_SM_TRANSACTION_COMPLETE) {
		smi_inc_stat(smi_info, complete_transactions);

		/* Deliver the result, then poll once more for the
		   machine's next state. */
		handle_transaction_done(smi_info);
		si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
	} else if (si_sm_result == SI_SM_HOSED) {
		smi_inc_stat(smi_info, hosed_count);

		/*
		 * Set the state before return_hosed_msg, because that
		 * releases the lock.
		 */
		smi_info->si_state = SI_NORMAL;
		if (smi_info->curr_msg != NULL) {
			/*
			 * If we were handling a user message, format
			 * a response to send to the upper layer to
			 * tell it about the error.
			 */
			return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED);
		}
		si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
	}

	/*
	 * We prefer handling attn over new messages.  But don't do
	 * this if there is not yet an upper layer to handle anything.
	 */
	if (likely(smi_info->intf) &&
	    (si_sm_result == SI_SM_ATTN || smi_info->got_attn)) {
		unsigned char msg[2];

		if (smi_info->si_state != SI_NORMAL) {
			/*
			 * We got an ATTN, but we are doing something else.
			 * Handle the ATTN later.
			 */
			smi_info->got_attn = true;
		} else {
			smi_info->got_attn = false;
			smi_inc_stat(smi_info, attentions);

			/*
			 * Got a attn, send down a get message flags to see
			 * what's causing it.  It would be better to handle
			 * this in the upper layer, but due to the way
			 * interrupts work with the SMI, that's not really
			 * possible.
			 */
			msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
			msg[1] = IPMI_GET_MSG_FLAGS_CMD;

			smi_info->handlers->start_transaction(
				smi_info->si_sm, msg, 2);
			smi_info->si_state = SI_GETTING_FLAGS;
			goto restart;
		}
	}

	/* If we are currently idle, try to start the next message. */
	if (si_sm_result == SI_SM_IDLE) {
		smi_inc_stat(smi_info, idles);

		si_sm_result = start_next_msg(smi_info);
		if (si_sm_result != SI_SM_IDLE)
			goto restart;
	}

	if ((si_sm_result == SI_SM_IDLE)
	    && (atomic_read(&smi_info->req_events))) {
		/*
		 * We are idle and the upper layer requested that I fetch
		 * events, so do so.
		 */
		atomic_set(&smi_info->req_events, 0);

		/*
		 * Take this opportunity to check the interrupt and
		 * message enable state for the BMC.  The BMC can be
		 * asynchronously reset, and may thus get interrupts
		 * disable and messages disabled.
		 */
		if (smi_info->supports_event_msg_buff || smi_info->irq) {
			start_check_enables(smi_info);
		} else {
			smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
			if (!smi_info->curr_msg)
				goto out;

			start_getting_events(smi_info);
		}
		goto restart;
	}
 out:
	return si_sm_result;
}
/* Example 4 */
/* Called on timeouts and events.  Timeouts should pass the elapsed
   time, interrupts should pass in zero. */
static enum kcs_result kcs_event_handler(struct kcs_info *kcs_info, int time)
{
	enum kcs_result kcs_result;

 restart:
	/* There used to be a loop here that waited a little while
	   (around 25us) before giving up.  That turned out to be
	   pointless, the minimum delays I was seeing were in the 300us
	   range, which is far too long to wait in an interrupt.  So
	   we just run until the state machine tells us something
	   happened or it needs a delay. */
	kcs_result = kcs_event(kcs_info->kcs_sm, time);
	time = 0;
	while (kcs_result == KCS_CALL_WITHOUT_DELAY)
	{
		kcs_result = kcs_event(kcs_info->kcs_sm, 0);
	}

	if (kcs_result == KCS_TRANSACTION_COMPLETE)
	{
		handle_transaction_done(kcs_info);
		kcs_result = kcs_event(kcs_info->kcs_sm, 0);
	}
	else if (kcs_result == KCS_SM_HOSED)
	{
		if (kcs_info->curr_msg != NULL) {
			/* If we were handling a user message, format
                           a response to send to the upper layer to
                           tell it about the error. */
			return_hosed_msg(kcs_info);
		}
		kcs_result = kcs_event(kcs_info->kcs_sm, 0);
		kcs_info->kcs_state = KCS_NORMAL;
	}

	/* We prefer handling attn over new messages. */
	if (kcs_result == KCS_ATTN)
	{
		unsigned char msg[2];

		/* Got a attn, send down a get message flags to see
                   what's causing it.  It would be better to handle
                   this in the upper layer, but due to the way
                   interrupts work with the KCS, that's not really
                   possible. */
		msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
		msg[1] = IPMI_GET_MSG_FLAGS_CMD;

		start_kcs_transaction(kcs_info->kcs_sm, msg, 2);
		kcs_info->kcs_state = KCS_GETTING_FLAGS;
		goto restart;
	}

	/* If we are currently idle, try to start the next message. */
	if (kcs_result == KCS_SM_IDLE) {
		kcs_result = start_next_msg(kcs_info);
		if (kcs_result != KCS_SM_IDLE)
			goto restart;
        }

	if ((kcs_result == KCS_SM_IDLE)
	    && (atomic_read(&kcs_info->req_events)))
	{
		/* We are idle and the upper layer requested that I fetch
		   events, so do so. */
		unsigned char msg[2];

		atomic_set(&kcs_info->req_events, 0);
		msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
		msg[1] = IPMI_GET_MSG_FLAGS_CMD;

		start_kcs_transaction(kcs_info->kcs_sm, msg, 2);
		kcs_info->kcs_state = KCS_GETTING_FLAGS;
		goto restart;
	}

	return kcs_result;
}