Example No. 1
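The sender() callback, apparently from the Linux kernel's IPMI KCS interface driver (ipmi_kcs_intf.c), queues an outgoing message for transmission. In run-to-completion mode it polls kcs_event_handler() on the spot until the state machine returns KCS_SM_IDLE; otherwise it adds the message to the normal or high-priority transmit list and, if the interface is idle, starts it immediately.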
static void sender(void                *send_info,
		   struct ipmi_smi_msg *msg,
		   int                 priority)
{
	struct kcs_info *kcs_info = (struct kcs_info *) send_info;
	enum kcs_result result;
	unsigned long   flags;
#ifdef DEBUG_TIMING
	struct timeval t;
#endif

	spin_lock_irqsave(&(kcs_info->msg_lock), flags);
#ifdef DEBUG_TIMING
	do_gettimeofday(&t);
	printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif

	if (kcs_info->run_to_completion) {
		/* If we are running to completion, then throw it in
		   the list and run transactions until everything is
		   clear.  Priority doesn't matter here. */
		list_add_tail(&(msg->link), &(kcs_info->xmit_msgs));

		/* We have to release the msg lock and claim the kcs
		   lock in this case, because of race conditions. */
		spin_unlock_irqrestore(&(kcs_info->msg_lock), flags);

		spin_lock_irqsave(&(kcs_info->kcs_lock), flags);
		result = kcs_event_handler(kcs_info, 0);
		while (result != KCS_SM_IDLE) {
			udelay(KCS_SHORT_TIMEOUT_USEC);
			result = kcs_event_handler(kcs_info,
						   KCS_SHORT_TIMEOUT_USEC);
		}
		spin_unlock_irqrestore(&(kcs_info->kcs_lock), flags);
		return;
	} else {
		if (priority > 0) {
			list_add_tail(&(msg->link), &(kcs_info->hp_xmit_msgs));
		} else {
			list_add_tail(&(msg->link), &(kcs_info->xmit_msgs));
		}
	}
	spin_unlock_irqrestore(&(kcs_info->msg_lock), flags);

	spin_lock_irqsave(&(kcs_info->kcs_lock), flags);
	if ((kcs_info->kcs_state == KCS_NORMAL)
	    && (kcs_info->curr_msg == NULL))
	{
		start_next_msg(kcs_info);
		kcs_restart_short_timer(kcs_info);
	}
	spin_unlock_irqrestore(&(kcs_info->kcs_lock), flags);
}
Example No. 2
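set_run_to_completion() switches the interface into or out of polled (run-to-completion) operation; when turning it on, the driver first drains any transaction in progress by polling kcs_event_handler() until the state machine is idle.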
static void set_run_to_completion(void *send_info, int i_run_to_completion)
{
	struct kcs_info *kcs_info = (struct kcs_info *) send_info;
	enum kcs_result result;
	unsigned long   flags;

	spin_lock_irqsave(&(kcs_info->kcs_lock), flags);

	kcs_info->run_to_completion = i_run_to_completion;
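	/* When switching into run-to-completion mode, drain any
	   transaction in progress by polling the state machine until
	   it goes idle. */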
	if (i_run_to_completion) {
		result = kcs_event_handler(kcs_info, 0);
		while (result != KCS_SM_IDLE) {
			udelay(KCS_SHORT_TIMEOUT_USEC);
			result = kcs_event_handler(kcs_info,
						   KCS_SHORT_TIMEOUT_USEC);
		}
	}

	spin_unlock_irqrestore(&(kcs_info->kcs_lock), flags);
}
Example No. 3
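kcs_irq_handler() is the interrupt handler (using the old three-argument signature with struct pt_regs); unless the driver is shutting down, it simply feeds a zero-time event into the KCS state machine.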
static void kcs_irq_handler(int irq, void *data, struct pt_regs *regs)
{
	struct kcs_info *kcs_info = (struct kcs_info *) data;
	unsigned long   flags;
#ifdef DEBUG_TIMING
	struct timeval t;
#endif

	spin_lock_irqsave(&(kcs_info->kcs_lock), flags);
	if (kcs_info->stop_operation)
		goto out;

#ifdef DEBUG_TIMING
	do_gettimeofday(&t);
	printk("**Interrupt: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif
	kcs_event_handler(kcs_info, 0);
 out:
	spin_unlock_irqrestore(&(kcs_info->kcs_lock), flags);
}
Example No. 4
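kcs_timeout() is the driver's timer callback. It runs the state machine with the time elapsed since the previous run, then rearms the timer: a long timeout when interrupts are in use, otherwise a short one, sub-jiffy when CONFIG_HIGH_RES_TIMERS is available and the state machine asked for KCS_CALL_WITH_DELAY.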
static void kcs_timeout(unsigned long data)
{
	struct kcs_info *kcs_info = (struct kcs_info *) data;
	enum kcs_result kcs_result;
	unsigned long   flags;
	unsigned long   jiffies_now;
	unsigned long   time_diff;
#ifdef DEBUG_TIMING
	struct timeval t;
#endif

	if (kcs_info->stop_operation) {
		kcs_info->timer_stopped = 1;
		return;
	}

	spin_lock_irqsave(&(kcs_info->kcs_lock), flags);
#ifdef DEBUG_TIMING
	do_gettimeofday(&t);
	printk("**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif
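	/* Work out how long it has been since the last timer run and
	   convert it from jiffies to the microseconds the state
	   machine expects. */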
	jiffies_now = jiffies;

	time_diff = ((jiffies_now - kcs_info->last_timeout_jiffies)
		     * KCS_USEC_PER_JIFFY);
	kcs_result = kcs_event_handler(kcs_info, time_diff);

	kcs_info->last_timeout_jiffies = jiffies_now;

	if ((kcs_info->irq) && (! kcs_info->interrupt_disabled)) {
		/* Running with interrupts, only do long timeouts. */
		kcs_info->kcs_timer.expires = jiffies + KCS_TIMEOUT_JIFFIES;
		goto do_add_timer;
	}

	/* If the state machine asks for a short delay, then shorten
	   the timer timeout. */
#ifdef CONFIG_HIGH_RES_TIMERS
	if (kcs_result == KCS_CALL_WITH_DELAY) {
		kcs_info->kcs_timer.sub_expires
			+= usec_to_arch_cycles(KCS_SHORT_TIMEOUT_USEC);
		while (kcs_info->kcs_timer.sub_expires >= cycles_per_jiffies) {
			kcs_info->kcs_timer.expires++;
			kcs_info->kcs_timer.sub_expires -= cycles_per_jiffies;
		}
	} else {
		kcs_info->kcs_timer.expires = jiffies + KCS_TIMEOUT_JIFFIES;
		kcs_info->kcs_timer.sub_expires = 0;
	}
#else
	/* If requested, take the shortest delay possible */
	if (kcs_result == KCS_CALL_WITH_DELAY) {
		kcs_info->kcs_timer.expires = jiffies + 1;
	} else {
		kcs_info->kcs_timer.expires = jiffies + KCS_TIMEOUT_JIFFIES;
	}
#endif

 do_add_timer:
	add_timer(&(kcs_info->kcs_timer));
	spin_unlock_irqrestore(&(kcs_info->kcs_lock), flags);
}
Example No. 5
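poll() just runs kcs_event_handler() once with zero elapsed time.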
static void poll(void *send_info)
{
	struct kcs_info *kcs_info = (struct kcs_info *) send_info;

	kcs_event_handler(kcs_info, 0);
}