Example #1
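handle_flags() from the Linux ipmi_si interface driver: it examines the BMC's message-flag bits and starts the matching action (watchdog pre-timeout notification, fetching queued messages, draining the event buffer, or an OEM-specific handler), falling back to the idle state when nothing is pending.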
static void handle_flags(struct smi_info *smi_info)
{
 retry:
	if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
		/* Watchdog pre-timeout */
		smi_inc_stat(smi_info, watchdog_pretimeouts);

		start_clear_flags(smi_info);
		smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
		if (smi_info->intf)
			ipmi_smi_watchdog_pretimeout(smi_info->intf);
	} else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
		/* Messages available. */
		smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
		if (!smi_info->curr_msg)
			return;

		start_getting_msg_queue(smi_info);
	} else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
		/* Events available. */
		smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
		if (!smi_info->curr_msg)
			return;

		start_getting_events(smi_info);
	} else if (smi_info->msg_flags & OEM_DATA_AVAIL &&
		   smi_info->oem_data_avail_handler) {
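		/* OEM-specific data is pending; let the OEM handler consume
		   it and, if it asks, re-examine the flags. */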
		if (smi_info->oem_data_avail_handler(smi_info))
			goto retry;
	} else
		smi_info->si_state = SI_NORMAL;
}
Example #2
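The same flag handling in the older, KCS-specific ipmi_kcs driver: here the request messages are built inline and the state machine is driven directly through kcs_smi_handlers.start_transaction().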
static void handle_flags(struct kcs_info *kcs_info)
{
	if (kcs_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
		/* Watchdog pre-timeout */
		start_clear_flags(kcs_info);
		kcs_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
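		/* Release the KCS lock while calling up into the message
		   handler, then re-take it. */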
		spin_unlock(&(kcs_info->kcs_lock));
		ipmi_smi_watchdog_pretimeout(kcs_info->intf);
		spin_lock(&(kcs_info->kcs_lock));
	} else if (kcs_info->msg_flags & RECEIVE_MSG_AVAIL) {
		/* Messages available. */
		kcs_info->curr_msg = ipmi_alloc_smi_msg();
		if (!kcs_info->curr_msg) {
			disable_kcs_irq(kcs_info);
			kcs_info->kcs_state = KCS_NORMAL;
			return;
		}
		enable_kcs_irq(kcs_info);

		/* Build an IPMI Get Message request to pull the next message
		   off the BMC's receive queue. */
		kcs_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
		kcs_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
		kcs_info->curr_msg->data_size = 2;

		kcs_smi_handlers.start_transaction(
		    kcs_info->kcs_sm,
		    kcs_info->curr_msg->data,
		    kcs_info->curr_msg->data_size);
		kcs_info->kcs_state = KCS_GETTING_MESSAGES;
	} else if (kcs_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
		/* Events available. */
		kcs_info->curr_msg = ipmi_alloc_smi_msg();
		if (!kcs_info->curr_msg) {
			disable_kcs_irq(kcs_info);
			kcs_info->kcs_state = KCS_NORMAL;
			return;
		}
		enable_kcs_irq(kcs_info);

		/* Build a Read Event Message Buffer request to fetch the
		   pending event from the BMC. */
		kcs_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
		kcs_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
		kcs_info->curr_msg->data_size = 2;

		kcs_smi_handlers.start_transaction(
		    kcs_info->kcs_sm,
		    kcs_info->curr_msg->data,
		    kcs_info->curr_msg->data_size);
		kcs_info->kcs_state = KCS_GETTING_EVENTS;
	} else {
		kcs_info->kcs_state = KCS_NORMAL;
	}
}
Example #3
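init_one_kcs() from the same ipmi_kcs driver: it probes and registers a single KCS interface, reserving either an I/O port or a memory-mapped region, detecting the hardware, optionally claiming an IRQ, registering the interface with the IPMI message handler, and starting the driver's timer.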
/* Returns 0 if initialized, or negative on an error. */
static int init_one_kcs(int kcs_port, 
			int irq, 
			unsigned long kcs_physaddr,
			struct kcs_info **kcs)
{
	int		rv;
	struct kcs_info *new_kcs;

	/* Did anything get passed in at all?  Both == zero disables the
	   driver. */

	if (!(kcs_port || kcs_physaddr)) 
		return -ENODEV;
	
	/* Only initialize a port OR a physical address on this call.
	   Also, IRQs can go with either ports or addresses. */

	if (kcs_port && kcs_physaddr)
		return -EINVAL;

	new_kcs = kmalloc(sizeof(*new_kcs), GFP_KERNEL);
	if (!new_kcs) {
		printk(KERN_ERR "ipmi_kcs: out of memory\n");
		return -ENOMEM;
	}

	/* So we know not to free it unless we have allocated one. */
	new_kcs->kcs_sm = NULL;

	new_kcs->addr = NULL;
	new_kcs->physaddr = kcs_physaddr;
	new_kcs->port = kcs_port;

	if (kcs_port) {
		if (request_region(kcs_port, 2, DEVICE_NAME) == NULL) {
			kfree(new_kcs);
			printk(KERN_ERR 
			       "ipmi_kcs: can't reserve port @ 0x%4.4x\n",
		       	       kcs_port);
			return -EIO;
		}
	} else {
		if (request_mem_region(kcs_physaddr, 2, DEVICE_NAME) == NULL) {
			kfree(new_kcs);
			printk(KERN_ERR 
			       "ipmi_kcs: can't reserve memory @ 0x%lx\n",
		       	       kcs_physaddr);
			return -EIO;
		}
		if ((new_kcs->addr = ioremap(kcs_physaddr, 2)) == NULL) {
			release_mem_region(kcs_physaddr, 2);
			kfree(new_kcs);
			printk(KERN_ERR
			       "ipmi_kcs: can't remap memory at 0x%lx\n",
			       kcs_physaddr);
			return -EIO;
		}
	}

	new_kcs->kcs_sm = kmalloc(kcs_size(), GFP_KERNEL);
	if (!new_kcs->kcs_sm) {
		printk(KERN_ERR "ipmi_kcs: out of memory\n");
		rv = -ENOMEM;
		goto out_err;
	}
	init_kcs_data(new_kcs->kcs_sm, kcs_port, new_kcs->addr);
	spin_lock_init(&(new_kcs->kcs_lock));
	spin_lock_init(&(new_kcs->msg_lock));

	rv = ipmi_kcs_detect_hardware(kcs_port, new_kcs->addr, new_kcs->kcs_sm);
	if (rv) {
		if (kcs_port) 
			printk(KERN_ERR 
			       "ipmi_kcs: No KCS @ port 0x%4.4x\n", 
			       kcs_port);
		else
			printk(KERN_ERR 
			       "ipmi_kcs: No KCS @ addr 0x%lx\n", 
			       kcs_physaddr);
		goto out_err;
	}

	if (irq != 0) {
		rv = request_irq(irq,
				 kcs_irq_handler,
				 SA_INTERRUPT,
				 DEVICE_NAME,
				 new_kcs);
		if (rv) {
			printk(KERN_WARNING
			       "ipmi_kcs: %s unable to claim interrupt %d,"
			       " running polled\n",
			       DEVICE_NAME, irq);
			irq = 0;
		}
	}
	new_kcs->irq = irq;

	INIT_LIST_HEAD(&(new_kcs->xmit_msgs));
	INIT_LIST_HEAD(&(new_kcs->hp_xmit_msgs));
	new_kcs->curr_msg = NULL;
	atomic_set(&new_kcs->req_events, 0);
	new_kcs->run_to_completion = 0;

	/* Start clearing any flags the BMC already has set, before the
	   interrupt and timer are enabled. */
	start_clear_flags(new_kcs);

	if (irq) {
		new_kcs->kcs_state = KCS_CLEARING_FLAGS_THEN_SET_IRQ;

		printk(KERN_INFO 
		       "ipmi_kcs: Acquiring BMC @ port=0x%x irq=%d\n",
		       kcs_port, irq);

	} else {
		if (kcs_port)
			printk(KERN_INFO 
			       "ipmi_kcs: Acquiring BMC @ port=0x%x\n",
		       	       kcs_port);
		else
			printk(KERN_INFO 
			       "ipmi_kcs: Acquiring BMC @ addr=0x%lx\n",
		       	       kcs_physaddr);
	}

	rv = ipmi_register_smi(&handlers,
			       new_kcs,
			       ipmi_version_major,
			       ipmi_version_minor,
			       &(new_kcs->intf));
	if (rv) {
		if (irq)
			free_irq(irq, new_kcs);
		printk(KERN_ERR 
		       "ipmi_kcs: Unable to register device: error %d\n",
		       rv);
		goto out_err;
	}

	new_kcs->interrupt_disabled = 0;
	new_kcs->timer_stopped = 0;
	new_kcs->stop_operation = 0;

	/* Set up and start the periodic timer that drives polling and
	   timeout handling for this interface. */
	init_timer(&(new_kcs->kcs_timer));
	new_kcs->kcs_timer.data = (long) new_kcs;
	new_kcs->kcs_timer.function = kcs_timeout;
	new_kcs->last_timeout_jiffies = jiffies;
	new_kcs->kcs_timer.expires = jiffies + KCS_TIMEOUT_JIFFIES;
	add_timer(&(new_kcs->kcs_timer));

	*kcs = new_kcs;

	return 0;

 out_err:
	if (kcs_port) 
		release_region (kcs_port, 2);
	if (new_kcs->addr) 
		iounmap(new_kcs->addr);
	if (kcs_physaddr) 
		release_mem_region(kcs_physaddr, 2);
	if (new_kcs->kcs_sm)
		kfree(new_kcs->kcs_sm);
	kfree(new_kcs);
	return rv;
}