/*
 * This implements the state machine defined in the IPMI manual; see
 * that for details on how this works.  Divide that flowchart into
 * sections delimited by "Wait for IBF" and this will become clear.
 */
static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time)
{
	unsigned char status;
	unsigned char state;

	status = read_status(kcs);

	if (kcs_debug & KCS_DEBUG_STATES)
		printk(KERN_DEBUG "KCS: State = %d, %x\n", kcs->state, status);

	/* All states wait for ibf, so just do it here. */
	if (!check_ibf(kcs, status, time))
		return SI_SM_CALL_WITH_DELAY;

	/* Just about everything looks at the KCS state, so grab that, too. */
	state = GET_STATUS_STATE(status);

	switch (kcs->state) {
	case KCS_IDLE:
		/* If there's an interrupt source, turn it off. */
		clear_obf(kcs, status);

		if (GET_STATUS_ATN(status))
			return SI_SM_ATTN;
		else
			return SI_SM_IDLE;

	case KCS_START_OP:
		if (state != KCS_IDLE_STATE) {
			start_error_recovery(kcs,
					     "State machine not idle at start");
			break;
		}

		clear_obf(kcs, status);
		write_cmd(kcs, KCS_WRITE_START);
		kcs->state = KCS_WAIT_WRITE_START;
		break;

	case KCS_WAIT_WRITE_START:
		if (state != KCS_WRITE_STATE) {
			start_error_recovery(
				kcs,
				"Not in write state at write start");
			break;
		}
		read_data(kcs);
		if (kcs->write_count == 1) {
			write_cmd(kcs, KCS_WRITE_END);
			kcs->state = KCS_WAIT_WRITE_END;
		} else {
			write_next_byte(kcs);
			kcs->state = KCS_WAIT_WRITE;
		}
		break;

	case KCS_WAIT_WRITE:
		if (state != KCS_WRITE_STATE) {
			start_error_recovery(kcs,
					     "Not in write state for write");
			break;
		}
		clear_obf(kcs, status);
		if (kcs->write_count == 1) {
			write_cmd(kcs, KCS_WRITE_END);
			kcs->state = KCS_WAIT_WRITE_END;
		} else {
			write_next_byte(kcs);
		}
		break;

	case KCS_WAIT_WRITE_END:
		if (state != KCS_WRITE_STATE) {
			start_error_recovery(kcs,
					     "Not in write state"
					     " for write end");
			break;
		}
		clear_obf(kcs, status);
		write_next_byte(kcs);
		kcs->state = KCS_WAIT_READ;
		break;

	case KCS_WAIT_READ:
		if ((state != KCS_READ_STATE) && (state != KCS_IDLE_STATE)) {
			start_error_recovery(
				kcs,
				"Not in read or idle in read state");
			break;
		}

		if (state == KCS_READ_STATE) {
			if (!check_obf(kcs, status, time))
				return SI_SM_CALL_WITH_DELAY;
			read_next_byte(kcs);
		} else {
			/*
			 * We don't implement this exactly like the state
			 * machine in the spec.  Some broken hardware
			 * does not write the final dummy byte to the
			 * read register.  Thus obf will never go high
			 * here.  We just go straight to idle, and we
			 * handle clearing out obf in idle state if it
			 * happens to come in.
			 */
			clear_obf(kcs, status);
			kcs->orig_write_count = 0;
			kcs->state = KCS_IDLE;
			return SI_SM_TRANSACTION_COMPLETE;
		}
		break;

	case KCS_ERROR0:
		clear_obf(kcs, status);
		status = read_status(kcs);
		if (GET_STATUS_OBF(status))
			/* controller isn't responding */
			if (time_before(jiffies, kcs->error0_timeout))
				return SI_SM_CALL_WITH_TICK_DELAY;
		write_cmd(kcs, KCS_GET_STATUS_ABORT);
		kcs->state = KCS_ERROR1;
		break;

	case KCS_ERROR1:
		clear_obf(kcs, status);
		write_data(kcs, 0);
		kcs->state = KCS_ERROR2;
		break;

	case KCS_ERROR2:
		if (state != KCS_READ_STATE) {
			start_error_recovery(kcs,
					     "Not in read state for error2");
			break;
		}
		if (!check_obf(kcs, status, time))
			return SI_SM_CALL_WITH_DELAY;

		clear_obf(kcs, status);
		write_data(kcs, KCS_READ_BYTE);
		kcs->state = KCS_ERROR3;
		break;

	case KCS_ERROR3:
		if (state != KCS_IDLE_STATE) {
			start_error_recovery(kcs,
					     "Not in idle state for error3");
			break;
		}

		if (!check_obf(kcs, status, time))
			return SI_SM_CALL_WITH_DELAY;

		clear_obf(kcs, status);
		if (kcs->orig_write_count) {
			restart_kcs_transaction(kcs);
		} else {
			kcs->state = KCS_IDLE;
			return SI_SM_TRANSACTION_COMPLETE;
		}
		break;

	case KCS_HOSED:
		break;
	}

	if (kcs->state == KCS_HOSED) {
		init_kcs_data(kcs, kcs->io);
		return SI_SM_HOSED;
	}

	return SI_SM_CALL_WITHOUT_DELAY;
}
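
/*
 * A minimal, hypothetical sketch (not part of the original driver) showing
 * how a purely polled caller could drive kcs_event().  It assumes the
 * companion helpers start_kcs_transaction() and get_kcs_result() from the
 * same state-machine file; the real interface layer drives the machine from
 * its timer and interrupt handlers rather than a busy loop like this.
 */
static int poll_kcs_transaction(struct si_sm_data *kcs,
				unsigned char *msg, unsigned int msg_len,
				unsigned char *resp, unsigned int resp_len)
{
	enum si_sm_result result;
	long		  elapsed = 0;	/* usecs since the previous call */

	/* Hand the request to the state machine (assumed 0 on success). */
	if (start_kcs_transaction(kcs, msg, msg_len) != 0)
		return -EBUSY;

	for (;;) {
		/* The second argument tells the machine how long we waited. */
		result = kcs_event(kcs, elapsed);

		switch (result) {
		case SI_SM_CALL_WITHOUT_DELAY:
			elapsed = 0;
			continue;
		case SI_SM_CALL_WITH_DELAY:
			udelay(100);
			elapsed = 100;
			continue;
		case SI_SM_CALL_WITH_TICK_DELAY:
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout(1);
			elapsed = 1000000 / HZ;
			continue;
		case SI_SM_TRANSACTION_COMPLETE:
			/*
			 * Copy the response out of the state machine; assumed
			 * to return the number of response bytes.
			 */
			return get_kcs_result(kcs, resp, resp_len);
		case SI_SM_HOSED:
		default:
			return -EIO;
		}
	}
}
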
/* Returns 0 if initialized, or negative on an error. */
static int init_one_kcs(int kcs_port, 
			int irq, 
			unsigned long kcs_physaddr,
			struct kcs_info **kcs)
{
	int		rv;
	struct kcs_info *new_kcs;

	/* Did anything get passed in at all?  Both == zero disables the
	   driver. */

	if (!(kcs_port || kcs_physaddr)) 
		return -ENODEV;
	
	/* Only initialize a port OR a physical address on this call.
	   Also, IRQs can go with either ports or addresses. */

	if (kcs_port && kcs_physaddr)
		return -EINVAL;

	new_kcs = kmalloc(sizeof(*new_kcs), GFP_KERNEL);
	if (!new_kcs) {
		printk(KERN_ERR "ipmi_kcs: out of memory\n");
		return -ENOMEM;
	}

	/* So we know not to free it unless we have allocated one. */
	new_kcs->kcs_sm = NULL;

	new_kcs->addr = NULL;
	new_kcs->physaddr = kcs_physaddr;
	new_kcs->port = kcs_port;

	if (kcs_port) {
		if (request_region(kcs_port, 2, DEVICE_NAME) == NULL) {
			kfree(new_kcs);
			printk(KERN_ERR 
			       "ipmi_kcs: can't reserve port @ 0x%4.4x\n",
		       	       kcs_port);
			return -EIO;
		}
	} else {
		if (request_mem_region(kcs_physaddr, 2, DEVICE_NAME) == NULL) {
			kfree(new_kcs);
			printk(KERN_ERR 
			       "ipmi_kcs: can't reserve memory @ 0x%lx\n",
		       	       kcs_physaddr);
			return -EIO;
		}
		if ((new_kcs->addr = ioremap(kcs_physaddr, 2)) == NULL) {
			release_mem_region(kcs_physaddr, 2);
			kfree(new_kcs);
			printk(KERN_ERR
			       "ipmi_kcs: can't remap memory at 0x%lx\n",
			       kcs_physaddr);
			return -EIO;
		}
	}

	new_kcs->kcs_sm = kmalloc(kcs_size(), GFP_KERNEL);
	if (!new_kcs->kcs_sm) {
		printk(KERN_ERR "ipmi_kcs: out of memory\n");
		rv = -ENOMEM;
		goto out_err;
	}
	init_kcs_data(new_kcs->kcs_sm, kcs_port, new_kcs->addr);
	spin_lock_init(&(new_kcs->kcs_lock));
	spin_lock_init(&(new_kcs->msg_lock));

	rv = ipmi_kcs_detect_hardware(kcs_port, new_kcs->addr, new_kcs->kcs_sm);
	if (rv) {
		if (kcs_port) 
			printk(KERN_ERR 
			       "ipmi_kcs: No KCS @ port 0x%4.4x\n", 
			       kcs_port);
		else
			printk(KERN_ERR 
			       "ipmi_kcs: No KCS @ addr 0x%lx\n", 
			       kcs_physaddr);
		goto out_err;
	}

	if (irq != 0) {
		rv = request_irq(irq,
				 kcs_irq_handler,
				 SA_INTERRUPT,
				 DEVICE_NAME,
				 new_kcs);
		if (rv) {
			printk(KERN_WARNING
			       "ipmi_kcs: %s unable to claim interrupt %d,"
			       " running polled\n",
			       DEVICE_NAME, irq);
			irq = 0;
		}
	}
	new_kcs->irq = irq;

	INIT_LIST_HEAD(&(new_kcs->xmit_msgs));
	INIT_LIST_HEAD(&(new_kcs->hp_xmit_msgs));
	new_kcs->curr_msg = NULL;
	atomic_set(&new_kcs->req_events, 0);
	new_kcs->run_to_completion = 0;

	start_clear_flags(new_kcs);

	if (irq) {
		new_kcs->kcs_state = KCS_CLEARING_FLAGS_THEN_SET_IRQ;

		printk(KERN_INFO 
		       "ipmi_kcs: Acquiring BMC @ port=0x%x irq=%d\n",
		       kcs_port, irq);

	} else {
		if (kcs_port)
			printk(KERN_INFO 
			       "ipmi_kcs: Acquiring BMC @ port=0x%x\n",
		       	       kcs_port);
		else
			printk(KERN_INFO 
			       "ipmi_kcs: Acquiring BMC @ addr=0x%lx\n",
		       	       kcs_physaddr);
	}

	rv = ipmi_register_smi(&handlers,
			       new_kcs,
			       ipmi_version_major,
			       ipmi_version_minor,
			       &(new_kcs->intf));
	if (rv) {
		if (irq)
			free_irq(irq, new_kcs);
		printk(KERN_ERR
		       "ipmi_kcs: Unable to register device: error %d\n",
		       rv);
		goto out_err;
	}

	new_kcs->interrupt_disabled = 0;
	new_kcs->timer_stopped = 0;
	new_kcs->stop_operation = 0;

	init_timer(&(new_kcs->kcs_timer));
	new_kcs->kcs_timer.data = (long) new_kcs;
	new_kcs->kcs_timer.function = kcs_timeout;
	new_kcs->last_timeout_jiffies = jiffies;
	new_kcs->kcs_timer.expires = jiffies + KCS_TIMEOUT_JIFFIES;
	add_timer(&(new_kcs->kcs_timer));

	*kcs = new_kcs;

	return 0;

 out_err:
	if (kcs_port)
		release_region(kcs_port, 2);
	if (new_kcs->addr) 
		iounmap(new_kcs->addr);
	if (kcs_physaddr) 
		release_mem_region(kcs_physaddr, 2);
	if (new_kcs->kcs_sm)
		kfree(new_kcs->kcs_sm);
	kfree(new_kcs);
	return rv;
}
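
/*
 * A hypothetical sketch (not taken from this file) of how a module init
 * routine might feed init_one_kcs() from per-slot configuration.  The
 * KCS_MAX_DRIVERS limit and the kcs_ports/kcs_addrs/kcs_irqs arrays are
 * illustrative assumptions standing in for whatever module parameters the
 * driver actually exposes.
 */
#define KCS_MAX_DRIVERS 4

static int	     kcs_ports[KCS_MAX_DRIVERS]; /* I/O port per slot, 0 = unused */
static int	     kcs_irqs[KCS_MAX_DRIVERS];	 /* IRQ per slot, 0 = polled */
static unsigned long kcs_addrs[KCS_MAX_DRIVERS]; /* memory address per slot */

static struct kcs_info *kcs_infos[KCS_MAX_DRIVERS];

static int __init init_ipmi_kcs_sketch(void)
{
	int i;
	int found = 0;

	for (i = 0; i < KCS_MAX_DRIVERS; i++) {
		/*
		 * init_one_kcs() returns -ENODEV when both the port and the
		 * physical address are zero, so empty slots are skipped
		 * naturally.
		 */
		if (init_one_kcs(kcs_ports[i], kcs_irqs[i],
				 kcs_addrs[i], &kcs_infos[i]) == 0)
			found++;
	}

	/* Succeed only if at least one interface registered. */
	return found ? 0 : -ENODEV;
}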