Example #1
0
static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
{
	unsigned char status, BT_CAP[8];
	static enum bt_states last_printed = BT_STATE_PRINTME;
	int i;

	status = BT_STATUS;
	bt->nonzero_status |= status;
	if ((bt_debug & BT_DEBUG_STATES) && (bt->state != last_printed)) {
		printk(KERN_WARNING "BT: %s %s TO=%ld - %ld \n",
			STATE2TXT,
			STATUS2TXT,
			bt->timeout,
			time);
		last_printed = bt->state;
	}

	/*
	 * Commands that time out may still (eventually) provide a response.
	 * This stale response will get in the way of a new response so remove
	 * it if possible (hopefully during IDLE).  Even if it comes up later
	 * it will be rejected by its (now-forgotten) seq number.
	 */

	if ((bt->state < BT_STATE_WRITE_BYTES) && (status & BT_B2H_ATN)) {
		drain_BMC2HOST(bt);
		BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
	}

	if ((bt->state != BT_STATE_IDLE) &&
	    (bt->state <  BT_STATE_PRINTME)) {
		/* check timeout */
		bt->timeout -= time;
		if ((bt->timeout < 0) && (bt->state < BT_STATE_RESET1))
			return error_recovery(bt,
					      status,
					      IPMI_TIMEOUT_ERR);
	}

	switch (bt->state) {

	/*
	 * Idle state first checks for asynchronous messages from another
	 * channel, then does some opportunistic housekeeping.
	 */

	case BT_STATE_IDLE:
		if (status & BT_SMS_ATN) {
			BT_CONTROL(BT_SMS_ATN);	/* clear it */
			return SI_SM_ATTN;
		}

		if (status & BT_H_BUSY)		/* clear a leftover H_BUSY */
			BT_CONTROL(BT_H_BUSY);

		/* Read BT capabilities if it hasn't been done yet */
		if (!bt->BT_CAP_outreqs)
			BT_STATE_CHANGE(BT_STATE_CAPABILITIES_BEGIN,
					SI_SM_CALL_WITHOUT_DELAY);
		bt->timeout = bt->BT_CAP_req2rsp;
		BT_SI_SM_RETURN(SI_SM_IDLE);

	case BT_STATE_XACTION_START:
		if (status & (BT_B_BUSY | BT_H2B_ATN))
			BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
		if (BT_STATUS & BT_H_BUSY)
			BT_CONTROL(BT_H_BUSY);	/* force clear */
		BT_STATE_CHANGE(BT_STATE_WRITE_BYTES,
				SI_SM_CALL_WITHOUT_DELAY);

	case BT_STATE_WRITE_BYTES:
		if (status & BT_H_BUSY)
			BT_CONTROL(BT_H_BUSY);	/* clear */
		BT_CONTROL(BT_CLR_WR_PTR);
		write_all_bytes(bt);
		BT_CONTROL(BT_H2B_ATN);	/* can clear too fast to catch */
		BT_STATE_CHANGE(BT_STATE_WRITE_CONSUME,
				SI_SM_CALL_WITHOUT_DELAY);

	case BT_STATE_WRITE_CONSUME:
		if (status & (BT_B_BUSY | BT_H2B_ATN))
			BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
		BT_STATE_CHANGE(BT_STATE_READ_WAIT,
				SI_SM_CALL_WITHOUT_DELAY);

	/* Spinning hard can suppress B2H_ATN and force a timeout */

	case BT_STATE_READ_WAIT:
		if (!(status & BT_B2H_ATN))
			BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
		BT_CONTROL(BT_H_BUSY);		/* set */

		/*
	 * Uncached, ordered writes should just proceed serially but
		 * some BMCs don't clear B2H_ATN with one hit.  Fast-path a
		 * workaround without too much penalty to the general case.
		 */

		BT_CONTROL(BT_B2H_ATN);		/* clear it to ACK the BMC */
		BT_STATE_CHANGE(BT_STATE_CLEAR_B2H,
				SI_SM_CALL_WITHOUT_DELAY);

	case BT_STATE_CLEAR_B2H:
		if (status & BT_B2H_ATN) {
			/* keep hitting it */
			BT_CONTROL(BT_B2H_ATN);
			BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
		}
		BT_STATE_CHANGE(BT_STATE_READ_BYTES,
				SI_SM_CALL_WITHOUT_DELAY);

	case BT_STATE_READ_BYTES:
		if (!(status & BT_H_BUSY))
			/* check in case of retry */
			BT_CONTROL(BT_H_BUSY);
		BT_CONTROL(BT_CLR_RD_PTR);	/* start of BMC2HOST buffer */
		i = read_all_bytes(bt);		/* true == packet seq match */
		BT_CONTROL(BT_H_BUSY);		/* NOW clear */
		if (!i) 			/* Not my message */
			BT_STATE_CHANGE(BT_STATE_READ_WAIT,
					SI_SM_CALL_WITHOUT_DELAY);
		bt->state = bt->complete;
		return bt->state == BT_STATE_IDLE ?	/* where to next? */
			SI_SM_TRANSACTION_COMPLETE :	/* normal */
			SI_SM_CALL_WITHOUT_DELAY;	/* Startup magic */

	case BT_STATE_LONG_BUSY:	/* For example: after FW update */
		if (!(status & BT_B_BUSY)) {
			reset_flags(bt);	/* next state is now IDLE */
			bt_init_data(bt, bt->io);
		}
		return SI_SM_CALL_WITH_DELAY;	/* No repeat printing */

	case BT_STATE_RESET1:
		reset_flags(bt);
		drain_BMC2HOST(bt);
		BT_STATE_CHANGE(BT_STATE_RESET2,
				SI_SM_CALL_WITH_DELAY);

	case BT_STATE_RESET2:		/* Send a soft reset */
		BT_CONTROL(BT_CLR_WR_PTR);
		HOST2BMC(3);		/* number of bytes following */
		HOST2BMC(0x18);		/* NetFn/LUN == Application, LUN 0 */
		HOST2BMC(42);		/* Sequence number */
		HOST2BMC(3);		/* Cmd == Soft reset */
		BT_CONTROL(BT_H2B_ATN);
		bt->timeout = BT_RESET_DELAY * 1000000;
		BT_STATE_CHANGE(BT_STATE_RESET3,
				SI_SM_CALL_WITH_DELAY);

	case BT_STATE_RESET3:		/* Hold off everything for a bit */
		if (bt->timeout > 0)
			return SI_SM_CALL_WITH_DELAY;
		drain_BMC2HOST(bt);
		BT_STATE_CHANGE(BT_STATE_RESTART,
				SI_SM_CALL_WITH_DELAY);

	case BT_STATE_RESTART:		/* don't reset retries or seq! */
		bt->read_count = 0;
		bt->nonzero_status = 0;
		bt->timeout = bt->BT_CAP_req2rsp;
		BT_STATE_CHANGE(BT_STATE_XACTION_START,
				SI_SM_CALL_WITH_DELAY);

	/*
	 * Get BT Capabilities, using timing of upper level state machine.
	 * Set outreqs to prevent infinite loop on timeout.
	 */
	case BT_STATE_CAPABILITIES_BEGIN:
		bt->BT_CAP_outreqs = 1;
		{
			unsigned char GetBT_CAP[] = { 0x18, 0x36 };
			bt->state = BT_STATE_IDLE;
			bt_start_transaction(bt, GetBT_CAP, sizeof(GetBT_CAP));
		}
		bt->complete = BT_STATE_CAPABILITIES_END;
		BT_STATE_CHANGE(BT_STATE_XACTION_START,
				SI_SM_CALL_WITH_DELAY);

	case BT_STATE_CAPABILITIES_END:
		i = bt_get_result(bt, BT_CAP, sizeof(BT_CAP));
		bt_init_data(bt, bt->io);
		if ((i == 8) && !BT_CAP[2]) {
			bt->BT_CAP_outreqs = BT_CAP[3];
			bt->BT_CAP_req2rsp = BT_CAP[6] * 1000000;
			bt->BT_CAP_retries = BT_CAP[7];
		} else
			printk(KERN_WARNING "IPMI BT: using default values\n");
		if (!bt->BT_CAP_outreqs)
			bt->BT_CAP_outreqs = 1;
		printk(KERN_WARNING "IPMI BT: req2rsp=%ld secs retries=%d\n",
			bt->BT_CAP_req2rsp / 1000000L, bt->BT_CAP_retries);
		bt->timeout = bt->BT_CAP_req2rsp;
		return SI_SM_CALL_WITHOUT_DELAY;

	default:	/* should never occur */
		return error_recovery(bt,
				      status,
				      IPMI_ERR_UNSPECIFIED);
	}
	return SI_SM_CALL_WITH_DELAY;
}
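
The BT_STATE_CHANGE() and BT_SI_SM_RETURN() macros are why the switch cases above carry no break statements: each invocation returns from bt_event(). A minimal sketch of the assumed macro bodies (the real definitions sit next to this state machine in drivers/char/ipmi/ipmi_bt_sm.c; treat the exact spelling here as an assumption):

/* Sketch only: assumed helper macros for bt_event().  Both expand to a
 * return, so no case in the switch above falls through to the next one;
 * BT_SI_SM_RETURN() also re-arms the debug print by resetting last_printed. */
#define BT_STATE_CHANGE(X, Y)	{ bt->state = X; return Y; }
#define BT_SI_SM_RETURN(Y)	{ last_printed = BT_STATE_PRINTME; return Y; }
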
Example #2
0
lr_symbol* lr_parser::parse()
{
  /* set up direct reference to tables to drive the parser */
  production_tab = production_table();
  action_tab     = action_table();
  reduce_tab     = reduce_table();

  /* initialize the action encapsulation object */
  init_actions();

  /* do user initialization */
  user_init();

  /* get the first token */
  cur_token = scan();

  /* push dummy symbol with start state to get us underway */
  stack.remove_all_elements();
  lr_symbol dummy_sym(0, start_state());
  stack.push(&dummy_sym);

  /* continue until accept or fatal error */
  while (true)
    {
      /* Check current token for freshness. */
      assert(-1 == cur_token->parse_state);

      /* current state is always on the top of the stack */

      /* look up action out of the current state with the current input */
      int act = get_action(stack.peek()->parse_state, cur_token->sym);

      /* decode the action -- act > 0 encodes a shift */
      if (act > 0)
        {
          act = act - 1;

          DEBUG_LOG("Shift and goto " << act);

          /* shift to the encoded state by pushing it on the stack */
          cur_token->parse_state = act;
          stack.push(cur_token);

          /* advance to the next symbol */
          cur_token = scan();
        }
      /* if it's less than zero, then it encodes a reduce action */
      else if (act < 0)
        {
          act = (-act) - 1;

          DEBUG_LOG("Reduce by rule " << act);

          /* perform the action for the reduce */
          lr_symbol* lhs_sym = do_action(act);

          /* check for accept indication */
          if (lhs_sym == 0)
            {
              return stack.peek();
            }

          /* look up information about the production */

          lhs_sym->sym      = production_tab[act].lhs_sym;
          short handle_size = production_tab[act].rhs_size;

          /* pop the handle off the stack */
          stack.npop(handle_size);

          /* look up the state to go to from the one popped back to */
          act = get_reduce(stack.peek()->parse_state, lhs_sym->sym);

          /* shift to that state */
          lhs_sym->parse_state = act;
          stack.push(lhs_sym);

          DEBUG_LOG("      and goto " << act);
        }
      /* finally if the entry is zero, we have an error */
      else if (act == 0)
        {
          DEBUG_LOG("Error");

          /* call user syntax error reporting routine */
          syntax_error(cur_token);

          /* try to error recover */
          switch (error_recovery())
            {
            case ERS_FAIL:
              /* if that fails give up with a fatal syntax error */
              unrecovered_syntax_error(cur_token);
              return 0;
            case ERS_SUCCESS:
              break;
            case ERS_ACCEPT:
              return stack.peek();
            default:
              assert(0);
            }
        }
    }

}
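
The sign convention on act does all of the dispatch work in the loop above: a table entry n > 0 means "shift and go to state n - 1", an entry n < 0 means "reduce by production (-n) - 1", and 0 marks an error entry. A small self-contained illustration of that decoding (the enum, struct, and function below are illustrative only and are not part of lr_parser):

/* Illustration of the action encoding consumed by lr_parser::parse():
 *   act > 0  -> shift, target state is act - 1
 *   act < 0  -> reduce, production number is (-act) - 1
 *   act == 0 -> error entry
 * These types are for illustration; the parser itself works on raw ints.
 */
enum action_kind { ACT_SHIFT, ACT_REDUCE, ACT_ERROR };

struct decoded_action {
  enum action_kind kind;
  int arg;   /* state to shift to, or production to reduce by */
};

static struct decoded_action decode_action(int act)
{
  struct decoded_action d;
  if (act > 0)      { d.kind = ACT_SHIFT;  d.arg = act - 1; }
  else if (act < 0) { d.kind = ACT_REDUCE; d.arg = (-act) - 1; }
  else              { d.kind = ACT_ERROR;  d.arg = 0; }
  return d;
}
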
Example #3
0
static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
{
	unsigned char status;
	char buf[40]; /* For getting status */
	int i;

	status = BT_STATUS;
	bt->nonzero_status |= status;

	if ((bt_debug & BT_DEBUG_STATES) && (bt->state != bt->last_state))
		printk(KERN_WARNING "BT: %s %s TO=%ld - %ld \n",
			STATE2TXT,
			STATUS2TXT(buf),
			bt->timeout,
			time);
	bt->last_state = bt->state;

	if (bt->state == BT_STATE_HOSED) return SI_SM_HOSED;

	if (bt->state != BT_STATE_IDLE) {	/* do timeout test */

		/* Certain states, on error conditions, can lock up a CPU
		   because they are effectively in an infinite loop with
		   CALL_WITHOUT_DELAY (right back here with time == 0).
		   Prevent infinite lockup by ALWAYS decrementing timeout. */

		/* FIXME: bt_event is sometimes called with time > BT_NORMAL_TIMEOUT
		   (noticed in ipmi_smic_sm.c January 2004) */

		if ((time <= 0) || (time >= BT_NORMAL_TIMEOUT)) time = 100;
		bt->timeout -= time;
		if ((bt->timeout < 0) && (bt->state < BT_STATE_RESET1)) {
			error_recovery(bt, "timed out");
			return SI_SM_CALL_WITHOUT_DELAY;
		}
	}

	switch (bt->state) {

	case BT_STATE_IDLE:	/* check for asynchronous messages */
		if (status & BT_SMS_ATN) {
			BT_CONTROL(BT_SMS_ATN);	/* clear it */
			return SI_SM_ATTN;
		}
		return SI_SM_IDLE;

	case BT_STATE_XACTION_START:
		if (status & BT_H_BUSY) {
			BT_CONTROL(BT_H_BUSY);
			break;
		}
		if (status & BT_B2H_ATN) break;
		bt->state = BT_STATE_WRITE_BYTES;
		return SI_SM_CALL_WITHOUT_DELAY;	/* for logging */

	case BT_STATE_WRITE_BYTES:
		if (status & (BT_B_BUSY | BT_H2B_ATN)) break;
		BT_CONTROL(BT_CLR_WR_PTR);
		write_all_bytes(bt);
		BT_CONTROL(BT_H2B_ATN);	/* clears too fast to catch? */
		bt->state = BT_STATE_WRITE_CONSUME;
		return SI_SM_CALL_WITHOUT_DELAY; /* it MIGHT sail through */

	case BT_STATE_WRITE_CONSUME: /* BMCs usually blow right thru here */
		if (status & (BT_H2B_ATN | BT_B_BUSY)) break;
		bt->state = BT_STATE_B2H_WAIT;
		/* fall through with status */

	/* Stay in BT_STATE_B2H_WAIT until a packet matches.  However, spinning
	   hard here, constantly reading status, seems to hold off the
	   generation of B2H_ATN so ALWAYS return CALL_WITH_DELAY. */

	case BT_STATE_B2H_WAIT:
		if (!(status & BT_B2H_ATN)) break;

		/* Assume ordered, uncached writes: no need to wait */
		if (!(status & BT_H_BUSY)) BT_CONTROL(BT_H_BUSY); /* set */
		BT_CONTROL(BT_B2H_ATN);		/* clear it, ACK to the BMC */
		BT_CONTROL(BT_CLR_RD_PTR);	/* reset the queue */
		i = read_all_bytes(bt);
		BT_CONTROL(BT_H_BUSY);		/* clear */
		if (!i) break;			/* Try this state again */
		bt->state = BT_STATE_READ_END;
		return SI_SM_CALL_WITHOUT_DELAY;	/* for logging */

	case BT_STATE_READ_END:

		/* I could wait on BT_H_BUSY to go clear for a truly clean
		   exit.  However, this is already done in XACTION_START
		   and the (possible) extra loop/status/possible wait affects
		   performance.  So, as long as it works, just ignore H_BUSY */

#ifdef MAKE_THIS_TRUE_IF_NECESSARY

		if (status & BT_H_BUSY) break;
#endif
		bt->seq++;
		bt->state = BT_STATE_IDLE;
		return SI_SM_TRANSACTION_COMPLETE;

	case BT_STATE_RESET1:
		reset_flags(bt);
		bt->timeout = BT_RESET_DELAY;
		bt->state = BT_STATE_RESET2;
		break;

	case BT_STATE_RESET2:		/* Send a soft reset */
		BT_CONTROL(BT_CLR_WR_PTR);
		HOST2BMC(3);		/* number of bytes following */
		HOST2BMC(0x18);		/* NetFn/LUN == Application, LUN 0 */
		HOST2BMC(42);		/* Sequence number */
		HOST2BMC(3);		/* Cmd == Soft reset */
		BT_CONTROL(BT_H2B_ATN);
		bt->state = BT_STATE_RESET3;
		break;

	case BT_STATE_RESET3:
		if (bt->timeout > 0) return SI_SM_CALL_WITH_DELAY;
		bt->state = BT_STATE_RESTART;	/* printk in debug modes */
		break;

	case BT_STATE_RESTART:		/* don't reset retries! */
		bt->write_data[2] = ++bt->seq;
		bt->read_count = 0;
		bt->nonzero_status = 0;
		bt->timeout = BT_NORMAL_TIMEOUT;
		bt->state = BT_STATE_XACTION_START;
		break;

	default:	/* HOSED is supposed to be caught much earlier */
		error_recovery(bt, "internal logic error");
		break;
	}
	return SI_SM_CALL_WITH_DELAY;
}
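
For reference, the four HOST2BMC() writes in BT_STATE_RESET2 (in both versions of the driver shown here) follow the generic BT request framing: a length byte counting the bytes that follow it, then NetFn/LUN, a sequence number, the command byte, and any request data. The helper below is hypothetical and only makes that byte layout explicit; it is not part of the driver:

/*
 * Illustrative only: the BT host-to-BMC request layout that
 * BT_STATE_RESET2 emits by hand as 3, 0x18, 42, 3 (length,
 * NetFn/LUN, seq, cmd, no extra data).  bt_frame_request() is
 * a hypothetical helper, not something the driver defines.
 */
static unsigned int bt_frame_request(unsigned char *buf,
				     unsigned char netfn_lun,
				     unsigned char seq,
				     unsigned char cmd,
				     const unsigned char *data,
				     unsigned int data_len)
{
	unsigned int i;

	buf[0] = data_len + 3;	/* bytes following the length byte */
	buf[1] = netfn_lun;	/* e.g. 0x18: Application request, LUN 0 */
	buf[2] = seq;		/* echoed back so the response can be matched */
	buf[3] = cmd;
	for (i = 0; i < data_len; i++)
		buf[4 + i] = data[i];

	return data_len + 4;	/* total bytes to push with HOST2BMC() */
}
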
Example #4
0
static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
{
	unsigned char status, BT_CAP[8];
	static enum bt_states last_printed = BT_STATE_PRINTME;
	int i;

	status = BT_STATUS;
	bt->nonzero_status |= status;
	if ((bt_debug & BT_DEBUG_STATES) && (bt->state != last_printed)) {
		printk(KERN_WARNING "BT: %s %s TO=%ld - %ld \n",
			STATE2TXT,
			STATUS2TXT,
			bt->timeout,
			time);
		last_printed = bt->state;
	}

	/*
	 * Commands that time out may still (eventually) provide a response.
	 * This stale response will get in the way of a new response so remove
	 * it if possible (hopefully during IDLE).  Even if it comes up later
	 * it will be rejected by its (now-forgotten) seq number.
	 */

	if ((bt->state < BT_STATE_WRITE_BYTES) && (status & BT_B2H_ATN)) {
		drain_BMC2HOST(bt);
		BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
	}

	if ((bt->state != BT_STATE_IDLE) &&
	    (bt->state <  BT_STATE_PRINTME)) {
		/* check timeout */
		bt->timeout -= time;
		if ((bt->timeout < 0) && (bt->state < BT_STATE_RESET1))
			return error_recovery(bt,
					      status,
					      IPMI_TIMEOUT_ERR);
	}

	switch (bt->state) {

	/*
	 * Idle state first checks for asynchronous messages from another
	 * channel, then does some opportunistic housekeeping.
	 */

	case BT_STATE_IDLE:
		if (status & BT_SMS_ATN) {
			BT_CONTROL(BT_SMS_ATN);	/* clear it */
			return SI_SM_ATTN;
		}

		if (status & BT_H_BUSY)		/* clear a leftover H_BUSY */
			BT_CONTROL(BT_H_BUSY);

		/* Read BT capabilities if it hasn't been done yet */
		if (!bt->BT_CAP_outreqs)
			BT_STATE_CHANGE(BT_STATE_CAPABILITIES_BEGIN,
					SI_SM_CALL_WITHOUT_DELAY);
		bt->timeout = bt->BT_CAP_req2rsp;
		BT_SI_SM_RETURN(SI_SM_IDLE);

	case BT_STATE_XACTION_START:
		if (status & (BT_B_BUSY | BT_H2B_ATN))
			BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
		if (BT_STATUS & BT_H_BUSY)
			BT_CONTROL(BT_H_BUSY);	/* force clear */
		BT_STATE_CHANGE(BT_STATE_WRITE_BYTES,
				SI_SM_CALL_WITHOUT_DELAY);

	case BT_STATE_WRITE_BYTES:
		if (status & BT_H_BUSY)
			BT_CONTROL(BT_H_BUSY);	/* clear */
		BT_CONTROL(BT_CLR_WR_PTR);
		write_all_bytes(bt);
		BT_CONTROL(BT_H2B_ATN);	/* can clear too fast to catch */
		BT_STATE_CHANGE(BT_STATE_WRITE_CONSUME,
				SI_SM_CALL_WITHOUT_DELAY);

	case BT_STATE_WRITE_CONSUME:
		if (status & (BT_B_BUSY | BT_H2B_ATN))
			BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
		BT_STATE_CHANGE(BT_STATE_READ