/*
 * bt_event - advance the BT (Block Transfer) interface state machine.
 * @bt:   per-interface state (current state, timeout, capability values).
 * @time: microseconds elapsed since the last call; subtracted from
 *        bt->timeout for timeout detection.
 *
 * Called repeatedly by the upper-level SI driver.  Reads the hardware
 * status register once, then dispatches on bt->state.  Returns an
 * si_sm_result telling the caller how soon to call again (with/without
 * delay), or SI_SM_IDLE / SI_SM_ATTN / SI_SM_TRANSACTION_COMPLETE.
 *
 * NOTE(review): BT_STATE_CHANGE() and BT_SI_SM_RETURN() are macros whose
 * definitions are outside this chunk; they appear to set bt->state and/or
 * return from this function (the switch cases are written as if they do
 * not fall through) — confirm against the macro definitions.
 */
static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
{
	unsigned char status, BT_CAP[8];
	/* Remember last state printed so debug output isn't repeated. */
	static enum bt_states last_printed = BT_STATE_PRINTME;
	int i;

	/* Sample the hardware status register once per invocation. */
	status = BT_STATUS;
	bt->nonzero_status |= status;

	/* Optional state-transition tracing, printed once per new state. */
	if ((bt_debug & BT_DEBUG_STATES) && (bt->state != last_printed)) {
		printk(KERN_WARNING "BT: %s %s TO=%ld - %ld \n",
			STATE2TXT,
			STATUS2TXT,
			bt->timeout,
			time);
		last_printed = bt->state;
	}

	/*
	 * Commands that time out may still (eventually) provide a response.
	 * This stale response will get in the way of a new response so remove
	 * it if possible (hopefully during IDLE). Even if it comes up later
	 * it will be rejected by its (now-forgotten) seq number.
	 */
	if ((bt->state < BT_STATE_WRITE_BYTES) && (status & BT_B2H_ATN)) {
		drain_BMC2HOST(bt);
		BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
	}

	/*
	 * Account for elapsed time while a transaction is in flight.
	 * States at/after BT_STATE_RESET1 manage their own timing, so a
	 * negative timeout only triggers error recovery before that point.
	 */
	if ((bt->state != BT_STATE_IDLE) &&
	    (bt->state < BT_STATE_PRINTME)) {
		/* check timeout */
		bt->timeout -= time;
		if ((bt->timeout < 0) && (bt->state < BT_STATE_RESET1))
			return error_recovery(bt,
					      status,
					      IPMI_TIMEOUT_ERR);
	}

	switch (bt->state) {

	/*
	 * Idle state first checks for asynchronous messages from another
	 * channel, then does some opportunistic housekeeping.
	 */
	case BT_STATE_IDLE:
		if (status & BT_SMS_ATN) {
			BT_CONTROL(BT_SMS_ATN);	/* clear it */
			return SI_SM_ATTN;	/* tell caller the BMC wants attention */
		}

		if (status & BT_H_BUSY)		/* clear a leftover H_BUSY */
			BT_CONTROL(BT_H_BUSY);

		/* Read BT capabilities if it hasn't been done yet */
		if (!bt->BT_CAP_outreqs)
			BT_STATE_CHANGE(BT_STATE_CAPABILITIES_BEGIN,
					SI_SM_CALL_WITHOUT_DELAY);

		/* Re-arm the transaction timeout for the next request. */
		bt->timeout = bt->BT_CAP_req2rsp;
		BT_SI_SM_RETURN(SI_SM_IDLE);

	case BT_STATE_XACTION_START:
		/* Wait until the BMC is neither busy nor still consuming. */
		if (status & (BT_B_BUSY | BT_H2B_ATN))
			BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
		/* Re-read status: H_BUSY may have changed since sampling. */
		if (BT_STATUS & BT_H_BUSY)
			BT_CONTROL(BT_H_BUSY);	/* force clear */
		BT_STATE_CHANGE(BT_STATE_WRITE_BYTES,
				SI_SM_CALL_WITHOUT_DELAY);

	case BT_STATE_WRITE_BYTES:
		if (status & BT_H_BUSY)
			BT_CONTROL(BT_H_BUSY);	/* clear */
		BT_CONTROL(BT_CLR_WR_PTR);	/* reset HOST2BMC write pointer */
		write_all_bytes(bt);		/* push the queued request */
		BT_CONTROL(BT_H2B_ATN);	/* can clear too fast to catch */
		BT_STATE_CHANGE(BT_STATE_WRITE_CONSUME,
				SI_SM_CALL_WITHOUT_DELAY);

	case BT_STATE_WRITE_CONSUME:
		/* Wait for the BMC to finish consuming the request. */
		if (status & (BT_B_BUSY | BT_H2B_ATN))
			BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
		BT_STATE_CHANGE(BT_STATE_READ_WAIT,
				SI_SM_CALL_WITHOUT_DELAY);

	/* Spinning hard can suppress B2H_ATN and force a timeout */
	case BT_STATE_READ_WAIT:
		if (!(status & BT_B2H_ATN))
			BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
		BT_CONTROL(BT_H_BUSY);		/* set */

		/*
		 * Uncached, ordered writes should just proceeed serially but
		 * some BMCs don't clear B2H_ATN with one hit.  Fast-path a
		 * workaround without too much penalty to the general case.
		 */
		BT_CONTROL(BT_B2H_ATN);		/* clear it to ACK the BMC */
		BT_STATE_CHANGE(BT_STATE_CLEAR_B2H,
				SI_SM_CALL_WITHOUT_DELAY);

	case BT_STATE_CLEAR_B2H:
		if (status & BT_B2H_ATN) {
			/* keep hitting it */
			BT_CONTROL(BT_B2H_ATN);
			BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
		}
		BT_STATE_CHANGE(BT_STATE_READ_BYTES,
				SI_SM_CALL_WITHOUT_DELAY);

	case BT_STATE_READ_BYTES:
		if (!(status & BT_H_BUSY))	/* check in case of retry */
			BT_CONTROL(BT_H_BUSY);
		BT_CONTROL(BT_CLR_RD_PTR);	/* start of BMC2HOST buffer */
		i = read_all_bytes(bt);		/* true == packet seq match */
		BT_CONTROL(BT_H_BUSY);		/* NOW clear */
		if (!i)				/* Not my message */
			BT_STATE_CHANGE(BT_STATE_READ_WAIT,
					SI_SM_CALL_WITHOUT_DELAY);
		/* Hand off to whatever state the transaction registered. */
		bt->state = bt->complete;
		return bt->state == BT_STATE_IDLE ?	/* where to next? */
			SI_SM_TRANSACTION_COMPLETE :	/* normal */
			SI_SM_CALL_WITHOUT_DELAY;

	/* Startup magic */

	case BT_STATE_LONG_BUSY:	/* For example: after FW update */
		if (!(status & BT_B_BUSY)) {
			reset_flags(bt);	/* next state is now IDLE */
			bt_init_data(bt, bt->io);
		}
		return SI_SM_CALL_WITH_DELAY;	/* No repeat printing */

	case BT_STATE_RESET1:
		reset_flags(bt);
		drain_BMC2HOST(bt);
		BT_STATE_CHANGE(BT_STATE_RESET2,
				SI_SM_CALL_WITH_DELAY);

	case BT_STATE_RESET2:		/* Send a soft reset */
		BT_CONTROL(BT_CLR_WR_PTR);
		HOST2BMC(3);		/* number of bytes following */
		HOST2BMC(0x18);		/* NetFn/LUN == Application, LUN 0 */
		HOST2BMC(42);		/* Sequence number */
		HOST2BMC(3);		/* Cmd == Soft reset */
		BT_CONTROL(BT_H2B_ATN);
		/* BT_RESET_DELAY is presumably in seconds; timeout is in usec. */
		bt->timeout = BT_RESET_DELAY * 1000000;
		BT_STATE_CHANGE(BT_STATE_RESET3,
				SI_SM_CALL_WITH_DELAY);

	case BT_STATE_RESET3:		/* Hold off everything for a bit */
		if (bt->timeout > 0)
			return SI_SM_CALL_WITH_DELAY;
		drain_BMC2HOST(bt);	/* discard the soft-reset response */
		BT_STATE_CHANGE(BT_STATE_RESTART,
				SI_SM_CALL_WITH_DELAY);

	case BT_STATE_RESTART:		/* don't reset retries or seq! */
		bt->read_count = 0;
		bt->nonzero_status = 0;
		bt->timeout = bt->BT_CAP_req2rsp;
		BT_STATE_CHANGE(BT_STATE_XACTION_START,
				SI_SM_CALL_WITH_DELAY);

	/*
	 * Get BT Capabilities, using timing of upper level state machine.
	 * Set outreqs to prevent infinite loop on timeout.
	 */
	case BT_STATE_CAPABILITIES_BEGIN:
		bt->BT_CAP_outreqs = 1;
		{
			/* Get BT Capabilities request: NetFn App, cmd 0x36. */
			unsigned char GetBT_CAP[] = { 0x18, 0x36 };
			bt->state = BT_STATE_IDLE;
			bt_start_transaction(bt, GetBT_CAP, sizeof(GetBT_CAP));
		}
		/* On completion, resume in CAPABILITIES_END to parse the reply. */
		bt->complete = BT_STATE_CAPABILITIES_END;
		BT_STATE_CHANGE(BT_STATE_XACTION_START,
				SI_SM_CALL_WITH_DELAY);

	case BT_STATE_CAPABILITIES_END:
		i = bt_get_result(bt, BT_CAP, sizeof(BT_CAP));
		bt_init_data(bt, bt->io);
		/* Valid reply is 8 bytes with completion code (BT_CAP[2]) == 0. */
		if ((i == 8) && !BT_CAP[2]) {
			bt->BT_CAP_outreqs = BT_CAP[3];
			/* request-to-response time: seconds -> microseconds */
			bt->BT_CAP_req2rsp = BT_CAP[6] * 1000000;
			bt->BT_CAP_retries = BT_CAP[7];
		} else
			printk(KERN_WARNING "IPMI BT: using default values\n");
		if (!bt->BT_CAP_outreqs)
			bt->BT_CAP_outreqs = 1;
		printk(KERN_WARNING "IPMI BT: req2rsp=%ld secs retries=%d\n",
			bt->BT_CAP_req2rsp / 1000000L, bt->BT_CAP_retries);
		bt->timeout = bt->BT_CAP_req2rsp;
		return SI_SM_CALL_WITHOUT_DELAY;

	default:	/* should never occur */
		return error_recovery(bt,
				      status,
				      IPMI_ERR_UNSPECIFIED);
	}
	return SI_SM_CALL_WITH_DELAY;
}
static enum si_sm_result bt_event(struct si_sm_data *bt, long time) { unsigned char status, BT_CAP[8]; static enum bt_states last_printed = BT_STATE_PRINTME; int i; status = BT_STATUS; bt->nonzero_status |= status; if ((bt_debug & BT_DEBUG_STATES) && (bt->state != last_printed)) { printk(KERN_WARNING "BT: %s %s TO=%ld - %ld \n", STATE2TXT, STATUS2TXT, bt->timeout, time); last_printed = bt->state; } /* * Commands that time out may still (eventually) provide a response. * This stale response will get in the way of a new response so remove * it if possible (hopefully during IDLE). Even if it comes up later * it will be rejected by its (now-forgotten) seq number. */ if ((bt->state < BT_STATE_WRITE_BYTES) && (status & BT_B2H_ATN)) { drain_BMC2HOST(bt); BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY); } if ((bt->state != BT_STATE_IDLE) && (bt->state < BT_STATE_PRINTME)) { /* check timeout */ bt->timeout -= time; if ((bt->timeout < 0) && (bt->state < BT_STATE_RESET1)) return error_recovery(bt, status, IPMI_TIMEOUT_ERR); } switch (bt->state) { /* * Idle state first checks for asynchronous messages from another * channel, then does some opportunistic housekeeping. 
*/ case BT_STATE_IDLE: if (status & BT_SMS_ATN) { BT_CONTROL(BT_SMS_ATN); /* clear it */ return SI_SM_ATTN; } if (status & BT_H_BUSY) /* clear a leftover H_BUSY */ BT_CONTROL(BT_H_BUSY); /* Read BT capabilities if it hasn't been done yet */ if (!bt->BT_CAP_outreqs) BT_STATE_CHANGE(BT_STATE_CAPABILITIES_BEGIN, SI_SM_CALL_WITHOUT_DELAY); bt->timeout = bt->BT_CAP_req2rsp; BT_SI_SM_RETURN(SI_SM_IDLE); case BT_STATE_XACTION_START: if (status & (BT_B_BUSY | BT_H2B_ATN)) BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY); if (BT_STATUS & BT_H_BUSY) BT_CONTROL(BT_H_BUSY); /* force clear */ BT_STATE_CHANGE(BT_STATE_WRITE_BYTES, SI_SM_CALL_WITHOUT_DELAY); case BT_STATE_WRITE_BYTES: if (status & BT_H_BUSY) BT_CONTROL(BT_H_BUSY); /* clear */ BT_CONTROL(BT_CLR_WR_PTR); write_all_bytes(bt); BT_CONTROL(BT_H2B_ATN); /* can clear too fast to catch */ BT_STATE_CHANGE(BT_STATE_WRITE_CONSUME, SI_SM_CALL_WITHOUT_DELAY); case BT_STATE_WRITE_CONSUME: if (status & (BT_B_BUSY | BT_H2B_ATN)) BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY); BT_STATE_CHANGE(BT_STATE_READ