int tws_init_ctlr(struct tws_softc *sc) { u_int64_t reg; u_int32_t regh, regl; TWS_TRACE_DEBUG(sc, "entry", sc, sc->is64bit); sc->obfl_q_overrun = false; if ( tws_init_connect(sc, tws_queue_depth) ) { TWS_TRACE_DEBUG(sc, "initConnect failed", 0, sc->is64bit); return(FAILURE); } while( 1 ) { regh = tws_read_reg(sc, TWS_I2O0_IOPOBQPH, 4); regl = tws_read_reg(sc, TWS_I2O0_IOPOBQPL, 4); reg = (((u_int64_t)regh) << 32) | regl; TWS_TRACE_DEBUG(sc, "host outbound cleanup",reg, regl); if ( regh == TWS_FIFO_EMPTY32 ) break; } tws_init_obfl_q(sc); tws_display_ctlr_info(sc); tws_write_reg(sc, TWS_I2O0_HOBDBC, ~0, 4); tws_turn_on_interrupts(sc); return(SUCCESS); }
static void tws_reinit(void *arg) { struct tws_softc *sc = (struct tws_softc *)arg; int timeout_val=0; int try=2; int done=0; // device_printf(sc->tws_dev, "Waiting for Controller Ready\n"); while ( !done && try ) { if ( tws_ctlr_ready(sc) ) { done = 1; break; } else { timeout_val += 5; if ( timeout_val >= TWS_RESET_TIMEOUT ) { timeout_val = 0; if ( try ) tws_assert_soft_reset(sc); try--; } mtx_sleep(sc, &sc->gen_lock, 0, "tws_reinit", 5*hz); } } if (!done) { device_printf(sc->tws_dev, "FAILED to get Controller Ready!\n"); return; } sc->obfl_q_overrun = false; // device_printf(sc->tws_dev, "Sending initConnect\n"); if ( tws_init_connect(sc, tws_queue_depth) ) { TWS_TRACE_DEBUG(sc, "initConnect failed", 0, sc->is64bit); } tws_init_obfl_q(sc); tws_turn_on_interrupts(sc); wakeup_one(sc); }
/*
 * Hand one firmware command over to the controller.
 *
 * Obtains a message frame address (MFA): in pull mode it is built from
 * the command packet's physical address (with TWS_BIT0 set in the low
 * word); otherwise it is popped from the inbound queue registers and the
 * command bytes are copied into controller memory one byte at a time.
 * SCSI I/O requests are queued on the busy queue before the MFA is
 * posted back to the inbound queue, which submits the command.
 *
 * Returns TWS_REQ_RET_SUBMIT_SUCCESS, or TWS_REQ_RET_PEND_NOMFA when the
 * inbound fifo had no free MFA (caller should retry later).
 */
int
tws_submit_command(struct tws_softc *sc, struct tws_request *req)
{
    u_int32_t regl, regh;
    u_int64_t mfa=0;

    /*
     * mfa register read and write must be in order.
     * Get the io_lock to protect against simultinous
     * passthru calls
     */
    mtx_lock(&sc->io_lock);

    /* Rebuild the outbound-free-list queue if it previously overran. */
    if ( sc->obfl_q_overrun ) {
        tws_init_obfl_q(sc);
    }

#ifdef TWS_PULL_MODE_ENABLE
    /* Pull mode: the MFA is the command packet's 64-bit bus address. */
    regh = (u_int32_t)(req->cmd_pkt_phy >> 32);
    /* regh = regh | TWS_MSG_ACC_MASK; */
    mfa = regh;
    mfa = mfa << 32;
    regl = (u_int32_t)req->cmd_pkt_phy;
    regl = regl | TWS_BIT0;   /* flag bit in the low word; presumably marks pull mode — TODO confirm */
    mfa = mfa | regl;
#else
    /* Non-pull mode: pop a free MFA from the inbound queue (high then low). */
    regh = tws_read_reg(sc, TWS_I2O0_HIBQPH, 4);
    mfa = regh;
    mfa = mfa << 32;
    regl = tws_read_reg(sc, TWS_I2O0_HIBQPL, 4);
    mfa = mfa | regl;
#endif

    mtx_unlock(&sc->io_lock);

    if ( mfa == TWS_FIFO_EMPTY ) {
        TWS_TRACE_DEBUG(sc, "inbound fifo empty", mfa, 0);
        /*
         * Generaly we should not get here.
         * If the fifo was empty we can't do any thing much
         * retry later
         */
        return(TWS_REQ_RET_PEND_NOMFA);
    }

#ifndef TWS_PULL_MODE_ENABLE
    /*
     * Copy the command (packet minus its header) into controller memory
     * at offset mfa, byte by byte through the MFA bus-space window.
     */
    for (int i=mfa; i<(sizeof(struct tws_command_packet)+ mfa - sizeof( struct tws_command_header)); i++) {
        bus_space_write_1(sc->bus_mfa_tag, sc->bus_mfa_handle,i, ((u_int8_t *)&req->cmd_pkt->cmd)[i-mfa]);
    }
#endif

    /* Track in-flight SCSI I/O on the busy queue before posting the MFA. */
    if ( req->type == TWS_REQ_TYPE_SCSI_IO ) {
        mtx_lock(&sc->q_lock);
        tws_q_insert_tail(sc, req, TWS_BUSY_Q);
        mtx_unlock(&sc->q_lock);
    }

    /*
     * mfa register read and write must be in order.
     * Get the io_lock to protect against simultinous
     * passthru calls
     */
    mtx_lock(&sc->io_lock);
    /* Posting high then low submits the command to the firmware. */
    tws_write_reg(sc, TWS_I2O0_HIBQPH, regh, 4);
    tws_write_reg(sc, TWS_I2O0_HIBQPL, regl, 4);
    sc->stats.reqs_in++;
    mtx_unlock(&sc->io_lock);

    return(TWS_REQ_RET_SUBMIT_SUCCESS);
}