Example 1
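tws_drain_busy_queue() in its DragonFly form (lockmgr(9) locking): each request is pulled off the busy queue under q_lock, its timeout callout is stopped, the request is unmapped, the CCB is completed back to CAM with CAM_REQUEUE_REQ under sim_lock, and the request is returned to the free queue.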
static void
tws_drain_busy_queue(struct tws_softc *sc)
{

    struct tws_request *req;
    TWS_TRACE_DEBUG(sc, "entry", 0, 0);

    lockmgr(&sc->q_lock, LK_EXCLUSIVE);
    req = tws_q_remove_tail(sc, TWS_BUSY_Q);
    lockmgr(&sc->q_lock, LK_RELEASE);
    while ( req ) {
        callout_stop(req->ccb_ptr->ccb_h.timeout_ch);
        tws_unmap_request(req->sc, req);

        TWS_TRACE_DEBUG(sc, "drained", 0, req->request_id);

        lockmgr(&sc->sim_lock, LK_EXCLUSIVE);
        req->ccb_ptr->ccb_h.status = CAM_REQUEUE_REQ;
        xpt_done(req->ccb_ptr);
        lockmgr(&sc->sim_lock, LK_RELEASE);

        lockmgr(&sc->q_lock, LK_EXCLUSIVE);
        tws_q_insert_tail(sc, req, TWS_FREE_Q);
        req = tws_q_remove_tail(sc, TWS_BUSY_Q);
        lockmgr(&sc->q_lock, LK_RELEASE);
    }

}
Example 2
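The FreeBSD form of the same drain routine (mtx(9) locking, untimeout(9)): in addition to requeueing, it tags the request with TWS_REQ_RET_RESET and marks the CCB with CAM_REQUEUE_REQ and CAM_SCSI_BUS_RESET after clearing CAM_SIM_QUEUED.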
static void
tws_drain_busy_queue(struct tws_softc *sc)
{
    struct tws_request *req;
    union ccb          *ccb;
    TWS_TRACE_DEBUG(sc, "entry", 0, 0);

    mtx_lock(&sc->q_lock);
    req = tws_q_remove_tail(sc, TWS_BUSY_Q);
    mtx_unlock(&sc->q_lock);
    while ( req ) {
        TWS_TRACE_DEBUG(sc, "moved to TWS_COMPLETE_Q", 0, req->request_id);
        untimeout(tws_timeout, req, req->ccb_ptr->ccb_h.timeout_ch);

        req->error_code = TWS_REQ_RET_RESET;
        ccb = (union ccb *)(req->ccb_ptr);

        ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
        ccb->ccb_h.status |=  CAM_REQUEUE_REQ;
        ccb->ccb_h.status |=  CAM_SCSI_BUS_RESET;

        tws_unmap_request(req->sc, req);

        mtx_lock(&sc->sim_lock);
        xpt_done(req->ccb_ptr);
        mtx_unlock(&sc->sim_lock);

        mtx_lock(&sc->q_lock);
        tws_q_insert_tail(sc, req, TWS_FREE_Q);
        req = tws_q_remove_tail(sc, TWS_BUSY_Q);
        mtx_unlock(&sc->q_lock);
    } 
}
Example 3
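tws_scsi_err_complete() translates controller error codes into CAM status bits, copies the controller's sense data into the CCB and flags it CAM_AUTOSNS_VALID, completes the CCB under sim_lock, and moves the request from the busy queue to the free queue.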
static void
tws_scsi_err_complete(struct tws_request *req, struct tws_command_header *hdr)
{
    u_int8_t *sense_data;
    struct tws_softc *sc = req->sc;
    union ccb *ccb = req->ccb_ptr;

    TWS_TRACE_DEBUG(sc, "sbe, cmd_status", hdr->status_block.error,
                                 req->cmd_pkt->cmd.pkt_a.status);
    if ( hdr->status_block.error == TWS_ERROR_LOGICAL_UNIT_NOT_SUPPORTED ||
         hdr->status_block.error == TWS_ERROR_UNIT_OFFLINE ) {

        if ( ccb->ccb_h.target_lun ) {
            TWS_TRACE_DEBUG(sc, "invalid lun error",0,0);
            ccb->ccb_h.status |= CAM_LUN_INVALID;
        } else {
            TWS_TRACE_DEBUG(sc, "invalid target error",0,0);
            ccb->ccb_h.status |= CAM_TID_INVALID;
        }

    } else {
        TWS_TRACE_DEBUG(sc, "scsi status  error",0,0);
        ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
        if ((ccb->csio.cdb_io.cdb_bytes[0] == 0x1A) &&
            (hdr->status_block.error == TWS_ERROR_NOT_SUPPORTED)) {
            ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID;
            TWS_TRACE_DEBUG(sc, "page mode not supported",0,0);
        }
    }

    /* If no status was set above, mark the request as completed with an error. */
    if (ccb->ccb_h.status == 0)
        ccb->ccb_h.status = CAM_REQ_CMP_ERR;

    sense_data = (u_int8_t *)&ccb->csio.sense_data;
    if (sense_data) {
        memcpy(sense_data, hdr->sense_data, TWS_SENSE_DATA_LENGTH );
        ccb->csio.sense_len = TWS_SENSE_DATA_LENGTH;
        ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
    }
    ccb->csio.scsi_status = req->cmd_pkt->cmd.pkt_a.status;

    ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
    lockmgr(&sc->sim_lock, LK_EXCLUSIVE);
    xpt_done(ccb);
    lockmgr(&sc->sim_lock, LK_RELEASE);

    callout_stop(req->ccb_ptr->ccb_h.timeout_ch);
    tws_unmap_request(req->sc, req);
    lockmgr(&sc->q_lock, LK_EXCLUSIVE);
    tws_q_remove_request(sc, req, TWS_BUSY_Q);
    tws_q_insert_tail(sc, req, TWS_FREE_Q);
    lockmgr(&sc->q_lock, LK_RELEASE);

}
Example 4
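tws_release_request() in its FreeBSD form: returns a request to the free queue under q_lock.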
void
tws_release_request(struct tws_request *req)
{

    struct tws_softc *sc = req->sc;

    TWS_TRACE_DEBUG(sc, "entry", sc, 0);
    mtx_lock(&sc->q_lock);
    tws_q_insert_tail(sc, req, TWS_FREE_Q);
    mtx_unlock(&sc->q_lock);
}
Example 5
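The same release routine under DragonFly's lockmgr(9) API.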
void
tws_release_request(struct tws_request *req)
{

    struct tws_softc *sc = req->sc;

    TWS_TRACE_DEBUG(sc, "entry", sc, 0);
    lockmgr(&sc->q_lock, LK_EXCLUSIVE);
    tws_q_insert_tail(sc, req, TWS_FREE_Q);
    lockmgr(&sc->q_lock, LK_RELEASE);
}
Example 6
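tws_init_reqs() carves the preallocated DMA region into one command packet per request, creates a busdma map for each, records the physical addresses of the packet header and command body, initializes the timeout callout, and puts all non-reserved requests on the free queue.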
static int
tws_init_reqs(struct tws_softc *sc, u_int32_t dma_mem_size)
{

    struct tws_command_packet *cmd_buf;
    int i;

    cmd_buf = (struct tws_command_packet *)sc->dma_mem;

    bzero(cmd_buf, dma_mem_size);
    TWS_TRACE_DEBUG(sc, "phy cmd", sc->dma_mem_phys, 0);
    mtx_lock(&sc->q_lock);
    for ( i=0; i< tws_queue_depth; i++)
    {
        if (bus_dmamap_create(sc->data_tag, 0, &sc->reqs[i].dma_map)) {
            /* log an ENOMEM failure msg here */
            mtx_unlock(&sc->q_lock);
            return(FAILURE);
        } 
        sc->reqs[i].cmd_pkt =  &cmd_buf[i];

        sc->sense_bufs[i].hdr = &cmd_buf[i].hdr ;
        sc->sense_bufs[i].hdr_pkt_phy = sc->dma_mem_phys + 
                              (i * sizeof(struct tws_command_packet));

        sc->reqs[i].cmd_pkt_phy = sc->dma_mem_phys + 
                              sizeof(struct tws_command_header) +
                              (i * sizeof(struct tws_command_packet));
        sc->reqs[i].request_id = i;
        sc->reqs[i].sc = sc;

        sc->reqs[i].cmd_pkt->hdr.header_desc.size_header = 128;

        callout_init(&sc->reqs[i].timeout, CALLOUT_MPSAFE);
        sc->reqs[i].state = TWS_REQ_STATE_FREE;
        if ( i >= TWS_RESERVED_REQS )
            tws_q_insert_tail(sc, &sc->reqs[i], TWS_FREE_Q);
    }
    mtx_unlock(&sc->q_lock);
    return(SUCCESS);
}
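As a cross-check of the address arithmetic in Example 6, here is a minimal standalone sketch of the per-slot layout. HDR_SIZE and PKT_SIZE are hypothetical placeholder values standing in for sizeof(struct tws_command_header) and sizeof(struct tws_command_packet), which this sketch cannot see; hdr_pkt_phy() and cmd_pkt_phy() are illustrative helpers, not driver functions.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical placeholder sizes; the driver uses the real sizeof() values. */
#define HDR_SIZE  32u
#define PKT_SIZE  1024u

/* Physical address of slot i's packet header (what sc->sense_bufs[i].hdr_pkt_phy holds). */
static uint64_t hdr_pkt_phy(uint64_t dma_mem_phys, unsigned int i)
{
    return dma_mem_phys + (uint64_t)i * PKT_SIZE;
}

/* Physical address of slot i's command body (what sc->reqs[i].cmd_pkt_phy holds):
 * the command follows the header inside the same packet. */
static uint64_t cmd_pkt_phy(uint64_t dma_mem_phys, unsigned int i)
{
    return hdr_pkt_phy(dma_mem_phys, i) + HDR_SIZE;
}

int main(void)
{
    uint64_t base = 0x10000000ULL;
    unsigned int i;

    for (i = 0; i < 3; i++)
        printf("slot %u: hdr_pkt_phy=%#llx cmd_pkt_phy=%#llx\n", i,
               (unsigned long long)hdr_pkt_phy(base, i),
               (unsigned long long)cmd_pkt_phy(base, i));
    return 0;
}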
Example 7
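tws_scsi_complete() in its FreeBSD form: removes a finished request from the busy queue, cancels its timeout, unmaps it, completes the CCB with CAM_REQ_CMP under sim_lock, and returns the request to the free queue.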
static void
tws_scsi_complete(struct tws_request *req)
{
    struct tws_softc *sc = req->sc;

    mtx_lock(&sc->q_lock);
    tws_q_remove_request(sc, req, TWS_BUSY_Q);
    mtx_unlock(&sc->q_lock);

    untimeout(tws_timeout, req, req->ccb_ptr->ccb_h.timeout_ch);
    tws_unmap_request(req->sc, req);


    req->ccb_ptr->ccb_h.status = CAM_REQ_CMP;
    mtx_lock(&sc->sim_lock);
    xpt_done(req->ccb_ptr);
    mtx_unlock(&sc->sim_lock);

    mtx_lock(&sc->q_lock);
    tws_q_insert_tail(sc, req, TWS_FREE_Q);
    mtx_unlock(&sc->q_lock);
}
Example 8
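The DragonFly counterpart of Example 7, with lockmgr(9) in place of mtx(9) and callout_stop(9) in place of untimeout(9).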
static void
tws_scsi_complete(struct tws_request *req)
{
    struct tws_softc *sc = req->sc;

    lockmgr(&sc->q_lock, LK_EXCLUSIVE);
    tws_q_remove_request(sc, req, TWS_BUSY_Q);
    lockmgr(&sc->q_lock, LK_RELEASE);

    callout_stop(req->ccb_ptr->ccb_h.timeout_ch);
    tws_unmap_request(req->sc, req);


    lockmgr(&sc->sim_lock, LK_EXCLUSIVE);
    req->ccb_ptr->ccb_h.status = CAM_REQ_CMP;
    xpt_done(req->ccb_ptr);
    lockmgr(&sc->sim_lock, LK_RELEASE);

    lockmgr(&sc->q_lock, LK_EXCLUSIVE);
    tws_q_insert_tail(sc, req, TWS_FREE_Q);
    lockmgr(&sc->q_lock, LK_RELEASE);

}
Example 9
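tws_submit_command() posts a command to the controller: it obtains a 64-bit message frame address (MFA), built from the request's own packet address in pull mode or read from the inbound queue registers otherwise, copies the command body into the MFA window in non-pull mode, places SCSI I/O requests on the busy queue, and writes the MFA halves back to the controller under io_lock.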
int
tws_submit_command(struct tws_softc *sc, struct tws_request *req)
{
    u_int32_t regl, regh;
    u_int64_t mfa=0;
    
    /*
     * The mfa register read and write must be in order.
     * Get the io_lock to protect against simultaneous
     * passthru calls.
     */
    mtx_lock(&sc->io_lock);

    if ( sc->obfl_q_overrun ) {
        tws_init_obfl_q(sc);
    }
       
#ifdef TWS_PULL_MODE_ENABLE
    regh = (u_int32_t)(req->cmd_pkt_phy >> 32);
    /* regh = regh | TWS_MSG_ACC_MASK; */ 
    mfa = regh;
    mfa = mfa << 32;
    regl = (u_int32_t)req->cmd_pkt_phy;
    regl = regl | TWS_BIT0;
    mfa = mfa | regl;
#else
    regh = tws_read_reg(sc, TWS_I2O0_HIBQPH, 4);
    mfa = regh;
    mfa = mfa << 32;
    regl = tws_read_reg(sc, TWS_I2O0_HIBQPL, 4);
    mfa = mfa | regl;
#endif

    mtx_unlock(&sc->io_lock);

    if ( mfa == TWS_FIFO_EMPTY ) {
        TWS_TRACE_DEBUG(sc, "inbound fifo empty", mfa, 0);

        /*
         * Generally we should not get here.
         * If the fifo was empty there is not much we can do;
         * retry later.
         */
        return(TWS_REQ_RET_PEND_NOMFA);

    }

#ifndef TWS_PULL_MODE_ENABLE
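    /*
     * Non-pull mode: copy the command body (everything in the packet
     * after the header) into the controller's MFA window a byte at a time.
     */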
    for (int i=mfa; i<(sizeof(struct tws_command_packet)+ mfa - 
                            sizeof( struct tws_command_header)); i++) {

        bus_space_write_1(sc->bus_mfa_tag, sc->bus_mfa_handle,i, 
                               ((u_int8_t *)&req->cmd_pkt->cmd)[i-mfa]);

    }
#endif

    if ( req->type == TWS_REQ_TYPE_SCSI_IO ) {
        mtx_lock(&sc->q_lock);
        tws_q_insert_tail(sc, req, TWS_BUSY_Q);
        mtx_unlock(&sc->q_lock);
    }

    /*
     * The mfa register read and write must be in order.
     * Get the io_lock to protect against simultaneous
     * passthru calls.
     */
    mtx_lock(&sc->io_lock);

    tws_write_reg(sc, TWS_I2O0_HIBQPH, regh, 4);
    tws_write_reg(sc, TWS_I2O0_HIBQPL, regl, 4);

    sc->stats.reqs_in++;
    mtx_unlock(&sc->io_lock);
    
    return(TWS_REQ_RET_SUBMIT_SUCCESS);

}
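The shift/OR sequence that builds the MFA in Example 9 is worth isolating. Below is a minimal standalone sketch of the composition used in the non-pull path and of the pull-mode split; compose_mfa() and split_mfa() are hypothetical helpers, and TWS_BIT0 is assumed to name bit 0 of the low register half, as the identifier suggests.

#include <stdint.h>
#include <stdio.h>

/* Compose a 64-bit MFA from the high/low register halves, mirroring the
 * shift/OR sequence in tws_submit_command(). */
static uint64_t compose_mfa(uint32_t regh, uint32_t regl)
{
    return ((uint64_t)regh << 32) | regl;
}

/* Pull-mode split: derive the register halves from the command packet's
 * physical address, setting bit 0 of the low half (TWS_BIT0 assumed). */
static void split_mfa(uint64_t cmd_pkt_phy, uint32_t *regh, uint32_t *regl)
{
    *regh = (uint32_t)(cmd_pkt_phy >> 32);
    *regl = (uint32_t)cmd_pkt_phy | 1u;
}

int main(void)
{
    uint32_t regh, regl;

    split_mfa(0x123456789abcdef0ULL, &regh, &regl);
    printf("mfa = %#llx\n", (unsigned long long)compose_mfa(regh, regl));
    return 0;
}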