Example #1
void
tws_init_obfl_q(struct tws_softc *sc)
{
    int i=0;
    u_int64_t paddr;
    u_int32_t paddrh, paddrl, status;

    TWS_TRACE_DEBUG(sc, "entry", 0, sc->obfl_q_overrun);

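    /* Post each sense buffer's header-packet physical address to the host outbound queue. */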
    while ( i < tws_queue_depth ) {
        paddr = sc->sense_bufs[i].hdr_pkt_phy;
        paddrh = (u_int32_t)( paddr>>32);
        paddrl = (u_int32_t) paddr;
        tws_write_reg(sc, TWS_I2O0_HOBQPH, paddrh, 4);
        tws_write_reg(sc, TWS_I2O0_HOBQPL, paddrl, 4);
  
        status = tws_read_reg(sc, TWS_I2O0_STATUS, 4);
        if ( status & TWS_BIT13 ) {
            device_printf(sc->tws_dev,  "OBFL Overrun\n");
            sc->obfl_q_overrun = true;
            break;
        }
        i++;
    }

    if ( i == tws_queue_depth )
        sc->obfl_q_overrun = false;
}
Example #2
static void
tws_err_complete(struct tws_softc *sc, u_int64_t mfa)
{

    struct tws_command_header *hdr;
    struct tws_sense *sen;
    struct tws_request *req;
    u_int16_t req_id;
    u_int32_t reg, status;

    if ( !mfa ) {
        TWS_TRACE_DEBUG(sc, "null mfa", 0, mfa);
        return;
    } else {
        /* lookup the sense */
        sen = tws_find_sense_from_mfa(sc, mfa);
        if ( sen == NULL ) {
            TWS_TRACE_DEBUG(sc, "found null req", 0, mfa);
            return;
        }
        hdr = sen->hdr;
        TWS_TRACE_DEBUG(sc, "sen, hdr", sen, hdr);
        req_id = hdr->header_desc.request_id;
        req = &sc->reqs[req_id];
        TWS_TRACE_DEBUG(sc, "req, id", req, req_id);
        if ( req->error_code != TWS_REQ_SUBMIT_SUCCESS )
            TWS_TRACE_DEBUG(sc, "submit failure?", 0, req->error_code);
    }

    switch (req->type) {
        case TWS_PASSTHRU_REQ :
            tws_passthru_err_complete(req, hdr);
            break;
        case TWS_GETSET_PARAM_REQ :
            tws_getset_param_complete(req);
            break;
        case TWS_SCSI_IO_REQ :
            tws_scsi_err_complete(req, hdr);
            break;

    }

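    /* Return the MFA to the host outbound queue; the register writes are serialized by the io_lock. */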
    lockmgr(&sc->io_lock, LK_EXCLUSIVE);
    hdr->header_desc.size_header = 128;
    reg = (u_int32_t)( mfa>>32);
    tws_write_reg(sc, TWS_I2O0_HOBQPH, reg, 4);
    reg = (u_int32_t)(mfa);
    tws_write_reg(sc, TWS_I2O0_HOBQPL, reg, 4);

    status = tws_read_reg(sc, TWS_I2O0_STATUS, 4);
    if ( status & TWS_BIT13 ) {
        TWS_TRACE_DEBUG(sc, "OBFL Overrun", status, TWS_I2O0_STATUS);
        sc->obfl_q_overrun = true;
        sen->posted = false;
    }
    lockmgr(&sc->io_lock, LK_RELEASE);

}
Example #3
int 
tws_init_ctlr(struct tws_softc *sc)
{
    u_int64_t reg;
    u_int32_t regh, regl;

    TWS_TRACE_DEBUG(sc, "entry", sc, sc->is64bit);
    sc->obfl_q_overrun = false;
    if ( tws_init_connect(sc, tws_queue_depth) ) {
        TWS_TRACE_DEBUG(sc, "initConnect failed", 0, sc->is64bit);
        return(FAILURE);
    }

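    /* Drain stale entries from the controller's outbound queue until the FIFO-empty marker is read. */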
    while( 1 ) {
        regh = tws_read_reg(sc, TWS_I2O0_IOPOBQPH, 4);
        regl = tws_read_reg(sc, TWS_I2O0_IOPOBQPL, 4);
        reg = (((u_int64_t)regh) << 32) | regl;
        TWS_TRACE_DEBUG(sc, "host outbound cleanup",reg, regl);
        if ( regh == TWS_FIFO_EMPTY32 )
            break;
    } 

    tws_init_obfl_q(sc);
    tws_display_ctlr_info(sc);
    tws_write_reg(sc, TWS_I2O0_HOBDBC, ~0, 4);
    tws_turn_on_interrupts(sc);
    return(SUCCESS);
}
Example #4
void
tws_turn_off_interrupts(struct tws_softc *sc)
{

    TWS_TRACE_DEBUG(sc, "entry", 0, 0);
    
    tws_write_reg(sc, TWS_I2O0_HIMASK, ~0, 4);

}
Example #5
void
tws_turn_on_interrupts(struct tws_softc *sc)
{

    TWS_TRACE_DEBUG(sc, "entry", 0, 0);
    /* turn on response and db interrupt only */
    tws_write_reg(sc, TWS_I2O0_HIMASK, TWS_BIT0, 4);

}
Example #6
void
tws_assert_soft_reset(struct tws_softc *sc)
{
    u_int32_t reg;

    reg = tws_read_reg(sc, TWS_I2O0_HIBDB, 4);
    TWS_TRACE_DEBUG(sc, "in bound door bell read ", reg, TWS_I2O0_HIBDB);
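    /* Setting TWS_BIT8 in the host inbound doorbell asserts a controller soft reset. */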
    tws_write_reg(sc, TWS_I2O0_HIBDB, reg | TWS_BIT8, 4);

}
Example #7
void
tws_enable_db_intr(struct tws_softc *sc)
{
    u_int32_t reg;

    TWS_TRACE_DEBUG(sc, "entry", 0, 0);
    reg = tws_read_reg(sc, TWS_I2O0_HIMASK, 4);
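    /* Clear TWS_BIT2 in the host interrupt mask to unmask doorbell interrupts. */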
    reg = reg & ~TWS_BIT2;
    tws_write_reg(sc, TWS_I2O0_HIMASK, reg, 4);
}
Example #8
static void
tws_intr_attn_error(struct tws_softc *sc)
{
    u_int32_t db=0;

    TWS_TRACE(sc, "attn error", 0, 0);
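    /* Clear the outbound doorbell, read the inbound doorbell, and reset the controller. */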
    tws_write_reg(sc, TWS_I2O0_HOBDBC, ~0, 4);
    db = tws_read_reg(sc, TWS_I2O0_IOBDB, 4);
    device_printf(sc->tws_dev, "Micro controller error.\n");
    tws_reset(sc);
}
Example #9
static void
tws_intr_attn_aen(struct tws_softc *sc)
{
    u_int32_t db=0;

    /* mask off db intrs until all the AENs are fetched */
    /* tws_disable_db_intr(sc); */
    tws_fetch_aen((void *)sc);
    tws_write_reg(sc, TWS_I2O0_HOBDBC, TWS_BIT18, 4);
    db = tws_read_reg(sc, TWS_I2O0_IOBDB, 4);

}
Example #10
int
tws_submit_command(struct tws_softc *sc, struct tws_request *req)
{
    u_int32_t regl, regh;
    u_int64_t mfa=0;
    
    /*
     * The mfa register read and write must be in order.
     * Get the io_lock to protect against simultaneous
     * passthru calls.
     */
    mtx_lock(&sc->io_lock);

    if ( sc->obfl_q_overrun ) {
        tws_init_obfl_q(sc);
    }
       
#ifdef TWS_PULL_MODE_ENABLE
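    /* In pull mode the command packet's physical address (with TWS_BIT0 set in the low word) serves as the MFA. */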
    regh = (u_int32_t)(req->cmd_pkt_phy >> 32);
    /* regh = regh | TWS_MSG_ACC_MASK; */ 
    mfa = regh;
    mfa = mfa << 32;
    regl = (u_int32_t)req->cmd_pkt_phy;
    regl = regl | TWS_BIT0;
    mfa = mfa | regl;
#else
    regh = tws_read_reg(sc, TWS_I2O0_HIBQPH, 4);
    mfa = regh;
    mfa = mfa << 32;
    regl = tws_read_reg(sc, TWS_I2O0_HIBQPL, 4);
    mfa = mfa | regl;
#endif

    mtx_unlock(&sc->io_lock);

    if ( mfa == TWS_FIFO_EMPTY ) {
        TWS_TRACE_DEBUG(sc, "inbound fifo empty", mfa, 0);

        /*
         * Generally we should not get here.
         * If the FIFO was empty we can't do anything much;
         * retry later.
         */
        return(TWS_REQ_RET_PEND_NOMFA);

    }

#ifndef TWS_PULL_MODE_ENABLE
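    /* In non-pull mode, copy the command (excluding the header) byte by byte into the controller's MFA window. */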
    for (int i=mfa; i<(sizeof(struct tws_command_packet)+ mfa - 
                            sizeof( struct tws_command_header)); i++) {

        bus_space_write_1(sc->bus_mfa_tag, sc->bus_mfa_handle,i, 
                               ((u_int8_t *)&req->cmd_pkt->cmd)[i-mfa]);

    }
#endif

    if ( req->type == TWS_REQ_TYPE_SCSI_IO ) {
        mtx_lock(&sc->q_lock);
        tws_q_insert_tail(sc, req, TWS_BUSY_Q);
        mtx_unlock(&sc->q_lock);
    }

    /*
     * The mfa register read and write must be in order.
     * Get the io_lock to protect against simultaneous
     * passthru calls.
     */
    mtx_lock(&sc->io_lock);

    tws_write_reg(sc, TWS_I2O0_HIBQPH, regh, 4);
    tws_write_reg(sc, TWS_I2O0_HIBQPL, regl, 4);

    sc->stats.reqs_in++;
    mtx_unlock(&sc->io_lock);
    
    return(TWS_REQ_RET_SUBMIT_SUCCESS);

}
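A minimal caller sketch for the routine above (hypothetical, not part of the driver; tws_try_submit() and the EBUSY mapping are illustration-only assumptions) showing how the TWS_REQ_RET_* return codes might be handled:

static int
tws_try_submit(struct tws_softc *sc, struct tws_request *req)
{
    int ret;

    ret = tws_submit_command(sc, req);
    if ( ret == TWS_REQ_RET_PEND_NOMFA ) {
        /* The inbound FIFO was empty; leave the request pending and retry later. */
        return(EBUSY);
    }
    /* TWS_REQ_RET_SUBMIT_SUCCESS: the controller now owns the request. */
    return(0);
}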
Example #11
static int
tws_init(struct tws_softc *sc)
{

    u_int32_t max_sg_elements;
    u_int32_t dma_mem_size;
    int error;
    u_int32_t reg;

    sc->seq_id = 0;
    if ( tws_queue_depth > TWS_MAX_REQS )
        tws_queue_depth = TWS_MAX_REQS;
    if (tws_queue_depth < TWS_RESERVED_REQS+1)
        tws_queue_depth = TWS_RESERVED_REQS+1;
    sc->is64bit = (sizeof(bus_addr_t) == 8) ? true : false;
    max_sg_elements = (sc->is64bit && !tws_use_32bit_sgls) ? 
                                 TWS_MAX_64BIT_SG_ELEMENTS : 
                                 TWS_MAX_32BIT_SG_ELEMENTS;
    dma_mem_size = (sizeof(struct tws_command_packet) * tws_queue_depth) +
                             (TWS_SECTOR_SIZE) ;
    if ( bus_dma_tag_create(bus_get_dma_tag(sc->tws_dev), /* PCI parent */ 
                            TWS_ALIGNMENT,           /* alignment */
                            0,                       /* boundary */
                            BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
                            BUS_SPACE_MAXADDR,       /* highaddr */
                            NULL, NULL,              /* filter, filterarg */
                            BUS_SPACE_MAXSIZE,       /* maxsize */
                            max_sg_elements,         /* numsegs */
                            BUS_SPACE_MAXSIZE,       /* maxsegsize */
                            0,                       /* flags */
                            NULL, NULL,              /* lockfunc, lockfuncarg */
                            &sc->parent_tag          /* tag */
                           )) {
        TWS_TRACE_DEBUG(sc, "DMA parent tag Create fail", max_sg_elements, 
                                                    sc->is64bit);
        return(ENOMEM);
    }
    /* Inbound message frames require 16-byte alignment.
     * Outbound MFs can live with 4-byte alignment - for now just
     * use 16 for both.
     */
    if ( bus_dma_tag_create(sc->parent_tag,       /* parent */          
                            TWS_IN_MF_ALIGNMENT,  /* alignment */
                            0,                    /* boundary */
                            BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
                            BUS_SPACE_MAXADDR,    /* highaddr */
                            NULL, NULL,           /* filter, filterarg */
                            dma_mem_size,         /* maxsize */
                            1,                    /* numsegs */
                            BUS_SPACE_MAXSIZE,    /* maxsegsize */
                            0,                    /* flags */
                            NULL, NULL,           /* lockfunc, lockfuncarg */
                            &sc->cmd_tag          /* tag */
                           )) {
        TWS_TRACE_DEBUG(sc, "DMA cmd tag Create fail", max_sg_elements, sc->is64bit);
        return(ENOMEM);
    }

    if (bus_dmamem_alloc(sc->cmd_tag, &sc->dma_mem,
                    BUS_DMA_NOWAIT, &sc->cmd_map)) {
        TWS_TRACE_DEBUG(sc, "DMA mem alloc fail", max_sg_elements, sc->is64bit);
        return(ENOMEM);
    }

    /* if bus_dmamem_alloc succeeds then bus_dmamap_load will succeed */
    sc->dma_mem_phys=0;
    error = bus_dmamap_load(sc->cmd_tag, sc->cmd_map, sc->dma_mem,
                    dma_mem_size, tws_dmamap_cmds_load_cbfn,
                    &sc->dma_mem_phys, 0);

   /*
    * Create a dma tag for data buffers; size will be the maximum
    * possible I/O size (128kB).
    */
    if (bus_dma_tag_create(sc->parent_tag,         /* parent */
                           TWS_ALIGNMENT,          /* alignment */
                           0,                      /* boundary */
                           BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
                           BUS_SPACE_MAXADDR,      /* highaddr */
                           NULL, NULL,             /* filter, filterarg */
                           TWS_MAX_IO_SIZE,        /* maxsize */
                           max_sg_elements,        /* nsegments */
                           TWS_MAX_IO_SIZE,        /* maxsegsize */
                           BUS_DMA_ALLOCNOW,       /* flags */
                           busdma_lock_mutex,      /* lockfunc */
                           &sc->io_lock,           /* lockfuncarg */
                           &sc->data_tag           /* tag */)) {
        TWS_TRACE_DEBUG(sc, "DMA cmd tag Create fail", max_sg_elements, sc->is64bit);
        return(ENOMEM);
    }

    sc->reqs = malloc(sizeof(struct tws_request) * tws_queue_depth, M_TWS,
                      M_WAITOK | M_ZERO);
    if ( sc->reqs == NULL ) {
        TWS_TRACE_DEBUG(sc, "malloc failed", 0, sc->is64bit);
        return(ENOMEM);
    }
    sc->sense_bufs = malloc(sizeof(struct tws_sense) * tws_queue_depth, M_TWS,
                      M_WAITOK | M_ZERO);
    if ( sc->sense_bufs == NULL ) {
        TWS_TRACE_DEBUG(sc, "sense malloc failed", 0, sc->is64bit);
        return(ENOMEM);
    }
    sc->scan_ccb = malloc(sizeof(union ccb), M_TWS, M_WAITOK | M_ZERO);
    if ( sc->scan_ccb == NULL ) {
        TWS_TRACE_DEBUG(sc, "ccb malloc failed", 0, sc->is64bit);
        return(ENOMEM);
    }
    if (bus_dmamem_alloc(sc->data_tag, (void **)&sc->ioctl_data_mem,
            (BUS_DMA_NOWAIT | BUS_DMA_ZERO), &sc->ioctl_data_map)) {
        device_printf(sc->tws_dev, "Cannot allocate ioctl data mem\n");
        return(ENOMEM);
    }

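    /* Make sure the controller is ready; try a reset if it is not. */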
    if ( !tws_ctlr_ready(sc) )
        if( !tws_ctlr_reset(sc) )
            return(FAILURE);
    
    bzero(&sc->stats, sizeof(struct tws_stats));
    tws_init_qs(sc);
    tws_turn_off_interrupts(sc);

    /*
     * Enable pull mode by setting bit 1.
     * Setting bit 0 to 1 would enable interrupt coalescing;
     * will revisit.
     */

#ifdef TWS_PULL_MODE_ENABLE

    reg = tws_read_reg(sc, TWS_I2O0_CTL, 4);
    TWS_TRACE_DEBUG(sc, "i20 ctl", reg, TWS_I2O0_CTL);
    tws_write_reg(sc, TWS_I2O0_CTL, reg | TWS_BIT1, 4);

#endif

    TWS_TRACE_DEBUG(sc, "dma_mem_phys", sc->dma_mem_phys, TWS_I2O0_CTL);
    if ( tws_init_reqs(sc, dma_mem_size) == FAILURE )
        return(FAILURE);
    if ( tws_init_aen_q(sc) == FAILURE )
        return(FAILURE);

    return(SUCCESS);
    
} 
Example #12
static int
tws_detach(device_t dev)
{
    struct tws_softc *sc = device_get_softc(dev);
    int i;
    u_int32_t reg;

    TWS_TRACE_DEBUG(sc, "entry", 0, 0);

    mtx_lock(&sc->gen_lock);
    tws_send_event(sc, TWS_UNINIT_START);
    mtx_unlock(&sc->gen_lock);

    /* need to disable interrupts before detaching from CAM */
    tws_turn_off_interrupts(sc);
    /* clear door bell */
    tws_write_reg(sc, TWS_I2O0_HOBDBC, ~0, 4);
    reg = tws_read_reg(sc, TWS_I2O0_HIMASK, 4);
    TWS_TRACE_DEBUG(sc, "turn-off-intr", reg, 0);
    sc->obfl_q_overrun = false;
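    /* Re-issue init-connect with a queue depth of 1 before tearing down. */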
    tws_init_connect(sc, 1);

    /* Teardown the state in our softc created in our attach routine. */
    /* Disconnect the interrupt handler. */
    tws_teardown_intr(sc);

    /* Release irq resource */
    for(i=0;i<sc->irqs;i++) {
        if ( sc->irq_res[i] ){
            if (bus_release_resource(sc->tws_dev,
                     SYS_RES_IRQ, sc->irq_res_id[i], sc->irq_res[i]))
                TWS_TRACE(sc, "bus release irq resource", 
                                       i, sc->irq_res_id[i]);
        }
    }
    if ( sc->intr_type == TWS_MSI ) {
        pci_release_msi(sc->tws_dev);
    }

    tws_cam_detach(sc);

    /* Release memory resource */
    if ( sc->mfa_res ){
        if (bus_release_resource(sc->tws_dev,
                 SYS_RES_MEMORY, sc->mfa_res_id, sc->mfa_res))
            TWS_TRACE(sc, "bus release mem resource", 0, sc->mfa_res_id);
    }
    if ( sc->reg_res ){
        if (bus_release_resource(sc->tws_dev,
                 SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res))
            TWS_TRACE(sc, "bus release mem resource", 0, sc->reg_res_id);
    }

    free(sc->reqs, M_TWS);
    free(sc->sense_bufs, M_TWS);
    free(sc->scan_ccb, M_TWS);
    if (sc->ioctl_data_mem)
            bus_dmamem_free(sc->data_tag, sc->ioctl_data_mem, sc->ioctl_data_map);
    free(sc->aen_q.q, M_TWS);
    free(sc->trace_q.q, M_TWS);
    mtx_destroy(&sc->q_lock);
    mtx_destroy(&sc->sim_lock);
    mtx_destroy(&sc->gen_lock);
    mtx_destroy(&sc->io_lock);
    destroy_dev(sc->tws_cdev);
    sysctl_ctx_free(&sc->tws_clist);
    return (0);
}