/*
 * Function name:	tw_osli_print_ctlr_stats
 * Description:		For being called from ddb.  Prints OSL controller stats
 *
 * Input:		sc -- ptr to OSL internal controller context
 * Output:		None
 * Return value:	None
 */
TW_VOID
tw_osli_print_ctlr_stats(struct twa_softc *sc)
{
	twa_printf(sc, "osl_ctlr_ctxt = %p\n", sc);
	/* Dump current/high-water lengths of the OSL free and busy queues. */
	twa_printf(sc, "OSLq type current max\n");
	/* NOTE(review): %04d assumes cur_len/max_len are plain ints -- confirm
	 * against the q_stats declaration. */
	twa_printf(sc, "free %04d %04d\n",
		sc->q_stats[TW_OSLI_FREE_Q].cur_len,
		sc->q_stats[TW_OSLI_FREE_Q].max_len);
	twa_printf(sc, "busy %04d %04d\n",
		sc->q_stats[TW_OSLI_BUSY_Q].cur_len,
		sc->q_stats[TW_OSLI_BUSY_Q].max_len);
}
/*
 * Function name:	twa_print_req_info
 * Description:		For being called from ddb.  Calls functions that print
 *			OSL and CL internal details for the request.
 *
 * Input:		req -- ptr to OSL internal request context
 * Output:		None
 * Return value:	None
 */
TW_VOID
twa_print_req_info(struct tw_osli_req_context *req)
{
	struct twa_softc	*sc = req->ctlr;

	/* Dump every OSL-level field of the request in one shot. */
	twa_printf(sc, "OSL details for request:\n");
	twa_printf(sc, "osl_req_ctxt = %p, cl_req_ctxt = %p\n"
		"data = %p, length = 0x%x, real_data = %p, real_length = 0x%x\n"
		"state = 0x%x, flags = 0x%x, error = 0x%x, orig_req = %p\n"
		"next_req = %p, prev_req = %p, dma_map = %p\n",
		req->req_handle.osl_req_ctxt, req->req_handle.cl_req_ctxt,
		req->data, req->length, req->real_data, req->real_length,
		req->state, req->flags, req->error_code, req->orig_req,
		req->link.next, req->link.prev, req->dma_map);
	/* Let the common layer print its view of the same request. */
	tw_cl_print_req_info(&(req->req_handle));
}
/* * Function name: twa_print_request * Description: Prints a given request if it's in the wrong queue. * * Input: tr -- ptr to request pkt * req_type-- expected status of the given request * Output: None * Return value: None */ void twa_print_request(struct twa_request *tr, int req_type) { struct twa_softc *sc = tr->tr_sc; struct twa_command_packet *cmdpkt = tr->tr_command; struct twa_command_9k *cmd9k; union twa_command_7k *cmd7k; u_int8_t *cdb; int cmd_phys_addr; if (tr->tr_status != req_type) { twa_printf(sc, "Invalid %s request %p in queue! req_type = %x, queue_type = %x\n", (tr->tr_cmd_pkt_type & TWA_CMD_PKT_TYPE_INTERNAL) ? "INTERNAL" : "EXTERNAL", tr, tr->tr_status, req_type); if (tr->tr_cmd_pkt_type & TWA_CMD_PKT_TYPE_9K) { cmd9k = &(cmdpkt->command.cmd_pkt_9k); cmd_phys_addr = cmd9k->sg_list[0].address; twa_printf(sc, "9K cmd = %x %x %x %x %x %x %x %x %jx\n", cmd9k->command.opcode, cmd9k->command.reserved, cmd9k->unit, cmd9k->request_id, cmd9k->status, cmd9k->sgl_offset, cmd9k->sgl_entries, cmd_phys_addr, (uintmax_t)cmd9k->sg_list[0].length); cdb = (u_int8_t *)(cmdpkt->command.cmd_pkt_9k.cdb); twa_printf(sc, "cdb = %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x\n", cdb[0], cdb[1], cdb[2], cdb[3], cdb[4], cdb[5], cdb[6], cdb[7], cdb[8], cdb[9], cdb[10], cdb[11], cdb[12], cdb[13], cdb[14], cdb[15]); } else { cmd7k = &(cmdpkt->command.cmd_pkt_7k); twa_printf(sc, "7K cmd = %x %x %x %x %x %x %x %x %x\n", cmd7k->generic.opcode, cmd7k->generic.sgl_offset, cmd7k->generic.size, cmd7k->generic.request_id, cmd7k->generic.unit, cmd7k->generic.host_id, cmd7k->generic.status, cmd7k->generic.flags, cmd7k->generic.count); } cmd_phys_addr = (int)(tr->tr_cmd_phys); twa_printf(sc, "cmdphys=0x%x data=%p length=0x%jx\n", cmd_phys_addr, tr->tr_data, (uintmax_t)tr->tr_length); twa_printf(sc, "req_id=0x%x flags=0x%x callback=%p private=%p\n", tr->tr_request_id, tr->tr_flags, tr->tr_callback, tr->tr_private); } }
/*
 * Function name:	twa_scsi_complete
 * Description:		Called to complete CAM scsi requests.
 *
 * Input:		tr -- ptr to request pkt to be completed
 * Output:		None
 * Return value:	None
 */
void
twa_scsi_complete(struct twa_request *tr)
{
	struct twa_softc		*sc = tr->tr_sc;
	struct twa_command_header	*cmd_hdr = &(tr->tr_command->cmd_hdr);
	struct twa_command_9k		*cmd = &(tr->tr_command->command.cmd_pkt_9k);
	union ccb			*ccb = (union ccb *)(tr->tr_private);
	u_int16_t			error;
	u_int8_t			*cdb;

	if (tr->tr_error) {
		/* OSL-level error: translate the errno into a CAM status. */
		if (tr->tr_error == EBUSY)
			ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		else if (tr->tr_error == EFBIG)
			ccb->ccb_h.status = CAM_REQ_TOO_BIG;
		else
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
	} else {
		if (cmd->status) {
			/* Firmware reported an error for this command. */
			twa_dbg_dprint(1, sc, "req_id = 0x%x, status = 0x%x",
				cmd->request_id, cmd->status);

			error = cmd_hdr->status_block.error;
			if ((error == TWA_ERROR_LOGICAL_UNIT_NOT_SUPPORTED) ||
					(error == TWA_ERROR_UNIT_OFFLINE)) {
				/* Unit doesn't exist/is offline: invalid target. */
				twa_dbg_dprint(3, sc, "Unsupported unit. PTL = %x %x %x",
					ccb->ccb_h.path_id, ccb->ccb_h.target_id,
					ccb->ccb_h.target_lun);
				ccb->ccb_h.status |= CAM_TID_INVALID;
			} else {
				twa_dbg_dprint(2, sc, "cmd = %x %x %x %x %x %x %x",
					cmd->command.opcode,
					cmd->command.reserved,
					cmd->unit,
					cmd->request_id,
					cmd->status,
					cmd->sgl_offset,
					cmd->sgl_entries);

				cdb = (u_int8_t *)(cmd->cdb);
				twa_dbg_dprint(2, sc, "cdb = %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x",
					cdb[0], cdb[1], cdb[2], cdb[3],
					cdb[4], cdb[5], cdb[6], cdb[7],
					cdb[8], cdb[9], cdb[10], cdb[11],
					cdb[12], cdb[13], cdb[14], cdb[15]);

				/* Ensure the firmware-supplied description string
				 * is NUL-terminated before printing it. */
				cmd_hdr->err_specific_desc[sizeof(cmd_hdr->err_specific_desc) - 1] = '\0';
				/*
				 * Print the error. Firmware doesn't yet support
				 * the 'Mode Sense' cmd. Don't print if the cmd
				 * is 'Mode Sense', and the error is 'Invalid field
				 * in CDB'.
				 */
				if (! ((cdb[0] == 0x1A) && (error == 0x10D)))
					twa_printf(sc, "SCSI cmd = 0x%x: ERROR: (0x%02X: 0x%04X): %s: %s\n",
						cdb[0],
						TWA_MESSAGE_SOURCE_CONTROLLER_ERROR,
						error,
						twa_find_msg_string(twa_error_table, error),
						cmd_hdr->err_specific_desc);
			}

			/* Hand the firmware-supplied sense data back to CAM. */
			bcopy(cmd_hdr->sense_data, &(ccb->csio.sense_data),
				TWA_SENSE_DATA_LENGTH);
			ccb->csio.sense_len = TWA_SENSE_DATA_LENGTH;
			ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID;
		} else
			ccb->ccb_h.status = CAM_REQ_CMP;

		ccb->csio.scsi_status = cmd->status;
		/* If simq is frozen, unfreeze it. */
		if (sc->twa_state & TWA_STATE_SIMQ_FROZEN)
			twa_allow_new_requests(sc, (void *)ccb);
	}

	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
	xpt_done(ccb);
}
/*
 * Function name:	twa_action
 * Description:		Driver entry point for CAM's use.
 *
 * Input:		sim	-- sim corresponding to the ctlr
 *			ccb	-- ptr to CAM request
 * Output:		None
 * Return value:	None
 */
void
twa_action(struct cam_sim *sim, union ccb *ccb)
{
	struct twa_softc	*sc = (struct twa_softc *)cam_sim_softc(sim);
	struct ccb_hdr		*ccb_h = &(ccb->ccb_h);

	switch (ccb_h->func_code) {
	case XPT_SCSI_IO:	/* SCSI I/O */
	{
		struct twa_request	*tr;

		/* Requeue if the simq is frozen or no request pkt is free. */
		if ((sc->twa_state & TWA_STATE_SIMQ_FROZEN) ||
				((tr = twa_get_request(sc)) == NULL)) {
			twa_dbg_dprint(2, sc, "simq frozen/Cannot get request pkt.");
			/*
			 * Freeze the simq to maintain ccb ordering.  The next
			 * ccb that gets completed will unfreeze the simq.
			 */
			twa_disallow_new_requests(sc);
			ccb_h->status |= CAM_REQUEUE_REQ;
			xpt_done(ccb);
			break;
		}
		tr->tr_cmd_pkt_type |= TWA_CMD_PKT_TYPE_EXTERNAL;
		tr->tr_private = ccb;
		tr->tr_callback = twa_complete_io;
		/* On failure, twa_execute_scsi has already completed the ccb
		 * (for external pkts); just return the request to the pool. */
		if (twa_execute_scsi(tr, ccb))
			twa_release_request(tr);
		break;
	}

	case XPT_ABORT:
		/* Individual command aborts are not supported. */
		twa_dbg_dprint(2, sc, "Abort request");
		ccb_h->status = CAM_UA_ABORT;
		xpt_done(ccb);
		break;

	case XPT_RESET_BUS:
		twa_printf(sc, "Reset Bus request from CAM...\n");
		if (twa_reset(sc)) {
			twa_printf(sc, "Reset Bus failed!\n");
			ccb_h->status = CAM_REQ_CMP_ERR;
		} else
			ccb_h->status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;

	case XPT_SET_TRAN_SETTINGS:
		twa_dbg_dprint(3, sc, "XPT_SET_TRAN_SETTINGS");
		/*
		 * This command is not supported, since it's very specific
		 * to SCSI, and we are doing ATA.
		 */
		ccb_h->status = CAM_FUNC_NOTAVAIL;
		xpt_done(ccb);
		break;

	case XPT_GET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings	*cts = &ccb->cts;

		twa_dbg_dprint(3, sc, "XPT_GET_TRAN_SETTINGS");
		/* Report disconnect/tagged queueing as valid but disabled. */
		cts->valid = (CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID);
		cts->flags &= ~(CCB_TRANS_DISC_ENB | CCB_TRANS_TAG_ENB);
		ccb_h->status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}

	case XPT_CALC_GEOMETRY:
	{
		struct ccb_calc_geometry	*geom;

		twa_dbg_dprint(3, sc, "XPT_CALC_GEOMETRY request");
		geom = &ccb->ccg;

		/* Fake a larger geometry for volumes over 1GB. */
		if (geom->volume_size > 0x200000) /* 1 GB */ {
			geom->heads = 255;
			geom->secs_per_track = 63;
		} else {
			geom->heads = 64;
			geom->secs_per_track = 32;
		}
		geom->cylinders = geom->volume_size /
					(geom->heads * geom->secs_per_track);
		ccb_h->status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}

	case XPT_PATH_INQ:    /* Path inquiry -- get twa properties */
	{
		struct ccb_pathinq	*path_inq = &ccb->cpi;

		twa_dbg_dprint(3, sc, "XPT_PATH_INQ request");

		path_inq->version_num = 1;
		path_inq->hba_inquiry = 0;
		path_inq->target_sprt = 0;
		path_inq->hba_misc = 0;
		path_inq->hba_eng_cnt = 0;
		path_inq->max_target = TWA_MAX_UNITS;
		path_inq->max_lun = 0;	/* only LUN 0 is supported */
		path_inq->unit_number = cam_sim_unit(sim);
		path_inq->bus_id = cam_sim_bus(sim);
		path_inq->initiator_id = 12;
		path_inq->base_transfer_speed = 100000;
		strncpy(path_inq->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(path_inq->hba_vid, "3ware", HBA_IDLEN);
		strncpy(path_inq->dev_name, cam_sim_name(sim), DEV_IDLEN);
		ccb_h->status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}

	default:
		twa_dbg_dprint(3, sc, "func_code = %x", ccb_h->func_code);
		ccb_h->status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
}
/*
 * Function name:	twa_execute_scsi
 * Description:		Build a fw cmd, based on a CAM style ccb, and
 *			send it down.
 *
 * Input:		tr	-- ptr to request pkt
 *			ccb	-- ptr to CAM style ccb
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
int
twa_execute_scsi(struct twa_request *tr, union ccb *ccb)
{
	struct twa_softc		*sc = tr->tr_sc;
	struct twa_command_packet	*cmdpkt;
	struct twa_command_9k		*cmd9k;
	struct ccb_hdr			*ccb_h = &(ccb->ccb_h);
	struct ccb_scsiio		*csio = &(ccb->csio);
	int				error;

	twa_dbg_dprint(3, sc, "SCSI I/O request 0x%x",
			csio->cdb_io.cdb_bytes[0]);

	/* Reject targets beyond what the controller supports. */
	if (ccb_h->target_id >= TWA_MAX_UNITS) {
		twa_dbg_dprint(3, sc, "Invalid target. PTL = %x %x %x",
			ccb_h->path_id, ccb_h->target_id, ccb_h->target_lun);
		ccb_h->status |= CAM_TID_INVALID;
		/* Only externally-originated (CAM) requests are completed
		 * here; the caller cleans up internal ones. */
		if (tr->tr_cmd_pkt_type & TWA_CMD_PKT_TYPE_EXTERNAL)
			xpt_done(ccb);
		return(1);
	}
	/* Only LUN 0 is supported. */
	if (ccb_h->target_lun != 0) {
		twa_dbg_dprint(3, sc, "Invalid lun. PTL = %x %x %x",
			ccb_h->path_id, ccb_h->target_id, ccb_h->target_lun);
		ccb_h->status |= CAM_LUN_INVALID;
		if (tr->tr_cmd_pkt_type & TWA_CMD_PKT_TYPE_EXTERNAL)
			xpt_done(ccb);
		return(1);
	}
	/* Physical CDB addresses are not supported. */
	if(ccb_h->flags & CAM_CDB_PHYS) {
		twa_printf(sc, "Physical CDB address!\n");
		ccb_h->status = CAM_REQ_CMP_ERR;
		if (tr->tr_cmd_pkt_type & TWA_CMD_PKT_TYPE_EXTERNAL)
			xpt_done(ccb);
		return(1);
	}

	/*
	 * We are going to work on this request.  Mark it as enqueued (though
	 * we don't actually queue it...)
	 */
	ccb_h->status |= CAM_SIM_QUEUED;

	/* Record the data direction for the dmamap setup later on. */
	if((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		if(ccb_h->flags & CAM_DIR_IN)
			tr->tr_flags |= TWA_CMD_DATA_IN;
		else
			tr->tr_flags |= TWA_CMD_DATA_OUT;
	}

	/* Build the 9000-series firmware command packet. */
	cmdpkt = tr->tr_command;

	cmdpkt->cmd_hdr.header_desc.size_header = 128;

	cmd9k = &(cmdpkt->command.cmd_pkt_9k);
	cmd9k->command.opcode = TWA_OP_EXECUTE_SCSI_COMMAND;
	cmd9k->unit = ccb_h->target_id;
	cmd9k->request_id = tr->tr_request_id;
	cmd9k->status = 0;
	cmd9k->sgl_offset = 16; /* offset from end of hdr = max cdb len */

	/* Copy in the CDB, whichever way CAM passed it. */
	if(ccb_h->flags & CAM_CDB_POINTER)
		bcopy(csio->cdb_io.cdb_ptr, cmd9k->cdb, csio->cdb_len);
	else
		bcopy(csio->cdb_io.cdb_bytes, cmd9k->cdb, csio->cdb_len);

	if (!(ccb_h->flags & CAM_DATA_PHYS)) {
		/* Virtual data addresses.  Need to convert them... */
		twa_dbg_dprint(3, sc, "XPT_SCSI_IO: Single virtual address!");
		if (!(ccb_h->flags & CAM_SCATTER_VALID)) {
			if (csio->dxfer_len > TWA_MAX_IO_SIZE) {
				twa_printf(sc, "I/O size %d too big.\n",
							csio->dxfer_len);
				ccb_h->status = CAM_REQ_TOO_BIG;
				if (tr->tr_cmd_pkt_type & TWA_CMD_PKT_TYPE_EXTERNAL)
					xpt_done(ccb);
				return(1);
			}

			/* Zero-length transfers leave tr_data NULL and
			 * sgl_entries 0. */
			if ((tr->tr_length = csio->dxfer_len)) {
				tr->tr_data = csio->data_ptr;
				cmd9k->sgl_entries = 1;
			}
		} else {
			/* Caller-built scatter/gather lists are not supported. */
			twa_printf(sc, "twa_execute_scsi: XPT_SCSI_IO: Got SGList!\n");
			ccb_h->status = CAM_REQ_CMP_ERR;
			if (tr->tr_cmd_pkt_type & TWA_CMD_PKT_TYPE_EXTERNAL) {
				xpt_done(ccb);
			}
			return(1);
		}
	} else {
		/* Data addresses are physical. */
		twa_printf(sc, "twa_execute_scsi: XPT_SCSI_IO: Physical data addresses!\n");
		ccb_h->status = CAM_REQ_CMP_ERR;
		if (tr->tr_cmd_pkt_type & TWA_CMD_PKT_TYPE_EXTERNAL) {
			ccb_h->status |= CAM_RELEASE_SIMQ;
			ccb_h->status &= ~CAM_SIM_QUEUED;
			xpt_done(ccb);
		}
		return(1);
	}

	tr->tr_cmd_pkt_type |= TWA_CMD_PKT_TYPE_9K;
	/* twa_setup_data_dmamap will fill in the SGL, and submit the I/O. */
	error = twa_map_request(tr);
	return(error);
}
/*
 * Function name:	twa_map_request
 * Description:		Maps a cmd pkt and data associated with it, into
 *			DMA'able memory.
 *
 * Input:		tr	-- ptr to request pkt
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
int
twa_map_request(struct twa_request *tr)
{
	struct twa_softc	*sc = tr->tr_sc;
	int			error = 0;

	twa_dbg_dprint_enter(10, sc);

	/* If the command involves data, map that too. */
	if (tr->tr_data != NULL) {
		/*
		 * It's sufficient for the data pointer to be 4-byte aligned
		 * to work with 9000.  However, if 4-byte aligned addresses
		 * are passed to bus_dmamap_load, we can get back sg elements
		 * that are not 512-byte multiples in size.  So, we will let
		 * only those buffers that are 512-byte aligned to pass
		 * through, and bounce the rest, so as to make sure that we
		 * always get back sg elements that are 512-byte multiples
		 * in size.
		 */
		if (((vm_offset_t)tr->tr_data % 512) || (tr->tr_length % 512)) {
			tr->tr_flags |= TWA_CMD_DATA_COPY_NEEDED;
			tr->tr_real_data = tr->tr_data;	/* save original data pointer */
			tr->tr_real_length = tr->tr_length;	/* save original data length */
			/* Round the bounce buffer size up to a 512-byte multiple. */
			tr->tr_length = (tr->tr_length + 511) & ~511;
			tr->tr_data = malloc(tr->tr_length, TWA_MALLOC_CLASS, M_NOWAIT);
			if (tr->tr_data == NULL) {
				twa_printf(sc, "%s: malloc failed\n", __func__);
				tr->tr_data = tr->tr_real_data;	/* restore original data pointer */
				tr->tr_length = tr->tr_real_length;	/* restore original data length */
				return(ENOMEM);
			}
			/* NOTE(review): for writes, the caller's data is
			 * presumably copied into this bounce buffer by the
			 * dmamap callback -- confirm in twa_setup_data_dmamap. */
		}

		/*
		 * Map the data buffer into bus space and build the s/g list.
		 */
		if ((error = bus_dmamap_load(sc->twa_dma_tag, tr->tr_dma_map,
					tr->tr_data, tr->tr_length,
					twa_setup_data_dmamap, tr,
					BUS_DMA_WAITOK))) {
			if (error == EINPROGRESS) {
				/* Mapping was deferred; hold off new external
				 * requests until the callback runs. */
				tr->tr_flags |= TWA_CMD_IN_PROGRESS;
				if (tr->tr_cmd_pkt_type & TWA_CMD_PKT_TYPE_EXTERNAL)
					twa_disallow_new_requests(sc);
				error = 0;
			} else {
				/* Free alignment buffer if it was used. */
				if (tr->tr_flags & TWA_CMD_DATA_COPY_NEEDED) {
					free(tr->tr_data, TWA_MALLOC_CLASS);
					tr->tr_data = tr->tr_real_data;	/* restore 'real' data pointer */
					tr->tr_length = tr->tr_real_length;/* restore 'real' data length */
				}
			}
		} else
			/* Load completed synchronously: the callback already
			 * submitted the I/O and recorded any error in tr_error. */
			error = tr->tr_error;
	} else if ((error = twa_submit_io(tr)))
		/* No data to map: submit the command packet directly. */
		twa_unmap_request(tr);
	return(error);
}
/*
 * Function name:	twa_alloc_req_pkts
 * Description:		Allocates memory for, and initializes request pkts,
 *			and queues them in the free queue.
 *
 * Input:		sc	-- ptr to per ctlr structure
 *			num_reqs-- # of request pkts to allocate and initialize.
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
int
twa_alloc_req_pkts(struct twa_softc *sc, int num_reqs)
{
	struct twa_request	*tr;
	int			i;

	if ((sc->twa_req_buf = malloc(num_reqs * sizeof(struct twa_request),
					TWA_MALLOC_CLASS, M_NOWAIT)) == NULL)
		return(ENOMEM);

	/* Allocate the bus DMA tag appropriate for PCI. */
	if (bus_dma_tag_create(NULL,			/* parent */
			TWA_ALIGNMENT,			/* alignment */
			0,				/* boundary */
			BUS_SPACE_MAXADDR,		/* lowaddr */
			BUS_SPACE_MAXADDR,		/* highaddr */
			NULL, NULL,			/* filter, filterarg */
			TWA_Q_LENGTH * (sizeof(struct twa_command_packet)),/* maxsize */
			TWA_MAX_SG_ELEMENTS,		/* nsegments */
			BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
			BUS_DMA_ALLOCNOW,		/* flags */
			busdma_lock_mutex,		/* lockfunc */
			&Giant,				/* lockfuncarg */
			&sc->twa_dma_tag		/* tag */)) {
		twa_printf(sc, "Can't allocate DMA tag.\n");
		/* NOTE(review): twa_req_buf is not freed on this or the
		 * following error paths -- presumably the caller's cleanup
		 * (twa_free) releases it; confirm. */
		return(ENOMEM);
	}

	/* Allocate memory for cmd pkts. */
	if (bus_dmamem_alloc(sc->twa_dma_tag,
			(void *)(&(sc->twa_cmd_pkt_buf)),
			BUS_DMA_WAITOK, &(sc->twa_cmd_map)))
		return(ENOMEM);

	/* NOTE(review): the return value of bus_dmamap_load is ignored;
	 * with BUS_DMA_ALLOCNOW the load should not be deferred, but a hard
	 * failure would go unnoticed -- confirm. */
	bus_dmamap_load(sc->twa_dma_tag, sc->twa_cmd_map,
			sc->twa_cmd_pkt_buf,
			num_reqs * sizeof(struct twa_command_packet),
			twa_setup_request_dmamap, sc, 0);
	bzero(sc->twa_req_buf, num_reqs * sizeof(struct twa_request));
	bzero(sc->twa_cmd_pkt_buf,
			num_reqs * sizeof(struct twa_command_packet));

	for (i = 0; i < num_reqs; i++) {
		/* Pair each request pkt with its command pkt and record the
		 * command pkt's bus address for the hardware. */
		tr = &(sc->twa_req_buf[i]);
		tr->tr_command = &(sc->twa_cmd_pkt_buf[i]);
		tr->tr_cmd_phys = sc->twa_cmd_pkt_phys +
					(i * sizeof(struct twa_command_packet));
		tr->tr_request_id = i;
		tr->tr_sc = sc;
		sc->twa_lookup[i] = tr;

		/*
		 * Create a map for data buffers.  maxsize (256 * 1024) used in
		 * bus_dma_tag_create above should suffice the bounce page needs
		 * for data buffers, since the max I/O size we support is 128KB.
		 * If we supported I/O's bigger than 256KB, we would have to
		 * create a second dma_tag, with the appropriate maxsize.
		 */
		if (bus_dmamap_create(sc->twa_dma_tag, 0,
					&tr->tr_dma_map))
			return(ENOMEM);

		/* Insert request into the free queue. */
		twa_release_request(tr);
	}
	return(0);
}
/*
 * Function name:	twa_attach
 * Description:		Allocates pci resources; updates sc; adds a node to the
 *			sysctl tree to expose the driver version; makes calls
 *			to initialize ctlr, and to attach to CAM.
 *
 * Input:		dev	-- bus device corresponding to the ctlr
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
static int
twa_attach(device_t dev)
{
	struct twa_softc	*sc = device_get_softc(dev);
	u_int32_t		command;
	int			res_id;
	int			error;

	twa_dbg_dprint_enter(3, sc);

	/* Initialize the softc structure. */
	sc->twa_bus_dev = dev;

	/* Expose the driver version via a read-only sysctl node. */
	sysctl_ctx_init(&sc->twa_sysctl_ctx);
	sc->twa_sysctl_tree = SYSCTL_ADD_NODE(&sc->twa_sysctl_ctx,
		SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
		device_get_nameunit(dev), CTLFLAG_RD, 0, "");
	if (sc->twa_sysctl_tree == NULL) {
		twa_printf(sc, "Cannot add sysctl tree node.\n");
		return(ENXIO);
	}
	SYSCTL_ADD_STRING(&sc->twa_sysctl_ctx,
		SYSCTL_CHILDREN(sc->twa_sysctl_tree), OID_AUTO,
		"driver_version", CTLFLAG_RD,
		TWA_DRIVER_VERSION_STRING, 0, "TWA driver version");

	/* Make sure we are going to be able to talk to this board. */
	command = pci_read_config(dev, PCIR_COMMAND, 2);
	if ((command & PCIM_CMD_PORTEN) == 0) {
		twa_printf(sc, "Register window not available.\n");
		/* NOTE(review): the sysctl ctx created above is not torn down
		 * on this early return -- presumably detach handles it;
		 * confirm. */
		return(ENXIO);
	}

	/* Force the busmaster enable bit on, in case the BIOS forgot. */
	command |= PCIM_CMD_BUSMASTEREN;
	pci_write_config(dev, PCIR_COMMAND, command, 2);

	/* Allocate the PCI register window. */
	res_id = TWA_IO_CONFIG_REG;
	if ((sc->twa_io_res = bus_alloc_resource(dev, SYS_RES_IOPORT, &res_id,
					0, ~0, 1, RF_ACTIVE)) == NULL) {
		twa_printf(sc, "can't allocate register window.\n");
		twa_free(sc);
		return(ENXIO);
	}
	sc->twa_bus_tag = rman_get_bustag(sc->twa_io_res);
	sc->twa_bus_handle = rman_get_bushandle(sc->twa_io_res);

	/* Allocate and connect our interrupt. */
	res_id = 0;
	if ((sc->twa_irq_res = bus_alloc_resource(sc->twa_bus_dev, SYS_RES_IRQ,
					&res_id, 0, ~0, 1,
					RF_SHAREABLE | RF_ACTIVE)) == NULL) {
		twa_printf(sc, "Can't allocate interrupt.\n");
		twa_free(sc);
		return(ENXIO);
	}
	if (bus_setup_intr(sc->twa_bus_dev, sc->twa_irq_res, INTR_TYPE_CAM,
				twa_pci_intr, sc, &sc->twa_intr_handle)) {
		twa_printf(sc, "Can't set up interrupt.\n");
		twa_free(sc);
		return(ENXIO);
	}

	/* Initialize the driver for this controller. */
	if ((error = twa_setup(sc))) {
		twa_free(sc);
		return(error);
	}

	/* Print some information about the controller and configuration. */
	twa_describe_controller(sc);

	/* Create the control device. */
	sc->twa_ctrl_dev = make_dev(&twa_cdevsw,
		device_get_unit(sc->twa_bus_dev),
		UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR,
		"twa%d", device_get_unit(sc->twa_bus_dev));
	sc->twa_ctrl_dev->si_drv1 = sc;

	/*
	 * Schedule ourselves to bring the controller up once interrupts are
	 * available.  This isn't strictly necessary, since we disable
	 * interrupts while probing the controller, but it is more in keeping
	 * with common practice for other disk devices.
	 */
	sc->twa_ich.ich_func = twa_intrhook;
	sc->twa_ich.ich_arg = sc;
	if (config_intrhook_establish(&sc->twa_ich) != 0) {
		twa_printf(sc, "Can't establish configuration hook.\n");
		twa_free(sc);
		return(ENXIO);
	}

	if ((error = twa_cam_setup(sc))) {
		twa_free(sc);
		return(error);
	}
	return(0);
}