Example #1
0
/*
 * Function name:	twa_drain_busy_queue
 * Description:		This function gets called after a controller reset.
 *			It errors back to CAM, all those requests that were
 *			pending with the firmware, at the time of the reset.
 *
 * Input:		sc	-- ptr to per ctlr structure
 * Output:		None
 * Return value:	None
 */
void
twa_drain_busy_queue(struct twa_softc *sc)
{
	struct twa_request	*tr;
	union ccb		*ccb;

	/* Walk the busy queue. */
	while ((tr = twa_dequeue_busy(sc))) {
		twa_unmap_request(tr);
		if ((tr->tr_cmd_pkt_type & TWA_CMD_PKT_TYPE_INTERNAL) ||
			(tr->tr_cmd_pkt_type & TWA_CMD_PKT_TYPE_IOCTL)) {
			/* It's an internal/ioctl request.  Simply free it. */
			if (tr->tr_data)
				free(tr->tr_data, M_DEVBUF);
		} else {
			if ((ccb = tr->tr_private)) {
				/* It's a SCSI request.  Complete it. */
				ccb->ccb_h.status = CAM_SCSI_BUS_RESET |
							CAM_RELEASE_SIMQ;
				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
				xpt_done(ccb);
			}
		}
		twa_release_request(tr);
	}
}
Example #2
0
/*
 * Issue a cache-flush command to the hardware unit behind 'ld' and wait
 * (interruptibly, with a 60 second timeout per sleep) for it to complete.
 * 'flags' is currently unused.  Returns 0 on success, or an errno from
 * twa_map_request()/tsleep() on failure.
 */
static int
ld_twa_flush(struct ld_softc *ld, int flags)
{
	int s, rv = 0;
	struct twa_request *tr;
	struct twa_softc *twa = device_private(device_parent(ld->sc_dv));
	struct ld_twa_softc *sc = (void *)ld;
	struct twa_command_generic *generic_cmd;

	/* Get a request packet; this sleeps until one is available. */
	tr = twa_get_request_wait(twa, 0);
	KASSERT(tr != NULL);

	tr->tr_cmd_pkt_type =
		(TWA_CMD_PKT_TYPE_9K | TWA_CMD_PKT_TYPE_EXTERNAL);

	tr->tr_callback = twa_request_wait_handler;
	tr->tr_ld_sc = sc;

	tr->tr_command->cmd_hdr.header_desc.size_header = 128;

	/* Build a generic (7000-series layout) FLUSH command for this unit. */
	generic_cmd = &(tr->tr_command->command.cmd_pkt_7k.generic);
	generic_cmd->opcode = TWA_OP_FLUSH;
	generic_cmd->size = 2;
	generic_cmd->unit = sc->sc_hwunit;
	generic_cmd->request_id = tr->tr_request_id;
	generic_cmd->sgl_offset = 0;
	generic_cmd->host_id = 0;
	generic_cmd->status = 0;
	generic_cmd->flags = 0;
	generic_cmd->count = 0;

	rv = twa_map_request(tr);
	s = splbio();
	/*
	 * Only wait for completion if the request was actually mapped and
	 * submitted.  The original code waited unconditionally, so a
	 * twa_map_request() failure (command never sent to the firmware)
	 * stalled here for the full 60 second tsleep timeout.
	 */
	if (rv == 0) {
		while (tr->tr_status != TWA_CMD_COMPLETE)
			if ((rv = tsleep(tr, PRIBIO, "twaflush", 60 * hz)) != 0)
				break;
	}
	twa_release_request(tr);
	splx(s);

	return (rv);
}
Example #3
0
/*
 * Completion handler for an ld-initiated I/O: translate the firmware
 * status into buf-level success/failure, free the request packet, and
 * hand the buf back to the ld layer.
 */
static void
ld_twa_handler(struct twa_request *tr)
{
	struct ld_twa_softc *sc = (struct ld_twa_softc *)tr->tr_ld_sc;
	struct buf *bp = tr->bp;
	uint8_t cmd_status = tr->tr_command->command.cmd_pkt_9k.status;

	if (cmd_status == 0) {
		/* Success: the whole transfer completed. */
		bp->b_error = 0;
		bp->b_resid = 0;
	} else {
		/* Firmware reported an error; fail the entire buf. */
		bp->b_error = EIO;
		bp->b_resid = bp->b_bcount;
	}
	twa_release_request(tr);

	lddone(&sc->sc_ld, bp);
}
Example #4
0
/*
 * Function name:	twa_action
 * Description:		Driver entry point for CAM's use.
 *
 * Input:		sim	-- sim corresponding to the ctlr
 *			ccb	-- ptr to CAM request
 * Output:		None
 * Return value:	None
 */
void
twa_action(struct cam_sim *sim, union ccb *ccb)
{
	struct twa_softc	*sc = (struct twa_softc *)cam_sim_softc(sim);
	struct ccb_hdr		*ccb_h = &(ccb->ccb_h);

	switch (ccb_h->func_code) {
	case XPT_SCSI_IO:	/* SCSI I/O */
	{
		struct twa_request	*tr;

		/*
		 * Refuse new I/O while the simq is frozen, or when no free
		 * request packet is available; in either case requeue the
		 * ccb so CAM resubmits it later.
		 */
		if ((sc->twa_state & TWA_STATE_SIMQ_FROZEN) ||
				((tr = twa_get_request(sc)) == NULL)) {
			twa_dbg_dprint(2, sc, "simq frozen/Cannot get request pkt.");
			/*
			 * Freeze the simq to maintain ccb ordering.  The next
			 * ccb that gets completed will unfreeze the simq.
			 */
			twa_disallow_new_requests(sc);
			ccb_h->status |= CAM_REQUEUE_REQ;
			xpt_done(ccb);
			break;
		}
		tr->tr_cmd_pkt_type |= TWA_CMD_PKT_TYPE_EXTERNAL;
		tr->tr_private = ccb;
		tr->tr_callback = twa_complete_io;
		/* On submission failure, return the request to the free pool. */
		if (twa_execute_scsi(tr, ccb))
			twa_release_request(tr);
		break;
	}

	case XPT_ABORT:
		/* Aborting individual requests is not supported. */
		twa_dbg_dprint(2, sc, "Abort request");
		ccb_h->status = CAM_UA_ABORT;
		xpt_done(ccb);
		break;

	case XPT_RESET_BUS:
		twa_printf(sc, "Reset Bus request from CAM...\n");
		if (twa_reset(sc)) {
			twa_printf(sc, "Reset Bus failed!\n");
			ccb_h->status = CAM_REQ_CMP_ERR;
		}
		else
			ccb_h->status = CAM_REQ_CMP;

		xpt_done(ccb);
		break;

	case XPT_SET_TRAN_SETTINGS:
		twa_dbg_dprint(3, sc, "XPT_SET_TRAN_SETTINGS");

		/*
		 * This command is not supported, since it's very specific
		 * to SCSI, and we are doing ATA.
		 */
  		ccb_h->status = CAM_FUNC_NOTAVAIL;
  		xpt_done(ccb);
  		break;

	case XPT_GET_TRAN_SETTINGS: 
	{
		struct ccb_trans_settings	*cts = &ccb->cts;

		twa_dbg_dprint(3, sc, "XPT_GET_TRAN_SETTINGS");
		/* Report disconnection and tag queueing as disabled. */
		cts->valid = (CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID);
		cts->flags &= ~(CCB_TRANS_DISC_ENB | CCB_TRANS_TAG_ENB);
		ccb_h->status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}

	case XPT_CALC_GEOMETRY:
	{
		struct ccb_calc_geometry	*geom;

		twa_dbg_dprint(3, sc, "XPT_CALC_GEOMETRY request");
		geom = &ccb->ccg;

		/*
		 * Fabricate a logical geometry: 255H/63S for volumes over
		 * 1 GB (0x200000 512-byte sectors), 64H/32S otherwise.
		 */
		if (geom->volume_size > 0x200000) /* 1 GB */ {
			geom->heads = 255;
			geom->secs_per_track = 63;
		} else {
			geom->heads = 64;
			geom->secs_per_track = 32;
		}
		geom->cylinders = geom->volume_size /
					(geom->heads * geom->secs_per_track);
		ccb_h->status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}

	case XPT_PATH_INQ:    /* Path inquiry -- get twa properties */
	{
		struct ccb_pathinq	*path_inq = &ccb->cpi;

		twa_dbg_dprint(3, sc, "XPT_PATH_INQ request");

		path_inq->version_num = 1;
		path_inq->hba_inquiry = 0;
		path_inq->target_sprt = 0;
		path_inq->hba_misc = 0;
		path_inq->hba_eng_cnt = 0;
		path_inq->max_target = TWA_MAX_UNITS;
		path_inq->max_lun = 0;
		path_inq->unit_number = cam_sim_unit(sim);
		path_inq->bus_id = cam_sim_bus(sim);
		/* NOTE(review): initiator_id 12 looks arbitrary -- confirm. */
		path_inq->initiator_id = 12;
		path_inq->base_transfer_speed = 100000;
		strncpy(path_inq->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(path_inq->hba_vid, "3ware", HBA_IDLEN);
		strncpy(path_inq->dev_name, cam_sim_name(sim), DEV_IDLEN);
		ccb_h->status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}

	default:
		/* Unrecognized function code: reject the ccb. */
		twa_dbg_dprint(3, sc, "func_code = %x", ccb_h->func_code);
		ccb_h->status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
}
Example #5
0
/*
 * Function name:	twa_setup_data_dmamap
 * Description:		Callback of bus_dmamap_load for the buffer associated
 *			with data.  Updates the cmd pkt (size/sgl_entries
 *			fields, as applicable) to reflect the number of sg
 *			elements.
 *
 * Input:		arg	-- ptr to request pkt
 *			segs	-- ptr to a list of segment descriptors
 *			nsegments--# of segments
 *			error	-- 0 if no errors encountered before callback,
 *				   non-zero if errors were encountered
 * Output:		None
 * Return value:	None
 */
static void
twa_setup_data_dmamap(void *arg, bus_dma_segment_t *segs,
					int nsegments, int error)
{
	struct twa_request		*tr = (struct twa_request *)arg;
	struct twa_command_packet	*cmdpkt = tr->tr_command;
	struct twa_command_9k		*cmd9k;
	union twa_command_7k		*cmd7k;
	u_int8_t			sgl_offset;

	twa_dbg_dprint_enter(10, tr->tr_sc);

	/*
	 * Re-allow new requests for an external request whose map was
	 * deferred (TWA_CMD_IN_PROGRESS).  NOTE(review): presumably pairs
	 * with twa_disallow_new_requests() in twa_action -- confirm.
	 */
	if ((tr->tr_flags & TWA_CMD_IN_PROGRESS) &&
			(tr->tr_cmd_pkt_type & TWA_CMD_PKT_TYPE_EXTERNAL))
		twa_allow_new_requests(tr->tr_sc, (void *)(tr->tr_private));

	/* Too many segments to map: record the error and clean up. */
	if (error == EFBIG) {
		tr->tr_error = error;
		goto out;
	}

	if (tr->tr_cmd_pkt_type & TWA_CMD_PKT_TYPE_9K) {
		/* 9000-series packet: SG list lives at a fixed location. */
		cmd9k = &(cmdpkt->command.cmd_pkt_9k);
		twa_fillin_sgl(&(cmd9k->sg_list[0]), segs, nsegments);
		/* One entry is already counted; add the remainder. */
		cmd9k->sgl_entries += nsegments - 1;
	} else {
		/* It's a 7000 command packet. */
		cmd7k = &(cmdpkt->command.cmd_pkt_7k);
		/* sgl_offset counts 32-bit words; 0 means no SG list. */
		if ((sgl_offset = cmdpkt->command.cmd_pkt_7k.generic.sgl_offset))
			twa_fillin_sgl((struct twa_sg *)
					(((u_int32_t *)cmd7k) + sgl_offset),
					segs, nsegments);
		/* Modify the size field, based on sg address size. */
		cmd7k->generic.size += 
				((TWA_64BIT_ADDRESSES ? 3 : 2) * nsegments);
	}

	/* Sync the map for the direction(s) the transfer will use. */
	if (tr->tr_flags & TWA_CMD_DATA_IN)
		bus_dmamap_sync(tr->tr_sc->twa_dma_tag, tr->tr_dma_map,
							BUS_DMASYNC_PREREAD);
	if (tr->tr_flags & TWA_CMD_DATA_OUT) {
		/* 
		 * If we're using an alignment buffer, and we're
		 * writing data, copy the real data out.
		 */
		if (tr->tr_flags & TWA_CMD_DATA_COPY_NEEDED)
			bcopy(tr->tr_real_data, tr->tr_data, tr->tr_real_length);
		bus_dmamap_sync(tr->tr_sc->twa_dma_tag, tr->tr_dma_map,
						BUS_DMASYNC_PREWRITE);
	}
	/* Hand the fully-built command to the firmware. */
	error = twa_submit_io(tr);

out:
	if (error) {
		twa_unmap_request(tr);
		/*
		 * If the caller had been returned EINPROGRESS, and he has
		 * registered a callback for handling completion, the callback
		 * will never get called because we were unable to submit the
		 * request.  So, free up the request right here.
		 */
		if ((tr->tr_flags & TWA_CMD_IN_PROGRESS) && (tr->tr_callback))
			twa_release_request(tr);
	}
}
Example #6
0
/*
 * Function name:	twa_alloc_req_pkts
 * Description:		Allocates memory for, and initializes request pkts,
 *			and queues them in the free queue.
 *
 * Input:		sc	-- ptr to per ctlr structure
 *			num_reqs-- # of request pkts to allocate and initialize.
 * Output:		None
 * Return value:	0	-- success
 *			non-zero-- failure
 */
int
twa_alloc_req_pkts(struct twa_softc *sc, int num_reqs)
{
	struct twa_request	*tr;
	int			i;

	/* Array of request packets; lives for the lifetime of the softc. */
	if ((sc->twa_req_buf = malloc(num_reqs * sizeof(struct twa_request),
					TWA_MALLOC_CLASS, M_NOWAIT)) == NULL)
		return(ENOMEM);

	/* Allocate the bus DMA tag appropriate for PCI. */
	/*
	 * NOTE(review): on the failure paths below, resources already
	 * allocated (twa_req_buf, dma tag, dmamem) are not freed here --
	 * presumably the caller tears them down via the softc; confirm.
	 */
	if (bus_dma_tag_create(NULL,			/* parent */
				TWA_ALIGNMENT,		/* alignment */
				0,			/* boundary */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR, 	/* highaddr */
				NULL, NULL, 		/* filter, filterarg */
				TWA_Q_LENGTH *
				(sizeof(struct twa_command_packet)),/* maxsize */
				TWA_MAX_SG_ELEMENTS,	/* nsegments */
				BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
				BUS_DMA_ALLOCNOW,	/* flags */
				busdma_lock_mutex,	/* lockfunc */
				&Giant,			/* lockfuncarg */
				&sc->twa_dma_tag	/* tag */)) {
		twa_printf(sc, "Can't allocate DMA tag.\n");
		return(ENOMEM);
	}

	/* Allocate memory for cmd pkts. */
	if (bus_dmamem_alloc(sc->twa_dma_tag,
				(void *)(&(sc->twa_cmd_pkt_buf)),
				BUS_DMA_WAITOK, &(sc->twa_cmd_map)))
		return(ENOMEM);

	/*
	 * Load the cmd-pkt buffer; twa_setup_request_dmamap records the
	 * bus address.  NOTE(review): the return value is ignored -- with
	 * BUS_DMA_ALLOCNOW the load should not be deferred, but confirm.
	 */
	bus_dmamap_load(sc->twa_dma_tag, sc->twa_cmd_map,
				sc->twa_cmd_pkt_buf,
				num_reqs * sizeof(struct twa_command_packet),
				twa_setup_request_dmamap, sc, 0);
	bzero(sc->twa_req_buf, num_reqs * sizeof(struct twa_request));
	bzero(sc->twa_cmd_pkt_buf,
			num_reqs * sizeof(struct twa_command_packet));

	/* Wire each request packet to its command packet and id. */
	for (i = 0; i < num_reqs; i++) {
		tr = &(sc->twa_req_buf[i]);
		tr->tr_command = &(sc->twa_cmd_pkt_buf[i]);
		tr->tr_cmd_phys = sc->twa_cmd_pkt_phys +
					(i * sizeof(struct twa_command_packet));
		tr->tr_request_id = i;
		tr->tr_sc = sc;
		/* Lets firmware responses be mapped back to a request by id. */
		sc->twa_lookup[i] = tr;

		/*
		 * Create a map for data buffers.  maxsize (256 * 1024) used in
		 * bus_dma_tag_create above should suffice the bounce page needs
		 * for data buffers, since the max I/O size we support is 128KB.
		 * If we supported I/O's bigger than 256KB, we would have to
		 * create a second dma_tag, with the appropriate maxsize.
		 */
		if (bus_dmamap_create(sc->twa_dma_tag, 0,
						&tr->tr_dma_map))
			return(ENOMEM);

		/* Insert request into the free queue. */
		twa_release_request(tr);
	}
	return(0);
}