Example #1
static void
pst_start(struct pst_softc *psc)
{
    struct pst_request *request;
    struct bio *bp;
    u_int32_t mfa;

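    /* If the IOP can take another command and a bio is queued, grab a
     * message frame, build a request and submit it; undo everything
     * and finish the bio with an error if any step fails. */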
    if (psc->iop->outstanding < (I2O_IOP_OUTBOUND_FRAME_COUNT - 1) &&
	(bp = bioq_first(&psc->queue))) {
	if ((mfa = iop_get_mfa(psc->iop)) != 0xffffffff) {
	    bioq_remove(&psc->queue, bp);
	    if (!(request = malloc(sizeof(struct pst_request),
				   M_PSTRAID, M_NOWAIT | M_ZERO))) {
		printf("pst: out of memory in start\n");
		/* malloc failed, so request is NULL: finish the original bio */
		biofinish(bp, NULL, ENOMEM);
		iop_free_mfa(psc->iop, mfa);
		return;
	    }
	    psc->iop->outstanding++;
	    request->psc = psc;
	    request->mfa = mfa;
	    request->bp = bp;
	    if (pst_rw(request)) {
		biofinish(request->bp, NULL, EIO);
		iop_free_mfa(request->psc->iop, request->mfa);
		psc->iop->outstanding--;
		free(request, M_PSTRAID);
	    }
	}
    }
}
Example #2
void
bioq_flush(struct bio_queue_head *head, struct devstat *stp, int error)
{
	struct bio *bp;

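	/* Dequeue every bio and complete it with the supplied error,
	 * updating the optional devstat entry along the way. */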
	while ((bp = bioq_takefirst(head)) != NULL)
		biofinish(bp, stp, error);
}
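For comparison with the open-coded loop in Example #11 below, a detach or invalidate path can fail its whole queue with one call. A minimal sketch, assuming a hypothetical softc holding a bio_queue_head named queue and a devstat pointer named stats:

static void
xxx_detach(struct xxx_softc *sc)
{
	/* Fail everything still queued; biofinish() also updates the
	 * devstat entry for each bio when one is supplied. */
	bioq_flush(&sc->queue, sc->stats, ENXIO);
}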
Example #3
/*
 * Actually translate the requested transfer into one the physical driver
 * can understand.  The transfer is described by a buf and will include
 * only one physical transfer.
 */
static void
ptstrategy(struct bio *bp)
{
	struct cam_periph *periph;
	struct pt_softc *softc;
	int    s;
	
	periph = (struct cam_periph *)bp->bio_dev->si_drv1;
	bp->bio_resid = bp->bio_bcount;
	if (periph == NULL) {
		biofinish(bp, NULL, ENXIO);
		return;
	}
	softc = (struct pt_softc *)periph->softc;

	/*
	 * Mask interrupts so that the pack cannot be invalidated until
	 * after we are in the queue.  Otherwise, we might not properly
	 * clean up one of the buffers.
	 */
	s = splbio();
	
	/*
	 * If the device has been made invalid, error out
	 */
	if ((softc->flags & PT_FLAG_DEVICE_INVALID)) {
		splx(s);
		biofinish(bp, NULL, ENXIO);
		return;
	}
	
	/*
	 * Place it in the queue of disk activities for this disk
	 */
	bioq_insert_tail(&softc->bio_queue, bp);

	splx(s);
	
	/*
	 * Schedule ourselves for performing the work.
	 */
	xpt_schedule(periph, /* XXX priority */1);

	return;
}
Example #4
/*
 * Actually translate the requested transfer into one the physical driver
 * can understand.  The transfer is described by a buf and will include
 * only one physical transfer.
 */
static void
adastrategy(struct bio *bp)
{
	struct cam_periph *periph;
	struct ada_softc *softc;
	
	periph = (struct cam_periph *)bp->bio_disk->d_drv1;
	if (periph == NULL) {
		biofinish(bp, NULL, ENXIO);
		return;
	}
	softc = (struct ada_softc *)periph->softc;

	cam_periph_lock(periph);

	/*
	 * If the device has been made invalid, error out
	 */
	if ((softc->flags & ADA_FLAG_PACK_INVALID)) {
		cam_periph_unlock(periph);
		biofinish(bp, NULL, ENXIO);
		return;
	}
	
	/*
	 * Place it in the queue of disk activities for this disk
	 */
	if (bp->bio_cmd == BIO_DELETE &&
	    (softc->flags & ADA_FLAG_CAN_TRIM))
		bioq_disksort(&softc->trim_queue, bp);
	else
		bioq_disksort(&softc->bio_queue, bp);

	/*
	 * Schedule ourselves for performing the work.
	 */
	adaschedule(periph);
	cam_periph_unlock(periph);

	return;
}
Example #5
static void
pst_timeout(struct pst_request *request)
{
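    /* The request timed out: free its message frame, grab a fresh one
     * and resubmit; if no frame is available or the resubmit fails,
     * finish the bio with EIO. */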
    printf("pst: timeout mfa=0x%08x cmd=0x%02x\n",
	   request->mfa, request->bp->bio_cmd);
    mtx_lock(&request->psc->iop->mtx);
    iop_free_mfa(request->psc->iop, request->mfa);
    if ((request->mfa = iop_get_mfa(request->psc->iop)) == 0xffffffff) {
	printf("pst: timeout no mfa possible\n");
	biofinish(request->bp, NULL, EIO);
	request->psc->iop->outstanding--;
	mtx_unlock(&request->psc->iop->mtx);
	return;
    }
    if (pst_rw(request)) {
	iop_free_mfa(request->psc->iop, request->mfa);
	biofinish(request->bp, NULL, EIO);
	request->psc->iop->outstanding--;
    }
    mtx_unlock(&request->psc->iop->mtx);
}
Example #6
static void
pst_timeout(void *arg)
{
    struct pst_request *request;

    request = arg;
    printf("pst: timeout mfa=0x%08x cmd=0x%02x\n",
	   request->mfa, request->bp->bio_cmd);
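    /* Callout variant of the timeout handler in Example #5: the caller
     * already holds the IOP mutex, so only assert ownership here. */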
    mtx_assert(&request->psc->iop->mtx, MA_OWNED);
    iop_free_mfa(request->psc->iop, request->mfa);
    if ((request->mfa = iop_get_mfa(request->psc->iop)) == 0xffffffff) {
	printf("pst: timeout no mfa possible\n");
	biofinish(request->bp, NULL, EIO);
	request->psc->iop->outstanding--;
	return;
    }
    if (pst_rw(request)) {
	iop_free_mfa(request->psc->iop, request->mfa);
	biofinish(request->bp, NULL, EIO);
	request->psc->iop->outstanding--;
    }
}
Example #7
/*
 * Actually translate the requested transfer into one the physical driver
 * can understand.  The transfer is described by a buf and will include
 * only one physical transfer.
 */
static void
ptstrategy(struct bio *bp)
{
	struct cam_periph *periph;
	struct pt_softc *softc;
	
	periph = (struct cam_periph *)bp->bio_dev->si_drv1;
	bp->bio_resid = bp->bio_bcount;
	if (periph == NULL) {
		biofinish(bp, NULL, ENXIO);
		return;
	}
	cam_periph_lock(periph);
	softc = (struct pt_softc *)periph->softc;

	/*
	 * If the device has been made invalid, error out
	 */
	if ((softc->flags & PT_FLAG_DEVICE_INVALID)) {
		cam_periph_unlock(periph);
		biofinish(bp, NULL, ENXIO);
		return;
	}
	
	/*
	 * Place it in the queue of disk activities for this disk
	 */
	bioq_insert_tail(&softc->bio_queue, bp);

	/*
	 * Schedule ourselves for performing the work.
	 */
	xpt_schedule(periph, /* XXX priority */1);
	cam_periph_unlock(periph);

	return;
}
Example #8
static void
pst_done(struct iop_softc *sc, u_int32_t mfa, struct i2o_single_reply *reply)
{
    struct pst_request *request =
	(struct pst_request *)reply->transaction_context;
    struct pst_softc *psc = request->psc;

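    /* Command completed: cancel the timeout, set the residual count,
     * finish the bio, return the frame to the outbound queue and
     * restart the driver queue. */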
    untimeout((timeout_t *)pst_timeout, request, request->timeout_handle);
    request->bp->bio_resid = request->bp->bio_bcount - reply->donecount;
    biofinish(request->bp, NULL, reply->status ? EIO : 0);
    free(request, M_PSTRAID);
    psc->iop->reg->oqueue = mfa;
    psc->iop->outstanding--;
    pst_start(psc);
}
Example #9
static void
htif_blk_strategy(struct bio *bp)
{
	struct htif_blk_softc *sc;

	sc = bp->bio_disk->d_drv1;

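	/* Queue the bio and wake the worker thread while the device is
	 * running; otherwise fail the request with ENXIO. */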
	HTIF_BLK_LOCK(sc);
	if (sc->running > 0) {
		bioq_disksort(&sc->bio_queue, bp);
		HTIF_BLK_UNLOCK(sc);
		wakeup(sc);
	} else {
		HTIF_BLK_UNLOCK(sc);
		biofinish(bp, NULL, ENXIO);
	}
}
Example #10
static void
ptoninvalidate(struct cam_periph *periph)
{
	int s;
	struct pt_softc *softc;
	struct bio *q_bp;
	struct ccb_setasync csa;

	softc = (struct pt_softc *)periph->softc;

	/*
	 * De-register any async callbacks.
	 */
	xpt_setup_ccb(&csa.ccb_h, periph->path,
		      /* priority */ 5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = 0;
	csa.callback = ptasync;
	csa.callback_arg = periph;
	xpt_action((union ccb *)&csa);

	softc->flags |= PT_FLAG_DEVICE_INVALID;

	/*
	 * Although the oninvalidate() routines are always called at
	 * splsoftcam, we need to be at splbio() here to keep the buffer
	 * queue from being modified while we traverse it.
	 */
	s = splbio();

	/*
	 * Return all queued I/O with ENXIO.
	 * XXX Handle any transactions queued to the card
	 *     with XPT_ABORT_CCB.
	 */
	while ((q_bp = bioq_first(&softc->bio_queue)) != NULL){
		bioq_remove(&softc->bio_queue, q_bp);
		q_bp->bio_resid = q_bp->bio_bcount;
		biofinish(q_bp, NULL, ENXIO);
	}

	splx(s);

	xpt_print_path(periph->path);
	printf("lost device\n");
}
Example #11
void
afddetach(struct ata_device *atadev)
{   
    struct afd_softc *fdp = atadev->driver;
    struct bio *bp;
    
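    /* Fail every queued bio with ENXIO, then tear down the disk,
     * devstat entry and name/LUN bookkeeping. */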
    while ((bp = bioq_first(&fdp->queue))) {
	bioq_remove(&fdp->queue, bp);
	biofinish(bp, NULL, ENXIO);
    }
    disk_invalidate(&fdp->disk);
    disk_destroy(fdp->dev);
    devstat_remove_entry(&fdp->stats);
    ata_free_name(atadev);
    ata_free_lun(&afd_lun_map, fdp->lun);
    free(fdp, M_AFD);
    atadev->driver = NULL;
}   
Example #12
static void 
afd_strategy(struct bio *bp)
{
    device_t dev = bp->bio_disk->d_drv1;
    struct ata_device *atadev = device_get_softc(dev);
    struct afd_softc *fdp = device_get_ivars(dev);
    struct ata_request *request;
    u_int16_t count;
    int8_t ccb[16];

    /* if it's a null transfer, return immediately. */
    if (bp->bio_bcount == 0) {
	bp->bio_resid = 0;
	biodone(bp);
	return;
    }

    /* should reject all queued entries if the media has changed. */
    if (atadev->flags & ATA_D_MEDIA_CHANGED) {
	biofinish(bp, NULL, EIO);
	return;
    }

    count = bp->bio_bcount / fdp->sectorsize;
    bp->bio_resid = bp->bio_bcount; 

    bzero(ccb, sizeof(ccb));

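    /* Build the ATAPI READ/WRITE BIG CCB: opcode in byte 0, big-endian
     * LBA in bytes 2-5, big-endian sector count in bytes 7-8. */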
    if (bp->bio_cmd == BIO_READ)
	ccb[0] = ATAPI_READ_BIG;
    else
	ccb[0] = ATAPI_WRITE_BIG;

    ccb[2] = bp->bio_pblkno >> 24;
    ccb[3] = bp->bio_pblkno >> 16;
    ccb[4] = bp->bio_pblkno >> 8;
    ccb[5] = bp->bio_pblkno;
    ccb[7] = count>>8;
    ccb[8] = count;

    if (!(request = ata_alloc_request())) {
	biofinish(bp, NULL, ENOMEM);
	return;
    }
    request->dev = dev;
    request->bio = bp;
    bcopy(ccb, request->u.atapi.ccb, 16);
    request->data = bp->bio_data;
    request->bytecount = count * fdp->sectorsize;
    request->transfersize = min(request->bytecount, 65534);
    request->timeout = (ccb[0] == ATAPI_WRITE_BIG) ? 60 : 30;
    request->retries = 2;
    request->callback = afd_done;
    switch (bp->bio_cmd) {
    case BIO_READ:
	request->flags = (ATA_R_ATAPI | ATA_R_READ);
	break;
    case BIO_WRITE:
	request->flags = (ATA_R_ATAPI | ATA_R_WRITE);
	break;
    default:
	device_printf(dev, "unknown BIO operation\n");
	ata_free_request(request);
	biofinish(bp, NULL, EIO);
	return;
    }
    if (atadev->mode >= ATA_DMA)
	request->flags |= ATA_R_DMA;
    request->flags |= ATA_R_ORDERED;
    ata_queue_request(request);
}
Example #13
static void
no_strategy(struct bio *bp)
{

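	/* Stub strategy: there is no device to service the request. */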
	biofinish(bp, NULL, ENODEV);
}
Example #14
static void
dead_strategy(struct bio *bp)
{

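	/* Stub strategy for a device that has gone away. */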
	biofinish(bp, NULL, ENXIO);
}
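Strategy routines, whether real ones like adastrategy in Example #4 or stubs like these, are handed to the block layer at registration time. A minimal sketch of that wiring, assuming the disk(9) API; the xxx names are hypothetical:

static void
xxx_attach(struct xxx_softc *sc)
{
	struct disk *dp;

	dp = disk_alloc();
	dp->d_strategy = xxx_strategy;	/* called for every incoming bio */
	dp->d_name = "xxx";
	dp->d_drv1 = sc;		/* recovered via bio_disk->d_drv1 */
	/* ... fill in d_sectorsize, d_mediasize, etc. ... */
	disk_create(dp, DISK_VERSION);
}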
Example #15
static void
isf_task(void *arg)
{
	struct isf_softc	*sc = arg;
	struct bio		*bp;
	int			ss = sc->isf_disk->d_sectorsize;
	int			error, i;

	for (;;) {
		ISF_LOCK(sc);
		do {
			bp = bioq_first(&sc->isf_bioq);
			if (bp == NULL) {
				if (sc->isf_doomed)
					kproc_exit(0);
				else
					ISF_SLEEP(sc, sc, 0);
			}
		} while (bp == NULL);
		bioq_remove(&sc->isf_bioq, bp);

		error = 0;
		switch (bp->bio_cmd) {
		case BIO_READ:
			isf_read(sc, bp->bio_pblkno * ss, bp->bio_data,
			    bp->bio_bcount);
			break;

		case BIO_WRITE:
			/*
			 * In principle one could suspend the in-progress
			 * erase, process any pending writes to other
			 * blocks and then proceed, but that seems
			 * overly complex for the likely usage modes.
			 */
			if (sc->isf_erasing) {
				error = EBUSY;
				break;
			}

			/*
			 * Read in the block we want to write and check that
			 * we're only setting bits to 0.  If an erase would
			 * be required return an I/O error.
			 */
			isf_read(sc, bp->bio_pblkno * ss, sc->isf_rbuf,
			    bp->bio_bcount);
			for (i = 0; i < bp->bio_bcount / 2; i++)
				if ((sc->isf_rbuf[i] &
				    ((uint16_t *)bp->bio_data)[i]) !=
				    ((uint16_t *)bp->bio_data)[i]) {
					device_printf(sc->isf_dev, "write"
					    " requires erase at 0x%08jx\n",
					    bp->bio_pblkno * ss);
					error = EIO;
					break;
				}
			if (error != 0)
				break;

			error = isf_write(sc, bp->bio_pblkno * ss,
			    bp->bio_data, bp->bio_bcount);
			break;

		default:
			panic("%s: unsupported I/O operation %d", __func__,
			    bp->bio_cmd);
		}
		if (error == 0)
			biodone(bp);
		else
			biofinish(bp, NULL, error);
		ISF_UNLOCK(sc);
	}
}
Example #16
static void
ptdone(struct cam_periph *periph, union ccb *done_ccb)
{
	struct pt_softc *softc;
	struct ccb_scsiio *csio;

	softc = (struct pt_softc *)periph->softc;
	csio = &done_ccb->csio;
	switch (csio->ccb_h.ccb_state) {
	case PT_CCB_BUFFER_IO:
	case PT_CCB_BUFFER_IO_UA:
	{
		struct bio *bp;

		bp = (struct bio *)done_ccb->ccb_h.ccb_bp;
		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			int error;
			int sf;
			
			if ((csio->ccb_h.ccb_state & PT_CCB_RETRY_UA) != 0)
				sf = SF_RETRY_UA;
			else
				sf = 0;

			error = pterror(done_ccb, CAM_RETRY_SELTO, sf);
			if (error == ERESTART) {
				/*
				 * A retry was scheduled, so
				 * just return.
				 */
				return;
			}
			if (error != 0) {
				if (error == ENXIO) {
					/*
					 * Catastrophic error.  Mark our device
					 * as invalid.
					 */
					xpt_print(periph->path,
					    "Invalidating device\n");
					softc->flags |= PT_FLAG_DEVICE_INVALID;
				}

				/*
				 * return all queued I/O with EIO, so that
				 * the client can retry these I/Os in the
				 * proper order should it attempt to recover.
				 */
				bioq_flush(&softc->bio_queue, NULL, EIO);
				bp->bio_error = error;
				bp->bio_resid = bp->bio_bcount;
				bp->bio_flags |= BIO_ERROR;
			} else {
				bp->bio_resid = csio->resid;
				bp->bio_error = 0;
				if (bp->bio_resid != 0) {
					/* Short transfer ??? */
					bp->bio_flags |= BIO_ERROR;
				}
			}
			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
				cam_release_devq(done_ccb->ccb_h.path,
						 /*relsim_flags*/0,
						 /*reduction*/0,
						 /*timeout*/0,
						 /*getcount_only*/0);
		} else {
			bp->bio_resid = csio->resid;
			if (bp->bio_resid != 0)
				bp->bio_flags |= BIO_ERROR;
		}

		/*
		 * Block out any asynchronous callbacks
		 * while we touch the pending ccb list.
		 */
		LIST_REMOVE(&done_ccb->ccb_h, periph_links.le);

		biofinish(bp, softc->device_stats, 0);
		break;
	}
	case PT_CCB_WAITING:
		/* Caller will release the CCB */
		wakeup(&done_ccb->ccb_h.cbfcnp);
		return;
	}
	xpt_release_ccb(done_ccb);
}
Example #17
static void 
ad_strategy(struct bio *bp)
{
    device_t dev =  bp->bio_disk->d_drv1;
    struct ata_device *atadev = device_get_softc(dev);
    struct ata_request *request;

    if (atadev->spindown)
	callout_reset(&atadev->spindown_timer, hz * atadev->spindown,
		      ad_spindown, dev);

    if (!(request = ata_alloc_request())) {
	device_printf(dev, "FAILURE - out of memory in start\n");
	biofinish(bp, NULL, ENOMEM);
	return;
    }

    /* setup request */
    request->dev = dev;
    request->bio = bp;
    request->callback = ad_done;
    if (atadev->spindown_state) {
	device_printf(dev, "request while spun down, starting.\n");
	atadev->spindown_state = 0;
	request->timeout = MAX(ATA_REQUEST_TIMEOUT, 31);
    }
    else {
	request->timeout = ATA_REQUEST_TIMEOUT;
    }
    request->retries = 2;
    request->data = bp->bio_data;
    request->bytecount = bp->bio_bcount;
    request->u.ata.lba = bp->bio_pblkno;
    request->u.ata.count = request->bytecount / DEV_BSIZE;
    request->transfersize = min(bp->bio_bcount, atadev->max_iosize);

    switch (bp->bio_cmd) {
    case BIO_READ:
	request->flags = ATA_R_READ;
	if (atadev->mode >= ATA_DMA) {
	    request->u.ata.command = ATA_READ_DMA;
	    request->flags |= ATA_R_DMA;
	}
	else if (request->transfersize > DEV_BSIZE)
	    request->u.ata.command = ATA_READ_MUL;
	else
	    request->u.ata.command = ATA_READ;
	break;
    case BIO_WRITE:
	request->flags = ATA_R_WRITE;
	if (atadev->mode >= ATA_DMA) {
	    request->u.ata.command = ATA_WRITE_DMA;
	    request->flags |= ATA_R_DMA;
	}
	else if (request->transfersize > DEV_BSIZE)
	    request->u.ata.command = ATA_WRITE_MUL;
	else
	    request->u.ata.command = ATA_WRITE;
	break;
    case BIO_DELETE:
	request->flags = ATA_R_CONTROL;
	request->u.ata.command = ATA_CFA_ERASE;
	request->transfersize = 0;
	request->donecount = bp->bio_bcount;
	break;
    case BIO_FLUSH:
	request->u.ata.lba = 0;
	request->u.ata.count = 0;
	request->u.ata.feature = 0;
	request->bytecount = 0;
	request->transfersize = 0;
	request->flags = ATA_R_CONTROL;
	request->u.ata.command = ATA_FLUSHCACHE;
	break;
    default:
	device_printf(dev, "FAILURE - unknown BIO operation\n");
	ata_free_request(request);
	biofinish(bp, NULL, EIO);
	return;
    }
    request->flags |= ATA_R_ORDERED;
    ata_queue_request(request);
}