Example #1
static void
vndiodone(struct buf *bp)
{
	struct vndxfer *vnx = VND_BUFTOXFER(bp);
	struct vnd_softc *vnd = vnx->vx_vnd;
	struct buf *obp = bp->b_private;
	int s = splbio();

	KASSERT(&vnx->vx_buf == bp);
	KASSERT(vnd->sc_active > 0);
#ifdef DEBUG
	if (vnddebug & VDB_IO) {
		printf("vndiodone1: bp %p iodone: error %d\n",
		    bp, bp->b_error);
	}
#endif
	disk_unbusy(&vnd->sc_dkdev, bp->b_bcount - bp->b_resid,
	    (bp->b_flags & B_READ));
	vnd->sc_active--;
	if (vnd->sc_active == 0) {
		wakeup(&vnd->sc_tab);
	}
	splx(s);
	obp->b_error = bp->b_error;
	obp->b_resid = bp->b_resid;
	buf_destroy(bp);
	VND_PUTXFER(vnd, vnx);
	biodone(obp);
}
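All of the completion routines in these examples share one accounting contract: disk_busy() is charged when a transfer is handed to the hardware, and the matching disk_unbusy() reports how many bytes actually moved (b_bcount - b_resid) plus the transfer direction, right before biodone() hands the buffer back. The following is a minimal sketch of that pairing, not code taken from any driver shown here; the driver name xx, its softc layout, and xxstart()/xxdone() are hypothetical, while disk_busy(), disk_unbusy() and biodone() are the real kernel interfaces (the three-argument disk_unbusy() used by most of the examples below).

/* Hypothetical driver "xx"; only the disk_*()/biodone() calls are real API. */
#include <sys/param.h>
#include <sys/buf.h>
#include <sys/disk.h>

struct xx_softc {
	struct disk sc_dk;		/* instrumentation handle */
};

void
xxstart(struct xx_softc *sc, struct buf *bp)
{
	/* Charge the disk as busy the moment the transfer is issued. */
	disk_busy(&sc->sc_dk);
	/* ... hand bp to the controller; completion ends up in xxdone() ... */
}

void
xxdone(struct xx_softc *sc, struct buf *bp)
{
	/* Report only the bytes that actually moved, and the direction. */
	disk_unbusy(&sc->sc_dk, bp->b_bcount - bp->b_resid,
	    (bp->b_flags & B_READ));
	biodone(bp);
}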
Example #2
File: ld.c Project: MarginC/kame
void
lddone(struct ld_softc *sc, struct buf *bp)
{

	if ((bp->b_flags & B_ERROR) != 0) {
		diskerr(bp, "ld", "error", LOG_PRINTF, 0, sc->sc_dk.dk_label);
		printf("\n");
	}

	disk_unbusy(&sc->sc_dk, bp->b_bcount - bp->b_resid);
#if NRND > 0
	rnd_add_uint32(&sc->sc_rnd_source, bp->b_rawblkno);
#endif
	biodone(bp);

	if (--sc->sc_queuecnt <= sc->sc_maxqueuecnt) {
		if ((sc->sc_flags & LDF_DRAIN) != 0)
			wakeup(&sc->sc_queuecnt);
		while ((bp = BUFQ_FIRST(&sc->sc_bufq)) != NULL) {
			BUFQ_REMOVE(&sc->sc_bufq, bp);
			if (!ldstart(sc, bp))
				break;
		}
	}
}
Example #3
void
rriodone(device_t usc, struct buf *bp)
{
	struct ra_softc *ra = device_private(usc);

	disk_unbusy(&ra->ra_disk, bp->b_bcount, (bp->b_flags & B_READ));

	biodone(bp);
}
Example #4
void
cddone(struct scsi_xfer *xs)
{
	struct cd_softc *cd = xs->sc_link->device_softc;

	if (xs->bp != NULL)
		disk_unbusy(&cd->sc_dk, xs->bp->b_bcount - xs->bp->b_resid,
		    (xs->bp->b_flags & B_READ));
}
Example #5
void
wddone(void *v)
{
	struct wd_softc *wd = v;
	struct buf *bp = wd->sc_bp;
	char buf[256], *errbuf = buf;
	WDCDEBUG_PRINT(("wddone %s\n", wd->sc_dev.dv_xname),
	    DEBUG_XFERS);

	bp->b_resid = wd->sc_wdc_bio.bcount;
	errbuf[0] = '\0';
	switch (wd->sc_wdc_bio.error) {
	case ERR_NODEV:
		bp->b_flags |= B_ERROR;
		bp->b_error = ENXIO;
		break;
	case ERR_DMA:
		errbuf = "DMA error";
		goto retry;
	case ERR_DF:
		errbuf = "device fault";
		goto retry;
	case TIMEOUT:
		errbuf = "device timeout";
		goto retry;
	case ERROR:
		/* Don't care about media change bits */
		if (wd->sc_wdc_bio.r_error != 0 &&
		    (wd->sc_wdc_bio.r_error & ~(WDCE_MC | WDCE_MCR)) == 0)
			goto noerror;
		ata_perror(wd->drvp, wd->sc_wdc_bio.r_error, errbuf,
		    sizeof buf);
retry:
		/* Just reset and retry. Can we do more ? */
		wdc_reset_channel(wd->drvp, 0);
		diskerr(bp, "wd", errbuf, LOG_PRINTF,
		    wd->sc_wdc_bio.blkdone, wd->sc_dk.dk_label);
		if (wd->retries++ < WDIORETRIES) {
			printf(", retrying\n");
			timeout_add(&wd->sc_restart_timeout, RECOVERYTIME);
			return;
		}
		printf("\n");
		bp->b_flags |= B_ERROR;
		bp->b_error = EIO;
		break;
	case NOERROR:
noerror:	if ((wd->sc_wdc_bio.flags & ATA_CORR) || wd->retries > 0)
			printf("%s: soft error (corrected)\n",
			    wd->sc_dev.dv_xname);
	}
	disk_unbusy(&wd->sc_dk, (bp->b_bcount - bp->b_resid),
	    (bp->b_flags & B_READ));
	biodone(bp);
	wd->openings++;
	wdstart(wd);
}
Example #6
File: xbd.c Project: MarginC/kame
static void
xbdresume(void)
{
	struct xbdreq *pxr, *xr;
	struct xbd_softc *xs;
	struct buf *bp;

	while ((pxr = SIMPLEQ_FIRST(&xbdr_suspended)) != NULL) {
		DPRINTF(XBDB_IO, ("xbdstart: resuming xbdreq %p for bp %p\n",
		    pxr, pxr->xr_bp));
		bp = pxr->xr_bp;
		xs = getxbd_softc(bp->b_dev);
		if (xs == NULL || xs->sc_shutdown) {
			bp->b_flags |= B_ERROR;
			bp->b_error = EIO;
		}
		if (bp->b_flags & B_ERROR) {
			pxr->xr_bdone -= pxr->xr_bqueue;
			pxr->xr_bqueue = 0;
			if (pxr->xr_bdone == 0) {
				bp->b_resid = bp->b_bcount;
				if (pxr->xr_aligned)
					unmap_align(pxr);
				PUT_XBDREQ(pxr);
				if (xs)
				{
					disk_unbusy(&xs->sc_dksc.sc_dkdev,
					    (bp->b_bcount - bp->b_resid),
					    (bp->b_flags & B_READ));
#if NRND > 0
					rnd_add_uint32(&xs->rnd_source,
					    bp->b_blkno);
#endif
				}
				biodone(bp);
			}
			continue;
		}
		while (__predict_true(pxr->xr_bqueue > 0)) {
			GET_XBDREQ(xr);
			if (__predict_false(xr == NULL))
				goto out;
			xr->xr_parent = pxr;
			fill_ring(xr);
		}
		DPRINTF(XBDB_IO, ("xbdstart: resumed xbdreq %p for bp %p\n",
		    pxr, bp));
		SIMPLEQ_REMOVE_HEAD(&xbdr_suspended, xr_suspended);
	}

 out:
	return;
}
Example #7
void
sd_buf_done(struct scsi_xfer *xs)
{
    struct sd_softc *sc = xs->sc_link->device_softc;
    struct buf *bp = xs->cookie;

    splassert(IPL_BIO);

    disk_unbusy(&sc->sc_dk, bp->b_bcount - xs->resid,
                bp->b_flags & B_READ);

    switch (xs->error) {
    case XS_NOERROR:
        bp->b_error = 0;
        bp->b_resid = xs->resid;
        break;

    case XS_NO_CCB:
        /* The adapter is busy, requeue the buf and try it later. */
        sd_buf_requeue(sc, bp);
        scsi_xs_put(xs);
        SET(sc->flags, SDF_WAITING); /* break out of sdstart loop */
        timeout_add(&sc->sc_timeout, 1);
        return;

    case XS_SENSE:
    case XS_SHORTSENSE:
        if (scsi_interpret_sense(xs) != ERESTART)
            xs->retries = 0;

    /* FALLTHROUGH */
    case XS_BUSY:
    case XS_TIMEOUT:
        if (xs->retries--) {
            scsi_xs_exec(xs);
            return;
        }

    /* FALLTHROUGH */
    default:
        bp->b_error = EIO;
        bp->b_flags |= B_ERROR;
        bp->b_resid = bp->b_bcount;
        break;
    }

    biodone(bp);
    scsi_xs_put(xs);
    sdstart(sc); /* restart io */
}
Example #8
void
ofdisk_strategy(struct buf *bp)
{
	struct ofdisk_softc *of =
		device_lookup_private(&ofdisk_cd, DISKUNIT(bp->b_dev));
	struct partition *p;
	u_quad_t off;
	int read;
	int (*OF_io)(int, void *, int);
	daddr_t blkno = bp->b_blkno;

	bp->b_resid = 0;
	if (bp->b_bcount == 0)
		goto done;

	OF_io = bp->b_flags & B_READ ? OF_read : 
		(int(*)(int, void*, int))OF_write;

	if (DISKPART(bp->b_dev) != RAW_PART) {
		if (bounds_check_with_label(&of->sc_dk, bp, 0) <= 0) {
			bp->b_resid = bp->b_bcount;
			goto done;
		}
		p = &of->sc_dk.dk_label->d_partitions[DISKPART(bp->b_dev)];
		blkno = bp->b_blkno + p->p_offset;
	}

	disk_busy(&of->sc_dk);

	off = (u_quad_t)blkno * DEV_BSIZE;
	read = -1;
	do {
		if (OF_seek(of->sc_ihandle, off) < 0)
			break;
		read = OF_io(of->sc_ihandle, bp->b_data, bp->b_bcount);
	} while (read == -2);

	if (read < 0) {
		bp->b_error = EIO;
		bp->b_resid = bp->b_bcount;
	} else
		bp->b_resid = bp->b_bcount - read;

	disk_unbusy(&of->sc_dk, bp->b_bcount - bp->b_resid,
	    (bp->b_flags & B_READ));

done:
	biodone(bp);
}
Example #9
void
ccdintr(struct ccd_softc *cs, struct buf *bp)
{

	splassert(IPL_BIO);

	CCD_DPRINTF(CCDB_FOLLOW, ("ccdintr(%p, %p)\n", cs, bp));

	/*
	 * Request is done for better or worse, wakeup the top half.
	 */
	if (bp->b_flags & B_ERROR)
		bp->b_resid = bp->b_bcount;
	disk_unbusy(&cs->sc_dkdev, (bp->b_bcount - bp->b_resid),
	    (bp->b_flags & B_READ));
	biodone(bp);
}
Example #10
void
wdrestart(void *v)
{
	struct wd_softc *wd = v;
	struct buf *bp = wd->sc_bp;
	struct channel_softc *chnl;
	int s;
	WDCDEBUG_PRINT(("wdrestart %s\n", wd->sc_dev.dv_xname),
	    DEBUG_XFERS);

	chnl = (struct channel_softc *)(wd->drvp->chnl_softc);
	if (chnl->dying)
		return;

	s = splbio();
	disk_unbusy(&wd->sc_dk, 0, (bp->b_flags & B_READ));
	__wdstart(v, bp);
	splx(s);
}
Example #11
static void
dk_done1(struct dk_softc *dksc, struct buf *bp, bool lock)
{
	struct disk *dk = &dksc->sc_dkdev;

	if (bp->b_error != 0) {
		struct cfdriver *cd = device_cfdriver(dksc->sc_dev);

		diskerr(bp, cd->cd_name, "error", LOG_PRINTF, 0,
			dk->dk_label);
		printf("\n");
	}

	if (lock)
		mutex_enter(&dksc->sc_iolock);
	disk_unbusy(dk, bp->b_bcount - bp->b_resid, (bp->b_flags & B_READ));
	if (lock)
		mutex_exit(&dksc->sc_iolock);

	rnd_add_uint32(&dksc->sc_rnd_source, bp->b_rawblkno);

	biodone(bp);
}
Example #12
/*
 * cdstart looks to see if there is a buf waiting for the device
 * and that the device is not already busy. If both are true,
 * it dequeues the buf and creates a scsi command to perform the
 * transfer in the buf. The transfer request will call scsi_done
 * on completion, which will in turn call this routine again
 * so that the next queued transfer is performed.
 * The bufs are queued by the strategy routine (cdstrategy)
 *
 * This routine is also called after other non-queued requests
 * have been made of the scsi driver, to ensure that the queue
 * continues to be drained.
 *
 * must be called at the correct (highish) spl level
 * cdstart() is called at splbio from cdstrategy, cdrestart and scsi_done
 */
void
cdstart(void *v)
{
	struct cd_softc *cd = v;
	struct scsi_link *sc_link = cd->sc_link;
	struct buf *bp = 0;
	struct buf *dp;
	struct scsi_rw_big cmd_big;
	struct scsi_rw cmd_small;
	struct scsi_generic *cmdp;
	int blkno, nblks, cmdlen, error;
	struct partition *p;

	splassert(IPL_BIO);

	SC_DEBUG(sc_link, SDEV_DB2, ("cdstart\n"));
	/*
	 * Check if the device has room for another command
	 */
	while (sc_link->openings > 0) {
		/*
		 * there is excess capacity, but a special waits
		 * It'll need the adapter as soon as we clear out of the
		 * way and let it run (user level wait).
		 */
		if (sc_link->flags & SDEV_WAITING) {
			sc_link->flags &= ~SDEV_WAITING;
			wakeup((caddr_t)sc_link);
			return;
		}

		/*
		 * See if there is a buf with work for us to do..
		 */
		dp = &cd->buf_queue;
		if ((bp = dp->b_actf) == NULL)	/* yes, an assign */
			return;
		dp->b_actf = bp->b_actf;

		/*
		 * If the device has become invalid, abort all the
		 * reads and writes until all files have been closed and
		 * re-opened
		 */
		if ((sc_link->flags & SDEV_MEDIA_LOADED) == 0) {
			bp->b_error = EIO;
			bp->b_flags |= B_ERROR;
			bp->b_resid = bp->b_bcount;
			biodone(bp);
			continue;
		}

		/*
		 * We have a buf, now we should make a command
		 *
		 * First, translate the block to absolute and put it in terms
		 * of the logical blocksize of the device.
		 */
		blkno =
		    bp->b_blkno / (cd->sc_dk.dk_label->d_secsize / DEV_BSIZE);
		p = &cd->sc_dk.dk_label->d_partitions[DISKPART(bp->b_dev)];
		blkno += DL_GETPOFFSET(p);
		nblks = howmany(bp->b_bcount, cd->sc_dk.dk_label->d_secsize);

		/*
		 *  Fill out the scsi command.  If the transfer will
		 *  fit in a "small" cdb, use it.
		 */
		if (!(sc_link->flags & SDEV_ATAPI) &&
		    !(sc_link->quirks & SDEV_ONLYBIG) && 
		    ((blkno & 0x1fffff) == blkno) &&
		    ((nblks & 0xff) == nblks)) {
			/*
			 * We can fit in a small cdb.
			 */
			bzero(&cmd_small, sizeof(cmd_small));
			cmd_small.opcode = (bp->b_flags & B_READ) ?
			    READ_COMMAND : WRITE_COMMAND;
			_lto3b(blkno, cmd_small.addr);
			cmd_small.length = nblks & 0xff;
			cmdlen = sizeof(cmd_small);
			cmdp = (struct scsi_generic *)&cmd_small;
		} else {
			/*
			 * Need a large cdb.
			 */
			bzero(&cmd_big, sizeof(cmd_big));
			cmd_big.opcode = (bp->b_flags & B_READ) ?
			    READ_BIG : WRITE_BIG;
			_lto4b(blkno, cmd_big.addr);
			_lto2b(nblks, cmd_big.length);
			cmdlen = sizeof(cmd_big);
			cmdp = (struct scsi_generic *)&cmd_big;
		}

		/* Instrumentation. */
		disk_busy(&cd->sc_dk);

		/*
		 * Call the routine that chats with the adapter.
		 * Note: we cannot sleep as we may be an interrupt
		 */
		error = scsi_scsi_cmd(sc_link, cmdp, cmdlen,
		    (u_char *) bp->b_data, bp->b_bcount, SCSI_RETRIES, 30000,
		    bp, SCSI_NOSLEEP | ((bp->b_flags & B_READ) ? SCSI_DATA_IN :
		    SCSI_DATA_OUT));
		switch (error) {
		case 0:
			timeout_del(&cd->sc_timeout);
			break;
		case EAGAIN:
			/*
			 * The device can't start another i/o. Try again later.
			 */
			dp->b_actf = bp;
			disk_unbusy(&cd->sc_dk, 0, 0);
			timeout_add(&cd->sc_timeout, 1);
			return;
		default:
			disk_unbusy(&cd->sc_dk, 0, 0);
			printf("%s: not queued, error %d\n",
			    cd->sc_dev.dv_xname, error);
			break;
		}
	}
}
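Note how cdstart() keeps its instrumentation balanced when a command cannot be queued: both the EAGAIN and the default branches call disk_unbusy(&cd->sc_dk, 0, 0) to cancel the disk_busy() charged just before scsi_scsi_cmd(). A hedged sketch of that start-side pattern follows; qq_softc, qq_issue() and sc_timeout are made up for illustration, and a real driver would also re-queue the buffer on the EAGAIN path as cdstart() does.

/* Sketch only: qq_softc, qq_issue() and sc_timeout are hypothetical. */
void
qqstart(struct qq_softc *sc, struct buf *bp)
{
	int error;

	disk_busy(&sc->sc_dk);
	error = qq_issue(sc, bp);	/* hand the request to the adapter */
	switch (error) {
	case 0:
		break;			/* completion path runs disk_unbusy() */
	case EAGAIN:
		/* Adapter full: undo the busy charge, re-queue bp, retry. */
		disk_unbusy(&sc->sc_dk, 0, 0);
		timeout_add(&sc->sc_timeout, 1);
		break;
	default:
		/* Not queued at all: undo the charge and fail the buffer. */
		disk_unbusy(&sc->sc_dk, 0, 0);
		bp->b_error = error;
		bp->b_flags |= B_ERROR;
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		break;
	}
}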
Example #13
void
dk_start(struct dk_softc *dksc, struct buf *bp)
{
	const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
	int error;

	if (!(dksc->sc_flags & DKF_INITED)) {
		DPRINTF_FOLLOW(("%s: not inited\n", __func__));
		return;
	}

	mutex_enter(&dksc->sc_iolock);

	if (bp != NULL)
		bufq_put(dksc->sc_bufq, bp);

	if (dksc->sc_busy)
		goto done;
	dksc->sc_busy = true;

	/*
	 * Peeking at the buffer queue and committing the operation
	 * only after success isn't atomic.
	 *
	 * So when a diskstart fails, the buffer is saved
	 * and tried again before the next buffer is fetched.
	 * dk_drain() handles flushing of a saved buffer.
	 *
	 * This keeps order of I/O operations, unlike bufq_put.
	 */

	bp = dksc->sc_deferred;
	dksc->sc_deferred = NULL;

	if (bp == NULL)
		bp = bufq_get(dksc->sc_bufq);

	while (bp != NULL) {

		disk_busy(&dksc->sc_dkdev);
		mutex_exit(&dksc->sc_iolock);
		error = dkd->d_diskstart(dksc->sc_dev, bp);
		mutex_enter(&dksc->sc_iolock);
		if (error == EAGAIN) {
			dksc->sc_deferred = bp;
			disk_unbusy(&dksc->sc_dkdev, 0, (bp->b_flags & B_READ));
			break;
		}

		if (error != 0) {
			bp->b_error = error;
			bp->b_resid = bp->b_bcount;
			dk_done1(dksc, bp, false);
		}

		bp = bufq_get(dksc->sc_bufq);
	}

	dksc->sc_busy = false;
done:
	mutex_exit(&dksc->sc_iolock);
}
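The comment inside dk_start() points at an ordering subtlety: pushing a rejected buffer back with bufq_put() would let later requests overtake it, so the buffer that failed with EAGAIN is parked in sc_deferred and retried before anything else is fetched. A reduced sketch of just that retry shape, with hypothetical vv_* names (bufq_get() is the real NetBSD interface):

/* Sketch: vv_softc and vv_submit() are hypothetical; bufq_get() is real. */
static void
vv_drain(struct vv_softc *sc)
{
	struct buf *bp;

	/* Retry a previously rejected buffer first to preserve I/O order. */
	bp = sc->sc_deferred;
	sc->sc_deferred = NULL;
	if (bp == NULL)
		bp = bufq_get(sc->sc_bufq);

	while (bp != NULL) {
		if (vv_submit(sc, bp) == EAGAIN) {
			sc->sc_deferred = bp;	/* park it; do not reorder */
			break;
		}
		bp = bufq_get(sc->sc_bufq);
	}
}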
Example #14
void
sd_buf_done(struct scsi_xfer *xs)
{
	struct sd_softc *sc = xs->sc_link->device_softc;
	struct buf *bp = xs->cookie;
	int error, s;

	switch (xs->error) {
	case XS_NOERROR:
		bp->b_error = 0;
		bp->b_resid = xs->resid;
		break;

	case XS_NO_CCB:
		/* The adapter is busy, requeue the buf and try it later. */
		disk_unbusy(&sc->sc_dk, bp->b_bcount - xs->resid,
		    bp->b_flags & B_READ);
		bufq_requeue(&sc->sc_bufq, bp);
		scsi_xs_put(xs);
		SET(sc->flags, SDF_WAITING);
		timeout_add(&sc->sc_timeout, 1);
		return;

	case XS_SENSE:
	case XS_SHORTSENSE:
#ifdef SCSIDEBUG
		scsi_sense_print_debug(xs);
#endif
		error = sd_interpret_sense(xs);
		if (error == 0) {
			bp->b_error = 0;
			bp->b_resid = xs->resid;
			break;
		}
		if (error != ERESTART) {
			bp->b_error = error;
			xs->retries = 0;
		}
		goto retry;

	case XS_BUSY:
		if (xs->retries) {
			if (scsi_delay(xs, 1) != ERESTART)
				xs->retries = 0;
		}
		goto retry;

	case XS_TIMEOUT:
retry:
		if (xs->retries--) {
			scsi_xs_exec(xs);
			return;
		}
		/* FALLTHROUGH */

	default:
		if (bp->b_error == 0)
			bp->b_error = EIO;
		bp->b_flags |= B_ERROR;
		bp->b_resid = bp->b_bcount;
		break;
	}

	disk_unbusy(&sc->sc_dk, bp->b_bcount - xs->resid,
	    bp->b_flags & B_READ);

	s = splbio();
	biodone(bp);
	splx(s);
	scsi_xs_put(xs);
}
Example #15
static int
fdcintr1(struct fdc_softc *fdc)
{
#define	st0	fdc->sc_status[0]
#define	cyl	fdc->sc_status[1]
	struct fd_softc *fd;
	struct buf *bp;
	bus_space_tag_t iot = fdc->sc_iot;
	bus_space_handle_t ioh = fdc->sc_ioh;
	int read, head, sec, i, nblks;
	struct fd_type *type;
	struct ne7_fd_formb *finfo = NULL;

	KASSERT(mutex_owned(&fdc->sc_mtx));
	if (fdc->sc_state == PROBING) {
#ifdef DEBUG
		printf("fdcintr: got probe interrupt\n");
#endif
		fdc->sc_probe++;
		goto out;
	}

loop:
	/* Is there a drive for the controller to do a transfer with? */
	fd = TAILQ_FIRST(&fdc->sc_drives);
	if (fd == NULL) {
		fdc->sc_state = DEVIDLE;
		goto out;
	}

	/* Is there a transfer to this drive?  If not, deactivate drive. */
	bp = BUFQ_PEEK(fd->sc_q);
	if (bp == NULL) {
		fd->sc_ops = 0;
		TAILQ_REMOVE(&fdc->sc_drives, fd, sc_drivechain);
		fd->sc_active = 0;
		goto loop;
	}

	if (bp->b_flags & B_FORMAT)
		finfo = (struct ne7_fd_formb *)bp->b_data;

	switch (fdc->sc_state) {
	case DEVIDLE:
		fdc->sc_errors = 0;
		fd->sc_skip = 0;
		fd->sc_bcount = bp->b_bcount;
		fd->sc_blkno = bp->b_blkno / (FDC_BSIZE / DEV_BSIZE);
		callout_stop(&fd->sc_motoroff_ch);
		if ((fd->sc_flags & FD_MOTOR_WAIT) != 0) {
			fdc->sc_state = MOTORWAIT;
			return 1;
		}
		if ((fd->sc_flags & FD_MOTOR) == 0) {
			/* Turn on the motor, being careful about pairing. */
			struct fd_softc *ofd = fdc->sc_fd[fd->sc_drive ^ 1];
			if (ofd && ofd->sc_flags & FD_MOTOR) {
				callout_stop(&ofd->sc_motoroff_ch);
				ofd->sc_flags &= ~(FD_MOTOR | FD_MOTOR_WAIT);
			}
			fd->sc_flags |= FD_MOTOR | FD_MOTOR_WAIT;
			fd_set_motor(fdc, 0);
			fdc->sc_state = MOTORWAIT;
			/* Allow .25s for motor to stabilize. */
			callout_reset(&fd->sc_motoron_ch, hz / 4,
			    fd_motor_on, fd);
			return 1;
		}
		/* Make sure the right drive is selected. */
		fd_set_motor(fdc, 0);

		/* fall through */
	case DOSEEK:
	doseek:
		if (fd->sc_cylin == bp->b_cylinder)
			goto doio;

		out_fdc(iot, ioh, NE7CMD_SPECIFY);/* specify command */
		out_fdc(iot, ioh, fd->sc_type->steprate);
		out_fdc(iot, ioh, 6);		/* XXX head load time == 6ms */

		out_fdc(iot, ioh, NE7CMD_SEEK);	/* seek function */
		out_fdc(iot, ioh, fd->sc_drive); /* drive number */
		out_fdc(iot, ioh, bp->b_cylinder * fd->sc_type->step);

		fd->sc_cylin = -1;
		fdc->sc_state = SEEKWAIT;

		iostat_seek(fd->sc_dk.dk_stats);
		disk_busy(&fd->sc_dk);

		callout_reset(&fdc->sc_timo_ch, 4 * hz, fdctimeout, fdc);
		return 1;

	case DOIO:
	doio:
		type = fd->sc_type;
		if (finfo)
			fd->sc_skip = (char *)&(finfo->fd_formb_cylno(0)) -
				      (char *)finfo;
		sec = fd->sc_blkno % type->seccyl;
		nblks = type->seccyl - sec;
		nblks = min(nblks, fd->sc_bcount / FDC_BSIZE);
		nblks = min(nblks, fdc->sc_maxiosize / FDC_BSIZE);
		fd->sc_nblks = nblks;
		fd->sc_nbytes = finfo ? bp->b_bcount : nblks * FDC_BSIZE;
		head = sec / type->sectrac;
		sec -= head * type->sectrac;
#ifdef DIAGNOSTIC
		{
			int block;
			block = (fd->sc_cylin * type->heads + head)
			    * type->sectrac + sec;
			if (block != fd->sc_blkno) {
				printf("fdcintr: block %d != blkno "
				    "%" PRId64 "\n", block, fd->sc_blkno);
#ifdef DDB
				 Debugger();
#endif
			}
		}
#endif
		read = bp->b_flags & B_READ ? DMAMODE_READ : DMAMODE_WRITE;
		isa_dmastart(fdc->sc_ic, fdc->sc_drq,
		    (char *)bp->b_data + fd->sc_skip, fd->sc_nbytes,
		    NULL, read | DMAMODE_DEMAND, BUS_DMA_NOWAIT);
		bus_space_write_1(iot, fdc->sc_fdctlioh, 0, type->rate);
#ifdef FD_DEBUG
		printf("fdcintr: %s drive %d track %d head %d sec %d nblks %d\n",
			read ? "read" : "write", fd->sc_drive, fd->sc_cylin,
			head, sec, nblks);
#endif
		if (finfo) {
			/* formatting */
			if (out_fdc(iot, ioh, NE7CMD_FORMAT) < 0) {
				fdc->sc_errors = 4;
				fdcretry(fdc);
				goto loop;
			}
			out_fdc(iot, ioh, (head << 2) | fd->sc_drive);
			out_fdc(iot, ioh, finfo->fd_formb_secshift);
			out_fdc(iot, ioh, finfo->fd_formb_nsecs);
			out_fdc(iot, ioh, finfo->fd_formb_gaplen);
			out_fdc(iot, ioh, finfo->fd_formb_fillbyte);
		} else {
			if (read)
				out_fdc(iot, ioh, NE7CMD_READ);	/* READ */
			else
				out_fdc(iot, ioh, NE7CMD_WRITE); /* WRITE */
			out_fdc(iot, ioh, (head << 2) | fd->sc_drive);
			out_fdc(iot, ioh, fd->sc_cylin); /* track */
			out_fdc(iot, ioh, head);
			out_fdc(iot, ioh, sec + 1);	 /* sector +1 */
			out_fdc(iot, ioh, type->secsize);/* sector size */
			out_fdc(iot, ioh, type->sectrac);/* sectors/track */
			out_fdc(iot, ioh, type->gap1);	 /* gap1 size */
			out_fdc(iot, ioh, type->datalen);/* data length */
		}
		fdc->sc_state = IOCOMPLETE;

		disk_busy(&fd->sc_dk);

		/* allow 2 seconds for operation */
		callout_reset(&fdc->sc_timo_ch, 2 * hz, fdctimeout, fdc);
		return 1;				/* will return later */

	case SEEKWAIT:
		callout_stop(&fdc->sc_timo_ch);
		fdc->sc_state = SEEKCOMPLETE;
		/* allow 1/50 second for heads to settle */
		callout_reset(&fdc->sc_intr_ch, hz / 50, fdcintrcb, fdc);
		return 1;

	case SEEKCOMPLETE:
		/* no data on seek */
		disk_unbusy(&fd->sc_dk, 0, 0);

		/* Make sure seek really happened. */
		out_fdc(iot, ioh, NE7CMD_SENSEI);
		if (fdcresult(fdc) != 2 || (st0 & 0xf8) != 0x20 ||
		    cyl != bp->b_cylinder * fd->sc_type->step) {
#ifdef FD_DEBUG
			fdcstatus(fd->sc_dev, 2, "seek failed");
#endif
			fdcretry(fdc);
			goto loop;
		}
		fd->sc_cylin = bp->b_cylinder;
		goto doio;

	case IOTIMEDOUT:
		isa_dmaabort(fdc->sc_ic, fdc->sc_drq);
	case SEEKTIMEDOUT:
	case RECALTIMEDOUT:
	case RESETTIMEDOUT:
		fdcretry(fdc);
		goto loop;

	case IOCOMPLETE: /* IO DONE, post-analyze */
		callout_stop(&fdc->sc_timo_ch);

		disk_unbusy(&fd->sc_dk, (bp->b_bcount - bp->b_resid),
		    (bp->b_flags & B_READ));

		if (fdcresult(fdc) != 7 || (st0 & 0xf8) != 0) {
			isa_dmaabort(fdc->sc_ic, fdc->sc_drq);
#ifdef FD_DEBUG
			fdcstatus(fd->sc_dev, 7, bp->b_flags & B_READ ?
			    "read failed" : "write failed");
			printf("blkno %llu nblks %d\n",
			    (unsigned long long)fd->sc_blkno, fd->sc_nblks);
#endif
			fdcretry(fdc);
			goto loop;
		}
		isa_dmadone(fdc->sc_ic, fdc->sc_drq);
		if (fdc->sc_errors) {
			diskerr(bp, "fd", "soft error (corrected)", LOG_PRINTF,
			    fd->sc_skip / FDC_BSIZE, NULL);
			printf("\n");
			fdc->sc_errors = 0;
		}
		fd->sc_blkno += fd->sc_nblks;
		fd->sc_skip += fd->sc_nbytes;
		fd->sc_bcount -= fd->sc_nbytes;
		if (!finfo && fd->sc_bcount > 0) {
			bp->b_cylinder = fd->sc_blkno / fd->sc_type->seccyl;
			goto doseek;
		}
		fdfinish(fd, bp);
		goto loop;

	case DORESET:
		/* try a reset, keep motor on */
		fd_set_motor(fdc, 1);
		delay(100);
		fd_set_motor(fdc, 0);
		fdc->sc_state = RESETCOMPLETE;
		callout_reset(&fdc->sc_timo_ch, hz / 2, fdctimeout, fdc);
		return 1;			/* will return later */

	case RESETCOMPLETE:
		callout_stop(&fdc->sc_timo_ch);
		/* clear the controller output buffer */
		for (i = 0; i < 4; i++) {
			out_fdc(iot, ioh, NE7CMD_SENSEI);
			(void) fdcresult(fdc);
		}

		/* fall through */
	case DORECAL:
		out_fdc(iot, ioh, NE7CMD_RECAL); /* recalibrate function */
		out_fdc(iot, ioh, fd->sc_drive);
		fdc->sc_state = RECALWAIT;
		callout_reset(&fdc->sc_timo_ch, 5 * hz, fdctimeout, fdc);
		return 1;			/* will return later */

	case RECALWAIT:
		callout_stop(&fdc->sc_timo_ch);
		fdc->sc_state = RECALCOMPLETE;
		/* allow 1/30 second for heads to settle */
		callout_reset(&fdc->sc_intr_ch, hz / 30, fdcintrcb, fdc);
		return 1;			/* will return later */

	case RECALCOMPLETE:
		out_fdc(iot, ioh, NE7CMD_SENSEI);
		if (fdcresult(fdc) != 2 || (st0 & 0xf8) != 0x20 || cyl != 0) {
#ifdef FD_DEBUG
			fdcstatus(fd->sc_dev, 2, "recalibrate failed");
#endif
			fdcretry(fdc);
			goto loop;
		}
		fd->sc_cylin = 0;
		goto doseek;

	case MOTORWAIT:
		if (fd->sc_flags & FD_MOTOR_WAIT)
			return 1;		/* time's not up yet */
		goto doseek;

	default:
		fdcstatus(fd->sc_dev, 0, "stray interrupt");
		return 1;
	}
#undef	st0
#undef	cyl

out:
	cv_signal(&fdc->sc_cv);
	return 1;
}
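The floppy state machine also shows how seeks are accounted: disk_busy() is charged when the SEEK command is issued, the seek itself is counted with iostat_seek() (dk_seek++ in the older OpenBSD variant in the next example), and SEEKCOMPLETE calls disk_unbusy(&fd->sc_dk, 0, 0) because a seek moves no data. A stripped-down sketch of that accounting, with hypothetical yy_* names:

/* Sketch: yy_softc and the two helpers are hypothetical. */
void
yy_seek_start(struct yy_softc *sc, int cyl)
{
	iostat_seek(sc->sc_dk.dk_stats);	/* count the seek itself */
	disk_busy(&sc->sc_dk);			/* time the disk as busy */
	/* ... program the controller to seek to cylinder cyl ... */
}

void
yy_seek_complete(struct yy_softc *sc)
{
	/* A seek transfers no data, so zero bytes are accounted. */
	disk_unbusy(&sc->sc_dk, 0, 0);
}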
Example #16
/*
 * Called from the controller.
 */
int
fdintr(struct fdc_softc *fdc)
{
#define	st0	fdc->sc_status[0]
#define	cyl	fdc->sc_status[1]
	struct fd_softc *fd;
	struct buf *bp;
	bus_space_tag_t iot = fdc->sc_iot;
	bus_space_handle_t ioh = fdc->sc_ioh;
	bus_space_handle_t ioh_ctl = fdc->sc_ioh_ctl;
	int read, head, sec, i, nblks, cylin;
	struct fd_type *type;
	struct fd_formb *finfo = NULL;
	int fd_bsize;

loop:
	/* Is there a transfer to this drive?  If not, deactivate drive. */
	fd = TAILQ_FIRST(&fdc->sc_link.fdlink.sc_drives);
	if (fd == NULL) {
		fdc->sc_state = DEVIDLE;
		return 1;
	}
	fd_bsize = FD_BSIZE(fd);

	bp = fd->sc_bp;
	if (bp == NULL) {
		fd->sc_ops = 0;
		TAILQ_REMOVE(&fdc->sc_link.fdlink.sc_drives, fd, sc_drivechain);
		goto loop;
	}

	if (bp->b_flags & B_FORMAT)
	    finfo = (struct fd_formb *)bp->b_data;

	cylin = ((bp->b_blkno * DEV_BSIZE) + (bp->b_bcount - bp->b_resid)) /
	    (fd_bsize * fd->sc_type->seccyl);

	switch (fdc->sc_state) {
	case DEVIDLE:
		fdc->sc_errors = 0;
		fd->sc_skip = 0;
		fd->sc_bcount = bp->b_bcount;
		fd->sc_blkno = bp->b_blkno / (fd_bsize / DEV_BSIZE);
		timeout_del(&fd->fd_motor_off_to);
		if ((fd->sc_flags & FD_MOTOR_WAIT) != 0) {
			fdc->sc_state = MOTORWAIT;
			return 1;
		}
		if ((fd->sc_flags & FD_MOTOR) == 0) {
			/* Turn on the motor, being careful about pairing. */
			struct fd_softc *ofd =
				fdc->sc_link.fdlink.sc_fd[fd->sc_drive ^ 1];
			if (ofd && ofd->sc_flags & FD_MOTOR) {
				timeout_del(&ofd->fd_motor_off_to);
				ofd->sc_flags &= ~(FD_MOTOR | FD_MOTOR_WAIT);
			}
			fd->sc_flags |= FD_MOTOR | FD_MOTOR_WAIT;
			fd_set_motor(fdc, 0);
			fdc->sc_state = MOTORWAIT;
			/* Allow .25s for motor to stabilize. */
			timeout_add_msec(&fd->fd_motor_on_to, 250);
			return 1;
		}
		/* Make sure the right drive is selected. */
		fd_set_motor(fdc, 0);

		/* FALLTHROUGH */
	case DOSEEK:
	doseek:
		if (fd->sc_cylin == cylin)
			goto doio;

		out_fdc(iot, ioh, NE7CMD_SPECIFY);/* specify command */
		out_fdc(iot, ioh, fd->sc_type->steprate);
		out_fdc(iot, ioh, 6);		/* XXX head load time == 6ms */

		out_fdc(iot, ioh, NE7CMD_SEEK);	/* seek function */
		out_fdc(iot, ioh, fd->sc_drive);	/* drive number */
		out_fdc(iot, ioh, cylin * fd->sc_type->step);

		fd->sc_cylin = -1;
		fdc->sc_state = SEEKWAIT;

		fd->sc_dk.dk_seek++;
		disk_busy(&fd->sc_dk);

		timeout_add_sec(&fd->fdtimeout_to, 4);
		return 1;

	case DOIO:
	doio:
		type = fd->sc_type;
		if (finfo)
		    fd->sc_skip = (char *)&(finfo->fd_formb_cylno(0)) -
			(char *)finfo;
		sec = fd->sc_blkno % type->seccyl;
		nblks = type->seccyl - sec;
		nblks = min(nblks, fd->sc_bcount / fd_bsize);
		nblks = min(nblks, FDC_MAXIOSIZE / fd_bsize);
		fd->sc_nblks = nblks;
		fd->sc_nbytes = finfo ? bp->b_bcount : nblks * fd_bsize;
		head = sec / type->sectrac;
		sec -= head * type->sectrac;
#ifdef DIAGNOSTIC
		{int block;
		 block = (fd->sc_cylin * type->heads + head) * type->sectrac + sec;
		 if (block != fd->sc_blkno) {
			 panic("fdintr: block %d != blkno %llu", block, fd->sc_blkno);
		 }}
#endif
		read = bp->b_flags & B_READ ? DMAMODE_READ : DMAMODE_WRITE;
		isadma_start(bp->b_data + fd->sc_skip, fd->sc_nbytes,
		    fdc->sc_drq, read);
		bus_space_write_1(iot, ioh_ctl, fdctl, type->rate);
#ifdef FD_DEBUG
		printf("fdintr: %s drive %d track %d head %d sec %d nblks %d\n",
		    read ? "read" : "write", fd->sc_drive, fd->sc_cylin, head,
		    sec, nblks);
#endif
		if (finfo) {
                        /* formatting */
			if (out_fdc(iot, ioh, NE7CMD_FORMAT) < 0) {
			    fdc->sc_errors = 4;
			    fdretry(fd);
			    goto loop;
			}
                        out_fdc(iot, ioh, (head << 2) | fd->sc_drive);
                        out_fdc(iot, ioh, finfo->fd_formb_secshift);
                        out_fdc(iot, ioh, finfo->fd_formb_nsecs);
                        out_fdc(iot, ioh, finfo->fd_formb_gaplen);
                        out_fdc(iot, ioh, finfo->fd_formb_fillbyte);
		} else {
			if (read)
				out_fdc(iot, ioh, NE7CMD_READ);	/* READ */
			else
				out_fdc(iot, ioh, NE7CMD_WRITE);/* WRITE */
			out_fdc(iot, ioh, (head << 2) | fd->sc_drive);
			out_fdc(iot, ioh, fd->sc_cylin);	/* track */
			out_fdc(iot, ioh, head);
			out_fdc(iot, ioh, sec + 1);		/* sec +1 */
			out_fdc(iot, ioh, type->secsize);	/* sec size */
			out_fdc(iot, ioh, type->sectrac);	/* secs/track */
			out_fdc(iot, ioh, type->gap1);		/* gap1 size */
			out_fdc(iot, ioh, type->datalen);	/* data len */
		}
		fdc->sc_state = IOCOMPLETE;

		disk_busy(&fd->sc_dk);

		/* allow 2 seconds for operation */
		timeout_add_sec(&fd->fdtimeout_to, 2);
		return 1;				/* will return later */

	case SEEKWAIT:
		timeout_del(&fd->fdtimeout_to);
		fdc->sc_state = SEEKCOMPLETE;
		/* allow 1/50 second for heads to settle */
		timeout_add_msec(&fdc->fdcpseudointr_to, 20);
		return 1;

	case SEEKCOMPLETE:
		disk_unbusy(&fd->sc_dk, 0, 0);	/* no data on seek */

		/* Make sure seek really happened. */
		out_fdc(iot, ioh, NE7CMD_SENSEI);
		if (fdcresult(fdc) != 2 || (st0 & 0xf8) != 0x20 ||
		    cyl != cylin * fd->sc_type->step) {
#ifdef FD_DEBUG
			fdcstatus(&fd->sc_dev, 2, "seek failed");
#endif
			fdretry(fd);
			goto loop;
		}
		fd->sc_cylin = cylin;
		goto doio;

	case IOTIMEDOUT:
		isadma_abort(fdc->sc_drq);
	case SEEKTIMEDOUT:
	case RECALTIMEDOUT:
	case RESETTIMEDOUT:
		fdretry(fd);
		goto loop;

	case IOCOMPLETE: /* IO DONE, post-analyze */
		timeout_del(&fd->fdtimeout_to);

		disk_unbusy(&fd->sc_dk, (bp->b_bcount - bp->b_resid),
		    (bp->b_flags & B_READ));

		if (fdcresult(fdc) != 7 || (st0 & 0xf8) != 0) {
			isadma_abort(fdc->sc_drq);
#ifdef FD_DEBUG
			fdcstatus(&fd->sc_dev, 7, bp->b_flags & B_READ ?
			    "read failed" : "write failed");
			printf("blkno %lld nblks %d\n",
			    (long long)fd->sc_blkno, fd->sc_nblks);
#endif
			fdretry(fd);
			goto loop;
		}
		read = bp->b_flags & B_READ ? DMAMODE_READ : DMAMODE_WRITE;
		isadma_done(fdc->sc_drq);
		if (fdc->sc_errors) {
			diskerr(bp, "fd", "soft error", LOG_PRINTF,
			    fd->sc_skip / fd_bsize, (struct disklabel *)NULL);
			printf("\n");
			fdc->sc_errors = 0;
		}

		fd->sc_blkno += fd->sc_nblks;
		fd->sc_skip += fd->sc_nbytes;
		fd->sc_bcount -= fd->sc_nbytes;
		bp->b_resid -= fd->sc_nbytes;
		if (!finfo && fd->sc_bcount > 0) {
			cylin = fd->sc_blkno / fd->sc_type->seccyl;
			goto doseek;
		}
		fdfinish(fd, bp);
		goto loop;

	case DORESET:
		/* try a reset, keep motor on */
		fd_set_motor(fdc, 1);
		delay(100);
		fd_set_motor(fdc, 0);
		fdc->sc_state = RESETCOMPLETE;
		timeout_add_msec(&fd->fdtimeout_to, 500);
		return 1;			/* will return later */

	case RESETCOMPLETE:
		timeout_del(&fd->fdtimeout_to);
		/* clear the controller output buffer */
		for (i = 0; i < 4; i++) {
			out_fdc(iot, ioh, NE7CMD_SENSEI);
			(void) fdcresult(fdc);
		}

		/* FALLTHROUGH */
	case DORECAL:
		out_fdc(iot, ioh, NE7CMD_RECAL);	/* recal function */
		out_fdc(iot, ioh, fd->sc_drive);
		fdc->sc_state = RECALWAIT;
		timeout_add_sec(&fd->fdtimeout_to, 5);
		return 1;			/* will return later */

	case RECALWAIT:
		timeout_del(&fd->fdtimeout_to);
		fdc->sc_state = RECALCOMPLETE;
		/* allow 1/30 second for heads to settle */
		timeout_add(&fdc->fdcpseudointr_to, hz / 30);
		return 1;			/* will return later */

	case RECALCOMPLETE:
		out_fdc(iot, ioh, NE7CMD_SENSEI);
		if (fdcresult(fdc) != 2 || (st0 & 0xf8) != 0x20 || cyl != 0) {
#ifdef FD_DEBUG
			fdcstatus(&fd->sc_dev, 2, "recalibrate failed");
#endif
			fdretry(fd);
			goto loop;
		}
		fd->sc_cylin = 0;
		goto doseek;

	case MOTORWAIT:
		if (fd->sc_flags & FD_MOTOR_WAIT)
			return 1;		/* time's not up yet */
		goto doseek;

	default:
		fdcstatus(&fd->sc_dev, 0, "stray interrupt");
		return 1;
	}
#ifdef DIAGNOSTIC
	panic("fdintr: impossible");
#endif
#undef	st0
#undef	cyl
}
Example #17
/*
 * The kernel thread (one for every active snapshot).
 *
 * After wakeup it cleans the cache and runs the I/O requests.
 */
static void
fss_bs_thread(void *arg)
{
	bool thread_idle, is_valid;
	int error, i, todo, len, crotor, is_read;
	long off;
	char *addr;
	u_int32_t c, cl, ch, *indirp;
	struct buf *bp, *nbp;
	struct fss_softc *sc;
	struct fss_cache *scp, *scl;

	sc = arg;
	scl = sc->sc_cache+sc->sc_cache_size;
	crotor = 0;
	thread_idle = false;

	mutex_enter(&sc->sc_slock);

	for (;;) {
		if (thread_idle)
			cv_wait(&sc->sc_work_cv, &sc->sc_slock);
		thread_idle = true;
		if ((sc->sc_flags & FSS_BS_THREAD) == 0) {
			mutex_exit(&sc->sc_slock);
			kthread_exit(0);
		}

		/*
		 * Process I/O requests (persistent)
		 */

		if (sc->sc_flags & FSS_PERSISTENT) {
			if ((bp = bufq_get(sc->sc_bufq)) == NULL)
				continue;
			is_valid = FSS_ISVALID(sc);
			is_read = (bp->b_flags & B_READ);
			thread_idle = false;
			mutex_exit(&sc->sc_slock);

			if (is_valid) {
				disk_busy(sc->sc_dkdev);
				error = fss_bs_io(sc, FSS_READ, 0,
				    dbtob(bp->b_blkno), bp->b_bcount,
				    bp->b_data);
				disk_unbusy(sc->sc_dkdev,
				    (error ? 0 : bp->b_bcount), is_read);
			} else
				error = ENXIO;

			bp->b_error = error;
			bp->b_resid = (error ? bp->b_bcount : 0);
			biodone(bp);

			mutex_enter(&sc->sc_slock);
			continue;
		}

		/*
		 * Clean the cache
		 */
		for (i = 0; i < sc->sc_cache_size; i++) {
			crotor = (crotor + 1) % sc->sc_cache_size;
			scp = sc->sc_cache + crotor;
			if (scp->fc_type != FSS_CACHE_VALID)
				continue;
			mutex_exit(&sc->sc_slock);

			thread_idle = false;
			indirp = fss_bs_indir(sc, scp->fc_cluster);
			if (indirp != NULL) {
				error = fss_bs_io(sc, FSS_WRITE, sc->sc_clnext,
				    0, FSS_CLSIZE(sc), scp->fc_data);
			} else
				error = EIO;

			mutex_enter(&sc->sc_slock);
			if (error == 0) {
				*indirp = sc->sc_clnext++;
				sc->sc_indir_dirty = 1;
			} else
				fss_error(sc, "write error on backing store");

			scp->fc_type = FSS_CACHE_FREE;
			cv_broadcast(&sc->sc_cache_cv);
			break;
		}

		/*
		 * Process I/O requests
		 */
		if ((bp = bufq_get(sc->sc_bufq)) == NULL)
			continue;
		is_valid = FSS_ISVALID(sc);
		is_read = (bp->b_flags & B_READ);
		thread_idle = false;

		if (!is_valid) {
			mutex_exit(&sc->sc_slock);

			bp->b_error = ENXIO;
			bp->b_resid = bp->b_bcount;
			biodone(bp);

			mutex_enter(&sc->sc_slock);
			continue;
		}

		disk_busy(sc->sc_dkdev);

		/*
		 * First read from the snapshotted block device unless
		 * this request is completely covered by backing store.
		 */

		cl = FSS_BTOCL(sc, dbtob(bp->b_blkno));
		off = FSS_CLOFF(sc, dbtob(bp->b_blkno));
		ch = FSS_BTOCL(sc, dbtob(bp->b_blkno)+bp->b_bcount-1);
		error = 0;
		bp->b_resid = 0;
		bp->b_error = 0;
		for (c = cl; c <= ch; c++) {
			if (isset(sc->sc_copied, c))
				continue;
			mutex_exit(&sc->sc_slock);

			/* Not on backing store, read from device. */
			nbp = getiobuf(NULL, true);
			nbp->b_flags = B_READ;
			nbp->b_resid = nbp->b_bcount = bp->b_bcount;
			nbp->b_bufsize = bp->b_bcount;
			nbp->b_data = bp->b_data;
			nbp->b_blkno = bp->b_blkno;
			nbp->b_lblkno = 0;
			nbp->b_dev = sc->sc_bdev;
			SET(nbp->b_cflags, BC_BUSY);	/* mark buffer busy */

			bdev_strategy(nbp);

			error = biowait(nbp);
			if (error != 0) {
				bp->b_resid = bp->b_bcount;
				bp->b_error = nbp->b_error;
				disk_unbusy(sc->sc_dkdev, 0, is_read);
				biodone(bp);
			}
			putiobuf(nbp);

			mutex_enter(&sc->sc_slock);
			break;
		}
		if (error)
			continue;

		/*
		 * Replace those parts that have been saved to backing store.
		 */

		addr = bp->b_data;
		todo = bp->b_bcount;
		for (c = cl; c <= ch; c++, off = 0, todo -= len, addr += len) {
			len = FSS_CLSIZE(sc)-off;
			if (len > todo)
				len = todo;
			if (isclr(sc->sc_copied, c))
				continue;
			mutex_exit(&sc->sc_slock);

			indirp = fss_bs_indir(sc, c);
			if (indirp == NULL || *indirp == 0) {
				/*
				 * Not on backing store. Either in cache
				 * or hole in the snapshotted block device.
				 */

				mutex_enter(&sc->sc_slock);
				for (scp = sc->sc_cache; scp < scl; scp++)
					if (scp->fc_type == FSS_CACHE_VALID &&
					    scp->fc_cluster == c)
						break;
				if (scp < scl)
					memcpy(addr, (char *)scp->fc_data+off,
					    len);
				else
					memset(addr, 0, len);
				continue;
			}

			/*
			 * Read from backing store.
			 */
			error =
			    fss_bs_io(sc, FSS_READ, *indirp, off, len, addr);

			mutex_enter(&sc->sc_slock);
			if (error) {
				bp->b_resid = bp->b_bcount;
				bp->b_error = error;
				break;
			}
		}
		mutex_exit(&sc->sc_slock);

		disk_unbusy(sc->sc_dkdev, (error ? 0 : bp->b_bcount), is_read);
		biodone(bp);

		mutex_enter(&sc->sc_slock);
	}
}
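fss_bs_thread() applies the same pair around synchronous backing-store I/O performed from a kernel thread: disk_busy() before fss_bs_io(), then disk_unbusy() charging either the whole byte count or zero depending on the outcome. A hedged sketch of that thread-side shape, with zz_io() standing in for whatever actually performs the transfer:

/* Sketch: zz_softc and zz_io() are hypothetical stand-ins. */
static void
zz_process(struct zz_softc *sc, struct buf *bp)
{
	int error;

	disk_busy(sc->sc_dkdev);
	error = zz_io(sc, bp);			/* synchronous transfer */
	/* Charge the full transfer on success, nothing on failure. */
	disk_unbusy(sc->sc_dkdev, error ? 0 : bp->b_bcount,
	    (bp->b_flags & B_READ));

	bp->b_error = error;
	bp->b_resid = error ? bp->b_bcount : 0;
	biodone(bp);
}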
Example #18
/*
 * State machine to process read requests.
 * Initialize with MCD_S_BEGIN: calculate sizes, and set mode
 * MCD_S_WAITMODE: waits for status reply from set mode, set read command
 * MCD_S_WAITREAD: wait for read ready, read data.
 */
int
mcdintr(void *arg)
{
	struct mcd_softc *sc = arg;
	struct mcd_mbx *mbx = &sc->mbx;
	struct buf *bp = mbx->bp;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;

	int i;
	u_char x;
	bcd_t msf[3];

	switch (mbx->state) {
	case MCD_S_IDLE:
		return 0;

	case MCD_S_BEGIN:
	tryagain:
		if (mbx->mode == sc->lastmode)
			goto firstblock;

		sc->lastmode = MCD_MD_UNKNOWN;
		bus_space_write_1(iot, ioh, MCD_COMMAND, MCD_CMDSETMODE);
		bus_space_write_1(iot, ioh, MCD_COMMAND, mbx->mode);

		mbx->count = RDELAY_WAITMODE;
		mbx->state = MCD_S_WAITMODE;

	case MCD_S_WAITMODE:
		callout_stop(&sc->sc_pintr_ch);
		for (i = 20; i; i--) {
			x = bus_space_read_1(iot, ioh, MCD_XFER);
			if ((x & MCD_XF_STATUSUNAVAIL) == 0)
				break;
			delay(50);
		}
		if (i == 0)
			goto hold;
		sc->status = bus_space_read_1(iot, ioh, MCD_STATUS);
		mcd_setflags(sc);
		if ((sc->flags & MCDF_LOADED) == 0)
			goto changed;
		MCD_TRACE("doread: got WAITMODE delay=%d\n",
		    RDELAY_WAITMODE - mbx->count);

		sc->lastmode = mbx->mode;

	firstblock:
		MCD_TRACE("doread: read blkno=%d for bp=0x%p\n", 
		    (int) mbx->blkno, bp);

		/* Build parameter block. */
		hsg2msf(mbx->blkno, msf);

		/* Send the read command. */
		bus_space_write_1(iot, ioh, MCD_COMMAND, sc->readcmd);
		bus_space_write_1(iot, ioh, MCD_COMMAND, msf[0]);
		bus_space_write_1(iot, ioh, MCD_COMMAND, msf[1]);
		bus_space_write_1(iot, ioh, MCD_COMMAND, msf[2]);
		bus_space_write_1(iot, ioh, MCD_COMMAND, 0);
		bus_space_write_1(iot, ioh, MCD_COMMAND, 0);
		bus_space_write_1(iot, ioh, MCD_COMMAND, mbx->nblk);

		mbx->count = RDELAY_WAITREAD;
		mbx->state = MCD_S_WAITREAD;

	case MCD_S_WAITREAD:
		callout_stop(&sc->sc_pintr_ch);
	nextblock:
	loop:
		for (i = 20; i; i--) {
			x = bus_space_read_1(iot, ioh, MCD_XFER);
			if ((x & MCD_XF_DATAUNAVAIL) == 0)
				goto gotblock;
			if ((x & MCD_XF_STATUSUNAVAIL) == 0)
				break;
			delay(50);
		}
		if (i == 0)
			goto hold;
		sc->status = bus_space_read_1(iot, ioh, MCD_STATUS);
		mcd_setflags(sc);
		if ((sc->flags & MCDF_LOADED) == 0)
			goto changed;
#if 0
		printf("%s: got status byte %02x during read\n",
		    device_xname(sc->sc_dev), (u_int)sc->status);
#endif
		goto loop;

	gotblock:
		MCD_TRACE("doread: got data delay=%d\n",
		    RDELAY_WAITREAD - mbx->count);

		/* Data is ready. */
		bus_space_write_1(iot, ioh, MCD_CTL2, 0x04);	/* XXX */
		bus_space_read_multi_1(iot, ioh, MCD_RDATA,
		    (char *)bp->b_data + mbx->skip, mbx->sz);
		bus_space_write_1(iot, ioh, MCD_CTL2, 0x0c);	/* XXX */
		mbx->blkno += 1;
		mbx->skip += mbx->sz;
		if (--mbx->nblk > 0)
			goto nextblock;

		mbx->state = MCD_S_IDLE;

		/* Return buffer. */
		bp->b_resid = 0;
		disk_unbusy(&sc->sc_dk, bp->b_bcount, (bp->b_flags & B_READ));
		biodone(bp);

		mcdstart(sc);
		return 1;

	hold:
		if (mbx->count-- < 0) {
			printf("%s: timeout in state %d",
			    device_xname(sc->sc_dev), mbx->state);
			goto readerr;
		}

#if 0
		printf("%s: sleep in state %d\n", device_xname(sc->sc_dev),
		    mbx->state);
#endif
		callout_reset(&sc->sc_pintr_ch, hz / 100,
		    mcd_pseudointr, sc);
		return -1;
	}

readerr:
	if (mbx->retry-- > 0) {
		printf("; retrying\n");
		goto tryagain;
	} else
		printf("; giving up\n");

changed:
	/* Invalidate the buffer. */
	bp->b_error = EIO;
	bp->b_resid = bp->b_bcount - mbx->skip;
	disk_unbusy(&sc->sc_dk, (bp->b_bcount - bp->b_resid),
	    (bp->b_flags & B_READ));
	biodone(bp);

	mcdstart(sc);
	return -1;

#ifdef notyet
	printf("%s: unit timeout; resetting\n", device_xname(sc->sc_dev));
	bus_space_write_1(iot, ioh, MCD_RESET, MCD_CMDRESET);
	delay(300000);
	(void) mcd_getstat(sc, 1);
	(void) mcd_getstat(sc, 1);
	/*sc->status &= ~MCD_ST_DSKCHNG; */
	sc->debug = 1; /* preventive set debug mode */
#endif
}
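The mcd state machine is also a good illustration of partial-transfer accounting: the success path sets b_resid = 0 and charges the full b_bcount, while the error path keeps b_resid = b_bcount - mbx->skip so that only the sectors already copied out of the drive are reported. A minimal sketch of that idea, where the "done" byte counter and the ww_* names are hypothetical:

/* Sketch: ww_softc and the "done" byte counter are hypothetical. */
static void
ww_finish(struct ww_softc *sc, struct buf *bp, size_t done, int error)
{
	bp->b_error = error;
	/* On error, everything past the bytes already copied is residual. */
	bp->b_resid = error ? bp->b_bcount - done : 0;
	disk_unbusy(&sc->sc_dk, bp->b_bcount - bp->b_resid,
	    (bp->b_flags & B_READ));
	biodone(bp);
}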
Example #19
File: xbd.c Project: MarginC/kame
static int
xbd_response_handler(void *arg)
{
	struct buf *bp;
	struct xbd_softc *xs;
	blk_ring_resp_entry_t *ring_resp;
	struct xbdreq *pxr, *xr;
	int i;

	for (i = resp_cons; i != blk_ring->resp_prod; i = BLK_RING_INC(i)) {
		ring_resp = &blk_ring->ring[MASK_BLK_IDX(i)].resp;
		xr = (struct xbdreq *)ring_resp->id;
		pxr = xr->xr_parent;

		DPRINTF(XBDB_IO, ("xbd_response_handler(%d): pxr %p xr %p "
		    "bdone %04lx breq %04lx\n", i, pxr, xr, pxr->xr_bdone,
		    xr->xr_breq));
		pxr->xr_bdone -= xr->xr_breq;
		DIAGCONDPANIC(pxr->xr_bdone < 0,
		    ("xbd_response_handler: pxr->xr_bdone < 0"));

		if (__predict_false(ring_resp->status)) {
			pxr->xr_bp->b_flags |= B_ERROR;
			pxr->xr_bp->b_error = EIO;
		}

		if (xr != pxr) {
			PUT_XBDREQ(xr);
			if (!SIMPLEQ_EMPTY(&xbdr_suspended))
				xbdresume();
		}

		if (pxr->xr_bdone == 0) {
			bp = pxr->xr_bp;
			xs = getxbd_softc(bp->b_dev);
			if (xs == NULL) { /* don't fail bp if we're shutdown */
				bp->b_flags |= B_ERROR;
				bp->b_error = EIO;
			}
			DPRINTF(XBDB_IO, ("xbd_response_handler(%d): "
			    "completed bp %p\n", i, bp));
			if (bp->b_flags & B_ERROR)
				bp->b_resid = bp->b_bcount;
			else
				bp->b_resid = 0;

			if (pxr->xr_aligned)
				unmap_align(pxr);

			PUT_XBDREQ(pxr);
			if (xs) {
				disk_unbusy(&xs->sc_dksc.sc_dkdev,
				    (bp->b_bcount - bp->b_resid),
				    (bp->b_flags & B_READ));
#if NRND > 0
				rnd_add_uint32(&xs->rnd_source,
				    bp->b_blkno);
#endif
			}
			biodone(bp);
			if (!SIMPLEQ_EMPTY(&xbdr_suspended))
				xbdresume();
			/* XXX possible lockup if this was the only
			 * active device and requests were held back in
			 * the queue.
			 */
			if (xs)
				dk_iodone(xs->sc_di, &xs->sc_dksc);
		}
	}
	resp_cons = i;
	/* check if xbdresume queued any requests */
	if (last_req_prod != req_prod)
		signal_requests_to_xen();
	return 0;
}