Example No. 1
/*
 * Queue a transfer request, and if possible, hand it to the controller.
 */
void
rastrategy(struct buf *bp)
{
	struct ra_softc *ra = mscp_device_lookup(bp->b_dev);
	int b;

	/*
	 * Make sure this is a reasonable drive to use.
	 */
	if (ra == NULL) {
		bp->b_error = ENXIO;
		goto done;
	}
	/*
	 * If drive is open `raw' or reading label, let it at it.
	 */
	if (ra->ra_state == DK_RDLABEL) {
		/* Make some statistics... /bqt */
		b = splbio();
		disk_busy(&ra->ra_disk);
		splx(b);
		mscp_strategy(bp, device_parent(ra->ra_dev));
		return;
	}

	/* If disk is not online, try to put it online */
	if (ra->ra_state == DK_CLOSED)
		if (ra_putonline(bp->b_dev, ra) == MSCP_FAILED) {
			bp->b_error = EIO;
			goto done;
		}

	/*
	 * Determine the size of the transfer, and make sure it is
	 * within the boundaries of the partition.
	 */
	if (bounds_check_with_label(&ra->ra_disk, bp, ra->ra_wlabel) <= 0)
		goto done;

	/* Make some statistics... /bqt */
	b = splbio();
	disk_busy(&ra->ra_disk);
	splx(b);
	mscp_strategy(bp, device_parent(ra->ra_dev));
	return;

done:
	biodone(bp);
}
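The strategy routines in this collection all follow the same accounting bracket: raise the ipl with splbio(), call disk_busy() just before the transfer is handed to the controller, and have the completion path call disk_unbusy() with the byte count actually moved. Below is a minimal user-space sketch of that bookkeeping, with simplified stand-ins for the kernel's disk(9) structures, not the real API:

#include <stdio.h>
#include <sys/time.h>

struct disk_sketch {
	int		dk_busy;	/* transfers in flight */
	struct timeval	dk_timestamp;	/* start of the busy period */
	long		dk_rbytes;	/* bytes read */
	long		dk_wbytes;	/* bytes written */
};

static void
disk_busy_sketch(struct disk_sketch *dk)
{
	/* Start timing when the disk goes from idle to busy. */
	if (dk->dk_busy++ == 0)
		gettimeofday(&dk->dk_timestamp, NULL);
}

static void
disk_unbusy_sketch(struct disk_sketch *dk, long bcount, int read)
{
	if (dk->dk_busy > 0)
		dk->dk_busy--;		/* the kernel panics on underflow */
	if (read)
		dk->dk_rbytes += bcount;
	else
		dk->dk_wbytes += bcount;
}

int
main(void)
{
	struct disk_sketch dk = { 0 };

	disk_busy_sketch(&dk);
	disk_unbusy_sketch(&dk, 8192, 1);	/* an 8 KB read completed */
	printf("rbytes %ld wbytes %ld\n", dk.dk_rbytes, dk.dk_wbytes);
	return 0;
}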
Example No. 2
void
__wdstart(struct wd_softc *wd, struct buf *bp)
{
	struct disklabel *lp;
	u_int64_t nsecs;

	lp = wd->sc_dk.dk_label;
	wd->sc_wdc_bio.blkno = DL_BLKTOSEC(lp, bp->b_blkno + DL_SECTOBLK(lp,
	    DL_GETPOFFSET(&lp->d_partitions[DISKPART(bp->b_dev)])));
	wd->sc_wdc_bio.blkdone = 0;
	wd->sc_bp = bp;
	/*
	 * If we're retrying, retry in single-sector mode. This will give us
	 * the sector number of the problem, and will eventually allow the
	 * transfer to succeed.
	 */
	if (wd->retries >= WDIORETRIES_SINGLE)
		wd->sc_wdc_bio.flags = ATA_SINGLE;
	else
		wd->sc_wdc_bio.flags = 0;
	nsecs = howmany(bp->b_bcount, lp->d_secsize);
	if ((wd->sc_flags & WDF_LBA48) &&
	    /* use LBA48 only if really needed */
	    ((wd->sc_wdc_bio.blkno + nsecs - 1 >= LBA48_THRESHOLD) ||
	     (nsecs > 0xff)))
		wd->sc_wdc_bio.flags |= ATA_LBA48;
	if (wd->sc_flags & WDF_LBA)
		wd->sc_wdc_bio.flags |= ATA_LBA;
	if (bp->b_flags & B_READ)
		wd->sc_wdc_bio.flags |= ATA_READ;
	wd->sc_wdc_bio.bcount = bp->b_bcount;
	wd->sc_wdc_bio.databuf = bp->b_data;
	wd->sc_wdc_bio.wd = wd;
	/* Instrumentation. */
	disk_busy(&wd->sc_dk);
	switch (wdc_ata_bio(wd->drvp, &wd->sc_wdc_bio)) {
	case WDC_TRY_AGAIN:
		timeout_add_sec(&wd->sc_restart_timeout, 1);
		break;
	case WDC_QUEUED:
		break;
	case WDC_COMPLETE:
		/*
		 * This code is never executed because we never set
		 * the ATA_POLL flag above
		 */
#if 0
		if (wd->sc_wdc_bio.flags & ATA_POLL)
			wddone(wd);
#endif
		break;
	default:
		panic("__wdstart: bad return code from wdc_ata_bio()");
	}
}
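The LBA48 test in __wdstart() deserves restating: 48-bit addressing is needed when the last sector touched by the transfer reaches the 28-bit limit, or when the sector count no longer fits the 8-bit count register of the 28-bit commands. Isolated as a stand-alone predicate (the value of LBA48_THRESHOLD is an assumption here; the driver takes it from its own headers):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define LBA48_THRESHOLD	((uint64_t)0xfffffff)	/* assumed: max 28-bit LBA */

/*
 * Mirror of the condition in __wdstart(): LBA48 when the transfer's
 * last sector reaches the 28-bit limit or nsecs overflows 8 bits.
 */
static bool
needs_lba48(uint64_t blkno, uint64_t nsecs)
{
	return (blkno + nsecs - 1 >= LBA48_THRESHOLD) || (nsecs > 0xff);
}

int
main(void)
{
	printf("%d\n", needs_lba48(LBA48_THRESHOLD - 1, 1));	/* 0 */
	printf("%d\n", needs_lba48(LBA48_THRESHOLD, 1));	/* 1 */
	printf("%d\n", needs_lba48(0, 0x100));			/* 1 */
	return 0;
}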
Example No. 3
void
ofdisk_strategy(struct buf *bp)
{
	struct ofdisk_softc *of =
		device_lookup_private(&ofdisk_cd, DISKUNIT(bp->b_dev));
	struct partition *p;
	u_quad_t off;
	int read;
	int (*OF_io)(int, void *, int);
	daddr_t blkno = bp->b_blkno;

	bp->b_resid = 0;
	if (bp->b_bcount == 0)
		goto done;

	OF_io = bp->b_flags & B_READ ? OF_read :
		(int (*)(int, void *, int))OF_write;

	if (DISKPART(bp->b_dev) != RAW_PART) {
		if (bounds_check_with_label(&of->sc_dk, bp, 0) <= 0) {
			bp->b_resid = bp->b_bcount;
			goto done;
		}
		p = &of->sc_dk.dk_label->d_partitions[DISKPART(bp->b_dev)];
		blkno = bp->b_blkno + p->p_offset;
	}

	disk_busy(&of->sc_dk);

	off = (u_quad_t)blkno * DEV_BSIZE;
	read = -1;
	do {
		if (OF_seek(of->sc_ihandle, off) < 0)
			break;
		read = OF_io(of->sc_ihandle, bp->b_data, bp->b_bcount);
	} while (read == -2);

	if (read < 0) {
		bp->b_error = EIO;
		bp->b_resid = bp->b_bcount;
	} else
		bp->b_resid = bp->b_bcount - read;

	disk_unbusy(&of->sc_dk, bp->b_bcount - bp->b_resid,
	    (bp->b_flags & B_READ));

done:
	biodone(bp);
}
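ofdisk_strategy() reports partial transfers through b_resid: the OpenFirmware call returns the number of bytes actually moved, or a negative value on failure, and the residue is whatever is left of b_bcount. The bookkeeping as a pure function (a sketch, no OpenFirmware assumed):

#include <stdio.h>

/* Residue as computed above: `transferred' is negative on error. */
static long
bio_residue(long bcount, long transferred)
{
	if (transferred < 0)
		return bcount;			/* whole request failed */
	return bcount - transferred;		/* possibly a short transfer */
}

int
main(void)
{
	printf("%ld\n", bio_residue(4096, 4096));	/* 0: complete */
	printf("%ld\n", bio_residue(4096, 1024));	/* 3072: short */
	printf("%ld\n", bio_residue(4096, -1));		/* 4096: error */
	return 0;
}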
Example No. 4
void
mcdstart(struct mcd_softc *sc)
{
	struct buf *bp;
	int s;

loop:
	s = splbio();

	if ((bp = bufq_get(sc->buf_queue)) == NULL) {
		/* Nothing to do. */
		sc->active = 0;
		splx(s);
		return;
	}

	/* Block found to process. */
	MCD_TRACE("start: found block bp=0x%p\n", bp);
	splx(s);

	/* Changed media? */
	if ((sc->flags & MCDF_LOADED) == 0) {
		MCD_TRACE("start: drive not valid%s", "\n");
		bp->b_error = EIO;
		biodone(bp);
		goto loop;
	}

	sc->active = 1;

	/* Instrumentation. */
	s = splbio();
	disk_busy(&sc->sc_dk);
	splx(s);

	sc->mbx.retry = MCD_RDRETRIES;
	sc->mbx.bp = bp;
	sc->mbx.blkno = bp->b_rawblkno;
	sc->mbx.nblk = bp->b_bcount / sc->blksize;
	sc->mbx.sz = sc->blksize;
	sc->mbx.skip = 0;
	sc->mbx.state = MCD_S_BEGIN;
	sc->mbx.mode = MCD_MD_COOKED;

	s = splbio();
	(void) mcdintr(sc);
	splx(s);
}
Example No. 5
/*
 * Queue a transfer request, and if possible, hand it to the controller.
 *
 * This routine is broken into two so that the internal version
 * udastrat1() can be called by the (nonexistent, as yet) bad block
 * revectoring routine.
 */
void
rxstrategy(struct buf *bp)
{
	int unit;
	struct rx_softc *rx;
	int b;

	/*
	 * Make sure this is a reasonable drive to use.
	 */
	unit = DISKUNIT(bp->b_dev);
	if ((rx = device_lookup_private(&rx_cd, unit)) == NULL) {
		bp->b_error = ENXIO;
		goto done;
	}

	/* If disk is not online, try to put it online */
	if (rx->ra_state == DK_CLOSED)
		if (rx_putonline(rx) == MSCP_FAILED) {
			bp->b_error = EIO;
			goto done;
		}

	/*
	 * Determine the size of the transfer, and make sure it is
	 * within the boundaries of the partition.
	 */
	if (bp->b_blkno >= rx->ra_disk.dk_label->d_secperunit) {
		bp->b_resid = bp->b_bcount;
		goto done;
	}

	/* Make some statistics... /bqt */
	b = splbio();
	disk_busy(&rx->ra_disk);
	splx(b);
	mscp_strategy(bp, device_parent(rx->ra_dev));
	return;

done:
	biodone(bp);
}
Example No. 6
/*
 * cdstart looks to see if there is a buf waiting for the device
 * and that the device is not already busy. If both are true,
 * It dequeues the buf and creates a scsi command to perform the
 * transfer in the buf. The transfer request will call scsi_done
 * on completion, which will in turn call this routine again
 * so that the next queued transfer is performed.
 * The bufs are queued by the strategy routine (cdstrategy)
 *
 * This routine is also called after other non-queued requests
 * have been made of the scsi driver, to ensure that the queue
 * continues to be drained.
 *
 * Must be called at the correct (highish) spl level:
 * cdstart() is called at splbio from cdstrategy, cdrestart and scsi_done.
 */
void
cdstart(void *v)
{
	struct cd_softc *cd = v;
	struct scsi_link *sc_link = cd->sc_link;
	struct buf *bp = 0;
	struct buf *dp;
	struct scsi_rw_big cmd_big;
	struct scsi_rw cmd_small;
	struct scsi_generic *cmdp;
	int blkno, nblks, cmdlen, error;
	struct partition *p;

	splassert(IPL_BIO);

	SC_DEBUG(sc_link, SDEV_DB2, ("cdstart\n"));
	/*
	 * Check if the device has room for another command
	 */
	while (sc_link->openings > 0) {
		/*
		 * There is excess capacity, but a special command waits;
		 * it'll need the adapter as soon as we clear out of the
		 * way and let it run (user level wait).
		 */
		if (sc_link->flags & SDEV_WAITING) {
			sc_link->flags &= ~SDEV_WAITING;
			wakeup((caddr_t)sc_link);
			return;
		}

		/*
		 * See if there is a buf with work for us to do..
		 */
		dp = &cd->buf_queue;
		if ((bp = dp->b_actf) == NULL)	/* yes, an assign */
			return;
		dp->b_actf = bp->b_actf;

		/*
		 * If the device has become invalid, abort all the
		 * reads and writes until all files have been closed and
		 * re-opened
		 */
		if ((sc_link->flags & SDEV_MEDIA_LOADED) == 0) {
			bp->b_error = EIO;
			bp->b_flags |= B_ERROR;
			bp->b_resid = bp->b_bcount;
			biodone(bp);
			continue;
		}

		/*
		 * We have a buf, now we should make a command
		 *
		 * First, translate the block to absolute and put it in terms
		 * of the logical blocksize of the device.
		 */
		blkno =
		    bp->b_blkno / (cd->sc_dk.dk_label->d_secsize / DEV_BSIZE);
		p = &cd->sc_dk.dk_label->d_partitions[DISKPART(bp->b_dev)];
		blkno += DL_GETPOFFSET(p);
		nblks = howmany(bp->b_bcount, cd->sc_dk.dk_label->d_secsize);

		/*
		 *  Fill out the scsi command.  If the transfer will
		 *  fit in a "small" cdb, use it.
		 */
		if (!(sc_link->flags & SDEV_ATAPI) &&
		    !(sc_link->quirks & SDEV_ONLYBIG) && 
		    ((blkno & 0x1fffff) == blkno) &&
		    ((nblks & 0xff) == nblks)) {
			/*
			 * We can fit in a small cdb.
			 */
			bzero(&cmd_small, sizeof(cmd_small));
			cmd_small.opcode = (bp->b_flags & B_READ) ?
			    READ_COMMAND : WRITE_COMMAND;
			_lto3b(blkno, cmd_small.addr);
			cmd_small.length = nblks & 0xff;
			cmdlen = sizeof(cmd_small);
			cmdp = (struct scsi_generic *)&cmd_small;
		} else {
			/*
			 * Need a large cdb.
			 */
			bzero(&cmd_big, sizeof(cmd_big));
			cmd_big.opcode = (bp->b_flags & B_READ) ?
			    READ_BIG : WRITE_BIG;
			_lto4b(blkno, cmd_big.addr);
			_lto2b(nblks, cmd_big.length);
			cmdlen = sizeof(cmd_big);
			cmdp = (struct scsi_generic *)&cmd_big;
		}

		/* Instrumentation. */
		disk_busy(&cd->sc_dk);

		/*
		 * Call the routine that chats with the adapter.
		 * Note: we cannot sleep as we may be an interrupt
		 */
		error = scsi_scsi_cmd(sc_link, cmdp, cmdlen,
		    (u_char *) bp->b_data, bp->b_bcount, SCSI_RETRIES, 30000,
		    bp, SCSI_NOSLEEP | ((bp->b_flags & B_READ) ? SCSI_DATA_IN :
		    SCSI_DATA_OUT));
		switch (error) {
		case 0:
			timeout_del(&cd->sc_timeout);
			break;
		case EAGAIN:
			/*
			 * The device can't start another i/o. Try again later.
			 */
			dp->b_actf = bp;
			disk_unbusy(&cd->sc_dk, 0, 0);
			timeout_add(&cd->sc_timeout, 1);
			return;
		default:
			disk_unbusy(&cd->sc_dk, 0, 0);
			printf("%s: not queued, error %d\n",
			    cd->sc_dev.dv_xname, error);
			break;
		}
	}
}
Example No. 7
void
dk_start(struct dk_softc *dksc, struct buf *bp)
{
	const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
	int error;

	if (!(dksc->sc_flags & DKF_INITED)) {
		DPRINTF_FOLLOW(("%s: not inited\n", __func__));
		return;
	}

	mutex_enter(&dksc->sc_iolock);

	if (bp != NULL)
		bufq_put(dksc->sc_bufq, bp);

	if (dksc->sc_busy)
		goto done;
	dksc->sc_busy = true;

	/*
	 * Peeking at the buffer queue and committing the operation
	 * only after success isn't atomic.
	 *
	 * So when a diskstart fails, the buffer is saved
	 * and tried again before the next buffer is fetched.
	 * dk_drain() handles flushing of a saved buffer.
	 *
	 * This keeps order of I/O operations, unlike bufq_put.
	 */

	bp = dksc->sc_deferred;
	dksc->sc_deferred = NULL;

	if (bp == NULL)
		bp = bufq_get(dksc->sc_bufq);

	while (bp != NULL) {

		disk_busy(&dksc->sc_dkdev);
		mutex_exit(&dksc->sc_iolock);
		error = dkd->d_diskstart(dksc->sc_dev, bp);
		mutex_enter(&dksc->sc_iolock);
		if (error == EAGAIN) {
			dksc->sc_deferred = bp;
			disk_unbusy(&dksc->sc_dkdev, 0, (bp->b_flags & B_READ));
			break;
		}

		if (error != 0) {
			bp->b_error = error;
			bp->b_resid = bp->b_bcount;
			dk_done1(dksc, bp, false);
		}

		bp = bufq_get(dksc->sc_bufq);
	}

	dksc->sc_busy = false;
done:
	mutex_exit(&dksc->sc_iolock);
}
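dk_start() keeps I/O ordered across transient failures by parking a rejected buffer in sc_deferred and retrying it before fetching anything newer. A user-space sketch of that loop follows; queue_get() and diskstart() are hypothetical stand-ins for bufq_get() and dkd->d_diskstart(), and one simulated EAGAIN stands in for a busy controller:

#include <errno.h>
#include <stdio.h>

static int pending[] = { 10, 11, 12 };	/* queued buffer "numbers" */
static int head, tail = 3;
static int deferred = -1;		/* parked buffer, -1 if none */
static int reject_once = 11;		/* pretend the hardware balks at 11 */

static int
queue_get(void)				/* stand-in for bufq_get() */
{
	return head < tail ? pending[head++] : -1;
}

static int
diskstart(int bp)			/* stand-in for dkd->d_diskstart() */
{
	if (bp == reject_once) {
		reject_once = -1;	/* succeed on the retry */
		return EAGAIN;
	}
	printf("started %d\n", bp);
	return 0;
}

static void
dk_start_sketch(void)
{
	int bp = deferred;		/* retry a parked buffer first... */

	deferred = -1;
	if (bp == -1)
		bp = queue_get();	/* ...before taking a new one */

	while (bp != -1) {
		if (diskstart(bp) == EAGAIN) {
			deferred = bp;	/* park it; order is preserved */
			return;
		}
		bp = queue_get();
	}
}

int
main(void)
{
	dk_start_sketch();		/* starts 10, parks 11 */
	dk_start_sketch();		/* retries 11, then starts 12 */
	return 0;
}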
Example No. 8
/*
 * sdstart looks to see if there is a buf waiting for the device
 * and that the device is not already busy. If both are true,
 * It dequeues the buf and creates a scsi command to perform the
 * transfer in the buf. The transfer request will call scsi_done
 * on completion, which will in turn call this routine again
 * so that the next queued transfer is performed.
 * The bufs are queued by the strategy routine (sdstrategy)
 *
 * This routine is also called after other non-queued requests
 * have been made of the scsi driver, to ensure that the queue
 * continues to be drained.
 */
void
sdstart(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct sd_softc *sc = link->device_softc;
	struct buf *bp;
	u_int64_t secno;
	int nsecs;
	int read;
	struct partition *p;

	if (sc->flags & SDF_DYING) {
		scsi_xs_put(xs);
		return;
	}
	if ((link->flags & SDEV_MEDIA_LOADED) == 0) {
		bufq_drain(&sc->sc_bufq);
		scsi_xs_put(xs);
		return;
	}

	bp = bufq_dequeue(&sc->sc_bufq);
	if (bp == NULL) {
		scsi_xs_put(xs);
		return;
	}

	secno = DL_BLKTOSEC(sc->sc_dk.dk_label, bp->b_blkno);

	p = &sc->sc_dk.dk_label->d_partitions[DISKPART(bp->b_dev)];
	secno += DL_GETPOFFSET(p);
	nsecs = howmany(bp->b_bcount, sc->sc_dk.dk_label->d_secsize);
	read = bp->b_flags & B_READ;

	/*
	 *  Fill out the scsi command.  If the transfer will
	 *  fit in a "small" cdb, use it.
	 */
	if (!(link->flags & SDEV_ATAPI) &&
	    !(link->quirks & SDEV_ONLYBIG) &&
	    ((secno & 0x1fffff) == secno) &&
	    ((nsecs & 0xff) == nsecs))
		sd_cmd_rw6(xs, read, secno, nsecs);
	else if (((secno & 0xffffffff) == secno) &&
	    ((nsecs & 0xffff) == nsecs))
		sd_cmd_rw10(xs, read, secno, nsecs);
	else if (((secno & 0xffffffff) == secno) &&
	    ((nsecs & 0xffffffff) == nsecs))
		sd_cmd_rw12(xs, read, secno, nsecs);
	else
		sd_cmd_rw16(xs, read, secno, nsecs);

	xs->flags |= (read ? SCSI_DATA_IN : SCSI_DATA_OUT);
	xs->timeout = 60000;
	xs->data = bp->b_data;
	xs->datalen = bp->b_bcount;

	xs->done = sd_buf_done;
	xs->cookie = bp;
	xs->bp = bp;

	/* Instrumentation. */
	disk_busy(&sc->sc_dk);

	/* Mark disk as dirty. */
	if (!read)
		sc->flags |= SDF_DIRTY;

	scsi_xs_exec(xs);

	/* move onto the next io */
	if (ISSET(sc->flags, SDF_WAITING))
		CLR(sc->flags, SDF_WAITING);
	else if (bufq_peek(&sc->sc_bufq))
		scsi_xsh_add(&sc->sc_xsh);
}
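sdstart() picks the smallest READ/WRITE cdb whose fields can hold the request, using exactly the mask tests above: rw6 carries a 21-bit LBA and an 8-bit count, rw10 a 32-bit LBA and a 16-bit count, rw12 a 32-bit LBA and a 32-bit count, and rw16 takes the rest. As a pure-function sketch (the ATAPI and ONLYBIG quirk checks, which force the larger cdbs, are left out):

#include <stdint.h>
#include <stdio.h>

/* Smallest READ/WRITE cdb that can encode the request. */
static int
rw_cdb_size(uint64_t secno, uint64_t nsecs)
{
	if ((secno & 0x1fffff) == secno && (nsecs & 0xff) == nsecs)
		return 6;		/* sd_cmd_rw6 */
	if ((secno & 0xffffffff) == secno && (nsecs & 0xffff) == nsecs)
		return 10;		/* sd_cmd_rw10 */
	if ((secno & 0xffffffff) == secno && (nsecs & 0xffffffff) == nsecs)
		return 12;		/* sd_cmd_rw12 */
	return 16;			/* sd_cmd_rw16 */
}

int
main(void)
{
	printf("%d\n", rw_cdb_size(0x1fffff, 0xff));	/* 6 */
	printf("%d\n", rw_cdb_size(0x200000, 0xff));	/* 10 */
	printf("%d\n", rw_cdb_size(0x200000, 0x10000));	/* 12 */
	printf("%d\n", rw_cdb_size(1ULL << 32, 1));	/* 16 */
	return 0;
}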
Example No. 9
static int
fdcintr1(struct fdc_softc *fdc)
{
#define	st0	fdc->sc_status[0]
#define	cyl	fdc->sc_status[1]
	struct fd_softc *fd;
	struct buf *bp;
	bus_space_tag_t iot = fdc->sc_iot;
	bus_space_handle_t ioh = fdc->sc_ioh;
	int read, head, sec, i, nblks;
	struct fd_type *type;
	struct ne7_fd_formb *finfo = NULL;

	KASSERT(mutex_owned(&fdc->sc_mtx));
	if (fdc->sc_state == PROBING) {
#ifdef DEBUG
		printf("fdcintr: got probe interrupt\n");
#endif
		fdc->sc_probe++;
		goto out;
	}

loop:
	/* Is there a drive for the controller to do a transfer with? */
	fd = TAILQ_FIRST(&fdc->sc_drives);
	if (fd == NULL) {
		fdc->sc_state = DEVIDLE;
		goto out;
	}

	/* Is there a transfer to this drive?  If not, deactivate drive. */
	bp = BUFQ_PEEK(fd->sc_q);
	if (bp == NULL) {
		fd->sc_ops = 0;
		TAILQ_REMOVE(&fdc->sc_drives, fd, sc_drivechain);
		fd->sc_active = 0;
		goto loop;
	}

	if (bp->b_flags & B_FORMAT)
		finfo = (struct ne7_fd_formb *)bp->b_data;

	switch (fdc->sc_state) {
	case DEVIDLE:
		fdc->sc_errors = 0;
		fd->sc_skip = 0;
		fd->sc_bcount = bp->b_bcount;
		fd->sc_blkno = bp->b_blkno / (FDC_BSIZE / DEV_BSIZE);
		callout_stop(&fd->sc_motoroff_ch);
		if ((fd->sc_flags & FD_MOTOR_WAIT) != 0) {
			fdc->sc_state = MOTORWAIT;
			return 1;
		}
		if ((fd->sc_flags & FD_MOTOR) == 0) {
			/* Turn on the motor, being careful about pairing. */
			struct fd_softc *ofd = fdc->sc_fd[fd->sc_drive ^ 1];
			if (ofd && ofd->sc_flags & FD_MOTOR) {
				callout_stop(&ofd->sc_motoroff_ch);
				ofd->sc_flags &= ~(FD_MOTOR | FD_MOTOR_WAIT);
			}
			fd->sc_flags |= FD_MOTOR | FD_MOTOR_WAIT;
			fd_set_motor(fdc, 0);
			fdc->sc_state = MOTORWAIT;
			/* Allow .25s for motor to stabilize. */
			callout_reset(&fd->sc_motoron_ch, hz / 4,
			    fd_motor_on, fd);
			return 1;
		}
		/* Make sure the right drive is selected. */
		fd_set_motor(fdc, 0);

		/* fall through */
	case DOSEEK:
	doseek:
		if (fd->sc_cylin == bp->b_cylinder)
			goto doio;

		out_fdc(iot, ioh, NE7CMD_SPECIFY);/* specify command */
		out_fdc(iot, ioh, fd->sc_type->steprate);
		out_fdc(iot, ioh, 6);		/* XXX head load time == 6ms */

		out_fdc(iot, ioh, NE7CMD_SEEK);	/* seek function */
		out_fdc(iot, ioh, fd->sc_drive); /* drive number */
		out_fdc(iot, ioh, bp->b_cylinder * fd->sc_type->step);

		fd->sc_cylin = -1;
		fdc->sc_state = SEEKWAIT;

		iostat_seek(fd->sc_dk.dk_stats);
		disk_busy(&fd->sc_dk);

		callout_reset(&fdc->sc_timo_ch, 4 * hz, fdctimeout, fdc);
		return 1;

	case DOIO:
	doio:
		type = fd->sc_type;
		if (finfo)
			fd->sc_skip = (char *)&(finfo->fd_formb_cylno(0)) -
				      (char *)finfo;
		sec = fd->sc_blkno % type->seccyl;
		nblks = type->seccyl - sec;
		nblks = min(nblks, fd->sc_bcount / FDC_BSIZE);
		nblks = min(nblks, fdc->sc_maxiosize / FDC_BSIZE);
		fd->sc_nblks = nblks;
		fd->sc_nbytes = finfo ? bp->b_bcount : nblks * FDC_BSIZE;
		head = sec / type->sectrac;
		sec -= head * type->sectrac;
#ifdef DIAGNOSTIC
		{
			int block;
			block = (fd->sc_cylin * type->heads + head)
			    * type->sectrac + sec;
			if (block != fd->sc_blkno) {
				printf("fdcintr: block %d != blkno "
				    "%" PRId64 "\n", block, fd->sc_blkno);
#ifdef DDB
				Debugger();
#endif
			}
		}
#endif
		read = bp->b_flags & B_READ ? DMAMODE_READ : DMAMODE_WRITE;
		isa_dmastart(fdc->sc_ic, fdc->sc_drq,
		    (char *)bp->b_data + fd->sc_skip, fd->sc_nbytes,
		    NULL, read | DMAMODE_DEMAND, BUS_DMA_NOWAIT);
		bus_space_write_1(iot, fdc->sc_fdctlioh, 0, type->rate);
#ifdef FD_DEBUG
		printf("fdcintr: %s drive %d track %d head %d sec %d nblks %d\n",
			read ? "read" : "write", fd->sc_drive, fd->sc_cylin,
			head, sec, nblks);
#endif
		if (finfo) {
			/* formatting */
			if (out_fdc(iot, ioh, NE7CMD_FORMAT) < 0) {
				fdc->sc_errors = 4;
				fdcretry(fdc);
				goto loop;
			}
			out_fdc(iot, ioh, (head << 2) | fd->sc_drive);
			out_fdc(iot, ioh, finfo->fd_formb_secshift);
			out_fdc(iot, ioh, finfo->fd_formb_nsecs);
			out_fdc(iot, ioh, finfo->fd_formb_gaplen);
			out_fdc(iot, ioh, finfo->fd_formb_fillbyte);
		} else {
			if (read)
				out_fdc(iot, ioh, NE7CMD_READ);	/* READ */
			else
				out_fdc(iot, ioh, NE7CMD_WRITE); /* WRITE */
			out_fdc(iot, ioh, (head << 2) | fd->sc_drive);
			out_fdc(iot, ioh, fd->sc_cylin); /* track */
			out_fdc(iot, ioh, head);
			out_fdc(iot, ioh, sec + 1);	 /* sector +1 */
			out_fdc(iot, ioh, type->secsize);/* sector size */
			out_fdc(iot, ioh, type->sectrac);/* sectors/track */
			out_fdc(iot, ioh, type->gap1);	 /* gap1 size */
			out_fdc(iot, ioh, type->datalen);/* data length */
		}
		fdc->sc_state = IOCOMPLETE;

		disk_busy(&fd->sc_dk);

		/* allow 2 seconds for operation */
		callout_reset(&fdc->sc_timo_ch, 2 * hz, fdctimeout, fdc);
		return 1;				/* will return later */

	case SEEKWAIT:
		callout_stop(&fdc->sc_timo_ch);
		fdc->sc_state = SEEKCOMPLETE;
		/* allow 1/50 second for heads to settle */
		callout_reset(&fdc->sc_intr_ch, hz / 50, fdcintrcb, fdc);
		return 1;

	case SEEKCOMPLETE:
		/* no data on seek */
		disk_unbusy(&fd->sc_dk, 0, 0);

		/* Make sure seek really happened. */
		out_fdc(iot, ioh, NE7CMD_SENSEI);
		if (fdcresult(fdc) != 2 || (st0 & 0xf8) != 0x20 ||
		    cyl != bp->b_cylinder * fd->sc_type->step) {
#ifdef FD_DEBUG
			fdcstatus(fd->sc_dev, 2, "seek failed");
#endif
			fdcretry(fdc);
			goto loop;
		}
		fd->sc_cylin = bp->b_cylinder;
		goto doio;

	case IOTIMEDOUT:
		isa_dmaabort(fdc->sc_ic, fdc->sc_drq);
	case SEEKTIMEDOUT:
	case RECALTIMEDOUT:
	case RESETTIMEDOUT:
		fdcretry(fdc);
		goto loop;

	case IOCOMPLETE: /* IO DONE, post-analyze */
		callout_stop(&fdc->sc_timo_ch);

		disk_unbusy(&fd->sc_dk, (bp->b_bcount - bp->b_resid),
		    (bp->b_flags & B_READ));

		if (fdcresult(fdc) != 7 || (st0 & 0xf8) != 0) {
			isa_dmaabort(fdc->sc_ic, fdc->sc_drq);
#ifdef FD_DEBUG
			fdcstatus(fd->sc_dev, 7, bp->b_flags & B_READ ?
			    "read failed" : "write failed");
			printf("blkno %llu nblks %d\n",
			    (unsigned long long)fd->sc_blkno, fd->sc_nblks);
#endif
			fdcretry(fdc);
			goto loop;
		}
		isa_dmadone(fdc->sc_ic, fdc->sc_drq);
		if (fdc->sc_errors) {
			diskerr(bp, "fd", "soft error (corrected)", LOG_PRINTF,
			    fd->sc_skip / FDC_BSIZE, NULL);
			printf("\n");
			fdc->sc_errors = 0;
		}
		fd->sc_blkno += fd->sc_nblks;
		fd->sc_skip += fd->sc_nbytes;
		fd->sc_bcount -= fd->sc_nbytes;
		if (!finfo && fd->sc_bcount > 0) {
			bp->b_cylinder = fd->sc_blkno / fd->sc_type->seccyl;
			goto doseek;
		}
		fdfinish(fd, bp);
		goto loop;

	case DORESET:
		/* try a reset, keep motor on */
		fd_set_motor(fdc, 1);
		delay(100);
		fd_set_motor(fdc, 0);
		fdc->sc_state = RESETCOMPLETE;
		callout_reset(&fdc->sc_timo_ch, hz / 2, fdctimeout, fdc);
		return 1;			/* will return later */

	case RESETCOMPLETE:
		callout_stop(&fdc->sc_timo_ch);
		/* clear the controller output buffer */
		for (i = 0; i < 4; i++) {
			out_fdc(iot, ioh, NE7CMD_SENSEI);
			(void) fdcresult(fdc);
		}

		/* fall through */
	case DORECAL:
		out_fdc(iot, ioh, NE7CMD_RECAL); /* recalibrate function */
		out_fdc(iot, ioh, fd->sc_drive);
		fdc->sc_state = RECALWAIT;
		callout_reset(&fdc->sc_timo_ch, 5 * hz, fdctimeout, fdc);
		return 1;			/* will return later */

	case RECALWAIT:
		callout_stop(&fdc->sc_timo_ch);
		fdc->sc_state = RECALCOMPLETE;
		/* allow 1/30 second for heads to settle */
		callout_reset(&fdc->sc_intr_ch, hz / 30, fdcintrcb, fdc);
		return 1;			/* will return later */

	case RECALCOMPLETE:
		out_fdc(iot, ioh, NE7CMD_SENSEI);
		if (fdcresult(fdc) != 2 || (st0 & 0xf8) != 0x20 || cyl != 0) {
#ifdef FD_DEBUG
			fdcstatus(fd->sc_dev, 2, "recalibrate failed");
#endif
			fdcretry(fdc);
			goto loop;
		}
		fd->sc_cylin = 0;
		goto doseek;

	case MOTORWAIT:
		if (fd->sc_flags & FD_MOTOR_WAIT)
			return 1;		/* time's not up yet */
		goto doseek;

	default:
		fdcstatus(fd->sc_dev, 0, "stray interrupt");
		return 1;
	}
#undef	st0
#undef	cyl

out:
	cv_signal(&fdc->sc_cv);
	return 1;
}
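The DOIO arithmetic above turns a linear block number into floppy geometry: the remainder within the cylinder is split into a head number and a sector on that track. A stand-alone sketch (1.44 MB geometry is assumed for the demo: 18 sectors per track, 2 heads, so seccyl = 36):

#include <stdint.h>
#include <stdio.h>

struct chs { int head, sec; };

/* seccyl = heads * sectrac, as in struct fd_type. */
static struct chs
fd_decompose(int64_t blkno, int sectrac, int seccyl)
{
	struct chs c;
	int sec = blkno % seccyl;	/* sector within the cylinder */

	c.head = sec / sectrac;
	c.sec = sec - c.head * sectrac;	/* sector within the track */
	return c;
}

int
main(void)
{
	struct chs c = fd_decompose(20, 18, 36);

	printf("head %d sec %d\n", c.head, c.sec);	/* head 1 sec 2 */
	return 0;
}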
Example No. 10
void
ccdstart(struct ccd_softc *cs, struct buf *bp)
{
	long bcount, rcount;
	struct ccdbuf **cbpp, *cbp;
	caddr_t addr;
	daddr_t bn;
	struct partition *pp;
	int i, old_io = cs->sc_cflags & CCDF_OLD;

	CCD_DPRINTF(CCDB_FOLLOW, ("ccdstart(%p, %p, %s)\n", cs, bp,
	    bp->b_flags & B_READ? "read" : "write"));

	/* Instrumentation. */
	disk_busy(&cs->sc_dkdev);

	/*
	 * Translate the partition-relative block number to an absolute.
	 */
	bn = bp->b_blkno;
	if (DISKPART(bp->b_dev) != RAW_PART) {
		pp = &cs->sc_dkdev.dk_label->d_partitions[DISKPART(bp->b_dev)];
		bn += pp->p_offset;
	}

	/*
	 * Allocate component buffers
	 */
	cbpp = malloc(2 * cs->sc_nccdisks * sizeof(struct ccdbuf *), M_DEVBUF,
	    M_WAITOK);
	bzero(cbpp, 2 * cs->sc_nccdisks * sizeof(struct ccdbuf *));
	addr = bp->b_data;
	old_io = old_io || ((vaddr_t)addr & PAGE_MASK);
	for (bcount = bp->b_bcount; bcount > 0; bcount -= rcount) {
		rcount = ccdbuffer(cs, bp, bn, addr, bcount, cbpp, old_io);
		
		/*
		 * This is the old, slower, but less restrictive, mode of
		 * operation.  It allows interleaves which are not multiples
		 * of PAGE_SIZE and mirroring.
		 */
		if (old_io) {
			if ((cbpp[0]->cb_buf.b_flags & B_READ) == 0)
				cbpp[0]->cb_buf.b_vp->v_numoutput++;
			VOP_STRATEGY(&cbpp[0]->cb_buf);

			if ((cs->sc_cflags & CCDF_MIRROR) &&
			    ((cbpp[0]->cb_buf.b_flags & B_READ) == 0)) {
				cbpp[1]->cb_buf.b_vp->v_numoutput++;
				VOP_STRATEGY(&cbpp[1]->cb_buf);
			}
		}

		bn += btodb(rcount);
		addr += rcount;
	}

	/* The new leaner mode of operation */
	if (!old_io)
		/*
		 * Fire off the requests
		 */
		for (i = 0; i < 2*cs->sc_nccdisks; i++) {
			cbp = cbpp[i];
			if (cbp) {
				if ((cbp->cb_buf.b_flags & B_READ) == 0)
					cbp->cb_buf.b_vp->v_numoutput++;
				VOP_STRATEGY(&cbp->cb_buf);
			}
		}
	free(cbpp, M_DEVBUF);
}
Example No. 11
/*
 * sdstart looks to see if there is a buf waiting for the device
 * and that the device is not already busy. If both are true,
 * It dequeues the buf and creates a scsi command to perform the
 * transfer in the buf. The transfer request will call scsi_done
 * on completion, which will in turn call this routine again
 * so that the next queued transfer is performed.
 * The bufs are queued by the strategy routine (sdstrategy)
 *
 * This routine is also called after other non-queued requests
 * have been made of the scsi driver, to ensure that the queue
 * continues to be drained.
 */
void
sdstart(void *v)
{
    struct sd_softc *sc = (struct sd_softc *)v;
    struct scsi_link *link = sc->sc_link;
    struct scsi_xfer *xs;
    struct buf *bp;
    daddr64_t blkno;
    int nblks;
    int read;
    struct partition *p;
    int s;

    if (sc->flags & SDF_DYING)
        return;

    SC_DEBUG(link, SDEV_DB2, ("sdstart\n"));

    mtx_enter(&sc->sc_start_mtx);
    if (ISSET(sc->flags, SDF_STARTING)) {
        mtx_leave(&sc->sc_start_mtx);
        return;
    }

    SET(sc->flags, SDF_STARTING);
    mtx_leave(&sc->sc_start_mtx);

    CLR(sc->flags, SDF_WAITING);
    while (!ISSET(sc->flags, SDF_WAITING) &&
            (bp = sd_buf_dequeue(sc)) != NULL) {
        /*
         * If the device has become invalid, abort all the
         * reads and writes until all files have been closed and
         * re-opened
         */
        if ((link->flags & SDEV_MEDIA_LOADED) == 0) {
            bp->b_error = EIO;
            bp->b_flags |= B_ERROR;
            bp->b_resid = bp->b_bcount;
            s = splbio();
            biodone(bp);
            splx(s);
            continue;
        }

        xs = scsi_xs_get(link, SCSI_NOSLEEP);
        if (xs == NULL) {
            sd_buf_requeue(sc, bp);
            break;
        }

        blkno =
            bp->b_blkno / (sc->sc_dk.dk_label->d_secsize / DEV_BSIZE);
        p = &sc->sc_dk.dk_label->d_partitions[DISKPART(bp->b_dev)];
        blkno += DL_GETPOFFSET(p);
        nblks = howmany(bp->b_bcount, sc->sc_dk.dk_label->d_secsize);
        read = bp->b_flags & B_READ;

        /*
         *  Fill out the scsi command.  If the transfer will
         *  fit in a "small" cdb, use it.
         */
        if (!(link->flags & SDEV_ATAPI) &&
                !(link->quirks & SDEV_ONLYBIG) &&
                ((blkno & 0x1fffff) == blkno) &&
                ((nblks & 0xff) == nblks))
            sd_cmd_rw6(xs, read, blkno, nblks);
        else if (((blkno & 0xffffffff) == blkno) &&
                 ((nblks & 0xffff) == nblks))
            sd_cmd_rw10(xs, read, blkno, nblks);
        else if (((blkno & 0xffffffff) == blkno) &&
                 ((nblks & 0xffffffff) == nblks))
            sd_cmd_rw12(xs, read, blkno, nblks);
        else
            sd_cmd_rw16(xs, read, blkno, nblks);

        xs->flags |= (read ? SCSI_DATA_IN : SCSI_DATA_OUT);
        xs->timeout = 60000;
        xs->data = bp->b_data;
        xs->datalen = bp->b_bcount;

        xs->done = sd_buf_done;
        xs->cookie = bp;

        /* Instrumentation. */
        disk_busy(&sc->sc_dk);

        /* Mark disk as dirty. */
        if ((bp->b_flags & B_READ) == 0)
            sc->flags |= SDF_DIRTY;

        scsi_xs_exec(xs);
    }

    mtx_enter(&sc->sc_start_mtx);
    CLR(sc->flags, SDF_STARTING);
    mtx_leave(&sc->sc_start_mtx);
}
Example No. 12
/*
 * The kernel thread (one for every active snapshot).
 *
 * After wakeup it cleans the cache and runs the I/O requests.
 */
static void
fss_bs_thread(void *arg)
{
	bool thread_idle, is_valid;
	int error, i, todo, len, crotor, is_read;
	long off;
	char *addr;
	u_int32_t c, cl, ch, *indirp;
	struct buf *bp, *nbp;
	struct fss_softc *sc;
	struct fss_cache *scp, *scl;

	sc = arg;
	scl = sc->sc_cache+sc->sc_cache_size;
	crotor = 0;
	thread_idle = false;

	mutex_enter(&sc->sc_slock);

	for (;;) {
		if (thread_idle)
			cv_wait(&sc->sc_work_cv, &sc->sc_slock);
		thread_idle = true;
		if ((sc->sc_flags & FSS_BS_THREAD) == 0) {
			mutex_exit(&sc->sc_slock);
			kthread_exit(0);
		}

		/*
		 * Process I/O requests (persistent)
		 */

		if (sc->sc_flags & FSS_PERSISTENT) {
			if ((bp = bufq_get(sc->sc_bufq)) == NULL)
				continue;
			is_valid = FSS_ISVALID(sc);
			is_read = (bp->b_flags & B_READ);
			thread_idle = false;
			mutex_exit(&sc->sc_slock);

			if (is_valid) {
				disk_busy(sc->sc_dkdev);
				error = fss_bs_io(sc, FSS_READ, 0,
				    dbtob(bp->b_blkno), bp->b_bcount,
				    bp->b_data);
				disk_unbusy(sc->sc_dkdev,
				    (error ? 0 : bp->b_bcount), is_read);
			} else
				error = ENXIO;

			bp->b_error = error;
			bp->b_resid = (error ? bp->b_bcount : 0);
			biodone(bp);

			mutex_enter(&sc->sc_slock);
			continue;
		}

		/*
		 * Clean the cache
		 */
		for (i = 0; i < sc->sc_cache_size; i++) {
			crotor = (crotor + 1) % sc->sc_cache_size;
			scp = sc->sc_cache + crotor;
			if (scp->fc_type != FSS_CACHE_VALID)
				continue;
			mutex_exit(&sc->sc_slock);

			thread_idle = false;
			indirp = fss_bs_indir(sc, scp->fc_cluster);
			if (indirp != NULL) {
				error = fss_bs_io(sc, FSS_WRITE, sc->sc_clnext,
				    0, FSS_CLSIZE(sc), scp->fc_data);
			} else
				error = EIO;

			mutex_enter(&sc->sc_slock);
			if (error == 0) {
				*indirp = sc->sc_clnext++;
				sc->sc_indir_dirty = 1;
			} else
				fss_error(sc, "write error on backing store");

			scp->fc_type = FSS_CACHE_FREE;
			cv_broadcast(&sc->sc_cache_cv);
			break;
		}

		/*
		 * Process I/O requests
		 */
		if ((bp = bufq_get(sc->sc_bufq)) == NULL)
			continue;
		is_valid = FSS_ISVALID(sc);
		is_read = (bp->b_flags & B_READ);
		thread_idle = false;

		if (!is_valid) {
			mutex_exit(&sc->sc_slock);

			bp->b_error = ENXIO;
			bp->b_resid = bp->b_bcount;
			biodone(bp);

			mutex_enter(&sc->sc_slock);
			continue;
		}

		disk_busy(sc->sc_dkdev);

		/*
		 * First read from the snapshotted block device unless
		 * this request is completely covered by backing store.
		 */

		cl = FSS_BTOCL(sc, dbtob(bp->b_blkno));
		off = FSS_CLOFF(sc, dbtob(bp->b_blkno));
		ch = FSS_BTOCL(sc, dbtob(bp->b_blkno)+bp->b_bcount-1);
		error = 0;
		bp->b_resid = 0;
		bp->b_error = 0;
		for (c = cl; c <= ch; c++) {
			if (isset(sc->sc_copied, c))
				continue;
			mutex_exit(&sc->sc_slock);

			/* Not on backing store, read from device. */
			nbp = getiobuf(NULL, true);
			nbp->b_flags = B_READ;
			nbp->b_resid = nbp->b_bcount = bp->b_bcount;
			nbp->b_bufsize = bp->b_bcount;
			nbp->b_data = bp->b_data;
			nbp->b_blkno = bp->b_blkno;
			nbp->b_lblkno = 0;
			nbp->b_dev = sc->sc_bdev;
			SET(nbp->b_cflags, BC_BUSY);	/* mark buffer busy */

			bdev_strategy(nbp);

			error = biowait(nbp);
			if (error != 0) {
				bp->b_resid = bp->b_bcount;
				bp->b_error = nbp->b_error;
				disk_unbusy(sc->sc_dkdev, 0, is_read);
				biodone(bp);
			}
			putiobuf(nbp);

			mutex_enter(&sc->sc_slock);
			break;
		}
		if (error)
			continue;

		/*
		 * Replace those parts that have been saved to backing store.
		 */

		addr = bp->b_data;
		todo = bp->b_bcount;
		for (c = cl; c <= ch; c++, off = 0, todo -= len, addr += len) {
			len = FSS_CLSIZE(sc)-off;
			if (len > todo)
				len = todo;
			if (isclr(sc->sc_copied, c))
				continue;
			mutex_exit(&sc->sc_slock);

			indirp = fss_bs_indir(sc, c);
			if (indirp == NULL || *indirp == 0) {
				/*
				 * Not on backing store. Either in cache
				 * or hole in the snapshotted block device.
				 */

				mutex_enter(&sc->sc_slock);
				for (scp = sc->sc_cache; scp < scl; scp++)
					if (scp->fc_type == FSS_CACHE_VALID &&
					    scp->fc_cluster == c)
						break;
				if (scp < scl)
					memcpy(addr, (char *)scp->fc_data+off,
					    len);
				else
					memset(addr, 0, len);
				continue;
			}

			/*
			 * Read from backing store.
			 */
			error =
			    fss_bs_io(sc, FSS_READ, *indirp, off, len, addr);

			mutex_enter(&sc->sc_slock);
			if (error) {
				bp->b_resid = bp->b_bcount;
				bp->b_error = error;
				break;
			}
		}
		mutex_exit(&sc->sc_slock);

		disk_unbusy(sc->sc_dkdev, (error ? 0 : bp->b_bcount), is_read);
		biodone(bp);

		mutex_enter(&sc->sc_slock);
	}
}
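fss_bs_thread() walks a byte range cluster by cluster: FSS_BTOCL() and FSS_CLOFF() split a byte offset into a cluster number and an offset within that cluster, and each pass copies at most the remainder of the current cluster. A sketch with an assumed power-of-two cluster size (the real size comes from the softc):

#include <stdio.h>

#define CLSHIFT		15			/* assumed: 32 KB clusters */
#define CLSIZE		(1L << CLSHIFT)
#define BTOCL(off)	((off) >> CLSHIFT)	/* byte offset -> cluster */
#define CLOFF(off)	((off) & (CLSIZE - 1))	/* offset within cluster */

int
main(void)
{
	long pos = 40000, todo = 70000;		/* an arbitrary byte range */
	long c, len, off = CLOFF(pos);

	for (c = BTOCL(pos); todo > 0; c++, off = 0, todo -= len) {
		len = CLSIZE - off;		/* rest of this cluster... */
		if (len > todo)
			len = todo;		/* ...capped by the request */
		printf("cluster %ld off %ld len %ld\n", c, off, len);
	}
	return 0;
}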
Example No. 13
File: xbd.c  Project: MarginC/kame
static int
xbdstart(struct dk_softc *dksc, struct buf *bp)
{
	struct	xbd_softc *xs;
	struct xbdreq *pxr, *xr;
	struct	partition *pp;
	daddr_t	bn;
	int ret, runqueue;

	DPRINTF_FOLLOW(("xbdstart(%p, %p)\n", dksc, bp));

	runqueue = 1;
	ret = -1;

	xs = getxbd_softc(bp->b_dev);
	if (xs == NULL || xs->sc_shutdown) {
		bp->b_flags |= B_ERROR;
		bp->b_error = EIO;
		biodone(bp);
		return 0;
	}
	dksc = &xs->sc_dksc;

	/* XXXrcd:
	 * Translate partition relative blocks to absolute blocks,
	 * this probably belongs (somehow) in dksubr.c, since it
	 * is independent of the underlying code...  This will require
	 * that the interface be expanded slightly, though.
	 */
	bn = bp->b_blkno;
	if (DISKPART(bp->b_dev) != RAW_PART) {
		pp = &xs->sc_dksc.sc_dkdev.dk_label->
			d_partitions[DISKPART(bp->b_dev)];
		bn += pp->p_offset;
	}

	DPRINTF(XBDB_IO, ("xbdstart: addr %p, sector %llu, "
	    "count %ld [%s]\n", bp->b_data, (unsigned long long)bn,
	    bp->b_bcount, bp->b_flags & B_READ ? "read" : "write"));

	GET_XBDREQ(pxr);
	if (__predict_false(pxr == NULL))
		goto out;

	disk_busy(&dksc->sc_dkdev); /* XXX: put in dksubr.c */
	/*
	 * We have a request slot, return 0 to make dk_start remove
	 * the bp from the work queue.
	 */
	ret = 0;

	pxr->xr_bp = bp;
	pxr->xr_parent = pxr;
	pxr->xr_bn = bn;
	pxr->xr_bqueue = bp->b_bcount;
	pxr->xr_bdone = bp->b_bcount;
	pxr->xr_data = (vaddr_t)bp->b_data;
	pxr->xr_sc = xs;

	if (pxr->xr_data & (XEN_BSIZE - 1))
		map_align(pxr);

	fill_ring(pxr);

	while (__predict_false(pxr->xr_bqueue > 0)) {
		GET_XBDREQ(xr);
		if (__predict_false(xr == NULL))
			break;
		xr->xr_parent = pxr;
		fill_ring(xr);
	}

	if (__predict_false(pxr->xr_bqueue > 0)) {
		SIMPLEQ_INSERT_TAIL(&xbdr_suspended, pxr,
		    xr_suspended);
		DPRINTF(XBDB_IO, ("xbdstart: suspended xbdreq %p "
		    "for bp %p\n", pxr, bp));
	} else if (CANGET_XBDREQ() && BUFQ_PEEK(&bufq) != NULL) {
		/* 
		 * We have enough resources to start another bp and
		 * there are additional bps on the queue, dk_start
		 * will call us again and we'll run the queue then.
		 */
		runqueue = 0;
	}

 out:
	if (runqueue && last_req_prod != req_prod)
		signal_requests_to_xen();

	return ret;
}
Example No. 14
/*
 * Called from the controller.
 */
int
fdintr(struct fdc_softc *fdc)
{
#define	st0	fdc->sc_status[0]
#define	cyl	fdc->sc_status[1]
	struct fd_softc *fd;
	struct buf *bp;
	bus_space_tag_t iot = fdc->sc_iot;
	bus_space_handle_t ioh = fdc->sc_ioh;
	bus_space_handle_t ioh_ctl = fdc->sc_ioh_ctl;
	int read, head, sec, i, nblks, cylin;
	struct fd_type *type;
	struct fd_formb *finfo = NULL;
	int fd_bsize;

loop:
	/* Is there a transfer to this drive?  If not, deactivate drive. */
	fd = TAILQ_FIRST(&fdc->sc_link.fdlink.sc_drives);
	if (fd == NULL) {
		fdc->sc_state = DEVIDLE;
		return 1;
	}
	fd_bsize = FD_BSIZE(fd);

	bp = fd->sc_bp;
	if (bp == NULL) {
		fd->sc_ops = 0;
		TAILQ_REMOVE(&fdc->sc_link.fdlink.sc_drives, fd, sc_drivechain);
		goto loop;
	}

	if (bp->b_flags & B_FORMAT)
	    finfo = (struct fd_formb *)bp->b_data;

	cylin = ((bp->b_blkno * DEV_BSIZE) + (bp->b_bcount - bp->b_resid)) /
	    (fd_bsize * fd->sc_type->seccyl);

	switch (fdc->sc_state) {
	case DEVIDLE:
		fdc->sc_errors = 0;
		fd->sc_skip = 0;
		fd->sc_bcount = bp->b_bcount;
		fd->sc_blkno = bp->b_blkno / (fd_bsize / DEV_BSIZE);
		timeout_del(&fd->fd_motor_off_to);
		if ((fd->sc_flags & FD_MOTOR_WAIT) != 0) {
			fdc->sc_state = MOTORWAIT;
			return 1;
		}
		if ((fd->sc_flags & FD_MOTOR) == 0) {
			/* Turn on the motor, being careful about pairing. */
			struct fd_softc *ofd =
				fdc->sc_link.fdlink.sc_fd[fd->sc_drive ^ 1];
			if (ofd && ofd->sc_flags & FD_MOTOR) {
				timeout_del(&ofd->fd_motor_off_to);
				ofd->sc_flags &= ~(FD_MOTOR | FD_MOTOR_WAIT);
			}
			fd->sc_flags |= FD_MOTOR | FD_MOTOR_WAIT;
			fd_set_motor(fdc, 0);
			fdc->sc_state = MOTORWAIT;
			/* Allow .25s for motor to stabilize. */
			timeout_add_msec(&fd->fd_motor_on_to, 250);
			return 1;
		}
		/* Make sure the right drive is selected. */
		fd_set_motor(fdc, 0);

		/* FALLTHROUGH */
	case DOSEEK:
	doseek:
		if (fd->sc_cylin == cylin)
			goto doio;

		out_fdc(iot, ioh, NE7CMD_SPECIFY);/* specify command */
		out_fdc(iot, ioh, fd->sc_type->steprate);
		out_fdc(iot, ioh, 6);		/* XXX head load time == 6ms */

		out_fdc(iot, ioh, NE7CMD_SEEK);	/* seek function */
		out_fdc(iot, ioh, fd->sc_drive);	/* drive number */
		out_fdc(iot, ioh, cylin * fd->sc_type->step);

		fd->sc_cylin = -1;
		fdc->sc_state = SEEKWAIT;

		fd->sc_dk.dk_seek++;
		disk_busy(&fd->sc_dk);

		timeout_add_sec(&fd->fdtimeout_to, 4);
		return 1;

	case DOIO:
	doio:
		type = fd->sc_type;
		if (finfo)
		    fd->sc_skip = (char *)&(finfo->fd_formb_cylno(0)) -
			(char *)finfo;
		sec = fd->sc_blkno % type->seccyl;
		nblks = type->seccyl - sec;
		nblks = min(nblks, fd->sc_bcount / fd_bsize);
		nblks = min(nblks, FDC_MAXIOSIZE / fd_bsize);
		fd->sc_nblks = nblks;
		fd->sc_nbytes = finfo ? bp->b_bcount : nblks * fd_bsize;
		head = sec / type->sectrac;
		sec -= head * type->sectrac;
#ifdef DIAGNOSTIC
		{
			int block;

			block = (fd->sc_cylin * type->heads + head) *
			    type->sectrac + sec;
			if (block != fd->sc_blkno)
				panic("fdintr: block %d != blkno %llu",
				    block, fd->sc_blkno);
		}
#endif
		read = bp->b_flags & B_READ ? DMAMODE_READ : DMAMODE_WRITE;
		isadma_start(bp->b_data + fd->sc_skip, fd->sc_nbytes,
		    fdc->sc_drq, read);
		bus_space_write_1(iot, ioh_ctl, fdctl, type->rate);
#ifdef FD_DEBUG
		printf("fdintr: %s drive %d track %d head %d sec %d nblks %d\n",
		    read ? "read" : "write", fd->sc_drive, fd->sc_cylin, head,
		    sec, nblks);
#endif
		if (finfo) {
                        /* formatting */
			if (out_fdc(iot, ioh, NE7CMD_FORMAT) < 0) {
			    fdc->sc_errors = 4;
			    fdretry(fd);
			    goto loop;
			}
                        out_fdc(iot, ioh, (head << 2) | fd->sc_drive);
                        out_fdc(iot, ioh, finfo->fd_formb_secshift);
                        out_fdc(iot, ioh, finfo->fd_formb_nsecs);
                        out_fdc(iot, ioh, finfo->fd_formb_gaplen);
                        out_fdc(iot, ioh, finfo->fd_formb_fillbyte);
		} else {
			if (read)
				out_fdc(iot, ioh, NE7CMD_READ);	/* READ */
			else
				out_fdc(iot, ioh, NE7CMD_WRITE);/* WRITE */
			out_fdc(iot, ioh, (head << 2) | fd->sc_drive);
			out_fdc(iot, ioh, fd->sc_cylin);	/* track */
			out_fdc(iot, ioh, head);
			out_fdc(iot, ioh, sec + 1);		/* sec +1 */
			out_fdc(iot, ioh, type->secsize);	/* sec size */
			out_fdc(iot, ioh, type->sectrac);	/* secs/track */
			out_fdc(iot, ioh, type->gap1);		/* gap1 size */
			out_fdc(iot, ioh, type->datalen);	/* data len */
		}
		fdc->sc_state = IOCOMPLETE;

		disk_busy(&fd->sc_dk);

		/* allow 2 seconds for operation */
		timeout_add_sec(&fd->fdtimeout_to, 2);
		return 1;				/* will return later */

	case SEEKWAIT:
		timeout_del(&fd->fdtimeout_to);
		fdc->sc_state = SEEKCOMPLETE;
		/* allow 1/50 second for heads to settle */
		timeout_add_msec(&fdc->fdcpseudointr_to, 20);
		return 1;

	case SEEKCOMPLETE:
		disk_unbusy(&fd->sc_dk, 0, 0);	/* no data on seek */

		/* Make sure seek really happened. */
		out_fdc(iot, ioh, NE7CMD_SENSEI);
		if (fdcresult(fdc) != 2 || (st0 & 0xf8) != 0x20 ||
		    cyl != cylin * fd->sc_type->step) {
#ifdef FD_DEBUG
			fdcstatus(&fd->sc_dev, 2, "seek failed");
#endif
			fdretry(fd);
			goto loop;
		}
		fd->sc_cylin = cylin;
		goto doio;

	case IOTIMEDOUT:
		isadma_abort(fdc->sc_drq);
	case SEEKTIMEDOUT:
	case RECALTIMEDOUT:
	case RESETTIMEDOUT:
		fdretry(fd);
		goto loop;

	case IOCOMPLETE: /* IO DONE, post-analyze */
		timeout_del(&fd->fdtimeout_to);

		disk_unbusy(&fd->sc_dk, (bp->b_bcount - bp->b_resid),
		    (bp->b_flags & B_READ));

		if (fdcresult(fdc) != 7 || (st0 & 0xf8) != 0) {
			isadma_abort(fdc->sc_drq);
#ifdef FD_DEBUG
			fdcstatus(&fd->sc_dev, 7, bp->b_flags & B_READ ?
			    "read failed" : "write failed");
			printf("blkno %lld nblks %d\n",
			    (long long)fd->sc_blkno, fd->sc_nblks);
#endif
			fdretry(fd);
			goto loop;
		}
		read = bp->b_flags & B_READ ? DMAMODE_READ : DMAMODE_WRITE;
		isadma_done(fdc->sc_drq);
		if (fdc->sc_errors) {
			diskerr(bp, "fd", "soft error", LOG_PRINTF,
			    fd->sc_skip / fd_bsize, (struct disklabel *)NULL);
			printf("\n");
			fdc->sc_errors = 0;
		}

		fd->sc_blkno += fd->sc_nblks;
		fd->sc_skip += fd->sc_nbytes;
		fd->sc_bcount -= fd->sc_nbytes;
		bp->b_resid -= fd->sc_nbytes;
		if (!finfo && fd->sc_bcount > 0) {
			cylin = fd->sc_blkno / fd->sc_type->seccyl;
			goto doseek;
		}
		fdfinish(fd, bp);
		goto loop;

	case DORESET:
		/* try a reset, keep motor on */
		fd_set_motor(fdc, 1);
		delay(100);
		fd_set_motor(fdc, 0);
		fdc->sc_state = RESETCOMPLETE;
		timeout_add_msec(&fd->fdtimeout_to, 500);
		return 1;			/* will return later */

	case RESETCOMPLETE:
		timeout_del(&fd->fdtimeout_to);
		/* clear the controller output buffer */
		for (i = 0; i < 4; i++) {
			out_fdc(iot, ioh, NE7CMD_SENSEI);
			(void) fdcresult(fdc);
		}

		/* FALLTHROUGH */
	case DORECAL:
		out_fdc(iot, ioh, NE7CMD_RECAL);	/* recal function */
		out_fdc(iot, ioh, fd->sc_drive);
		fdc->sc_state = RECALWAIT;
		timeout_add_sec(&fd->fdtimeout_to, 5);
		return 1;			/* will return later */

	case RECALWAIT:
		timeout_del(&fd->fdtimeout_to);
		fdc->sc_state = RECALCOMPLETE;
		/* allow 1/30 second for heads to settle */
		timeout_add(&fdc->fdcpseudointr_to, hz / 30);
		return 1;			/* will return later */

	case RECALCOMPLETE:
		out_fdc(iot, ioh, NE7CMD_SENSEI);
		if (fdcresult(fdc) != 2 || (st0 & 0xf8) != 0x20 || cyl != 0) {
#ifdef FD_DEBUG
			fdcstatus(&fd->sc_dev, 2, "recalibrate failed");
#endif
			fdretry(fd);
			goto loop;
		}
		fd->sc_cylin = 0;
		goto doseek;

	case MOTORWAIT:
		if (fd->sc_flags & FD_MOTOR_WAIT)
			return 1;		/* time's not up yet */
		goto doseek;

	default:
		fdcstatus(&fd->sc_dev, 0, "stray interrupt");
		return 1;
	}
#ifdef DIAGNOSTIC
	panic("fdintr: impossible");
#endif
#undef	st0
#undef	cyl
}
Example No. 15
static void
vndthread(void *arg)
{
	struct vnd_softc *vnd = arg;
	int s;

	/* Determine whether we can *use* VOP_BMAP and VOP_STRATEGY to
	 * directly access the backing vnode.  If we can, use these two
	 * operations to avoid messing with the local buffer cache.
	 * Otherwise fall back to regular VOP_READ/VOP_WRITE operations
	 * which are guaranteed to work with any file system. */
	if ((vnd->sc_flags & VNF_USE_VN_RDWR) == 0 &&
	    ! vnode_has_strategy(vnd))
		vnd->sc_flags |= VNF_USE_VN_RDWR;

#ifdef DEBUG
	if (vnddebug & VDB_INIT)
		printf("vndthread: vp %p, %s\n", vnd->sc_vp,
		    (vnd->sc_flags & VNF_USE_VN_RDWR) == 0 ?
		    "using bmap/strategy operations" :
		    "using read/write operations");
#endif

	s = splbio();
	vnd->sc_flags |= VNF_KTHREAD;
	wakeup(&vnd->sc_kthread);

	/*
	 * Dequeue requests and serve them depending on the available
	 * vnode operations.
	 */
	while ((vnd->sc_flags & VNF_VUNCONF) == 0) {
		struct vndxfer *vnx;
		int flags;
		struct buf *obp;
		struct buf *bp;

		obp = bufq_get(vnd->sc_tab);
		if (obp == NULL) {
			tsleep(&vnd->sc_tab, PRIBIO, "vndbp", 0);
			continue;
		}
		if ((vnd->sc_flags & VNF_USE_VN_RDWR)) {
			KASSERT(vnd->sc_pending > 0 &&
			    vnd->sc_pending <= VND_MAXPENDING(vnd));
			if (vnd->sc_pending-- == VND_MAXPENDING(vnd))
				wakeup(&vnd->sc_pending);
		}
		splx(s);
		flags = obp->b_flags;
#ifdef DEBUG
		if (vnddebug & VDB_FOLLOW)
			printf("vndthread(%p)\n", obp);
#endif

		if (vnd->sc_vp->v_mount == NULL) {
			obp->b_error = ENXIO;
			goto done;
		}
#ifdef VND_COMPRESSION
		/* handle a compressed read */
		if ((flags & B_READ) != 0 && (vnd->sc_flags & VNF_COMP)) {
			off_t bn;
			
			/* Convert to a byte offset within the file. */
			bn = obp->b_rawblkno *
			    vnd->sc_dkdev.dk_label->d_secsize;

			compstrategy(obp, bn);
			goto done;
		}
#endif /* VND_COMPRESSION */
		
		/*
		 * Allocate a header for this transfer and link it to the
		 * buffer
		 */
		s = splbio();
		vnx = VND_GETXFER(vnd);
		splx(s);
		vnx->vx_vnd = vnd;

		s = splbio();
		while (vnd->sc_active >= vnd->sc_maxactive) {
			tsleep(&vnd->sc_tab, PRIBIO, "vndac", 0);
		}
		vnd->sc_active++;
		splx(s);

		/* Instrumentation. */
		disk_busy(&vnd->sc_dkdev);

		bp = &vnx->vx_buf;
		buf_init(bp);
		bp->b_flags = (obp->b_flags & B_READ);
		bp->b_oflags = obp->b_oflags;
		bp->b_cflags = obp->b_cflags;
		bp->b_iodone = vndiodone;
		bp->b_private = obp;
		bp->b_vp = vnd->sc_vp;
		bp->b_objlock = bp->b_vp->v_interlock;
		bp->b_data = obp->b_data;
		bp->b_bcount = obp->b_bcount;
		BIO_COPYPRIO(bp, obp);

		/* Handle the request using the appropriate operations. */
		if ((vnd->sc_flags & VNF_USE_VN_RDWR) == 0)
			handle_with_strategy(vnd, obp, bp);
		else
			handle_with_rdwr(vnd, obp, bp);

		s = splbio();
		continue;

done:
		biodone(obp);
		s = splbio();
	}

	vnd->sc_flags &= ~(VNF_KTHREAD | VNF_VUNCONF);
	wakeup(&vnd->sc_kthread);
	splx(s);
	kthread_exit(0);
}
Example No. 16
File: ld.c  Project: MarginC/kame
static int
ldstart(struct ld_softc *sc, struct buf *bp)
{
	struct disklabel *lp;
	int part, s, rv;

	if ((sc->sc_flags & LDF_DETACH) != 0) {
		bp->b_error = EIO;
		bp->b_flags |= B_ERROR;
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return (-1);
	}

	part = DISKPART(bp->b_dev);
	lp = sc->sc_dk.dk_label;

	/*
	 * The transfer must be a whole number of blocks and the offset must
	 * not be negative.
	 */
	if ((bp->b_bcount % lp->d_secsize) != 0 || bp->b_blkno < 0) {
		bp->b_flags |= B_ERROR;
		biodone(bp);
		return (-1);
	}

	/*
	 * If it's a null transfer, return.
	 */
	if (bp->b_bcount == 0) {
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return (-1);
	}

	/*
	 * Do bounds checking and adjust the transfer.  If error, process.
	 * If past the end of partition, just return.
	 */
	if (part != RAW_PART &&
	    bounds_check_with_label(bp, lp,
	    (sc->sc_flags & (LDF_WLABEL | LDF_LABELLING)) != 0) <= 0) {
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return (-1);
	}

	/*
	 * Convert the logical block number to a physical one and put it in
	 * terms of the device's logical block size.
	 */
	if (lp->d_secsize >= DEV_BSIZE)
		bp->b_rawblkno = bp->b_blkno / (lp->d_secsize / DEV_BSIZE);
	else
		bp->b_rawblkno = bp->b_blkno * (DEV_BSIZE / lp->d_secsize);

	if (part != RAW_PART)
		bp->b_rawblkno += lp->d_partitions[part].p_offset;

	s = splbio();
	disk_busy(&sc->sc_dk);
	sc->sc_queuecnt++;
	splx(s);

	if ((rv = (*sc->sc_start)(sc, bp)) != 0) {
		bp->b_error = rv;
		bp->b_flags |= B_ERROR;
		bp->b_resid = bp->b_bcount;
		s = splbio();
		lddone(sc, bp);
		splx(s);
	}

	return (0);
}
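ldstart() ends with a conversion several of these drivers share: b_blkno counts DEV_BSIZE units, b_rawblkno must count device blocks, and the ratio divides or multiplies depending on which block size is larger. A pure-function sketch (both sizes are assumed to be powers of two):

#include <stdint.h>
#include <stdio.h>

#define DEV_BSIZE	512	/* the kernel's canonical block size */

/* DEV_BSIZE block number -> device block number, as in ldstart(). */
static int64_t
blk_to_rawblk(int64_t blkno, uint32_t secsize)
{
	if (secsize >= DEV_BSIZE)
		return blkno / (secsize / DEV_BSIZE);
	return blkno * (DEV_BSIZE / secsize);
}

int
main(void)
{
	printf("%lld\n", (long long)blk_to_rawblk(8, 4096));	/* 1 */
	printf("%lld\n", (long long)blk_to_rawblk(8, 512));	/* 8 */
	printf("%lld\n", (long long)blk_to_rawblk(8, 256));	/* 16 */
	return 0;
}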