Example #1
/* ARGSUSED */
int
raclose(dev_t dev, int flags, int fmt, struct lwp *l)
{
	struct ra_softc *ra = mscp_device_lookup(dev);
	int mask = (1 << DISKPART(dev));

	mutex_enter(&ra->ra_disk.dk_openlock);

	switch (fmt) {
	case S_IFCHR:
		ra->ra_disk.dk_copenmask &= ~mask;
		break;
	case S_IFBLK:
		ra->ra_disk.dk_bopenmask &= ~mask;
		break;
	}
	ra->ra_disk.dk_openmask =
	    ra->ra_disk.dk_copenmask | ra->ra_disk.dk_bopenmask;

	/*
	 * Should wait for I/O to complete on this partition even if
	 * others are open, but wait for work on blkflush().
	 */
#if notyet
	if (ra->ra_openpart == 0) {
		s = spluba();
		while (bufq_peek(udautab[unit]) != NULL)
			(void) tsleep(&udautab[unit], PZERO - 1,
			    "raclose", 0);
		splx(s);
		ra->ra_state = DK_CLOSED;
		ra->ra_wlabel = 0;
	}
#endif
	mutex_exit(&ra->ra_disk.dk_openlock);
	return (0);
}
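The interesting part of raclose() is the open-mask bookkeeping: one bit per partition, tracked separately for character and block opens, with the overall dk_openmask recomputed as their union. A minimal standalone sketch of that pattern (the toy_disk structure and toy_close() are hypothetical, not part of the driver):

struct toy_disk {
	int copenmask;	/* partitions open via the character device */
	int bopenmask;	/* partitions open via the block device */
	int openmask;	/* union of the two */
};

static void
toy_close(struct toy_disk *dk, int part, int ischr)
{
	int mask = 1 << part;

	if (ischr)
		dk->copenmask &= ~mask;
	else
		dk->bopenmask &= ~mask;
	/* the disk is fully closed once both masks are empty */
	dk->openmask = dk->copenmask | dk->bopenmask;
}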
Example #2
/* ARGSUSED */
int
mfs_start(struct mount *mp, int flags)
{
	struct vnode *vp;
	struct mfsnode *mfsp;
	struct proc *p;
	struct buf *bp;
	void *base;
	int sleepreturn = 0, refcnt, error;
	ksiginfoq_t kq;

	/*
	 * Ensure that file system is still mounted when getting mfsnode.
	 * Add a reference to the mfsnode to prevent it from disappearing in
	 * this routine.
	 */
	if ((error = vfs_busy(mp, NULL)) != 0)
		return error;
	vp = VFSTOUFS(mp)->um_devvp;
	mfsp = VTOMFS(vp);
	mutex_enter(&mfs_lock);
	mfsp->mfs_refcnt++;
	mutex_exit(&mfs_lock);
	vfs_unbusy(mp, false, NULL);

	base = mfsp->mfs_baseoff;
	mutex_enter(&mfs_lock);
	while (mfsp->mfs_shutdown != 1) {
		while ((bp = bufq_get(mfsp->mfs_buflist)) != NULL) {
			mutex_exit(&mfs_lock);
			mfs_doio(bp, base);
			mutex_enter(&mfs_lock);
		}
		/*
		 * If a non-ignored signal is received, try to unmount.
		 * If that fails, or the filesystem is already in the
		 * process of being unmounted, clear the signal (it has been
		 * "processed"), otherwise we will loop here, as tsleep
		 * will always return EINTR/ERESTART.
		 */
		if (sleepreturn != 0) {
			mutex_exit(&mfs_lock);
			if (dounmount(mp, 0, curlwp) != 0) {
				p = curproc;
				ksiginfo_queue_init(&kq);
				mutex_enter(p->p_lock);
				sigclearall(p, NULL, &kq);
				mutex_exit(p->p_lock);
				ksiginfo_queue_drain(&kq);
			}
			sleepreturn = 0;
			mutex_enter(&mfs_lock);
			continue;
		}

		sleepreturn = cv_wait_sig(&mfsp->mfs_cv, &mfs_lock);
	}
	KASSERT(bufq_peek(mfsp->mfs_buflist) == NULL);
	refcnt = --mfsp->mfs_refcnt;
	mutex_exit(&mfs_lock);
	if (refcnt == 0) {
		bufq_free(mfsp->mfs_buflist);
		cv_destroy(&mfsp->mfs_cv);
		kmem_free(mfsp, sizeof(*mfsp));
	}
	return (sleepreturn);
}
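Structurally, mfs_start() is an interlocked worker loop: drain the buffer queue with the lock dropped around each I/O, then sleep interruptibly and let a signal trigger an unmount attempt. A condensed sketch of that shape, using the NetBSD mutex/condvar primitives but hypothetical toy_* names for everything else:

struct toy_req;

struct toy {
	kmutex_t	lock;
	kcondvar_t	cv;
	bool		shutdown;
};

struct toy_req	*toy_dequeue(struct toy *);	/* assumed helpers */
void		 toy_doio(struct toy_req *);

static int
toy_worker(struct toy *t)
{
	struct toy_req *r;
	int sig = 0;

	mutex_enter(&t->lock);
	while (!t->shutdown) {
		while ((r = toy_dequeue(t)) != NULL) {
			mutex_exit(&t->lock);	/* never do I/O locked */
			toy_doio(r);
			mutex_enter(&t->lock);
		}
		if (sig != 0)
			break;		/* interrupted: let the caller decide */
		sig = cv_wait_sig(&t->cv, &t->lock);
	}
	mutex_exit(&t->lock);
	return sig;
}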
Example #3
/*
 * sdstart looks to see if there is a buf waiting for the device
 * and that the device is not already busy. If both are true,
 * it dequeues the buf and creates a scsi command to perform the
 * transfer in the buf. The transfer request will call scsi_done
 * on completion, which will in turn call this routine again
 * so that the next queued transfer is performed.
 * The bufs are queued by the strategy routine (sdstrategy).
 *
 * This routine is also called after other non-queued requests
 * have been made of the scsi driver, to ensure that the queue
 * continues to be drained.
 */
void
sdstart(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct sd_softc *sc = link->device_softc;
	struct buf *bp;
	u_int64_t secno;
	int nsecs;
	int read;
	struct partition *p;

	if (sc->flags & SDF_DYING) {
		scsi_xs_put(xs);
		return;
	}
	if ((link->flags & SDEV_MEDIA_LOADED) == 0) {
		bufq_drain(&sc->sc_bufq);
		scsi_xs_put(xs);
		return;
	}

	bp = bufq_dequeue(&sc->sc_bufq);
	if (bp == NULL) {
		scsi_xs_put(xs);
		return;
	}

	secno = DL_BLKTOSEC(sc->sc_dk.dk_label, bp->b_blkno);

	p = &sc->sc_dk.dk_label->d_partitions[DISKPART(bp->b_dev)];
	secno += DL_GETPOFFSET(p);
	nsecs = howmany(bp->b_bcount, sc->sc_dk.dk_label->d_secsize);
	read = bp->b_flags & B_READ;

	/*
	 *  Fill out the scsi command.  If the transfer will
	 *  fit in a "small" cdb, use it.
	 */
	if (!(link->flags & SDEV_ATAPI) &&
	    !(link->quirks & SDEV_ONLYBIG) &&
	    ((secno & 0x1fffff) == secno) &&
	    ((nsecs & 0xff) == nsecs))
		sd_cmd_rw6(xs, read, secno, nsecs);
	else if (((secno & 0xffffffff) == secno) &&
	    ((nsecs & 0xffff) == nsecs))
		sd_cmd_rw10(xs, read, secno, nsecs);
	else if (((secno & 0xffffffff) == secno) &&
	    ((nsecs & 0xffffffff) == nsecs))
		sd_cmd_rw12(xs, read, secno, nsecs);
	else
		sd_cmd_rw16(xs, read, secno, nsecs);

	xs->flags |= (read ? SCSI_DATA_IN : SCSI_DATA_OUT);
	xs->timeout = 60000;
	xs->data = bp->b_data;
	xs->datalen = bp->b_bcount;

	xs->done = sd_buf_done;
	xs->cookie = bp;
	xs->bp = bp;

	/* Instrumentation. */
	disk_busy(&sc->sc_dk);

	/* Mark disk as dirty. */
	if (!read)
		sc->flags |= SDF_DIRTY;

	scsi_xs_exec(xs);

	/* move on to the next I/O */
	if (ISSET(sc->flags, SDF_WAITING))
		CLR(sc->flags, SDF_WAITING);
	else if (bufq_peek(&sc->sc_bufq))
		scsi_xsh_add(&sc->sc_xsh);
}
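The CDB selection above is essentially a field-width check: use the smallest READ/WRITE command whose LBA and transfer-length fields can hold secno and nsecs (the driver additionally refuses the 6-byte form for ATAPI devices and SDEV_ONLYBIG quirks). The same decision in isolation (cdb_size() is a hypothetical helper, not part of the driver):

#include <sys/types.h>

static int
cdb_size(u_int64_t secno, u_int32_t nsecs)
{
	if (secno <= 0x1fffff && nsecs <= 0xff)
		return 6;	/* READ/WRITE(6): 21-bit LBA, 8-bit count */
	if (secno <= 0xffffffff && nsecs <= 0xffff)
		return 10;	/* READ/WRITE(10): 32-bit LBA, 16-bit count */
	if (secno <= 0xffffffff)
		return 12;	/* READ/WRITE(12): 32-bit LBA, 32-bit count */
	return 16;		/* READ/WRITE(16): 64-bit LBA */
}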
Example #4
File: mba.c Project: ryo/netbsd-src
/*
 * We got an interrupt. Check type of interrupt and call the specific
 * device interrupt handling routine.
 */
void
mbaintr(void *mba)
{
	struct mba_softc * const sc = mba;
	struct mba_device *md;
	struct buf *bp;
	int itype, attn, anr;

	itype = MBA_RCSR(MBA_SR);
	MBA_WCSR(MBA_SR, itype);

	attn = MBA_RCSR(MUREG(0, MU_AS)) & 0xff;
	MBA_WCSR(MUREG(0, MU_AS), attn);

	if (sc->sc_state == SC_AUTOCONF)
		return;	/* During autoconfig */

	md = STAILQ_FIRST(&sc->sc_xfers);
	bp = bufq_peek(md->md_q);
	/*
	 * A data-transfer interrupt. Current operation is finished,
	 * call that device's finish routine to see what to do next.
	 */
	if (sc->sc_state == SC_ACTIVE) {
		sc->sc_state = SC_IDLE;
		switch ((*md->md_finish)(md, itype, &attn)) {

		case XFER_FINISH:
			/*
			 * Transfer is finished. Take the buffer off the
			 * drive queue and take the drive off the adapter
			 * queue.
			 * If more to transfer, start the adapter again
			 * by calling mbastart().
			 */
			(void)bufq_get(md->md_q);
			STAILQ_REMOVE_HEAD(&sc->sc_xfers, md_link);
			if (bufq_peek(md->md_q) != NULL) {
				STAILQ_INSERT_TAIL(&sc->sc_xfers, md, md_link);
			}
	
			bp->b_resid = 0;
			biodone(bp);
			if (!STAILQ_EMPTY(&sc->sc_xfers))
				mbastart(sc);
			break;

		case XFER_RESTART:
			/*
			 * Something went wrong with the transfer. Try again.
			 */
			mbastart(sc);
			break;
		}
	}

	while (attn) {
		anr = ffs(attn) - 1;
		attn &= ~(1 << anr);
		if (sc->sc_md[anr]->md_attn == 0)
			panic("Should check for new MBA device %d", anr);
		(*sc->sc_md[anr]->md_attn)(sc->sc_md[anr]);
	}
}
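The attention loop at the end of mbaintr() is the standard ffs(3) idiom for walking the set bits of a status byte, lowest first. The same idiom in isolation (the handle_unit() callback is hypothetical):

#include <strings.h>

void handle_unit(int unit);	/* assumed per-unit attention handler */

static void
dispatch_attn(int attn)
{
	int unit;

	while (attn != 0) {
		unit = ffs(attn) - 1;	/* index of lowest set bit */
		attn &= ~(1 << unit);	/* clear it */
		handle_unit(unit);
	}
}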
Example #5
static void
udf_doshedule(struct udf_mount *ump)
{
	struct buf *buf;
	struct timespec now, *last;
	struct strat_private *priv = PRIV(ump);
	void (*b_callback)(struct buf *);
	int new_queue;
	int error;

	buf = bufq_get(priv->queues[priv->cur_queue]);
	if (buf) {
		/* transfer from the current queue to the device queue */
		mutex_exit(&priv->discstrat_mutex);

		/* transform buffer to synchronous; XXX needed? */
		b_callback = buf->b_iodone;
		buf->b_iodone = NULL;
		CLR(buf->b_flags, B_ASYNC);

		/* issue and wait on completion */
		udf_issue_buf(ump, priv->cur_queue, buf);
		biowait(buf);

		mutex_enter(&priv->discstrat_mutex);

		/* if there is a write error, repair it here; otherwise propagate */
		if (buf->b_error && ((buf->b_flags & B_READ) == 0)) {
			/* check what we need to do */
			panic("UDF write error, can't handle yet!\n");
		}

		/* propagate result to higher layers */
		if (b_callback) {
			buf->b_iodone = b_callback;
			(*buf->b_iodone)(buf);
		}

		return;
	}

	/* Check if we're idling in this state */
	vfs_timestamp(&now);
	last = &priv->last_queued[priv->cur_queue];
	if (ump->discinfo.mmc_class == MMC_CLASS_CD) {
		/* don't switch too fast for CD media; it's expensive in time */
		if (now.tv_sec - last->tv_sec < 3)
			return;
	}

	/* check if we can/should switch */
	new_queue = priv->cur_queue;

	if (bufq_peek(priv->queues[UDF_SHED_READING]))
		new_queue = UDF_SHED_READING;
	if (bufq_peek(priv->queues[UDF_SHED_WRITING]))		/* only for unmount */
		new_queue = UDF_SHED_WRITING;
	if (bufq_peek(priv->queues[UDF_SHED_SEQWRITING]))
		new_queue = UDF_SHED_SEQWRITING;
	if (priv->cur_queue == UDF_SHED_READING) {
		if (new_queue == UDF_SHED_SEQWRITING) {
			/* TODO use flag to signal if this is needed */
			mutex_exit(&priv->discstrat_mutex);

			/* update trackinfo for data and metadata */
			error = udf_update_trackinfo(ump,
					&ump->data_track);
			assert(error == 0);
			error = udf_update_trackinfo(ump,
					&ump->metadata_track);
			assert(error == 0);
			mutex_enter(&priv->discstrat_mutex);
			__USE(error);
		}
	}

	if (new_queue != priv->cur_queue) {
		DPRINTF(SHEDULE, ("switching from %d to %d\n",
			priv->cur_queue, new_queue));
	}

	priv->cur_queue = new_queue;
}
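The queue switch at the bottom of udf_doshedule() is a fixed-priority pick: sequential writes win over writes, writes over reads, and the current queue is kept when nothing is pending. Restated as a helper (pick_queue() is hypothetical; struct strat_private and the UDF_SHED_* constants are the ones used above):

static int
pick_queue(struct strat_private *priv)
{
	int q = priv->cur_queue;	/* default: stay on the current queue */

	if (bufq_peek(priv->queues[UDF_SHED_READING]))
		q = UDF_SHED_READING;
	if (bufq_peek(priv->queues[UDF_SHED_WRITING]))
		q = UDF_SHED_WRITING;		/* only used for unmount */
	if (bufq_peek(priv->queues[UDF_SHED_SEQWRITING]))
		q = UDF_SHED_SEQWRITING;	/* later checks override earlier ones */
	return q;
}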