Example #1
0
static void
ptoninvalidate(struct cam_periph *periph)
{
	struct pt_softc *softc;
	struct bio *q_bio;
	struct buf *q_bp;

	softc = (struct pt_softc *)periph->softc;

	/*
	 * De-register any async callbacks (registering for an empty
	 * event mask removes the callback).
	 */
	xpt_register_async(0, ptasync, periph, periph->path);

	softc->flags |= PT_FLAG_DEVICE_INVALID;

	/*
	 * Return all queued I/O with ENXIO.
	 * XXX Handle any transactions queued to the card
	 *     with XPT_ABORT_CCB.
	 */
	while ((q_bio = bioq_takefirst(&softc->bio_queue)) != NULL) {
		q_bp = q_bio->bio_buf;
		q_bp->b_resid = q_bp->b_bcount;
		q_bp->b_error = ENXIO;
		q_bp->b_flags |= B_ERROR;
		biodone(q_bio);
	}

	xpt_print(periph->path, "lost device\n");
}
Example #2
0
static void
destroy_geom_disk(struct nvd_disk *ndisk)
{
	struct bio	*bp;
	struct disk	*disk;
	uint32_t	unit;
	int		cnt = 0;

	disk = ndisk->disk;
	unit = disk->d_unit;
	taskqueue_free(ndisk->tq);

	disk_destroy(ndisk->disk);

	mtx_lock(&ndisk->bioqlock);
	for (;;) {
		bp = bioq_takefirst(&ndisk->bioq);
		if (bp == NULL)
			break;
		bp->bio_error = EIO;
		bp->bio_flags |= BIO_ERROR;
		bp->bio_resid = bp->bio_bcount;
		cnt++;
		biodone(bp);
	}

	printf(NVD_STR"%u: lost device - %d outstanding\n", unit, cnt);
	printf(NVD_STR"%u: removing device entry\n", unit);

	mtx_unlock(&ndisk->bioqlock);

	mtx_destroy(&ndisk->bioqlock);
}
Example #3
0
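/*
 * Worker thread: sleep until requests are queued, hand each bio to the
 * class's uzip_do() handler, and exit via kproc_exit() once GUZ_SHUTDOWN
 * has been raised.
 */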
void
g_uzip_wrkthr(void *arg)
{
	struct g_uzip_softc *sc;
	struct bio *bp;

	sc = (struct g_uzip_softc *)arg;
	thread_lock(curthread);
	sched_prio(curthread, PRIBIO);
	thread_unlock(curthread);

	for (;;) {
		mtx_lock(&sc->queue_mtx);
		if (sc->wrkthr_flags & GUZ_SHUTDOWN) {
			sc->wrkthr_flags |= GUZ_EXITING;
			mtx_unlock(&sc->queue_mtx);
			kproc_exit(0);
		}
		bp = bioq_takefirst(&sc->bio_queue);
		if (!bp) {
			msleep(sc, &sc->queue_mtx, PRIBIO | PDROP,
			    "wrkwait", 0);
			continue;
		}
		mtx_unlock(&sc->queue_mtx);
		sc->uzip_do(sc, bp);
	}
}
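Example #3 shows only the consumer side of the queue. The producer is normally the GEOM start routine, which queues the bio under the same mutex and then wakes the worker sleeping on the softc pointer. The sketch below is a minimal, hypothetical version of that side: struct g_foo_softc and g_foo_start() are invented names that mirror the queue_mtx/bio_queue layout above, not code from g_uzip itself.

struct g_foo_softc {
	struct mtx		queue_mtx;	/* protects bio_queue */
	struct bio_queue_head	bio_queue;	/* requests for the worker */
};

static void
g_foo_start(struct bio *bp)
{
	struct g_foo_softc *sc;

	sc = bp->bio_to->geom->softc;

	mtx_lock(&sc->queue_mtx);
	/* Queue the request for the worker thread. */
	bioq_insert_tail(&sc->bio_queue, bp);
	mtx_unlock(&sc->queue_mtx);

	/* Wake the worker, which msleep()s on the softc pointer. */
	wakeup(sc);
}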
Example #4
0
static void
mcd_start(struct mcd_softc *sc)
{
    struct bio *bp;

    if (sc->data.flags & MCDMBXBSY) {
        return;
    }

    bp = bioq_takefirst(&sc->data.head);
    if (bp == NULL) {
        /* nothing to do */
        return;
    }

    /* block found to process, dequeue */
    /*MCD_TRACE("mcd_start: found block bp=0x%x\n",bp,0,0,0);*/
    sc->data.flags |= MCDMBXBSY;

    sc->data.mbx.retry = MCD_RETRYS;
    sc->data.mbx.bp = bp;

    mcd_doread(sc, MCD_S_BEGIN,&(sc->data.mbx));
    return;
}
Example #5
0
static void
nvd_bioq_process(void *arg, int pending)
{
	struct nvd_disk *ndisk = arg;
	struct bio *bp;

	for (;;) {
		mtx_lock(&ndisk->bioqlock);
		bp = bioq_takefirst(&ndisk->bioq);
		mtx_unlock(&ndisk->bioqlock);
		if (bp == NULL)
			break;

		if (nvd_bio_submit(ndisk, bp) != 0) {
			continue;
		}

#ifdef BIO_ORDERED
		/*
		 * BIO_ORDERED flag dictates that the bio with BIO_ORDERED
		 *  flag set must be completed before proceeding with
		 *  additional bios.
		 */
		if (bp->bio_flags & BIO_ORDERED) {
			while (ndisk->cur_depth > 0) {
				pause("nvd flush", 1);
			}
		}
#endif
	}
}
Example #6
0
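/*
 * Task queue handler: drain the chip's bio queue, dispatching standard or
 * raw reads and writes (and block erases for BIO_DELETE), then complete
 * each bio, flagging EIO on any uncorrectable error.
 */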
static void
nand_io_proc(void *arg, int pending)
{
	struct nand_chip *chip = arg;
	struct bio *bp;
	int err = 0;

	for (;;) {
		mtx_lock(&chip->qlock);
		bp = bioq_takefirst(&chip->bioq);
		mtx_unlock(&chip->qlock);
		if (bp == NULL)
			break;

		if (bp->bio_driver1 == BIO_NAND_STD) {
			if (bp->bio_cmd == BIO_READ) {
				err = nand_read(chip,
				    bp->bio_offset & 0xffffffff,
				    bp->bio_data, bp->bio_bcount);
			} else if (bp->bio_cmd == BIO_WRITE) {
				err = nand_write(chip,
				    bp->bio_offset & 0xffffffff,
				    bp->bio_data, bp->bio_bcount);
			}
		} else if (bp->bio_driver1 == BIO_NAND_RAW) {
			if (bp->bio_cmd == BIO_READ) {
				err = nand_read_raw(chip,
				    bp->bio_offset & 0xffffffff,
				    bp->bio_data, bp->bio_bcount);
			} else if (bp->bio_cmd == BIO_WRITE) {
				err = nand_write_raw(chip,
				    bp->bio_offset & 0xffffffff,
				    bp->bio_data, bp->bio_bcount);
			}
		} else
			panic("Unknown access type in bio->bio_driver1\n");

		if (bp->bio_cmd == BIO_DELETE) {
			nand_debug(NDBG_GEOM, "Delete on chip%d offset %lld "
			    "length %ld\n", chip->num, bp->bio_offset,
			    bp->bio_bcount);
			err = nand_erase_blocks(chip,
			    bp->bio_offset & 0xffffffff,
			    bp->bio_bcount);
		}

		if (err == 0 || err == ECC_CORRECTABLE)
			bp->bio_resid = 0;
		else {
			nand_debug(NDBG_GEOM,"nand_[read|write|erase_blocks] "
			    "error: %d\n", err);

			bp->bio_error = EIO;
			bp->bio_flags |= BIO_ERROR;
			bp->bio_resid = bp->bio_bcount;
		}
		biodone(bp);
	}
}
Example #7
0
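/*
 * Complete every bio remaining on the queue with the given error,
 * updating the supplied devstat (if any) through biofinish().
 */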
void
bioq_flush(struct bio_queue_head *head, struct devstat *stp, int error)
{
	struct bio *bp;

	while ((bp = bioq_takefirst(head)) != NULL)
		biofinish(bp, stp, error);
}
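bioq_flush() packages the open-coded drain loops seen in the destroy_geom_disk() examples above: it takes each queued bio and passes it to biofinish(), which records the error, updates the devstat if one is supplied, and calls biodone(). A hypothetical detach path might use it as in the sketch below; struct foo_softc, its fields, and foo_detach_queue() are invented for illustration.

struct foo_softc {
	struct mtx		bioq_mtx;	/* protects bioq */
	struct bio_queue_head	bioq;		/* pending requests */
	struct devstat		*stats;		/* may be NULL */
};

static void
foo_detach_queue(struct foo_softc *sc)
{
	mtx_lock(&sc->bioq_mtx);
	/* Complete every queued bio with ENXIO, updating the devstat. */
	bioq_flush(&sc->bioq, sc->stats, ENXIO);
	mtx_unlock(&sc->bioq_mtx);
}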
Example #8
0
static void
nvd_bioq_process(void *arg, int pending)
{
	struct nvd_disk *ndisk = arg;
	struct bio *bp;
	int err;

	for (;;) {
		mtx_lock(&ndisk->bioqlock);
		bp = bioq_takefirst(&ndisk->bioq);
		mtx_unlock(&ndisk->bioqlock);
		if (bp == NULL)
			break;

#ifdef BIO_ORDERED
		/*
		 * BIO_ORDERED flag dictates that all outstanding bios
		 *  must be completed before processing the bio with
		 *  BIO_ORDERED flag set.
		 */
		if (bp->bio_flags & BIO_ORDERED) {
			while (ndisk->cur_depth > 0) {
				pause("nvd flush", 1);
			}
		}
#endif

		bp->bio_driver1 = NULL;
		atomic_add_int(&ndisk->cur_depth, 1);

		err = nvme_ns_bio_process(ndisk->ns, bp, nvd_done);

		if (err) {
			atomic_add_int(&ndisk->cur_depth, -1);
			bp->bio_error = err;
			bp->bio_flags |= BIO_ERROR;
			bp->bio_resid = bp->bio_bcount;
			biodone(bp);
		}

#ifdef BIO_ORDERED
		/*
		 * BIO_ORDERED flag dictates that the bio with BIO_ORDERED
		 *  flag set must be completed before proceeding with
		 *  additional bios.
		 */
		if (bp->bio_flags & BIO_ORDERED) {
			while (ndisk->cur_depth > 0) {
				pause("nvd flush", 1);
			}
		}
#endif
	}
}
Example #9
0
/*
 * Set up and start the next I/O.  Transition to the I/O state, but allow the
 * caller to schedule the next timeout, as this may be called either from an
 * initial attach context, or from the task queue, which requires different
 * behaviour.
 */
static void
altera_sdcard_nextio(struct altera_sdcard_softc *sc)
{
	struct bio *bp;

	ALTERA_SDCARD_LOCK_ASSERT(sc);
	KASSERT(sc->as_currentbio == NULL,
	    ("%s: bio already active", __func__));

	bp = bioq_takefirst(&sc->as_bioq);
	if (bp == NULL)
		panic("%s: bioq empty", __func__);
	altera_sdcard_io_start(sc, bp);
	sc->as_state = ALTERA_SDCARD_STATE_IO;
}
Example #10
0
static void
destroy_geom_disk(struct nvd_disk *ndisk)
{
	struct bio *bp;

	taskqueue_free(ndisk->tq);
	disk_destroy(ndisk->disk);

	mtx_lock(&ndisk->bioqlock);
	for (;;) {
		bp = bioq_takefirst(&ndisk->bioq);
		if (bp == NULL)
			break;
		bp->bio_error = EIO;
		bp->bio_flags |= BIO_ERROR;
		bp->bio_resid = bp->bio_bcount;

		biodone(bp);
	}
	mtx_unlock(&ndisk->bioqlock);

	mtx_destroy(&ndisk->bioqlock);
}
Example #11
0
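/*
 * Task queue handler: drain the disk's bio queue and dispatch each read
 * or write to the parent CFI device.
 */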
static void
cfi_io_proc(void *arg, int pending)
{
	struct cfi_disk_softc *sc = arg;
	struct cfi_softc *cfi = sc->parent;
	struct bio *bp;

	for (;;) {
		mtx_lock(&sc->qlock);
		bp = bioq_takefirst(&sc->bioq);
		mtx_unlock(&sc->qlock);
		if (bp == NULL)
			break;

		switch (bp->bio_cmd) {
		case BIO_READ:
			cfi_disk_read(cfi, bp);
			break;
		case BIO_WRITE:
			cfi_disk_write(cfi, bp);
			break;
		}
	}
}
Example #12
0
void
destroy_geom_disk(struct nand_chip *chip)
{
	struct bio *bp;

	taskqueue_free(chip->tq);
	disk_destroy(chip->ndisk);
	disk_destroy(chip->rdisk);

	mtx_lock(&chip->qlock);
	for (;;) {
		bp = bioq_takefirst(&chip->bioq);
		if (bp == NULL)
			break;
		bp->bio_error = EIO;
		bp->bio_flags |= BIO_ERROR;
		bp->bio_resid = bp->bio_bcount;

		biodone(bp);
	}
	mtx_unlock(&chip->qlock);

	mtx_destroy(&chip->qlock);
}
Example #13
0
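/*
 * Worker thread: wait for queued bios, build an HTIF block request for
 * each read or write, issue it with htif_command(), and complete the bio,
 * flagging EIO if no completion interrupt arrives in time.
 */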
static void
htif_blk_task(void *arg)
{
	struct htif_blk_request req __aligned(HTIF_ALIGN);
	struct htif_blk_softc *sc;
	uint64_t req_paddr;
	struct bio *bp;
	uint64_t paddr;
	uint64_t resp;
	uint64_t cmd;
	int i;

	sc = (struct htif_blk_softc *)arg;

	while (1) {
		HTIF_BLK_LOCK(sc);
		do {
			bp = bioq_takefirst(&sc->bio_queue);
			if (bp == NULL)
				msleep(sc, &sc->sc_mtx, PRIBIO, "jobqueue", 0);
		} while (bp == NULL);
		HTIF_BLK_UNLOCK(sc);

		if (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE) {
			HTIF_BLK_LOCK(sc);

			rmb();
			req.offset = (bp->bio_pblkno * sc->disk->d_sectorsize);
			req.size = bp->bio_bcount;
			paddr = vtophys(bp->bio_data);
			KASSERT(paddr != 0, ("paddr is 0"));
			req.addr = paddr;
			sc->curtag++;
			req.tag = sc->curtag;

			cmd = sc->index;
			cmd <<= HTIF_DEV_ID_SHIFT;
			if (bp->bio_cmd == BIO_READ)
				cmd |= (HTIF_CMD_READ << HTIF_CMD_SHIFT);
			else
				cmd |= (HTIF_CMD_WRITE << HTIF_CMD_SHIFT);
			req_paddr = vtophys(&req);
			KASSERT(req_paddr != 0, ("req_paddr is 0"));
			cmd |= req_paddr;

			sc->cmd_done = 0;
			resp = htif_command(cmd);
			htif_blk_intr(sc, resp);

			/* Wait for interrupt */
			i = 0;
			while (sc->cmd_done == 0) {
				msleep(&sc->intr_chan, &sc->sc_mtx, PRIBIO, "intr", hz/2);

				if (i++ > 2) {
					/* TODO: try to re-issue operation on timeout ? */
					bp->bio_error = EIO;
					bp->bio_flags |= BIO_ERROR;
					disk_err(bp, "hard error", -1, 1);
					break;
				}
			}
			HTIF_BLK_UNLOCK(sc);

			biodone(bp);
		} else {
			printf("unknown op %d\n", bp->bio_cmd);
		}
	}
}
Example #14
0
static void
ptdone(struct cam_periph *periph, union ccb *done_ccb)
{
	struct pt_softc *softc;
	struct ccb_scsiio *csio;

	softc = (struct pt_softc *)periph->softc;
	csio = &done_ccb->csio;
	switch (csio->ccb_h.ccb_state) {
	case PT_CCB_BUFFER_IO:
	case PT_CCB_BUFFER_IO_UA:
	{
		struct buf *bp;
		struct bio *bio;

		bio = (struct bio *)done_ccb->ccb_h.ccb_bio;
		bp = bio->bio_buf;

		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			int error;
			int sf;
			
			if ((csio->ccb_h.ccb_state & PT_CCB_RETRY_UA) != 0)
				sf = SF_RETRY_UA;
			else
				sf = 0;

			error = pterror(done_ccb, CAM_RETRY_SELTO, sf);
			if (error == ERESTART) {
				/*
				 * A retry was scheduled, so
				 * just return.
				 */
				return;
			}
			if (error != 0) {
				struct buf *q_bp;
				struct bio *q_bio;

				if (error == ENXIO) {
					/*
					 * Catastrophic error.  Mark our device
					 * as invalid.
					 */
					xpt_print(periph->path,
					    "Invalidating device\n");
					softc->flags |= PT_FLAG_DEVICE_INVALID;
				}

				/*
				 * return all queued I/O with EIO, so that
				 * the client can retry these I/Os in the
				 * proper order should it attempt to recover.
				 */
				while ((q_bio = bioq_takefirst(&softc->bio_queue)) != NULL) {
					q_bp = q_bio->bio_buf;
					q_bp->b_resid = q_bp->b_bcount;
					q_bp->b_error = EIO;
					q_bp->b_flags |= B_ERROR;
					biodone(q_bio);
				}
				bp->b_error = error;
				bp->b_resid = bp->b_bcount;
				bp->b_flags |= B_ERROR;
			} else {
				bp->b_resid = csio->resid;
				bp->b_error = 0;
				if (bp->b_resid != 0) {
					/* Short transfer ??? */
					bp->b_flags |= B_ERROR;
				}
			}
			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
				cam_release_devq(done_ccb->ccb_h.path,
						 /*relsim_flags*/0,
						 /*reduction*/0,
						 /*timeout*/0,
						 /*getcount_only*/0);
		} else {
			bp->b_resid = csio->resid;
			if (bp->b_resid != 0)
				bp->b_flags |= B_ERROR;
		}

		/*
		 * Block out any asynchronous callbacks
		 * while we touch the pending ccb list.
		 */
		LIST_REMOVE(&done_ccb->ccb_h, periph_links.le);

		devstat_end_transaction_buf(&softc->device_stats, bp);
		biodone(bio);
		break;
	}
	case PT_CCB_WAITING:
		/* Caller will release the CCB */
		wakeup(&done_ccb->ccb_h.cbfcnp);
		return;
	}
	xpt_release_ccb(done_ccb);
}