Example #1
static void
pcata_drive_unsetup(ata_soft_t *softp)
{
	ata_unit_t	*unitp;
	struct ata_cmpkt *pktp;
	int drive;
	buf_t		*bp;

	/*
	 * free ab_active
	 */
	if ((pktp = softp->ab_active) != NULL) {
		bp = pktp->cp_bp;
		if (bp && ((bp->b_flags & B_DONE) == 0)) {
			bioerror(bp, ENXIO);
			biodone(bp);
		}
		kmem_free((void *)pktp, sizeof (*pktp));
		softp->ab_active = NULL;
	}

	/* release any packets queued on the controller */
	while ((pktp = softp->ab_head) != NULL) {
		softp->ab_head = pktp->pkt_forw;
		bp = pktp->cp_bp;
		if (bp && ((bp->b_flags & B_DONE) == 0)) {
			/* first free the packets */
			bioerror(bp, ENXIO);
			biodone(bp);
		}
		kmem_free((void *)pktp, sizeof (*pktp));
	}

	/* release the unit structures */
	while ((unitp = softp->ab_link) != NULL) {
		softp->ab_link = unitp->a_forw;
		kmem_free(unitp, sizeof (ata_unit_t));
	}

	/*
	 * Now free the atarpbuf memory. Hard-coding the number of
	 * drives is poor practice, but we need to be consistent with
	 * the rest of the code, hence the drive < 1 loop bound.
	 */
	for (drive = 0; drive < 1; drive++) {
		if (softp->ab_rpbp[drive]) {
			kmem_free(softp->ab_rpbp[drive],
				(sizeof (struct atarpbuf) +
				sizeof (struct scsi_inquiry)));
			softp->ab_rpbp[drive] = NULL;
		}
	}
}
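Example #1 shows the teardown side of the convention: every command packet still active or queued on the controller is failed with bioerror(bp, ENXIO) and completed with biodone(bp) before its memory is freed, so that any thread blocked in biowait() on one of those bufs is released rather than left hanging. The B_DONE test guards against completing a buf twice.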
Example #2
caddr_t
dadk_iob_xfer(opaque_t objp, struct tgdk_iob *iobp, int rw)
{
	struct dadk	*dadkp = (struct dadk *)objp;
	struct buf	*bp;
	int		err;

	bp = iobp->b_bp;
	if (dadkp->dad_rdonly && !(rw & B_READ)) {
		bioerror(bp, EROFS);
		return (NULL);
	}

	bp->b_flags |= (B_BUSY | rw);
	bp->b_bcount = iobp->b_pbytecnt;
	SET_BP_SEC(bp, iobp->b_psec);
	bp->av_back = (struct buf *)0;
	bp->b_resid = 0;

	/* call flow control */
	mutex_enter(&dadkp->dad_cmd_mutex);
	dadkp->dad_cmd_count++;
	mutex_exit(&dadkp->dad_cmd_mutex);
	FLC_ENQUE(dadkp->dad_flcobjp, bp);
	err = biowait(bp);

	bp->b_bcount = iobp->b_xfer;
	bp->b_flags &= ~(B_DONE|B_BUSY);

	if (err)
		return (NULL);

	return (bp->b_un.b_addr+iobp->b_pbyteoff);
}
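Example #2 is the synchronous counterpart: dadk_iob_xfer() rejects writes to a read-only device up front with bioerror(bp, EROFS), then marks the buf busy, hands it to the flow-control object with FLC_ENQUE(), and blocks in biowait(), which sleeps until biodone() runs and returns the error recorded on the buf (0 on success). A minimal sketch of that submit-and-wait shape, with a hypothetical my_strategy() standing in for whatever actually queues the buf:

/*
 * Sketch only: my_strategy() is a hypothetical stand-in for the
 * driver's queueing path (FLC_ENQUE() above, or a strategy routine).
 */
static int
my_sync_io(struct buf *bp, int rw)
{
	int err;

	bp->b_flags |= (B_BUSY | rw);		/* in-flight; B_READ/B_WRITE */
	bp->b_resid = 0;

	(void) my_strategy(bp);			/* queue the request */
	err = biowait(bp);			/* sleep until biodone(); */
						/* returns the bioerror() errno */

	bp->b_flags &= ~(B_DONE | B_BUSY);	/* make the buf reusable */
	return (err);
}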
Example #3
int
dadk_dump(opaque_t objp, struct buf *bp)
{
	struct dadk *dadkp = (struct dadk *)objp;
	struct cmpkt *pktp;

	if (dadkp->dad_rdonly) {
		bioerror(bp, EROFS);
		return (DDI_FAILURE);
	}

	if (bp->b_bcount & (dadkp->DAD_SECSIZ-1)) {
		bioerror(bp, ENXIO);
		return (DDI_FAILURE);
	}

	SET_BP_SEC(bp, (LBLK2SEC(GET_BP_SEC(bp), dadkp->dad_blkshf)));

	pktp = dadk_pktprep(dadkp, NULL, bp, dadk_polldone, NULL, NULL);
	if (!pktp) {
		cmn_err(CE_WARN, "no resources for dumping");
		bioerror(bp, EIO);
		return (DDI_FAILURE);
	}
	pktp->cp_flags |= CPF_NOINTR;

	(void) dadk_ioprep(dadkp, pktp);
	dadk_transport(dadkp, bp);
	pktp->cp_byteleft -= pktp->cp_bytexfer;

	while (geterror(bp) == 0 && pktp->cp_byteleft != 0) {
		(void) dadk_iosetup(dadkp, pktp);
		dadk_transport(dadkp, bp);
		pktp->cp_byteleft -= pktp->cp_bytexfer;
	}

	if (pktp->cp_private)
		BBH_FREEHANDLE(dadkp->dad_bbhobjp, pktp->cp_private);
	gda_free(dadkp->dad_ctlobjp, pktp, NULL);
	return (DDI_SUCCESS);
}
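Example #3 is a crash-dump path, so it cannot rely on interrupts: the packet is flagged CPF_NOINTR and the transfer is driven by polling, looping until geterror(bp) reports an error recorded via bioerror() or cp_byteleft reaches zero. Note that the validation failures here (EROFS, a byte count not a multiple of the sector size, packet allocation failure) record the errno with bioerror() and return DDI_FAILURE without calling biodone(); completion of the failed buf is left to the caller.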
Example #4
static int
_raw_strategy(struct buf *bp)
{
	int cd = __raw_get_cd(bp->b_edev);

	if (cd == -1 || _nsc_raw_files[cd].major == NULL) {
		bioerror(bp, ENXIO);
		biodone(bp);
		return (0);
	}

	return ((*_nsc_raw_files[cd].major->strategy)(bp));
}
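Example #4 is the minimal early-exit guard in a strategy routine: when the device lookup fails, the buf is failed with bioerror(bp, ENXIO) and immediately completed with biodone(bp) before returning, since nothing further will ever touch it. bioerror() alone would leave the waiter asleep; it is biodone() that marks the buf B_DONE and wakes biowait().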
Example #5
int
dadk_strategy(opaque_t objp, struct buf *bp)
{
	struct dadk *dadkp = (struct dadk *)objp;

	if (dadkp->dad_rdonly && !(bp->b_flags & B_READ)) {
		bioerror(bp, EROFS);
		return (DDI_FAILURE);
	}

	if (bp->b_bcount & (dadkp->DAD_SECSIZ-1)) {
		bioerror(bp, ENXIO);
		return (DDI_FAILURE);
	}

	SET_BP_SEC(bp, (LBLK2SEC(GET_BP_SEC(bp), dadkp->dad_blkshf)));
	mutex_enter(&dadkp->dad_cmd_mutex);
	dadkp->dad_cmd_count++;
	mutex_exit(&dadkp->dad_cmd_mutex);
	FLC_ENQUE(dadkp->dad_flcobjp, bp);

	return (DDI_SUCCESS);
}
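Example #5 is the asynchronous variant of Example #2: the same EROFS and sector-alignment checks run first, recording the errno with bioerror() and returning DDI_FAILURE (again without biodone(); the caller completes the failed buf), and on success the buf is simply enqueued and the routine returns DDI_SUCCESS, with completion delivered later when the transfer finishes.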
Example #6
static void
vdev_file_io_strategy(void *arg)
{
	buf_t *bp = arg;
	vnode_t *vp = bp->b_private;
	ssize_t resid;
	int error;

	error = vn_rdwr((bp->b_flags & B_READ) ? UIO_READ : UIO_WRITE,
	    vp, bp->b_un.b_addr, bp->b_bcount, ldbtob(bp->b_lblkno),
	    UIO_SYSSPACE, 0, RLIM64_INFINITY, kcred, &resid);

	if (error == 0) {
		bp->b_resid = resid;
		biodone(bp);
	} else {
		bioerror(bp, error);
		biodone(bp);
	}
}
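Example #6 makes the division of labor explicit: both the success and the failure branch call biodone(bp), because bioerror() only records the outcome on the buf and never completes it. For reference, a paraphrase of what bioerror() does (modeled on the illumos common/os/bio.c implementation; a sketch, not the authoritative source):

void
bioerror(struct buf *bp, int error)
{
	if (error != 0)
		bp->b_flags |= B_ERROR;		/* flag the buf as failed */
	else
		bp->b_flags &= ~B_ERROR;	/* error 0 clears a prior failure */
	bp->b_error = error;	/* what biowait()/geterror() report back */
}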
Example #7
static bd_xfer_impl_t *
bd_xfer_alloc(bd_t *bd, struct buf *bp, int (*func)(void *, bd_xfer_t *),
    int kmflag)
{
	bd_xfer_impl_t		*xi;
	int			rv = 0;
	int			status;
	unsigned		dir;
	int			(*cb)(caddr_t);
	size_t			len;
	uint32_t		shift;

	if (kmflag == KM_SLEEP) {
		cb = DDI_DMA_SLEEP;
	} else {
		cb = DDI_DMA_DONTWAIT;
	}

	xi = kmem_cache_alloc(bd->d_cache, kmflag);
	if (xi == NULL) {
		bioerror(bp, ENOMEM);
		return (NULL);
	}

	ASSERT(bp);

	xi->i_bp = bp;
	xi->i_func = func;
	xi->i_blkno = bp->b_lblkno;

	if (bp->b_bcount == 0) {
		xi->i_len = 0;
		xi->i_nblks = 0;
		xi->i_kaddr = NULL;
		xi->i_resid = 0;
		xi->i_num_win = 0;
		goto done;
	}

	if (bp->b_flags & B_READ) {
		dir = DDI_DMA_READ;
		xi->i_func = bd->d_ops.o_read;
	} else {
		dir = DDI_DMA_WRITE;
		xi->i_func = bd->d_ops.o_write;
	}

	shift = bd->d_blkshift;
	xi->i_blkshift = shift;

	if (!bd->d_use_dma) {
		bp_mapin(bp);
		rv = 0;
		xi->i_offset = 0;
		xi->i_num_win =
		    (bp->b_bcount + (bd->d_maxxfer - 1)) / bd->d_maxxfer;
		xi->i_cur_win = 0;
		xi->i_len = min(bp->b_bcount, bd->d_maxxfer);
		xi->i_nblks = xi->i_len >> shift;
		xi->i_kaddr = bp->b_un.b_addr;
		xi->i_resid = bp->b_bcount;
	} else {
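		/* (DMA branch; the excerpt ends here in this listing) */

The visible half of Example #7 is the PIO path: the only bioerror() call is the ENOMEM on allocation failure, before any per-transfer state is set up. When DMA is not used, the buf is mapped into kernel address space with bp_mapin() and split into ceil(b_bcount / d_maxxfer) windows of at most d_maxxfer bytes each, tracked through i_num_win, i_cur_win, and i_resid.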
Example #8
File: zvol.c  Project: andreiw/polaris
int
zvol_strategy(buf_t *bp)
{
	zvol_state_t *zv = ddi_get_soft_state(zvol_state, getminor(bp->b_edev));
	uint64_t off, volsize;
	size_t size, resid;
	char *addr;
	objset_t *os;
	int error = 0;
	int sync;
	int reading;
	int txg_sync_needed = B_FALSE;

	if (zv == NULL) {
		bioerror(bp, ENXIO);
		biodone(bp);
		return (0);
	}

	if (getminor(bp->b_edev) == 0) {
		bioerror(bp, EINVAL);
		biodone(bp);
		return (0);
	}

	if (zv->zv_readonly && !(bp->b_flags & B_READ)) {
		bioerror(bp, EROFS);
		biodone(bp);
		return (0);
	}

	off = ldbtob(bp->b_blkno);
	volsize = zv->zv_volsize;

	os = zv->zv_objset;
	ASSERT(os != NULL);
	sync = !(bp->b_flags & B_ASYNC) && !(zil_disable);

	bp_mapin(bp);
	addr = bp->b_un.b_addr;
	resid = bp->b_bcount;

	/*
	 * There must be no buffer changes when doing a dmu_sync() because
	 * we can't change the data whilst calculating the checksum.
	 * A better approach than a per zvol rwlock would be to lock ranges.
	 */
	reading = bp->b_flags & B_READ;
	if (reading || resid <= zvol_immediate_write_sz)
		rw_enter(&zv->zv_dslock, RW_READER);
	else
		rw_enter(&zv->zv_dslock, RW_WRITER);

	while (resid != 0 && off < volsize) {

		size = MIN(resid, 1UL << 20);	/* cap at 1MB per tx */

		if (size > volsize - off)	/* don't write past the end */
			size = volsize - off;

		if (reading) {
			error = dmu_read(os, ZVOL_OBJ, off, size, addr);
		} else {
			dmu_tx_t *tx = dmu_tx_create(os);
			dmu_tx_hold_write(tx, ZVOL_OBJ, off, size);
			error = dmu_tx_assign(tx, TXG_WAIT);
			if (error) {
				dmu_tx_abort(tx);
			} else {
				dmu_write(os, ZVOL_OBJ, off, size, addr, tx);
				if (sync) {
					/* use the ZIL to commit this write */
					if (zvol_log_write(zv, tx, off, size,
					    addr) != 0) {
						txg_sync_needed = B_TRUE;
					}
				}
				dmu_tx_commit(tx);
			}
		}
		if (error)
			break;
		off += size;
		addr += size;
		resid -= size;
	}
	rw_exit(&zv->zv_dslock);

	if ((bp->b_resid = resid) == bp->b_bcount)
		bioerror(bp, off > volsize ? EINVAL : error);

	biodone(bp);

	if (sync) {
		if (txg_sync_needed)
			txg_wait_synced(dmu_objset_pool(os), 0);
		else
			zil_commit(zv->zv_zilog, UINT64_MAX, 0);
	}

	return (0);
}
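Example #8 is the most complete illustration of the convention. The early sanity checks (ENXIO, EINVAL, EROFS) each pair bioerror() with biodone() and return 0, since a strategy routine reports failures through the buf rather than through its return value. The transfer loop then allows partial completion: bioerror() is applied at the end only if nothing at all transferred (b_resid == b_bcount), choosing EINVAL when the request starts beyond the volume; a short transfer is otherwise reported through b_resid alone, and biodone() is called unconditionally.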