Example #1
static bd_xfer_impl_t *
bd_xfer_alloc(bd_t *bd, struct buf *bp, int (*func)(void *, bd_xfer_t *),
    int kmflag)
{
	bd_xfer_impl_t		*xi;
	int			rv = 0;
	int			status;
	unsigned		dir;
	int			(*cb)(caddr_t);
	size_t			len;
	uint32_t		shift;

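	/*
	 * Pick the DMA callback to match the caller's allocation
	 * policy: block for resources under KM_SLEEP, fail fast
	 * otherwise.
	 */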
	if (kmflag == KM_SLEEP) {
		cb = DDI_DMA_SLEEP;
	} else {
		cb = DDI_DMA_DONTWAIT;
	}

	ASSERT(bp);

	xi = kmem_cache_alloc(bd->d_cache, kmflag);
	if (xi == NULL) {
		bioerror(bp, ENOMEM);
		return (NULL);
	}

	xi->i_bp = bp;
	xi->i_func = func;
	xi->i_blkno = bp->b_lblkno;

	if (bp->b_bcount == 0) {
		xi->i_len = 0;
		xi->i_nblks = 0;
		xi->i_kaddr = NULL;
		xi->i_resid = 0;
		xi->i_num_win = 0;
		goto done;
	}

	if (bp->b_flags & B_READ) {
		dir = DDI_DMA_READ;
		xi->i_func = bd->d_ops.o_read;
	} else {
		dir = DDI_DMA_WRITE;
		xi->i_func = bd->d_ops.o_write;
	}

	shift = bd->d_blkshift;
	xi->i_blkshift = shift;

	if (!bd->d_use_dma) {
		bp_mapin(bp);
		rv = 0;
		xi->i_offset = 0;
		xi->i_num_win =
		    (bp->b_bcount + (bd->d_maxxfer - 1)) / bd->d_maxxfer;
		xi->i_cur_win = 0;
		xi->i_len = min(bp->b_bcount, bd->d_maxxfer);
		xi->i_nblks = xi->i_len >> shift;
		xi->i_kaddr = bp->b_un.b_addr;
		xi->i_resid = bp->b_bcount;
	} else {
		/*
		 * DMA path elided in this excerpt: the buffer is bound
		 * with ddi_dma_buf_bind_handle(9F), using the callback
		 * chosen above and setting rv on failure.
		 */
	}

done:
	if (rv != 0) {
		kmem_cache_free(bd->d_cache, xi);
		bioerror(bp, rv);
		xi = NULL;
	}
	return (xi);
}
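
In the PIO branch above (d_use_dma unset), the request is carved into windows of at most d_maxxfer bytes: i_num_win is the ceiling of b_bcount / d_maxxfer, and i_len, i_nblks and i_resid describe the current window. For example, a 300 KiB request against a 128 KiB d_maxxfer is issued as three windows of 128 KiB, 128 KiB and 44 KiB.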
Example #2
/* ARGSUSED */
static struct scsi_pkt *
emul64_scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
	struct buf *bp, int cmdlen, int statuslen, int tgtlen,
	int flags, int (*callback)(), caddr_t arg)
{
	struct emul64		*emul64	= ADDR2EMUL64(ap);
	struct emul64_cmd	*sp;

	ASSERT(callback == NULL_FUNC || callback == SLEEP_FUNC);

	/*
	 * First step of emul64_scsi_init_pkt:  pkt allocation
	 */
	if (pkt == NULL) {
		pkt = scsi_hba_pkt_alloc(emul64->emul64_dip, ap, cmdlen,
		    statuslen, tgtlen, sizeof (struct emul64_cmd),
		    callback, arg);
		if (pkt == NULL) {
			cmn_err(CE_WARN, "emul64_scsi_init_pkt: "
			    "scsi_hba_pkt_alloc failed");
			return (NULL);
		}

		sp = PKT2CMD(pkt);

		/*
		 * Initialize the new pkt - we redundantly initialize
		 * all the fields for illustrative purposes.
		 */
		sp->cmd_pkt		= pkt;
		sp->cmd_flags		= 0;
		sp->cmd_scblen		= statuslen;
		sp->cmd_cdblen		= cmdlen;
		sp->cmd_emul64		= emul64;
		pkt->pkt_address	= *ap;
		pkt->pkt_comp		= (void (*)())NULL;
		pkt->pkt_flags		= 0;
		pkt->pkt_time		= 0;
		pkt->pkt_resid		= 0;
		pkt->pkt_statistics	= 0;
		pkt->pkt_reason		= 0;

	} else {
		sp = PKT2CMD(pkt);
	}

	/*
	 * Second step of emul64_scsi_init_pkt:  dma allocation/move
	 */
	if (bp && bp->b_bcount != 0) {
		if (bp->b_flags & B_READ) {
			sp->cmd_flags &= ~CFLAG_DMASEND;
		} else {
			sp->cmd_flags |= CFLAG_DMASEND;
		}
		bp_mapin(bp);
		sp->cmd_addr = (unsigned char *)bp->b_un.b_addr;
		sp->cmd_count = bp->b_bcount;
		pkt->pkt_resid = 0;
	}

	return (pkt);
}
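
The routine above is the HBA driver's tran_init_pkt(9E) entry point, which is normally reached through the scsi_init_pkt(9F) framework call made by a target driver. A minimal sketch of that caller side, assuming a hypothetical target driver holding a struct scsi_device pointer devp (only scsi_init_pkt(9F), CDB_GROUP0, NULL_FUNC and struct scsi_arq_status are real DDI names):

	/*
	 * Hypothetical target-driver call; the SCSA framework routes it
	 * to the HBA's tran_init_pkt entry, e.g. emul64_scsi_init_pkt().
	 */
	struct scsi_pkt *pkt;

	pkt = scsi_init_pkt(&devp->sd_address, NULL, bp, CDB_GROUP0,
	    sizeof (struct scsi_arq_status), 0, 0, NULL_FUNC, NULL);
	if (pkt == NULL)
		return (NULL);	/* resources unavailable right now */

With NULL_FUNC the call fails fast instead of blocking; passing SLEEP_FUNC makes the allocation wait, which is why the example asserts that only those two callbacks are used.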
Example #3
int
zvol_strategy(buf_t *bp)
{
	zvol_state_t *zv = ddi_get_soft_state(zvol_state, getminor(bp->b_edev));
	uint64_t off, volsize;
	size_t size, resid;
	char *addr;
	objset_t *os;
	int error = 0;
	int sync;
	int reading;
	boolean_t txg_sync_needed = B_FALSE;

	if (zv == NULL) {
		bioerror(bp, ENXIO);
		biodone(bp);
		return (0);
	}

	if (getminor(bp->b_edev) == 0) {
		bioerror(bp, EINVAL);
		biodone(bp);
		return (0);
	}

	if (zv->zv_readonly && !(bp->b_flags & B_READ)) {
		bioerror(bp, EROFS);
		biodone(bp);
		return (0);
	}

	off = ldbtob(bp->b_blkno);
	volsize = zv->zv_volsize;

	os = zv->zv_objset;
	ASSERT(os != NULL);
	sync = !(bp->b_flags & B_ASYNC) && !(zil_disable);

	bp_mapin(bp);
	addr = bp->b_un.b_addr;
	resid = bp->b_bcount;

	/*
	 * There must be no buffer changes when doing a dmu_sync() because
	 * we can't change the data whilst calculating the checksum.
	 * A better approach than a per zvol rwlock would be to lock ranges.
	 */
	reading = bp->b_flags & B_READ;
	if (reading || resid <= zvol_immediate_write_sz)
		rw_enter(&zv->zv_dslock, RW_READER);
	else
		rw_enter(&zv->zv_dslock, RW_WRITER);

	while (resid != 0 && off < volsize) {

		size = MIN(resid, 1UL << 20);	/* cap at 1MB per tx */

		if (size > volsize - off)	/* don't write past the end */
			size = volsize - off;

		if (reading) {
			error = dmu_read(os, ZVOL_OBJ, off, size, addr);
		} else {
			dmu_tx_t *tx = dmu_tx_create(os);
			dmu_tx_hold_write(tx, ZVOL_OBJ, off, size);
			error = dmu_tx_assign(tx, TXG_WAIT);
			if (error) {
				dmu_tx_abort(tx);
			} else {
				dmu_write(os, ZVOL_OBJ, off, size, addr, tx);
				if (sync) {
					/* use the ZIL to commit this write */
					if (zvol_log_write(zv, tx, off, size,
					    addr) != 0) {
						txg_sync_needed = B_TRUE;
					}
				}
				dmu_tx_commit(tx);
			}
		}
		if (error)
			break;
		off += size;
		addr += size;
		resid -= size;
	}
	rw_exit(&zv->zv_dslock);

	if ((bp->b_resid = resid) == bp->b_bcount)
		bioerror(bp, off > volsize ? EINVAL : error);

	biodone(bp);

	if (sync) {
		if (txg_sync_needed)
			txg_wait_synced(dmu_objset_pool(os), 0);
		else
			zil_commit(zv->zv_zilog, UINT64_MAX, 0);
	}

	return (0);
}
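
Common to all three examples is the bp_mapin(9F) pattern: when the driver needs a kernel virtual mapping of the caller's buffer, it calls bp_mapin(9F) before touching bp->b_un.b_addr (bp_mapout(9F) is the matching unmap once the mapping is no longer needed). A minimal sketch of that pattern follows; my_strategy() and my_device_copy() are hypothetical placeholders, while bp_mapin(9F), bioerror(9F) and biodone(9F) are the real DDI/DKI routines used above:

static int
my_strategy(struct buf *bp)
{
	if (bp->b_bcount == 0) {
		biodone(bp);		/* nothing to transfer */
		return (0);
	}

	bp_mapin(bp);			/* make b_un.b_addr a valid kernel address */

	/* my_device_copy() stands in for the actual device transfer. */
	if (my_device_copy(bp->b_un.b_addr, bp->b_bcount,
	    bp->b_flags & B_READ) != 0)
		bioerror(bp, EIO);	/* record the failure on the buf */

	bp->b_resid = 0;
	biodone(bp);			/* complete the request */
	return (0);
}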