/*
 * Perform a synchronous read or write of `size` bytes at byte `offset`
 * through the LDI handle `vd_lh`.  A raw buf is allocated just for the
 * duration of the call.  Returns 0 on success or an errno value.
 */
int
vdev_disk_ldi_physio(ldi_handle_t vd_lh, caddr_t data, size_t size,
    uint64_t offset, int flags)
{
#ifdef illumos
	buf_t *bp;
#else
	ldi_buf_t *bp;
#endif
	int err;

	if (vd_lh == NULL)
		return (SET_ERROR(EINVAL));

	/* The caller must request a transfer direction. */
	ASSERT((flags & (B_READ | B_WRITE)) != 0);

	bp = getrbuf(KM_SLEEP);
	bp->b_flags = flags | B_BUSY | B_NOCACHE | B_FAILFAST;
	bp->b_un.b_addr = (void *)data;
	bp->b_bcount = size;
	bp->b_bufsize = size;
	bp->b_lblkno = lbtodb(offset);	/* byte offset -> disk block */

	err = ldi_strategy(vd_lh, bp);
	ASSERT(err == 0);

	err = biowait(bp);
	if (err == 0 && bp->b_resid != 0)
		err = SET_ERROR(EIO);	/* short transfer */

	freerbuf(bp);
	return (err);
}
/*
 * biodone callback for an asynchronous DKIOCFLUSHWRITECACHE request:
 * deliver the caller's completion callback with the buf's error status,
 * then release the callback record and the raw buf.
 */
int
dadk_flushdone(struct buf *bp)
{
	struct dk_callback *cb = bp->b_private;

	ASSERT(cb != NULL && cb->dkc_callback != NULL);

	/* Report completion status before tearing anything down. */
	cb->dkc_callback(cb->dkc_cookie, geterror(bp));

	kmem_free(cb, sizeof (*cb));
	freerbuf(bp);

	return (0);
}
/*
 * Release an I/O block handle allocated by dadk_iob_alloc(): free the
 * DMA-able data buffer and raw buf if this handle owns them, then the
 * handle itself.  NULL is tolerated.
 */
/* ARGSUSED */
int
dadk_iob_free(opaque_t objp, struct tgdk_iob *iobp)
{
	struct buf *bp;

	if (iobp == NULL)
		return (DDI_SUCCESS);

	if ((iobp->b_flag & IOB_BPALLOC) && iobp->b_bp) {
		bp = iobp->b_bp;
		if ((iobp->b_flag & IOB_BPBUFALLOC) && bp->b_un.b_addr)
			i_ddi_mem_free((caddr_t)bp->b_un.b_addr, NULL);
		freerbuf(bp);
	}
	kmem_free(iobp, sizeof (*iobp));
	return (DDI_SUCCESS);
}
/*
 * Report the maximum transfer size in FBAs.  For cache-block queries the
 * answer is a single block; otherwise the raw limit is computed once
 * (by letting minphys() clamp a deliberately oversized request) and
 * cached in nsc_rawmaxfbas.  Always returns 0.
 */
/* ARGSUSED */
static int
_raw_maxfbas(dev_t dev, int flag, nsc_size_t *ptr)
{
	struct buf *tmp;

	if (flag == NSC_CACHEBLK) {
		*ptr = 1;
		return (0);
	}

	if (nsc_rawmaxfbas == 0) {
		/* Probe the driver's clamp with an oversized request. */
		tmp = getrbuf(KM_SLEEP);
		tmp->b_bcount = 4096 * 512;
		minphys(tmp);
		nsc_rawmaxfbas = FBA_NUM(tmp->b_bcount);
		freerbuf(tmp);
	}
	*ptr = nsc_rawmaxfbas;
	return (0);
}
/* ARGSUSED */ int vhci_tpgs_get_target_fo_mode(struct scsi_device *sd, int *mode, int *state, int *xlf_capable, int *preferred) { int retval = 0; struct buf *bp; struct scsi_address *ap; int lu = 0, rel_tgt_port = 0, tgt_port = 0x0; VHCI_DEBUG(6, (CE_NOTE, NULL, "!vhci_tpgs_get_target_fo_mode: enter\n")); *mode = *state = *xlf_capable = 0; bp = getrbuf(KM_NOSLEEP); if (bp == NULL) { VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_tpgs_get_target_fo_mode: " " failed getrbuf\n")); return (1); } ap = &sd->sd_address; if (vhci_tpgs_inquiry(ap, bp, mode)) { VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_tpgs_get_target_fo_mode: " " failed vhci_tpgs_inquiry\n")); retval = 1; } else if (vhci_tpgs_page83(ap, bp, &rel_tgt_port, &tgt_port, &lu)) { VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_tpgs_get_target_fo_mode: " " failed vhci_tpgs_page83\n")); retval = 1; } else if (vhci_tpgs_report_target_groups(ap, bp, rel_tgt_port, tgt_port, state, preferred)) { VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_tpgs_get_target_fo_mode: " " failed vhci_tpgs_report_target_groups\n")); retval = 1; } freerbuf(bp); if (retval == 0) { VHCI_DEBUG(6, (CE_NOTE, NULL, "!vhci_tpgs_get_target_fo_mode: " "SUCCESS\n")); } return (retval); }
/*
 * Allocate an I/O block handle describing a transfer of `xfer` bytes
 * starting at logical block `blkno`, together with a raw buf and a
 * DMA-able data buffer rounded out to whole physical sectors.  Returns
 * the handle, or NULL if any allocation fails (everything acquired so
 * far is released).
 *
 * Fix: the original assigned b_lblk/b_xfer twice back-to-back
 * (copy-paste duplication); the redundant pair is removed.
 */
tgdk_iob_handle
dadk_iob_alloc(opaque_t objp, daddr_t blkno, ssize_t xfer, int kmsflg)
{
	struct dadk *dadkp = (struct dadk *)objp;
	struct buf *bp;
	struct tgdk_iob *iobp;
	size_t rlen;

	iobp = kmem_zalloc(sizeof (*iobp), kmsflg);
	if (iobp == NULL)
		return (NULL);
	if ((bp = getrbuf(kmsflg)) == NULL) {
		kmem_free(iobp, sizeof (*iobp));
		return (NULL);
	}

	/* Physical start sector and byte offset of the logical block. */
	iobp->b_psec = LBLK2SEC(blkno, dadkp->dad_blkshf);
	iobp->b_pbyteoff = (blkno & ((1<<dadkp->dad_blkshf) - 1)) << SCTRSHFT;
	/* Round the transfer up to a whole number of physical sectors. */
	iobp->b_pbytecnt = ((iobp->b_pbyteoff + xfer + dadkp->DAD_SECSIZ - 1)
	    >> dadkp->dad_secshf) << dadkp->dad_secshf;

	bp->b_un.b_addr = 0;
	/*
	 * use i_ddi_mem_alloc() for now until we have an interface to allocate
	 * memory for DMA which doesn't require a DMA handle.
	 */
	if (i_ddi_mem_alloc((dadkp->dad_sd)->sd_dev, &dadk_alloc_attr,
	    (size_t)iobp->b_pbytecnt, ((kmsflg == KM_SLEEP) ? 1 : 0), 0, NULL,
	    &bp->b_un.b_addr, &rlen, NULL) != DDI_SUCCESS) {
		freerbuf(bp);
		kmem_free(iobp, sizeof (*iobp));
		return (NULL);
	}

	/* Handle owns both the buf and its data buffer. */
	iobp->b_flag |= IOB_BPALLOC | IOB_BPBUFALLOC;
	iobp->b_bp = bp;
	iobp->b_lblk = blkno;
	iobp->b_xfer = xfer;
	return (iobp);
}
int vhci_tpgs_set_target_groups(struct scsi_address *ap, int set_state, int tpg_id) { struct scsi_pkt *pkt; struct buf *bp; int len, rval, ss = SCSI_SENSE_UNKNOWN; char *bufp; uint8_t *sns, skey, asc, ascq; len = 8; bp = getrbuf(KM_NOSLEEP); if (bp == NULL) { VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_tpgs_set_target_groups: " " failed getrbuf")); return (1); } bufp = kmem_zalloc(len, KM_NOSLEEP); if (bufp == NULL) { VHCI_DEBUG(1, (CE_WARN, NULL, "!vhci_tpgs_set_target_groups: " "request packet allocation for %d failed....", len)); freerbuf(bp); return (1); } bp->b_un.b_addr = bufp; bp->b_flags = B_WRITE; bp->b_bcount = len; bp->b_resid = 0; bufp[4] = (0x0f & set_state); bufp[6] = (0xff00 & tpg_id) >> 8; bufp[7] = (0x00ff & tpg_id); pkt = scsi_init_pkt(ap, NULL, bp, CDB_GROUP5, sizeof (struct scsi_arq_status), 0, 0, NULL, NULL); if (pkt == NULL) { VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_tpgs_set_target_groups: scsi_init_pkt error\n")); freerbuf(bp); kmem_free((void *)bufp, len); return (1); } /* * Sends 1 TPG descriptor only. Hence Parameter list length pkt_cdbp[9] * is set to 8 bytes - Refer SPC3 for details. 
*/ pkt->pkt_cdbp[0] = SCMD_MAINTENANCE_OUT; pkt->pkt_cdbp[1] = SSVC_ACTION_SET_TARGET_PORT_GROUPS; pkt->pkt_cdbp[9] = 8; pkt->pkt_time = 90; VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_tpgs_set_target_groups: sending set target port group:" " cdb[0/1/6/7/8/9]: %x/%x/%x/%x/%x/%x\n", pkt->pkt_cdbp[0], pkt->pkt_cdbp[1], pkt->pkt_cdbp[6], pkt->pkt_cdbp[7], pkt->pkt_cdbp[8], pkt->pkt_cdbp[9])); #ifdef DEBUG print_buf(bufp, len); #endif rval = vhci_do_scsi_cmd(pkt); if (rval == 0) { VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_tpgs_set_target_groups:" " vhci_do_scsi_cmd failed\n")); freerbuf(bp); kmem_free((void *)bufp, len); scsi_destroy_pkt(pkt); return (-1); } else if ((pkt->pkt_reason == CMD_CMPLT) && (SCBP_C(pkt) == STATUS_CHECK) && (pkt->pkt_state & STATE_ARQ_DONE)) { sns = (uint8_t *) &(((struct scsi_arq_status *)(uintptr_t) (pkt->pkt_scbp))->sts_sensedata); skey = scsi_sense_key(sns); asc = scsi_sense_asc(sns); ascq = scsi_sense_ascq(sns); if ((skey == KEY_UNIT_ATTENTION) && (asc == STD_SCSI_ASC_STATE_CHG) && (ascq == STD_SCSI_ASCQ_STATE_CHG_SUCC)) { ss = SCSI_SENSE_STATE_CHANGED; VHCI_DEBUG(4, (CE_NOTE, NULL, "!vhci_tpgs_set_target_groups:" " sense:%x, add_code: %x, qual_code:%x" " sense:%x\n", skey, asc, ascq, ss)); } else if ((skey == KEY_ILLEGAL_REQUEST) && (asc == STD_SCSI_ASC_INVAL_PARAM_LIST)) { ss = SCSI_SENSE_NOFAILOVER; VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_tpgs_set_target_groups:" " sense:%x, add_code: %x, qual_code:%x" " sense:%x\n", skey, asc, ascq, ss)); } else if ((skey == KEY_ILLEGAL_REQUEST) && (asc == STD_SCSI_ASC_INVAL_CMD_OPCODE)) { ss = SCSI_SENSE_NOFAILOVER; VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_tpgs_set_target_groups:" " sense_key:%x, add_code: %x, qual_code:%x" " sense:%x\n", skey, asc, ascq, rval)); } else { /* * At this point sns data may be for power-on-reset * UNIT ATTN hardware errors, vendor unqiue sense etc. * For all these cases, sense is unknown. 
*/ ss = SCSI_SENSE_NOFAILOVER; VHCI_DEBUG(1, (CE_NOTE, NULL, "!vhci_tpgs_set_target_groups: " " sense UNKNOWN: sense key:%x, ASC:%x, ASCQ:%x\n", skey, asc, ascq)); } if (ss == SCSI_SENSE_STATE_CHANGED) { freerbuf(bp); kmem_free((void *)bufp, len); scsi_destroy_pkt(pkt); return (0); } } else if ((pkt->pkt_reason == CMD_CMPLT) && (SCBP_C(pkt) == STATUS_GOOD)) { freerbuf(bp); kmem_free((void *)bufp, len); scsi_destroy_pkt(pkt); return (0); } freerbuf(bp); kmem_free((void *)bufp, len); scsi_destroy_pkt(pkt); return (1); }
/*
 * Top-level ioctl dispatcher for the dadk disk target layer.
 *
 * Handles DKIOCGETDEF (read a defect header sector), DIOCTL_RWCMD
 * (asynchronous raw read/write pass-through), DKIOC_UPDATEFW
 * (privileged firmware update), and DKIOCFLUSHWRITECACHE (sync or
 * async cache flush) directly.  All other commands go to
 * dadk_ctl_ioctl() for fixed media; removable-media devices fall
 * through to a second switch of CD-ROM/lock/eject commands serviced
 * by dadk_rmb_ioctl().
 */
/* ARGSUSED */
int
dadk_ioctl(opaque_t objp, dev_t dev, int cmd, intptr_t arg, int flag,
    cred_t *cred_p, int *rval_p)
{
	struct dadk *dadkp = (struct dadk *)objp;

	switch (cmd) {
	case DKIOCGETDEF:
		{
		struct buf	*bp;
		int		err, head;
		unsigned char	*secbuf;
		STRUCT_DECL(defect_header, adh);

		STRUCT_INIT(adh, flag & FMODELS);

		/*
		 * copyin header ....
		 * yields head number and buffer address
		 */
		if (ddi_copyin((caddr_t)arg, STRUCT_BUF(adh), STRUCT_SIZE(adh),
		    flag))
			return (EFAULT);
		head = STRUCT_FGET(adh, head);
		if (head < 0 || head >= dadkp->dad_phyg.g_head)
			return (ENXIO);
		secbuf = kmem_zalloc(NBPSCTR, KM_SLEEP);
		if (!secbuf)
			return (ENOMEM);
		bp = getrbuf(KM_SLEEP);
		if (!bp) {
			kmem_free(secbuf, NBPSCTR);
			return (ENOMEM);
		}

		bp->b_edev = dev;
		bp->b_dev = cmpdev(dev);
		bp->b_flags = B_BUSY;
		bp->b_resid = 0;
		bp->b_bcount = NBPSCTR;
		bp->b_un.b_addr = (caddr_t)secbuf;
		/*
		 * The head number rides in b_blkno, and the dadk pointer
		 * and command code are smuggled through b_forw/b_back for
		 * the queued-command path to pick up.
		 */
		bp->b_blkno = head; /* I had to put it somewhere! */
		bp->b_forw = (struct buf *)dadkp;
		bp->b_back = (struct buf *)DCMD_GETDEF;

		mutex_enter(&dadkp->dad_cmd_mutex);
		dadkp->dad_cmd_count++;
		mutex_exit(&dadkp->dad_cmd_mutex);
		/* Queue the request and wait synchronously for it. */
		FLC_ENQUE(dadkp->dad_flcobjp, bp);
		err = biowait(bp);
		if (!err) {
			if (ddi_copyout((caddr_t)secbuf,
			    STRUCT_FGETP(adh, buffer), NBPSCTR, flag))
				err = ENXIO;
		}
		kmem_free(secbuf, NBPSCTR);
		freerbuf(bp);
		return (err);
		}
	case DIOCTL_RWCMD:
		{
		struct dadkio_rwcmd *rwcmdp;
		int status, rw;

		/*
		 * copied in by cmdk and, if necessary, converted to the
		 * correct datamodel
		 */
		rwcmdp = (struct dadkio_rwcmd *)(intptr_t)arg;

		/*
		 * Handle the complex cases here; we pass these through to
		 * the driver, which will queue them and handle the
		 * requests asynchronously.  The simpler cases, which can
		 * return immediately, fail here, and the request reverts
		 * to the dadk_ioctl routine, which will reroute them
		 * directly to the ata driver.
		 */
		switch (rwcmdp->cmd) {
		case DADKIO_RWCMD_READ :
			/*FALLTHROUGH*/
		case DADKIO_RWCMD_WRITE:
			rw = ((rwcmdp->cmd == DADKIO_RWCMD_WRITE) ?
			    B_WRITE : B_READ);
			status = dadk_dk_buf_setup(dadkp,
			    (opaque_t)rwcmdp, dev, ((flag &FKIOCTL) ?
			    UIO_SYSSPACE : UIO_USERSPACE), rw);
			return (status);
		default:
			return (EINVAL);
		}
		}
	case DKIOC_UPDATEFW:

		/*
		 * Require PRIV_ALL privilege to invoke DKIOC_UPDATEFW
		 * to protect the firmware update from malicious use
		 */
		if (PRIV_POLICY(cred_p, PRIV_ALL, B_FALSE, EPERM, NULL) != 0)
			return (EPERM);
		else
			return (dadk_ctl_ioctl(dadkp, cmd, arg, flag));

	case DKIOCFLUSHWRITECACHE:
		{
		struct buf *bp;
		int err = 0;
		struct dk_callback *dkc = (struct dk_callback *)arg;
		struct cmpkt *pktp;
		int is_sync = 1;

		mutex_enter(&dadkp->dad_mutex);
		/* No flush needed/possible: no-flush device or WCE off. */
		if (dadkp->dad_noflush || ! dadkp->dad_wce) {
			err = dadkp->dad_noflush ? ENOTSUP : 0;
			mutex_exit(&dadkp->dad_mutex);
			/*
			 * If a callback was requested: a
			 * callback will always be done if the
			 * caller saw the DKIOCFLUSHWRITECACHE
			 * ioctl return 0, and never done if the
			 * caller saw the ioctl return an error.
			 */
			if ((flag & FKIOCTL) && dkc != NULL &&
			    dkc->dkc_callback != NULL) {
				(*dkc->dkc_callback)(dkc->dkc_cookie,
				    err);
				/*
				 * Did callback and reported error.
				 * Since we did a callback, ioctl
				 * should return 0.
				 */
				err = 0;
			}
			return (err);
		}
		mutex_exit(&dadkp->dad_mutex);

		bp = getrbuf(KM_SLEEP);

		bp->b_edev = dev;
		bp->b_dev = cmpdev(dev);
		bp->b_flags = B_BUSY;
		bp->b_resid = 0;
		bp->b_bcount = 0;
		SET_BP_SEC(bp, 0);

		/*
		 * Kernel caller supplied a callback: go asynchronous.
		 * dadk_flushdone() fires the copied callback and frees
		 * both the copy and the buf on completion.
		 */
		if ((flag & FKIOCTL) && dkc != NULL &&
		    dkc->dkc_callback != NULL) {
			struct dk_callback *dkc2 =
			    (struct dk_callback *)kmem_zalloc(
			    sizeof (struct dk_callback), KM_SLEEP);

			bcopy(dkc, dkc2, sizeof (*dkc2));
			bp->b_private = dkc2;
			bp->b_iodone = dadk_flushdone;
			is_sync = 0;
		}

		/*
		 * Setup command pkt
		 * dadk_pktprep() can't fail since DDI_DMA_SLEEP set
		 */
		pktp = dadk_pktprep(dadkp, NULL, bp,
		    dadk_iodone, DDI_DMA_SLEEP, NULL);

		pktp->cp_time = DADK_FLUSH_CACHE_TIME;

		*((char *)(pktp->cp_cdbp)) = DCMD_FLUSH_CACHE;
		pktp->cp_byteleft = 0;
		pktp->cp_private = NULL;
		pktp->cp_secleft = 0;
		pktp->cp_srtsec = -1;
		pktp->cp_bytexfer = 0;

		CTL_IOSETUP(dadkp->dad_ctlobjp, pktp);

		mutex_enter(&dadkp->dad_cmd_mutex);
		dadkp->dad_cmd_count++;
		mutex_exit(&dadkp->dad_cmd_mutex);
		FLC_ENQUE(dadkp->dad_flcobjp, bp);

		/* Synchronous path waits here; async frees in iodone. */
		if (is_sync) {
			err = biowait(bp);
			freerbuf(bp);
		}
		return (err);
		}
	default:
		/* Fixed media: everything else goes to the ctl layer. */
		if (!dadkp->dad_rmb)
			return (dadk_ctl_ioctl(dadkp, cmd, arg, flag));
	}

	/* Removable-media commands from here on. */
	switch (cmd) {
	case CDROMSTOP:
		return (dadk_rmb_ioctl(dadkp, DCMD_STOP_MOTOR, 0,
		    0, DADK_SILENT));
	case CDROMSTART:
		return (dadk_rmb_ioctl(dadkp, DCMD_START_MOTOR, 0,
		    0, DADK_SILENT));
	case DKIOCLOCK:
		return (dadk_rmb_ioctl(dadkp, DCMD_LOCK, 0, 0, DADK_SILENT));
	case DKIOCUNLOCK:
		return (dadk_rmb_ioctl(dadkp, DCMD_UNLOCK, 0, 0, DADK_SILENT));
	case DKIOCEJECT:
	case CDROMEJECT:
		{
		int ret;

		/* Must unlock the door before the eject can succeed. */
		if (ret = dadk_rmb_ioctl(dadkp, DCMD_UNLOCK, 0, 0,
		    DADK_SILENT)) {
			return (ret);
		}
		if (ret = dadk_rmb_ioctl(dadkp, DCMD_EJECT, 0, 0,
		    DADK_SILENT)) {
			return (ret);
		}
		mutex_enter(&dadkp->dad_mutex);
		dadkp->dad_iostate = DKIO_EJECTED;
		cv_broadcast(&dadkp->dad_state_cv);
		mutex_exit(&dadkp->dad_mutex);

		return (0);
		}
	default:
		return (ENOTTY);
	/*
	 * cdrom audio commands
	 */
	case CDROMPAUSE:
		cmd = DCMD_PAUSE;
		break;
	case CDROMRESUME:
		cmd = DCMD_RESUME;
		break;
	case CDROMPLAYMSF:
		cmd = DCMD_PLAYMSF;
		break;
	case CDROMPLAYTRKIND:
		cmd = DCMD_PLAYTRKIND;
		break;
	case CDROMREADTOCHDR:
		cmd = DCMD_READTOCHDR;
		break;
	case CDROMREADTOCENTRY:
		cmd = DCMD_READTOCENT;
		break;
	case CDROMVOLCTRL:
		cmd = DCMD_VOLCTRL;
		break;
	case CDROMSUBCHNL:
		cmd = DCMD_SUBCHNL;
		break;
	case CDROMREADMODE2:
		cmd = DCMD_READMODE2;
		break;
	case CDROMREADMODE1:
		cmd = DCMD_READMODE1;
		break;
	case CDROMREADOFFSET:
		cmd = DCMD_READOFFSET;
		break;
	}
	/* Audio commands fall through to here with cmd remapped. */
	return (dadk_rmb_ioctl(dadkp, cmd, arg, flag, 0));
}
/*
 * Map each RDMA scatter entry's user memory for I/O: wrap the umem
 * cookie in a temporary buf, let ibt_map_mem_iov() populate the work
 * request SGL allocated per entry, and accumulate the total SGL entry
 * count.  Returns the number of RDSV3_IB_MAX_SGE-sized chunks needed to
 * cover all mapped entries, or 0 on failure.
 *
 * Fix: removed the unused locals `k` and `sg` from the original.
 *
 * NOTE(review): on failure the cleanup path passes all `num` entries to
 * rdsv3_ib_dma_unmap_sg_rdma(), including ones this loop never mapped —
 * presumably that routine tolerates partially-initialized entries;
 * verify against its implementation.
 */
/* ARGSUSED */
uint_t
rdsv3_ib_dma_map_sg_rdma(struct ib_device *dev, struct rdsv3_rdma_sg scat[],
    uint_t num, struct rdsv3_scatterlist **scatl)
{
	ibt_hca_hdl_t hca_hdl;
	ibt_iov_attr_t iov_attr;
	struct buf *bp;
	uint_t i, j;
	uint_t count;
	int ret;

	RDSV3_DPRINTF4("rdsv3_ib_dma_map_sg_rdma", "scat: %p, num: %d",
	    scat, num);

	hca_hdl = ib_get_ibt_hca_hdl(dev);
	scat[0].hca_hdl = hca_hdl;
	bzero(&iov_attr, sizeof (ibt_iov_attr_t));
	iov_attr.iov_flags = IBT_IOV_BUF;
	iov_attr.iov_lso_hdr_sz = 0;

	for (i = 0, count = 0; i < num; i++) {
		/* transpose umem_cookie to buf structure */
		bp = ddi_umem_iosetup(scat[i].umem_cookie,
		    scat[i].iovec.addr & PAGEOFFSET, scat[i].iovec.bytes,
		    B_WRITE, 0, 0, NULL, DDI_UMEM_SLEEP);
		if (bp == NULL) {
			/* free resources and return error */
			goto out;
		}
		/* setup ibt_map_mem_iov() attributes */
		iov_attr.iov_buf = bp;
		/* worst case: one SGL entry per page plus two for the ends */
		iov_attr.iov_wr_nds = (scat[i].iovec.bytes / PAGESIZE) + 2;
		scat[i].swr.wr_sgl = kmem_zalloc(
		    iov_attr.iov_wr_nds * sizeof (ibt_wr_ds_t), KM_SLEEP);

		ret = ibt_map_mem_iov(hca_hdl, &iov_attr,
		    (ibt_all_wr_t *)&scat[i].swr, &scat[i].mihdl);
		/* the temporary buf is only needed for the mapping call */
		freerbuf(bp);
		if (ret != IBT_SUCCESS) {
			RDSV3_DPRINTF2("rdsv3_ib_dma_map_sg_rdma",
			    "ibt_map_mem_iov returned: %d", ret);
			/* free resources and return error */
			kmem_free(scat[i].swr.wr_sgl,
			    iov_attr.iov_wr_nds * sizeof (ibt_wr_ds_t));
			goto out;
		}
		count += scat[i].swr.wr_nds;

#ifdef  DEBUG
		for (j = 0; j < scat[i].swr.wr_nds; j++) {
			RDSV3_DPRINTF5("rdsv3_ib_dma_map_sg_rdma",
			    "sgl[%d] va %llx len %x", j,
			    scat[i].swr.wr_sgl[j].ds_va,
			    scat[i].swr.wr_sgl[j].ds_len);
		}
#endif
		RDSV3_DPRINTF4("rdsv3_ib_dma_map_sg_rdma",
		    "iovec.bytes: 0x%x scat[%d]swr.wr_nds: %d",
		    scat[i].iovec.bytes, i, scat[i].swr.wr_nds);
	}

	/* number of max-SGE-sized work requests to cover `count` entries */
	count = ((count - 1) / RDSV3_IB_MAX_SGE) + 1;
	RDSV3_DPRINTF4("rdsv3_ib_dma_map_sg_rdma", "Ret: num: %d", count);
	return (count);

out:
	rdsv3_ib_dma_unmap_sg_rdma(dev, num, scat);
	return (0);
}