static void ld_sdmmc_dobio(void *arg) { struct ld_sdmmc_task *task = (struct ld_sdmmc_task *)arg; struct ld_sdmmc_softc *sc = task->task_sc; struct buf *bp = task->task_bp; int error, s; callout_stop(&task->task_callout); /* * I/O operation */ DPRINTF(("%s: I/O operation (dir=%s, blkno=0x%jx, bcnt=0x%x)\n", device_xname(sc->sc_ld.sc_dv), bp->b_flags & B_READ ? "IN" : "OUT", bp->b_rawblkno, bp->b_bcount)); /* is everything done in terms of blocks? */ if (bp->b_rawblkno >= sc->sc_sf->csd.capacity) { /* trying to read or write past end of device */ aprint_error_dev(sc->sc_ld.sc_dv, "blkno 0x%" PRIu64 " exceeds capacity %d\n", bp->b_rawblkno, sc->sc_sf->csd.capacity); bp->b_error = EINVAL; bp->b_resid = bp->b_bcount; lddone(&sc->sc_ld, bp); return; } s = splbio(); if (bp->b_flags & B_READ) error = sdmmc_mem_read_block(sc->sc_sf, bp->b_rawblkno, bp->b_data, bp->b_bcount); else error = sdmmc_mem_write_block(sc->sc_sf, bp->b_rawblkno, bp->b_data, bp->b_bcount); if (error) { DPRINTF(("%s: error %d\n", device_xname(sc->sc_ld.sc_dv), error)); bp->b_error = error; bp->b_resid = bp->b_bcount; } else { bp->b_resid = 0; } splx(s); lddone(&sc->sc_ld, bp); }
static void ld_virtio_vq_done1(struct ld_virtio_softc *sc, struct virtio_softc *vsc, struct virtqueue *vq, int slot) { struct virtio_blk_req *vr = &sc->sc_reqs[slot]; struct buf *bp = vr->vr_bp; vr->vr_bp = NULL; bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts, 0, sizeof(struct virtio_blk_req_hdr), BUS_DMASYNC_POSTWRITE); bus_dmamap_sync(vsc->sc_dmat, vr->vr_payload, 0, bp->b_bcount, (bp->b_flags & B_READ)?BUS_DMASYNC_POSTREAD :BUS_DMASYNC_POSTWRITE); bus_dmamap_sync(vsc->sc_dmat, vr->vr_cmdsts, sizeof(struct virtio_blk_req_hdr), sizeof(uint8_t), BUS_DMASYNC_POSTREAD); if (vr->vr_status != VIRTIO_BLK_S_OK) { bp->b_error = EIO; bp->b_resid = bp->b_bcount; } else { bp->b_error = 0; bp->b_resid = 0; } virtio_dequeue_commit(vsc, vq, slot); lddone(&sc->sc_ld, bp); }
void ld_cac_done(device_t dv, void *context, int error) { struct buf *bp; struct ld_cac_softc *sc; int rv; bp = context; rv = 0; sc = device_private(dv); if ((error & CAC_RET_CMD_REJECTED) == CAC_RET_CMD_REJECTED) { aprint_error_dev(dv, "command rejected\n"); rv = EIO; } if (rv == 0 && (error & CAC_RET_INVAL_BLOCK) != 0) { aprint_error_dev(dv, "invalid request block\n"); rv = EIO; } if (rv == 0 && (error & CAC_RET_HARD_ERROR) != 0) { aprint_error_dev(dv, "hard error\n"); rv = EIO; } if (rv == 0 && (error & CAC_RET_SOFT_ERROR) != 0) { sc->sc_serrcnt++; if (ratecheck(&sc->sc_serrtm, &ld_cac_serrintvl)) { sc->sc_serrcnt = 0; aprint_error_dev(dv, "%d soft errors; array may be degraded\n", sc->sc_serrcnt); } } if (rv) { bp->b_error = rv; bp->b_resid = bp->b_bcount; } else bp->b_resid = 0; mutex_exit(sc->sc_mutex); lddone(&sc->sc_ld, bp); mutex_enter(sc->sc_mutex); }
static void ld_sdmmc_timeout(void *arg) { struct ld_sdmmc_task *task = (struct ld_sdmmc_task *)arg; struct ld_sdmmc_softc *sc = task->task_sc; struct buf *bp = task->task_bp; int s; s = splbio(); if (!sdmmc_task_pending(&task->task)) { splx(s); return; } bp->b_error = EIO; /* XXXX */ bp->b_resid = bp->b_bcount; sdmmc_del_task(&task->task); splx(s); lddone(&sc->sc_ld, bp); }
static void ld_twa_handler(struct twa_request *tr) { uint8_t status; struct buf *bp; struct ld_twa_softc *sc; bp = tr->bp; sc = (struct ld_twa_softc *)tr->tr_ld_sc; status = tr->tr_command->command.cmd_pkt_9k.status; if (status != 0) { bp->b_error = EIO; bp->b_resid = bp->b_bcount; } else { bp->b_resid = 0; bp->b_error = 0; } twa_release_request(tr); lddone(&sc->sc_ld, bp); }
static void ld_aac_intr(struct aac_ccb *ac) { struct aac_blockread_response *brr; struct aac_blockwrite_response *bwr; struct ld_aac_softc *sc; struct aac_softc *aac; struct buf *bp; u_int32_t status; bp = ac->ac_context; sc = device_private(ac->ac_device); aac = device_private(device_parent(ac->ac_device)); if ((bp->b_flags & B_READ) != 0) { brr = (struct aac_blockread_response *)&ac->ac_fib->data[0]; status = le32toh(brr->Status); } else { bwr = (struct aac_blockwrite_response *)&ac->ac_fib->data[0]; status = le32toh(bwr->Status); } aac_ccb_unmap(aac, ac); aac_ccb_free(aac, ac); if (status != ST_OK) { bp->b_error = EIO; bp->b_resid = bp->b_bcount; aprint_error_dev(sc->sc_ld.sc_dv, "I/O error: %s\n", aac_describe_code(aac_command_status_table, status)); } else bp->b_resid = 0; lddone(&sc->sc_ld, bp); }
void ld_cac_done(struct device *dv, void *context, int error) { struct buf *bp; struct ld_cac_softc *sc; bp = context; if ((error & CAC_RET_HARD_ERROR) != 0) { printf("%s: hard error\n", dv->dv_xname); error = EIO; } if ((error & CAC_RET_CMD_REJECTED) != 0) { printf("%s: invalid request\n", dv->dv_xname); error = EIO; } if ((error & CAC_RET_SOFT_ERROR) != 0) { sc = (struct ld_cac_softc *)dv; sc->sc_serrcnt++; if (ratecheck(&sc->sc_serrtm, &ld_cac_serrintvl)) { printf("%s: %d soft errors; array may be degraded\n", dv->dv_xname, sc->sc_serrcnt); sc->sc_serrcnt = 0; } error = 0; } if (error) { bp->b_flags |= B_ERROR; bp->b_error = error; bp->b_resid = bp->b_bcount; } else bp->b_resid = 0; lddone((struct ld_softc *)dv, bp); }
/*
 * Called at interrupt time. Mark the component as done and if all
 * components are done, take an "interrupt".
 */
static void
ld_ataraid_iodone_raid0(struct buf *vbp)
{
	struct cbuf *cbp = (struct cbuf *) vbp, *other_cbp;
	struct buf *bp = cbp->cb_obp;	/* originating (upper-layer) buf */
	struct ld_ataraid_softc *sc = cbp->cb_sc;
	struct ataraid_array_info *aai = sc->sc_aai;
	struct ataraid_disk_info *adi;
	long count;
	int s, iodone;

	s = splbio();
	/* Snapshot partner state before we detach from it. */
	iodone = cbp->cb_flags & CBUF_IODONE;
	other_cbp = cbp->cb_other;
	if (other_cbp != NULL)
		/* You are alone */
		other_cbp->cb_other = NULL;

	if (cbp->cb_buf.b_error != 0) {
		/*
		 * Mark this component broken.
		 */
		adi = &aai->aai_disks[cbp->cb_comp];
		adi->adi_status &= ~ADI_S_ONLINE;

		printf("%s: error %d on component %d (%s)\n",
		    device_xname(sc->sc_ld.sc_dv), bp->b_error,
		    cbp->cb_comp, device_xname(adi->adi_dev));

		/*
		 * If we didn't see an error yet and we are reading
		 * RAID1 disk, try another component.
		 *
		 * cb_comp < aai_width means this was the primary side of
		 * the mirror; retry on the partner (cb_comp + aai_width)
		 * if it is still online.
		 */
		if (bp->b_error == 0 &&
		    (cbp->cb_buf.b_flags & B_READ) != 0 &&
		    (aai->aai_level & AAI_L_RAID1) != 0 &&
		    cbp->cb_comp < aai->aai_width) {
			cbp->cb_comp += aai->aai_width;
			adi = &aai->aai_disks[cbp->cb_comp];
			if (adi->adi_status & ADI_S_ONLINE) {
				/* Reissue the same cbuf at the mirror. */
				cbp->cb_buf.b_error = 0;
				VOP_STRATEGY(cbp->cb_buf.b_vp,
				    &cbp->cb_buf);
				goto out;
			}
		}

		if (iodone || other_cbp != NULL)
			/*
			 * If I/O on other component successfully done
			 * or the I/O is still in progress, no need
			 * to tell an error to upper layer.
			 */
			;
		else {
			bp->b_error = cbp->cb_buf.b_error ?
			    cbp->cb_buf.b_error : EIO;
		}

		/* XXX Update component config blocks. */

	} else {
		/*
		 * If other I/O is still in progress, tell it that
		 * our I/O is successfully done.
		 */
		if (other_cbp != NULL)
			other_cbp->cb_flags |= CBUF_IODONE;
	}
	count = cbp->cb_buf.b_bcount;
	buf_destroy(&cbp->cb_buf);
	CBUF_PUT(cbp);

	/* Partner still outstanding: it will finish the upper-layer buf. */
	if (other_cbp != NULL)
		goto out;

	/* If all done, "interrupt". */
	bp->b_resid -= count;
	if (bp->b_resid < 0)
		panic("ld_ataraid_iodone_raid0: count");
	if (bp->b_resid == 0)
		lddone(&sc->sc_ld, bp);
out:
	splx(s);
}
static int ldstart(struct ld_softc *sc, struct buf *bp) { struct disklabel *lp; int part, s, rv; if ((sc->sc_flags & LDF_DETACH) != 0) { bp->b_error = EIO; bp->b_flags |= B_ERROR; bp->b_resid = bp->b_bcount; biodone(bp); return (-1); } part = DISKPART(bp->b_dev); lp = sc->sc_dk.dk_label; /* * The transfer must be a whole number of blocks and the offset must * not be negative. */ if ((bp->b_bcount % lp->d_secsize) != 0 || bp->b_blkno < 0) { bp->b_flags |= B_ERROR; biodone(bp); return (-1); } /* * If it's a null transfer, return. */ if (bp->b_bcount == 0) { bp->b_resid = bp->b_bcount; biodone(bp); return (-1); } /* * Do bounds checking and adjust the transfer. If error, process. * If past the end of partition, just return. */ if (part != RAW_PART && bounds_check_with_label(bp, lp, (sc->sc_flags & (LDF_WLABEL | LDF_LABELLING)) != 0) <= 0) { bp->b_resid = bp->b_bcount; biodone(bp); return (-1); } /* * Convert the logical block number to a physical one and put it in * terms of the device's logical block size. */ if (lp->d_secsize >= DEV_BSIZE) bp->b_rawblkno = bp->b_blkno / (lp->d_secsize / DEV_BSIZE); else bp->b_rawblkno = bp->b_blkno * (DEV_BSIZE / lp->d_secsize); if (part != RAW_PART) bp->b_rawblkno += lp->d_partitions[part].p_offset; s = splbio(); disk_busy(&sc->sc_dk); sc->sc_queuecnt++; splx(s); if ((rv = (*sc->sc_start)(sc, bp)) != 0) { bp->b_error = rv; bp->b_flags |= B_ERROR; bp->b_resid = bp->b_bcount; s = splbio(); lddone(sc, bp); splx(s); } return (0); }