/*
 * Deferred ioctl handler: install a new MBR sector.  Scheduled through
 * g_call_me() by g_mbr_start() so that it runs where holding the
 * topology lock and issuing I/O are permitted.
 */
static void
g_mbr_ioctl(void *arg)
{
	struct bio *bio = arg;
	struct g_geom *geom = bio->bio_to->geom;
	struct g_slicer *slicer = geom->softc;
	struct g_mbr_softc *softc = slicer->softc;
	struct g_ioctl *ioctl_req;
	struct g_consumer *consumer;
	u_char *newsec;
	int err;

	/* The sector image to install is the ioctl payload. */
	ioctl_req = (struct g_ioctl *)bio->bio_data;
	newsec = ioctl_req->data;

	/* Validate and apply the new MBR in-core before touching disk. */
	err = g_mbr_modify(geom, softc, newsec);
	if (err != 0) {
		g_io_deliver(bio, err);
		return;
	}

	/* Persist the accepted sector to the underlying provider. */
	consumer = LIST_FIRST(&geom->consumer);
	err = g_write_data(consumer, 0, newsec, 512);
	g_io_deliver(bio, err);
}
/*
 * Start routine for the MBR slicer: answer attribute queries and the
 * MBR get/set ioctls.  Returns non-zero when the bio was consumed here
 * (delivered now or handed to the deferred ioctl handler), zero to let
 * the generic slicer code process it.
 */
static int
g_mbr_start(struct bio *bp)
{
	struct g_provider *pp;
	struct g_geom *gp;
	struct g_mbr_softc *mp;
	struct g_slicer *gsp;
	struct g_ioctl *gio;
	int idx, error;

	pp = bp->bio_to;
	idx = pp->index;
	gp = pp->geom;
	gsp = gp->softc;
	mp = gsp->softc;
	if (bp->bio_cmd == BIO_GETATTR) {
		if (g_handleattr_int(bp, "MBR::type", mp->type[idx]))
			return (1);
		if (g_handleattr_off_t(bp, "MBR::offset",
		    gsp->slices[idx].offset))
			return (1);
	}

	/* We only handle ioctl(2) requests of the right format. */
	if (strcmp(bp->bio_attribute, "GEOM::ioctl"))
		return (0);
	else if (bp->bio_length != sizeof(*gio))
		return (0);
	/* Get hold of the ioctl parameters. */
	gio = (struct g_ioctl *)bp->bio_data;

	switch (gio->cmd) {
	case DIOCGMBR:
		/* Return a copy of the MBR sector to userland. */
		bcopy(mp->sec0, gio->data, 512);
		g_io_deliver(bp, 0);
		return (1);
	case DIOCSMBR:
		/*
		 * These we cannot do without the topology lock and some
		 * I/O requests.  Ask the event-handler to schedule
		 * us in a less restricted environment.
		 */
		error = g_call_me(g_mbr_ioctl, bp);
		if (error)
			g_io_deliver(bp, error);
		/*
		 * We must return non-zero to indicate that we will deal
		 * with this bio, even though we have not done so yet.
		 */
		return (1);
	default:
		return (0);
	}
	/* NOTREACHED: every switch arm returns. */
	return (0);
}
/*
 * If the user tries to overwrite our disklabel through an open partition
 * or via a magicwrite config call, we end up here and try to prevent
 * footshooting as best we can.
 */
static void
g_bsd_hotwrite(void *arg, int flag)
{
	struct bio *bp;
	struct g_geom *gp;
	struct g_slicer *gsp;
	struct g_slice *gsl;
	struct g_bsd_softc *ms;
	u_char *p;
	int error;

	g_topology_assert();
	/*
	 * We should never get canceled, because that would amount to a removal
	 * of the geom while there was outstanding I/O requests.
	 */
	KASSERT(flag != EV_CANCEL, ("g_bsd_hotwrite cancelled"));
	bp = arg;
	gp = bp->bio_to->geom;
	gsp = gp->softc;
	ms = gsp->softc;
	gsl = &gsp->slices[bp->bio_to->index];
	/*
	 * Locate the label inside the buffer being written: labeloffset
	 * is absolute on the underlying device while the bio's offset is
	 * relative to the slice, so the difference points at the label.
	 */
	p = (u_char*)bp->bio_data + ms->labeloffset -
	    (bp->bio_offset + gsl->offset);
	error = g_bsd_modify(gp, p);
	if (error) {
		/* Refuse the write outright instead of passing the error up. */
		g_io_deliver(bp, EPERM);
		return;
	}
	g_slice_finish_hot(bp);
}
/*
 * Called when a write through an open partition would overwrite the
 * on-disk Sun label: validate the new label before letting it through,
 * refusing the write if it does not pass muster.
 */
static void
g_sunlabel_hotwrite(void *arg, int flag)
{
	struct bio *bp;
	struct g_geom *gp;
	struct g_slicer *gsp;
	struct g_slice *gsl;
	struct g_sunlabel_softc *ms;
	u_char *p;
	int error;

	/* Cancellation would mean the geom vanished with I/O outstanding. */
	KASSERT(flag != EV_CANCEL, ("g_sunlabel_hotwrite cancelled"));
	bp = arg;
	gp = bp->bio_to->geom;
	gsp = gp->softc;
	ms = gsp->softc;
	gsl = &gsp->slices[bp->bio_to->index];
	/*
	 * XXX: For all practical purposes, this would be equivalent to
	 * XXX: "p = (u_char *)bp->bio_data;" because the label is always
	 * XXX: in the first sector and we refuse sectors smaller than the
	 * XXX: label.
	 */
	p = (u_char *)bp->bio_data - (bp->bio_offset + gsl->offset);
	error = g_sunlabel_modify(gp, ms, p);
	if (error) {
		/* Refuse the write rather than propagate the raw error. */
		g_io_deliver(bp, EPERM);
		return;
	}
	g_slice_finish_hot(bp);
}
static void g_uzip_start(struct bio *bp) { struct g_provider *pp; struct g_geom *gp; struct g_uzip_softc *sc; pp = bp->bio_to; gp = pp->geom; DPRINTF(GUZ_DBG_IO, ("%s/%s: %p: cmd=%d, offset=%jd, length=%jd, " "buffer=%p\n", __func__, gp->name, bp, bp->bio_cmd, (intmax_t)bp->bio_offset, (intmax_t)bp->bio_length, bp->bio_data)); sc = gp->softc; sc->req_total++; if (bp->bio_cmd != BIO_READ) { g_io_deliver(bp, EOPNOTSUPP); return; } bp->bio_resid = bp->bio_length; bp->bio_completed = 0; g_uzip_request(gp, bp); }
/*
 * GEOM start routine for the ATAPI CD driver: validate the request and
 * either pass it straight to acd_strategy() or split it into clones no
 * larger than the controller's per-transfer limit (iomax).
 */
static void
acd_geom_start(struct bio *bp)
{
	device_t dev = bp->bio_to->geom->softc;
	struct acd_softc *cdp = device_get_ivars(dev);

	if (bp->bio_cmd != BIO_READ && bp->bio_cmd != BIO_WRITE) {
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}
	/* disk_size == -1 means no readable medium is known. */
	if (bp->bio_cmd == BIO_READ && cdp->disk_size == -1) {
		g_io_deliver(bp, EIO);
		return;
	}

	/* GEOM classes must do their own request limiting */
	if (bp->bio_length <= cdp->iomax) {
		bp->bio_pblkno = bp->bio_offset / bp->bio_to->sectorsize;
		acd_strategy(bp);
	} else {
		/* Largest sector-aligned chunk that fits under iomax. */
		u_int pos, size = cdp->iomax -
		    cdp->iomax % bp->bio_to->sectorsize;
		struct bio *bp2, *bp3;

		if (!(bp2 = g_clone_bio(bp)))
			g_io_deliver(bp, EIO);
		/*
		 * Issue one clone per chunk; bp3 holds the clone for the
		 * following iteration (NULL on the last chunk, which ends
		 * the loop).
		 */
		for (pos = 0; bp2; pos += size) {
			bp3 = NULL;
			bp2->bio_done = g_std_done;
			bp2->bio_to = bp->bio_to;
			bp2->bio_offset += pos;
			bp2->bio_data += pos;
			bp2->bio_length = bp->bio_length - pos;
			if (bp2->bio_length > size) {
				bp2->bio_length = size;
				/* More remains after this chunk. */
				if (!(bp3 = g_clone_bio(bp)))
					bp->bio_error = ENOMEM;
			}
			bp2->bio_pblkno = bp2->bio_offset /
			    bp2->bio_to->sectorsize;
			acd_strategy(bp2);
			bp2 = bp3;
		}
	}
}
/*
 * The function is called after data encryption.
 *
 * g_eli_start -> g_eli_crypto_run -> G_ELI_CRYPTO_WRITE_DONE -> g_io_request -> g_eli_write_done -> g_io_deliver
 */
static int
g_eli_crypto_write_done(struct cryptop *crp)
{
	struct g_eli_softc *sc;
	struct g_geom *gp;
	struct g_consumer *cp;
	struct bio *bp, *cbp;

	/* EAGAIN: the crypto driver asked us to resubmit the operation. */
	if (crp->crp_etype == EAGAIN) {
		if (g_eli_crypto_rerun(crp) == 0)
			return (0);
	}
	bp = (struct bio *)crp->crp_opaque;
	bp->bio_inbed++;
	if (crp->crp_etype == 0) {
		G_ELI_DEBUG(3, "Crypto WRITE request done (%d/%d).",
		    bp->bio_inbed, bp->bio_children);
	} else {
		G_ELI_DEBUG(1, "Crypto WRITE request failed (%d/%d) error=%d.",
		    bp->bio_inbed, bp->bio_children, crp->crp_etype);
		/* Remember only the first error. */
		if (bp->bio_error == 0)
			bp->bio_error = crp->crp_etype;
	}
	gp = bp->bio_to->geom;
	sc = gp->softc;
	/* Release the per-request key reference. */
	g_eli_key_drop(sc, crp->crp_desc->crd_key);
	/*
	 * All sectors are already encrypted?
	 */
	if (bp->bio_inbed < bp->bio_children)
		return (0);
	/* Reset accounting for the single write request issued below. */
	bp->bio_inbed = 0;
	bp->bio_children = 1;
	cbp = bp->bio_driver1;
	bp->bio_driver1 = NULL;
	if (bp->bio_error != 0) {
		G_ELI_LOGREQ(0, bp, "Crypto WRITE request failed (error=%d).",
		    bp->bio_error);
		free(bp->bio_driver2, M_ELI);
		bp->bio_driver2 = NULL;
		g_destroy_bio(cbp);
		g_io_deliver(bp, bp->bio_error);
		atomic_subtract_int(&sc->sc_inflight, 1);
		return (0);
	}
	/* The ciphertext lives in the driver2 buffer allocated earlier. */
	cbp->bio_data = bp->bio_driver2;
	cbp->bio_done = g_eli_write_done;
	cp = LIST_FIRST(&gp->consumer);
	cbp->bio_to = cp->provider;
	G_ELI_LOGREQ(2, cbp, "Sending request.");
	/*
	 * Send encrypted data to the provider.
	 */
	g_io_request(cbp, cp);
	return (0);
}
/*
 * ATA request completion callback: propagate the transfer count and
 * result to the GEOM layer, then release the request structure.
 */
static void
acd_done(struct ata_request *request)
{
	struct bio *bio = request->bio;

	bio->bio_completed = request->donecount;
	g_io_deliver(bio, request->result);
	ata_free_request(request);
}
/*
 * GBDE start routine.  Only data-path commands (read, write, delete)
 * are processed; attribute queries and anything unknown are refused
 * with EOPNOTSUPP.
 */
static void
g_bde_start(struct bio *bp)
{

	if (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE ||
	    bp->bio_cmd == BIO_DELETE)
		g_bde_start1(bp);
	else
		g_io_deliver(bp, EOPNOTSUPP);
}
/*
 * The function is called after we read and decrypt data.
 *
 * g_eli_start -> g_eli_crypto_read -> g_io_request -> g_eli_read_done -> g_eli_crypto_run -> G_ELI_CRYPTO_READ_DONE -> g_io_deliver
 */
static int
g_eli_crypto_read_done(struct cryptop *crp)
{
	struct g_eli_softc *sc;
	struct bio *bp;

	/* EAGAIN: the crypto driver asked us to resubmit the operation. */
	if (crp->crp_etype == EAGAIN) {
		if (g_eli_crypto_rerun(crp) == 0)
			return (0);
	}
	bp = (struct bio *)crp->crp_opaque;
	bp->bio_inbed++;
	if (crp->crp_etype == 0) {
		G_ELI_DEBUG(3, "Crypto READ request done (%d/%d).",
		    bp->bio_inbed, bp->bio_children);
		bp->bio_completed += crp->crp_olen;
	} else {
		G_ELI_DEBUG(1, "Crypto READ request failed (%d/%d) error=%d.",
		    bp->bio_inbed, bp->bio_children, crp->crp_etype);
		/* Remember only the first error. */
		if (bp->bio_error == 0)
			bp->bio_error = crp->crp_etype;
	}
	sc = bp->bio_to->geom->softc;
	/* Release the per-request key reference. */
	g_eli_key_drop(sc, crp->crp_desc->crd_key);
	/*
	 * Do we have all sectors already?
	 */
	if (bp->bio_inbed < bp->bio_children)
		return (0);
	free(bp->bio_driver2, M_ELI);
	bp->bio_driver2 = NULL;
	if (bp->bio_error != 0) {
		G_ELI_LOGREQ(0, bp, "Crypto READ request failed (error=%d).",
		    bp->bio_error);
		/* Do not report partial progress on a failed read. */
		bp->bio_completed = 0;
	}
	/*
	 * Read is finished, send it up.
	 */
	g_io_deliver(bp, bp->bio_error);
	atomic_subtract_int(&sc->sc_inflight, 1);
	return (0);
}
static void g_zero_start(struct bio *bp) { int error = ENXIO; switch (bp->bio_cmd) { case BIO_READ: if (g_zero_clear && (bp->bio_flags & BIO_UNMAPPED) == 0) memset(bp->bio_data, g_zero_byte, bp->bio_length); /* FALLTHROUGH */ case BIO_DELETE: case BIO_WRITE: bp->bio_completed = bp->bio_length; error = 0; break; case BIO_GETATTR: default: error = EOPNOTSUPP; break; } g_io_deliver(bp, error); }
/*
 * Try to satisfy (part of) a read from the single-cluster decompressed
 * cache (sc->last_blk / sc->last_buf, protected by sc->last_mtx).
 * Returns 1 if the whole bio was completed and delivered, 0 otherwise.
 */
static int
g_uzip_cached(struct g_geom *gp, struct bio *bp)
{
	struct g_uzip_softc *sc;
	off_t ofs;
	size_t blk, blkofs, usz;

	sc = gp->softc;
	ofs = bp->bio_offset + bp->bio_completed;
	blk = ofs / sc->blksz;
	mtx_lock(&sc->last_mtx);
	if (blk == sc->last_blk) {
		/* Cache hit: copy as much as the cached cluster provides. */
		blkofs = ofs % sc->blksz;
		usz = sc->blksz - blkofs;
		if (bp->bio_resid < usz)
			usz = bp->bio_resid;
		memcpy(bp->bio_data + bp->bio_completed,
		    sc->last_buf + blkofs, usz);
		sc->req_cached++;
		mtx_unlock(&sc->last_mtx);
		DPRINTF(GUZ_DBG_IO, ("%s/%s: %p: offset=%jd: got %jd bytes "
		    "from cache\n", __func__, gp->name, bp, (intmax_t)ofs,
		    (intmax_t)usz));
		bp->bio_completed += usz;
		bp->bio_resid -= usz;
		if (bp->bio_resid == 0) {
			/* Entire request served from cache. */
			g_io_deliver(bp, 0);
			return (1);
		}
	} else
		mtx_unlock(&sc->last_mtx);
	return (0);
}
/*
 * The function is called after we read and decrypt data.
 *
 * g_eli_start -> g_eli_auth_read -> g_io_request -> g_eli_read_done -> g_eli_auth_run -> G_ELI_AUTH_READ_DONE -> g_io_deliver
 */
static int
g_eli_auth_read_done(struct cryptop *crp)
{
	struct g_eli_softc *sc;
	struct bio *bp;

	/* EAGAIN: the crypto driver asked us to resubmit the operation. */
	if (crp->crp_etype == EAGAIN) {
		if (g_eli_crypto_rerun(crp) == 0)
			return (0);
	}
	bp = (struct bio *)crp->crp_opaque;
	bp->bio_inbed++;
	if (crp->crp_etype == 0) {
		bp->bio_completed += crp->crp_olen;
		G_ELI_DEBUG(3, "Crypto READ request done (%d/%d) (add=%jd completed=%jd).",
		    bp->bio_inbed, bp->bio_children, (intmax_t)crp->crp_olen,
		    (intmax_t)bp->bio_completed);
	} else {
		G_ELI_DEBUG(1, "Crypto READ request failed (%d/%d) error=%d.",
		    bp->bio_inbed, bp->bio_children, crp->crp_etype);
		/* Remember only the first error. */
		if (bp->bio_error == 0)
			bp->bio_error = crp->crp_etype;
	}
	sc = bp->bio_to->geom->softc;
	/* The HMAC key is the second descriptor in the chain. */
	g_eli_key_drop(sc, crp->crp_desc->crd_next->crd_key);
	/*
	 * Do we have all sectors already?
	 */
	if (bp->bio_inbed < bp->bio_children)
		return (0);
	if (bp->bio_error == 0) {
		u_int i, lsec, nsec, data_secsize, decr_secsize, encr_secsize;
		u_char *srcdata, *dstdata, *auth;
		off_t coroff, corsize;

		/*
		 * Verify data integrity based on calculated and read HMACs.
		 */
		/* Sectorsize of decrypted provider eg. 4096. */
		decr_secsize = bp->bio_to->sectorsize;
		/* The real sectorsize of encrypted provider, eg. 512. */
		encr_secsize =
		    LIST_FIRST(&sc->sc_geom->consumer)->provider->sectorsize;
		/* Number of data bytes in one encrypted sector, eg. 480. */
		data_secsize = sc->sc_data_per_sector;
		/* Number of sectors from decrypted provider, eg. 2. */
		nsec = bp->bio_length / decr_secsize;
		/* Number of sectors from encrypted provider, eg. 18. */
		nsec = (nsec * sc->sc_bytes_per_sector) / encr_secsize;
		/* Last sector number in every big sector, eg. 9. */
		lsec = sc->sc_bytes_per_sector / encr_secsize;

		srcdata = bp->bio_driver2;
		dstdata = bp->bio_data;
		auth = srcdata + encr_secsize * nsec;
		coroff = -1;
		corsize = 0;

		for (i = 1; i <= nsec; i++) {
			data_secsize = sc->sc_data_per_sector;
			/* The trailing small sector of a big sector is short. */
			if ((i % lsec) == 0)
				data_secsize = decr_secsize % data_secsize;
			if (bcmp(srcdata, auth, sc->sc_alen) != 0) {
				/*
				 * Corruption detected, remember the offset if
				 * this is the first corrupted sector and
				 * increase size.
				 */
				/* -1 flags an HMAC mismatch; mapped to EINVAL below. */
				if (bp->bio_error == 0)
					bp->bio_error = -1;
				if (coroff == -1) {
					coroff = bp->bio_offset +
					    (dstdata - (u_char *)bp->bio_data);
				}
				corsize += data_secsize;
			} else {
				/*
				 * No corruption, good.
				 * Report previous corruption if there was one.
				 */
				if (coroff != -1) {
					G_ELI_DEBUG(0, "%s: Failed to authenticate %jd "
					    "bytes of data at offset %jd.",
					    sc->sc_name, (intmax_t)corsize,
					    (intmax_t)coroff);
					coroff = -1;
					corsize = 0;
				}
				bcopy(srcdata + sc->sc_alen, dstdata,
				    data_secsize);
			}
			srcdata += encr_secsize;
			dstdata += data_secsize;
			auth += sc->sc_alen;
		}
		/* Report previous corruption if there was one. */
		if (coroff != -1) {
			G_ELI_DEBUG(0, "%s: Failed to authenticate %jd "
			    "bytes of data at offset %jd.",
			    sc->sc_name, (intmax_t)corsize, (intmax_t)coroff);
		}
	}
	free(bp->bio_driver2, M_ELI);
	bp->bio_driver2 = NULL;
	if (bp->bio_error != 0) {
		if (bp->bio_error == -1)
			bp->bio_error = EINVAL;
		else {
			G_ELI_LOGREQ(0, bp,
			    "Crypto READ request failed (error=%d).",
			    bp->bio_error);
		}
		bp->bio_completed = 0;
	}
	/*
	 * Read is finished, send it up.
	 */
	g_io_deliver(bp, bp->bio_error);
	atomic_subtract_int(&sc->sc_inflight, 1);
	return (0);
}
/*
 * Consume the raw compressed data read on behalf of bp's parent:
 * decompress cluster by cluster into the parent bio, refreshing the
 * single-cluster cache.  When done (or on error) deliver the parent,
 * otherwise issue another read for the remainder.
 */
static void
g_uzip_do(struct g_uzip_softc *sc, struct bio *bp)
{
	struct bio *bp2;
	struct g_provider *pp;
	struct g_consumer *cp;
	struct g_geom *gp;
	char *data, *data2;
	off_t ofs;
	size_t blk, blkofs, len, ulen, firstblk;
	int err;

	bp2 = bp->bio_parent;
	gp = bp2->bio_to->geom;

	cp = LIST_FIRST(&gp->consumer);
	pp = cp->provider;

	bp2->bio_error = bp->bio_error;
	if (bp2->bio_error != 0)
		goto done;

	/* Make sure there's forward progress. */
	if (bp->bio_completed == 0) {
		bp2->bio_error = ECANCELED;
		goto done;
	}

	ofs = bp2->bio_offset + bp2->bio_completed;
	firstblk = blk = ofs / sc->blksz;
	blkofs = ofs % sc->blksz;
	/* Compressed data may start mid-sector in the read buffer. */
	data = bp->bio_data + sc->toc[blk].offset % pp->sectorsize;
	data2 = bp2->bio_data + bp2->bio_completed;
	while (bp->bio_completed && bp2->bio_resid) {
		/* Back-referenced clusters need a fresh pass from the top. */
		if (blk > firstblk && !BLK_IS_CONT(sc, blk)) {
			DPRINTF_BLK(GUZ_DBG_IO, blk, ("%s/%s: %p: backref'ed "
			    "cluster #%u requested, looping around\n",
			    __func__, gp->name, bp2, (u_int)blk));
			goto done;
		}
		ulen = MIN(sc->blksz - blkofs, bp2->bio_resid);
		len = sc->toc[blk].blen;
		DPRINTF(GUZ_DBG_IO, ("%s/%s: %p/%ju: data2=%p, ulen=%u, "
		    "data=%p, len=%u\n", __func__, gp->name, gp,
		    bp->bio_completed, data2, (u_int)ulen, data, (u_int)len));
		if (len == 0) {
			/* All zero block: no cache update */
			bzero(data2, ulen);
		} else if (len <= bp->bio_completed) {
			/* Decompress into the cache buffer under the lock. */
			mtx_lock(&sc->last_mtx);
			err = sc->dcp->decompress(sc->dcp, gp->name, data,
			    len, sc->last_buf);
			if (err != 0) {
				/* Invalidate the cache on failure. */
				sc->last_blk = -1;
				mtx_unlock(&sc->last_mtx);
				bp2->bio_error = EILSEQ;
				DPRINTF(GUZ_DBG_ERR, ("%s/%s: decompress"
				    "(%p) failed\n", __func__, gp->name,
				    sc->dcp));
				goto done;
			}
			sc->last_blk = blk;
			memcpy(data2, sc->last_buf + blkofs, ulen);
			mtx_unlock(&sc->last_mtx);
			err = sc->dcp->rewind(sc->dcp, gp->name);
			if (err != 0) {
				bp2->bio_error = EILSEQ;
				DPRINTF(GUZ_DBG_ERR, ("%s/%s: rewind(%p) "
				    "failed\n", __func__, gp->name, sc->dcp));
				goto done;
			}
			data += len;
		} else
			break;	/* Not enough raw bytes for this cluster. */

		data2 += ulen;
		bp2->bio_completed += ulen;
		bp2->bio_resid -= ulen;
		bp->bio_completed -= len;
		blkofs = 0;
		blk++;
	}

done:
	/* Finish processing the request. */
	free(bp->bio_data, M_GEOM_UZIP);
	g_destroy_bio(bp);
	if (bp2->bio_error != 0 || bp2->bio_resid == 0)
		g_io_deliver(bp2, bp2->bio_error);
	else
		g_uzip_request(gp, bp2);
}
/*
 * Advance bp: satisfy what the cache and leading all-zero (Nil)
 * clusters can, then read the remaining compressed range from the
 * backing provider in one cluster-aligned request capped at MAXPHYS.
 * Returns non-zero if the bio was completed (delivered) here.
 */
static int
g_uzip_request(struct g_geom *gp, struct bio *bp)
{
	struct g_uzip_softc *sc;
	struct bio *bp2;
	struct g_consumer *cp;
	struct g_provider *pp;
	off_t ofs, start_blk_ofs;
	size_t i, start_blk, end_blk, zsize;

	if (g_uzip_cached(gp, bp) != 0)
		return (1);

	sc = gp->softc;

	cp = LIST_FIRST(&gp->consumer);
	pp = cp->provider;

	ofs = bp->bio_offset + bp->bio_completed;
	start_blk = ofs / sc->blksz;
	KASSERT(start_blk < sc->nblocks, ("start_blk out of range"));
	end_blk = howmany(ofs + bp->bio_resid, sc->blksz);
	KASSERT(end_blk <= sc->nblocks, ("end_blk out of range"));

	for (; BLK_IS_NIL(sc, start_blk) && start_blk < end_blk; start_blk++) {
		/* Fill in any leading Nil blocks */
		start_blk_ofs = ofs % sc->blksz;
		zsize = MIN(sc->blksz - start_blk_ofs, bp->bio_resid);
		DPRINTF_BLK(GUZ_DBG_IO, start_blk, ("%s/%s: %p/%ju: "
		    "filling %ju zero bytes\n", __func__, gp->name, gp,
		    (uintmax_t)bp->bio_completed, (uintmax_t)zsize));
		bzero(bp->bio_data + bp->bio_completed, zsize);
		bp->bio_completed += zsize;
		bp->bio_resid -= zsize;
		ofs += zsize;
	}

	if (start_blk == end_blk) {
		KASSERT(bp->bio_resid == 0, ("bp->bio_resid is invalid"));
		/*
		 * No non-Nil data is left, complete request immediately.
		 */
		DPRINTF(GUZ_DBG_IO, ("%s/%s: %p: all done returning %ju "
		    "bytes\n", __func__, gp->name, gp,
		    (uintmax_t)bp->bio_completed));
		g_io_deliver(bp, 0);
		return (1);
	}
	for (i = start_blk + 1; i < end_blk; i++) {
		/* Trim discontinuous areas if any */
		if (!BLK_IS_CONT(sc, i)) {
			end_blk = i;
			break;
		}
	}
	DPRINTF_BRNG(GUZ_DBG_IO, start_blk, end_blk, ("%s/%s: %p: "
	    "start=%u (%ju), end=%u (%ju)\n", __func__, gp->name, bp,
	    (u_int)start_blk, (uintmax_t)sc->toc[start_blk].offset,
	    (u_int)end_blk, (uintmax_t)BLK_ENDS(sc, end_blk - 1)));
	bp2 = g_clone_bio(bp);
	if (bp2 == NULL) {
		g_io_deliver(bp, ENOMEM);
		return (1);
	}
	bp2->bio_done = g_uzip_read_done;
	bp2->bio_offset = TOFF_2_BOFF(sc, pp, start_blk);
	while (1) {
		/* Shrink the cluster range until the read fits in MAXPHYS. */
		bp2->bio_length = TLEN_2_BLEN(sc, pp, bp2, end_blk - 1);
		if (bp2->bio_length <= MAXPHYS)
			break;
		if (end_blk == (start_blk + 1)) {
			/* A single cluster cannot be shrunk further. */
			break;
		}
		end_blk--;
	}

	DPRINTF(GUZ_DBG_IO, ("%s/%s: bp2->bio_length = %jd\n",
	    __func__, gp->name, (intmax_t)bp2->bio_length));

	bp2->bio_data = malloc(bp2->bio_length, M_GEOM_UZIP, M_NOWAIT);
	if (bp2->bio_data == NULL) {
		g_destroy_bio(bp2);
		g_io_deliver(bp, ENOMEM);
		return (1);
	}

	DPRINTF_BRNG(GUZ_DBG_IO, start_blk, end_blk, ("%s/%s: %p: "
	    "reading %jd bytes from offset %jd\n", __func__, gp->name, bp,
	    (intmax_t)bp2->bio_length, (intmax_t)bp2->bio_offset));

	g_io_request(bp2, cp);
	return (0);
}
/*
 * GEOM start routine for the AES layer.  Reads and attribute queries
 * are passed down shifted by one sector (the layer's own metadata
 * sector); writes are encrypted sector-by-sector into a fresh buffer
 * before being sent down.
 */
static void
g_aes_start(struct bio *bp)
{
	struct g_geom *gp;
	struct g_consumer *cp;
	struct g_aes_softc *sc;
	struct bio *bp2;
	u_char *p1, *p2, *b, *e;
	keyInstance ekey;
	off_t o;

	gp = bp->bio_to->geom;
	cp = LIST_FIRST(&gp->consumer);
	sc = gp->softc;
	switch (bp->bio_cmd) {
	case BIO_READ:
		bp2 = g_clone_bio(bp);
		if (bp2 == NULL) {
			g_io_deliver(bp, ENOMEM);
			return;
		}
		bp2->bio_done = g_aes_read_done;
		/* Skip the layer's leading sector. */
		bp2->bio_offset += sc->sectorsize;
		g_io_request(bp2, cp);
		break;
	case BIO_WRITE:
		bp2 = g_clone_bio(bp);
		if (bp2 == NULL) {
			g_io_deliver(bp, ENOMEM);
			return;
		}
		bp2->bio_done = g_aes_write_done;
		bp2->bio_offset += sc->sectorsize;
		bp2->bio_data = g_malloc(bp->bio_length, M_WAITOK);
		b = bp->bio_data;
		e = bp->bio_data;
		e += bp->bio_length;
		p2 = bp2->bio_data;
		o = bp->bio_offset;
		/* Encrypt each sector with a key derived from its offset. */
		for (p1 = b; p1 < e; p1 += sc->sectorsize) {
			g_aes_makekey(sc, o, &ekey, DIR_ENCRYPT);
			/* The length argument is in bits, hence "* 8". */
			rijndael_blockEncrypt(&sc->ci, &ekey, p1,
			    sc->sectorsize * 8, p2);
			p2 += sc->sectorsize;
			o += sc->sectorsize;
		}
		bzero(&ekey, sizeof ekey);	/* destroy evidence */
		g_io_request(bp2, cp);
		break;
	case BIO_GETATTR:
		bp2 = g_clone_bio(bp);
		if (bp2 == NULL) {
			g_io_deliver(bp, ENOMEM);
			return;
		}
		bp2->bio_done = g_std_done;
		bp2->bio_offset += sc->sectorsize;
		g_io_request(bp2, cp);
		break;
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}
	return;
}
/*
 * Translate a GEOM bio into an ATAPI read/write request and queue it.
 * Handles per-track addressing, end-of-media clipping for reads, and
 * block-size dependent command selection.
 */
static void
acd_strategy(struct bio *bp)
{
	device_t dev = bp->bio_to->geom->softc;
	struct ata_device *atadev = device_get_softc(dev);
	struct acd_softc *cdp = device_get_ivars(dev);
	struct ata_request *request;
	u_int32_t lba, lastlba, count;
	int8_t ccb[16];
	int track, blocksize;

	/* reject all queued entries if media changed */
	if (atadev->flags & ATA_D_MEDIA_CHANGED) {
		g_io_deliver(bp, EIO);
		return;
	}

	bzero(ccb, sizeof(ccb));

	track = bp->bio_to->index;

	if (track) {
		/* Track provider: address relative to the track start. */
		blocksize = (cdp->toc.tab[track - 1].control & 4) ?
		    2048 : 2352;
		lastlba = ntohl(cdp->toc.tab[track].addr.lba);
		lba = bp->bio_offset / blocksize;
		lba += ntohl(cdp->toc.tab[track - 1].addr.lba);
	} else {
		/* Whole-disc provider. */
		blocksize = cdp->block_size;
		lastlba = cdp->disk_size;
		lba = bp->bio_offset / blocksize;
	}

	count = bp->bio_length / blocksize;

	if (bp->bio_cmd == BIO_READ) {
		/* if transfer goes beyond range adjust it to be within limits */
		if (lba + count > lastlba) {
			/* if we are entirely beyond EOM return EOF */
			if (lastlba <= lba) {
				g_io_deliver(bp, 0);
				return;
			}
			count = lastlba - lba;
		}
		/*
		 * NOTE(review): byte 9 of READ CD selects the returned
		 * fields (0xf8 vs 0x10) -- confirm against the ATAPI/MMC
		 * specification before changing.
		 */
		switch (blocksize) {
		case 2048:
			ccb[0] = ATAPI_READ_BIG;
			break;
		case 2352:
			ccb[0] = ATAPI_READ_CD;
			ccb[9] = 0xf8;
			break;
		default:
			ccb[0] = ATAPI_READ_CD;
			ccb[9] = 0x10;
		}
	} else
		ccb[0] = ATAPI_WRITE_BIG;

	/* Big-endian LBA and transfer count in the CCB. */
	ccb[1] = 0;
	ccb[2] = lba>>24;
	ccb[3] = lba>>16;
	ccb[4] = lba>>8;
	ccb[5] = lba;
	ccb[6] = count>>16;
	ccb[7] = count>>8;
	ccb[8] = count;

	if (!(request = ata_alloc_request())) {
		g_io_deliver(bp, ENOMEM);
		return;
	}
	request->dev = dev;
	request->bio = bp;
	bcopy(ccb, request->u.atapi.ccb, 16);
	request->data = bp->bio_data;
	request->bytecount = count * blocksize;
	request->transfersize = min(request->bytecount, 65534);
	/* Writes are given a longer timeout. */
	request->timeout = (ccb[0] == ATAPI_WRITE_BIG) ? 60 : 30;
	request->retries = 2;
	request->callback = acd_done;
	request->flags = ATA_R_ATAPI;
	if (atadev->mode >= ATA_DMA)
		request->flags |= ATA_R_DMA;
	switch (bp->bio_cmd) {
	case BIO_READ:
		request->flags |= ATA_R_READ;
		break;
	case BIO_WRITE:
		request->flags |= ATA_R_WRITE;
		break;
	default:
		device_printf(dev, "unknown BIO operation\n");
		ata_free_request(request);
		g_io_deliver(bp, EIO);
		return;
	}
	ata_queue_request(request);
}
/*
 * GEOM start routine (single-pass uzip variant): serve single-cluster
 * reads from the decompressed cache when possible, otherwise read the
 * covering compressed byte range in one request and finish the work in
 * g_uzip_done().
 */
static void
g_uzip_start(struct bio *bp)
{
	struct bio *bp2;
	struct g_provider *pp, *pp2;
	struct g_geom *gp;
	struct g_consumer *cp;
	struct g_uzip_softc *sc;
	uint32_t start_blk, end_blk;
	size_t bsize;

	pp = bp->bio_to;
	gp = pp->geom;
	DPRINTF(("%s: start (%d)\n", gp->name, bp->bio_cmd));

	if (bp->bio_cmd != BIO_READ) {
		g_io_deliver(bp, EOPNOTSUPP);
		return;
	}

	cp = LIST_FIRST(&gp->consumer);
	pp2 = cp->provider;
	sc = gp->softc;

	/* Uncompressed cluster range covering this request. */
	start_blk = bp->bio_offset / sc->blksz;
	end_blk = (bp->bio_offset + bp->bio_length + sc->blksz - 1) /
	    sc->blksz;
	KASSERT(start_blk < sc->nblocks, ("start_blk out of range"));
	KASSERT(end_blk <= sc->nblocks, ("end_blk out of range"));

	sc->req_total++;
	if (start_blk + 1 == end_blk) {
		/* Single-cluster request: try the decompressed cache. */
		mtx_lock(&sc->last_mtx);
		if (start_blk == sc->last_blk) {
			off_t uoff;

			uoff = bp->bio_offset % sc->blksz;
			KASSERT(bp->bio_length <= sc->blksz - uoff,
			    ("cached data error"));
			memcpy(bp->bio_data, sc->last_buf + uoff,
			    bp->bio_length);
			sc->req_cached++;
			mtx_unlock(&sc->last_mtx);

			DPRINTF(("%s: start: cached 0 + %lld, %lld + 0 + %lld\n",
			    gp->name, bp->bio_length, uoff, bp->bio_length));
			bp->bio_completed = bp->bio_length;
			g_io_deliver(bp, 0);
			return;
		}
		mtx_unlock(&sc->last_mtx);
	}

	bp2 = g_clone_bio(bp);
	if (bp2 == NULL) {
		g_io_deliver(bp, ENOMEM);
		return;
	}
	bp2->bio_done = g_uzip_done;
	DPRINTF(("%s: start (%d..%d), %s: %d + %lld, %s: %d + %lld\n",
	    gp->name, start_blk, end_blk,
	    pp->name, pp->sectorsize, pp->mediasize,
	    pp2->name, pp2->sectorsize, pp2->mediasize));
	/* Sector-align the compressed byte range on the backing provider. */
	bsize = pp2->sectorsize;
	bp2->bio_offset = sc->offsets[start_blk] -
	    sc->offsets[start_blk] % bsize;
	bp2->bio_length = sc->offsets[end_blk] - bp2->bio_offset;
	bp2->bio_length = (bp2->bio_length + bsize - 1) / bsize * bsize;
	DPRINTF(("%s: start %lld + %lld -> %lld + %lld -> %lld + %lld\n",
	    gp->name,
	    bp->bio_offset, bp->bio_length,
	    sc->offsets[start_blk],
	    sc->offsets[end_blk] - sc->offsets[start_blk],
	    bp2->bio_offset, bp2->bio_length));
	bp2->bio_data = malloc(bp2->bio_length, M_GEOM_UZIP, M_NOWAIT);
	if (bp2->bio_data == NULL) {
		g_destroy_bio(bp2);
		g_io_deliver(bp, ENOMEM);
		return;
	}

	g_io_request(bp2, cp);
	DPRINTF(("%s: start ok\n", gp->name));
}
/*
 * Issue a read for the compressed data covering the unfinished part of
 * bp, shrinking the range until the sector-aligned request fits in
 * MAXPHYS.  Returns non-zero if the bio was completed here (cache hit
 * or allocation failure).
 */
static int
g_uzip_request(struct g_geom *gp, struct bio *bp)
{
	struct g_uzip_softc *sc;
	struct bio *bp2;
	struct g_consumer *cp;
	struct g_provider *pp;
	off_t ofs;
	size_t start_blk, end_blk;

	if (g_uzip_cached(gp, bp) != 0)
		return (1);

	sc = gp->softc;

	bp2 = g_clone_bio(bp);
	if (bp2 == NULL) {
		g_io_deliver(bp, ENOMEM);
		return (1);
	}
	bp2->bio_done = g_uzip_done;

	cp = LIST_FIRST(&gp->consumer);
	pp = cp->provider;

	/* Resume from where previous passes left off. */
	ofs = bp->bio_offset + bp->bio_completed;
	start_blk = ofs / sc->blksz;
	KASSERT(start_blk < sc->nblocks, ("start_blk out of range"));
	end_blk = (ofs + bp->bio_resid + sc->blksz - 1) / sc->blksz;
	KASSERT(end_blk <= sc->nblocks, ("end_blk out of range"));

	DPRINTF(("%s/%s: %p: start=%u (%jd), end=%u (%jd)\n",
	    __func__, gp->name, bp,
	    (u_int)start_blk, (intmax_t)sc->offsets[start_blk],
	    (u_int)end_blk, (intmax_t)sc->offsets[end_blk]));

	bp2->bio_offset = sc->offsets[start_blk] -
	    sc->offsets[start_blk] % pp->sectorsize;
	while (1) {
		/* Round up to whole sectors; retry with fewer clusters. */
		bp2->bio_length = sc->offsets[end_blk] - bp2->bio_offset;
		bp2->bio_length = (bp2->bio_length + pp->sectorsize - 1) /
		    pp->sectorsize * pp->sectorsize;
		if (bp2->bio_length <= MAXPHYS)
			break;

		end_blk--;
	}

	bp2->bio_data = malloc(bp2->bio_length, M_GEOM_UZIP, M_NOWAIT);
	if (bp2->bio_data == NULL) {
		g_destroy_bio(bp2);
		g_io_deliver(bp, ENOMEM);
		return (1);
	}

	DPRINTF(("%s/%s: %p: reading %jd bytes from offset %jd\n",
	    __func__, gp->name, bp,
	    (intmax_t)bp2->bio_length, (intmax_t)bp2->bio_offset));

	g_io_request(bp2, cp);
	return (0);
}
/*
 * Completion handler for compressed-data reads: inflate cluster by
 * cluster into the parent bio, caching the last decompressed cluster.
 * Deliver the parent when finished or failed, else request more data.
 */
static void
g_uzip_done(struct bio *bp)
{
	z_stream zs;
	struct bio *bp2;
	struct g_provider *pp;
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_uzip_softc *sc;
	char *data, *data2;
	off_t ofs;
	size_t blk, blkofs, len, ulen;

	bp2 = bp->bio_parent;
	gp = bp2->bio_to->geom;
	sc = gp->softc;

	cp = LIST_FIRST(&gp->consumer);
	pp = cp->provider;

	bp2->bio_error = bp->bio_error;
	if (bp2->bio_error != 0)
		goto done;

	/* Make sure there's forward progress. */
	if (bp->bio_completed == 0) {
		bp2->bio_error = ECANCELED;
		goto done;
	}

	zs.zalloc = z_alloc;
	zs.zfree = z_free;
	if (inflateInit(&zs) != Z_OK) {
		bp2->bio_error = EILSEQ;
		goto done;
	}

	ofs = bp2->bio_offset + bp2->bio_completed;
	blk = ofs / sc->blksz;
	blkofs = ofs % sc->blksz;
	/* The first cluster may start mid-sector in the read buffer. */
	data = bp->bio_data + sc->offsets[blk] % pp->sectorsize;
	data2 = bp2->bio_data + bp2->bio_completed;
	while (bp->bio_completed && bp2->bio_resid) {
		ulen = MIN(sc->blksz - blkofs, bp2->bio_resid);
		len = sc->offsets[blk + 1] - sc->offsets[blk];
		DPRINTF(("%s/%s: %p/%ju: data2=%p, ulen=%u, data=%p, len=%u\n",
		    __func__, gp->name, gp, bp->bio_completed, data2,
		    (u_int)ulen, data, (u_int)len));
		if (len == 0) {
			/* All zero block: no cache update */
			bzero(data2, ulen);
		} else if (len <= bp->bio_completed) {
			zs.next_in = data;
			zs.avail_in = len;
			zs.next_out = sc->last_buf;
			zs.avail_out = sc->blksz;
			/* Inflate into the cache buffer under the lock. */
			mtx_lock(&sc->last_mtx);
			if (inflate(&zs, Z_FINISH) != Z_STREAM_END) {
				/* Invalidate the cache on failure. */
				sc->last_blk = -1;
				mtx_unlock(&sc->last_mtx);
				inflateEnd(&zs);
				bp2->bio_error = EILSEQ;
				goto done;
			}
			sc->last_blk = blk;
			memcpy(data2, sc->last_buf + blkofs, ulen);
			mtx_unlock(&sc->last_mtx);
			if (inflateReset(&zs) != Z_OK) {
				inflateEnd(&zs);
				bp2->bio_error = EILSEQ;
				goto done;
			}
			data += len;
		} else
			break;	/* Not enough raw bytes for this cluster. */

		data2 += ulen;
		bp2->bio_completed += ulen;
		bp2->bio_resid -= ulen;
		bp->bio_completed -= len;
		blkofs = 0;
		blk++;
	}
	if (inflateEnd(&zs) != Z_OK)
		bp2->bio_error = EILSEQ;

done:
	/* Finish processing the request. */
	free(bp->bio_data, M_GEOM_UZIP);
	g_destroy_bio(bp);
	if (bp2->bio_error != 0 || bp2->bio_resid == 0)
		g_io_deliver(bp2, bp2->bio_error);
	else
		g_uzip_request(gp, bp2);
}
/*
 * The function is called after data encryption.
 *
 * g_eli_start -> g_eli_auth_run -> G_ELI_AUTH_WRITE_DONE -> g_io_request -> g_eli_write_done -> g_io_deliver
 */
static int
g_eli_auth_write_done(struct cryptop *crp)
{
	struct g_eli_softc *sc;
	struct g_consumer *cp;
	struct bio *bp, *cbp, *cbp2;
	u_int nsec;

	/* EAGAIN: the crypto driver asked us to resubmit the operation. */
	if (crp->crp_etype == EAGAIN) {
		if (g_eli_crypto_rerun(crp) == 0)
			return (0);
	}
	bp = (struct bio *)crp->crp_opaque;
	bp->bio_inbed++;
	if (crp->crp_etype == 0) {
		G_ELI_DEBUG(3, "Crypto WRITE request done (%d/%d).",
		    bp->bio_inbed, bp->bio_children);
	} else {
		G_ELI_DEBUG(1, "Crypto WRITE request failed (%d/%d) error=%d.",
		    bp->bio_inbed, bp->bio_children, crp->crp_etype);
		/* Remember only the first error. */
		if (bp->bio_error == 0)
			bp->bio_error = crp->crp_etype;
	}
	sc = bp->bio_to->geom->softc;
	/* Release the per-request key reference. */
	g_eli_key_drop(sc, crp->crp_desc->crd_key);
	/*
	 * All sectors are already encrypted?
	 */
	if (bp->bio_inbed < bp->bio_children)
		return (0);
	if (bp->bio_error != 0) {
		G_ELI_LOGREQ(0, bp, "Crypto WRITE request failed (error=%d).",
		    bp->bio_error);
		free(bp->bio_driver2, M_ELI);
		bp->bio_driver2 = NULL;
		cbp = bp->bio_driver1;
		bp->bio_driver1 = NULL;
		g_destroy_bio(cbp);
		g_io_deliver(bp, bp->bio_error);
		atomic_subtract_int(&sc->sc_inflight, 1);
		return (0);
	}
	cp = LIST_FIRST(&sc->sc_geom->consumer);
	cbp = bp->bio_driver1;
	bp->bio_driver1 = NULL;
	cbp->bio_to = cp->provider;
	cbp->bio_done = g_eli_write_done;

	/* Number of sectors from decrypted provider, eg. 1. */
	nsec = bp->bio_length / bp->bio_to->sectorsize;
	/* Number of sectors from encrypted provider, eg. 9. */
	nsec = (nsec * sc->sc_bytes_per_sector) / cp->provider->sectorsize;
	/* Convert the request to the encrypted provider's geometry. */
	cbp->bio_length = cp->provider->sectorsize * nsec;
	cbp->bio_offset = (bp->bio_offset / bp->bio_to->sectorsize) *
	    sc->sc_bytes_per_sector;
	cbp->bio_data = bp->bio_driver2;

	/*
	 * We write more than what is requested, so we have to be ready to write
	 * more than MAXPHYS.
	 */
	cbp2 = NULL;
	if (cbp->bio_length > MAXPHYS) {
		/* Split the oversized write into two child requests. */
		cbp2 = g_duplicate_bio(bp);
		cbp2->bio_length = cbp->bio_length - MAXPHYS;
		cbp2->bio_data = cbp->bio_data + MAXPHYS;
		cbp2->bio_offset = cbp->bio_offset + MAXPHYS;
		cbp2->bio_to = cp->provider;
		cbp2->bio_done = g_eli_write_done;
		cbp->bio_length = MAXPHYS;
	}
	/*
	 * Send encrypted data to the provider.
	 */
	G_ELI_LOGREQ(2, cbp, "Sending request.");
	/* Reset accounting for the write request(s) issued below. */
	bp->bio_inbed = 0;
	bp->bio_children = (cbp2 != NULL ? 2 : 1);
	g_io_request(cbp, cp);
	if (cbp2 != NULL) {
		G_ELI_LOGREQ(2, cbp2, "Sending request.");
		g_io_request(cbp2, cp);
	}
	return (0);
}
/*
 * Completion handler (single-pass uzip variant): inflate the whole
 * compressed range that was read into the parent bio in one pass,
 * updating the last-cluster cache, then deliver the parent bio.
 */
static void
g_uzip_done(struct bio *bp)
{
	int err;
	struct bio *bp2;
	z_stream zs;
	struct g_provider *pp, *pp2;
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_uzip_softc *sc;
	off_t pos, upos;
	uint32_t start_blk, i;
	size_t bsize;

	bp2 = bp->bio_parent;
	pp = bp2->bio_to;
	gp = pp->geom;
	cp = LIST_FIRST(&gp->consumer);
	pp2 = cp->provider;
	sc = gp->softc;
	DPRINTF(("%s: done\n", gp->name));

	bp2->bio_error = bp->bio_error;
	if (bp2->bio_error != 0)
		goto done;

	/*
	 * Uncompress data.
	 */
	zs.zalloc = z_alloc;
	zs.zfree = z_free;
	err = inflateInit(&zs);
	if (err != Z_OK) {
		bp2->bio_error = EIO;
		goto done;
	}

	start_blk = bp2->bio_offset / sc->blksz;
	bsize = pp2->sectorsize;
	/* The first cluster may begin mid-sector in the read buffer. */
	pos = sc->offsets[start_blk] % bsize;
	upos = 0;
	DPRINTF(("%s: done: start_blk %d, pos %lld, upos %lld (%lld, %d, %d)\n",
	    gp->name, start_blk, pos, upos,
	    bp2->bio_offset, sc->blksz, bsize));
	for (i = start_blk; upos < bp2->bio_length; i++) {
		off_t len, ulen, uoff;

		/* Offset within the first cluster; zero afterwards. */
		uoff = i == start_blk ? bp2->bio_offset % sc->blksz : 0;
		ulen = MIN(sc->blksz - uoff, bp2->bio_length - upos);
		len = sc->offsets[i + 1] - sc->offsets[i];

		if (len == 0) {
			/* All zero block: no cache update */
			bzero(bp2->bio_data + upos, ulen);
			upos += ulen;
			bp2->bio_completed += ulen;
			continue;
		}

		zs.next_in = bp->bio_data + pos;
		zs.avail_in = len;
		zs.next_out = sc->last_buf;
		zs.avail_out = sc->blksz;
		/* Inflate into the cache buffer under the lock. */
		mtx_lock(&sc->last_mtx);
		err = inflate(&zs, Z_FINISH);
		if (err != Z_STREAM_END) {
			/* Invalidate the cache on failure. */
			sc->last_blk = -1;
			mtx_unlock(&sc->last_mtx);
			DPRINTF(("%s: done: inflate failed (%lld + %lld -> %lld + %lld + %lld)\n",
			    gp->name, pos, len, uoff, upos, ulen));
			inflateEnd(&zs);
			bp2->bio_error = EIO;
			goto done;
		}
		sc->last_blk = i;
		DPRINTF(("%s: done: inflated %lld + %lld -> %lld + %lld + %lld\n",
		    gp->name,
		    pos, len,
		    uoff, upos, ulen));
		memcpy(bp2->bio_data + upos, sc->last_buf + uoff, ulen);
		mtx_unlock(&sc->last_mtx);

		pos += len;
		upos += ulen;
		bp2->bio_completed += ulen;
		err = inflateReset(&zs);
		if (err != Z_OK) {
			inflateEnd(&zs);
			bp2->bio_error = EIO;
			goto done;
		}
	}
	err = inflateEnd(&zs);
	if (err != Z_OK) {
		bp2->bio_error = EIO;
		goto done;
	}

done:
	/*
	 * Finish processing the request.
	 */
	DPRINTF(("%s: done: (%d, %lld, %ld)\n",
	    gp->name, bp2->bio_error, bp2->bio_completed, bp2->bio_resid));
	free(bp->bio_data, M_GEOM_UZIP);
	g_destroy_bio(bp);
	g_io_deliver(bp2, bp2->bio_error);
}