/*
 * hexprint: debugging aid.  Print the label "start", the byte count,
 * and then "len" bytes of "buf" as a single run of hex digits.
 * No trailing newline is emitted; the caller controls line breaks.
 */
static void
hexprint(const char *start, void *buf, int len)
{
	const unsigned char *p = buf;
	int i;

	DIAGCONDPANIC(len < 0, ("hexprint: called with len < 0"));
	printf("%s: len=%06d 0x", start, len);
	for (i = 0; i < len; i++)
		printf("%02x", p[i]);
}
/*
 * fill_ring: convert (a portion of) the parent transfer pxr into one
 * request descriptor on the shared Xen block-interface ring, accounted
 * against the child request "xr".  Walks the buffer a page at a time,
 * translating each page to a machine address, until either the transfer
 * is exhausted or the descriptor's segment table (MAX_BLK_SEGS) is full.
 * Advances the global producer index req_prod; the hypervisor is
 * notified elsewhere (see signal_requests_to_xen in the response path).
 */
static void
fill_ring(struct xbdreq *xr)
{
	struct xbdreq *pxr = xr->xr_parent;
	paddr_t pa;
	unsigned long ma;		/* machine (host-physical) address */
	vaddr_t addr, off;
	blk_ring_req_entry_t *ring_req;
	int breq, nr_sectors;

	/* Fill out a communications ring structure. */
	ring_req = &blk_ring->ring[MASK_BLK_IDX(req_prod)].req;
	/* id round-trips our request pointer through the backend untouched. */
	ring_req->id = (unsigned long)xr;
	ring_req->operation = pxr->xr_bp->b_flags & B_READ ?
	    XEN_BLOCK_READ : XEN_BLOCK_WRITE;
	ring_req->sector_number = (xen_sector_t)pxr->xr_bn;
	ring_req->device = pxr->xr_sc->sc_xd_device;

	DPRINTF(XBDB_IO, ("fill_ring(%d): bp %p sector %llu pxr %p xr %p\n",
	    MASK_BLK_IDX(req_prod), pxr->xr_bp,
	    (unsigned long long)pxr->xr_bn, pxr, xr));

	xr->xr_breq = 0;		/* bytes carried by this descriptor */
	ring_req->nr_segments = 0;
	addr = trunc_page(pxr->xr_data);
	off = pxr->xr_data - addr;	/* sub-page offset of first segment */
	while (pxr->xr_bqueue > 0) {
#if 0
		pmap_extract(vm_map_pmap(&bp->b_proc->p_vmspace->vm_map),
		    addr, &pa);
#else
		/* buffers are mapped in kernel space (cf. map_align). */
		pmap_extract(pmap_kernel(), addr, &pa);
#endif
		/* pseudo-phys -> machine address, plus in-page offset */
		ma = xpmap_ptom_masked(pa) + off;
		DIAGCONDPANIC((ma & (XEN_BSIZE - 1)) != 0,
		    ("xbd request ma not sector aligned"));

		/* take at most the remainder of this page */
		if (pxr->xr_bqueue > PAGE_SIZE - off)
			breq = PAGE_SIZE - off;
		else
			breq = pxr->xr_bqueue;

		nr_sectors = breq >> XEN_BSHIFT;
		/*
		 * NOTE(review): this bounds nr_sectors by XEN_BSIZE, not
		 * by sectors-per-page; presumably the interface packs the
		 * sector count into the low address bits — confirm against
		 * the blkif segment encoding below.
		 */
		DIAGCONDPANIC(nr_sectors >= XEN_BSIZE,
		    ("xbd request nr_sectors >= XEN_BSIZE"));

		DPRINTF(XBDB_IO, ("fill_ring(%d): va 0x%08lx pa 0x%08lx "
		    "ma 0x%08lx, sectors %d, left %ld/%ld\n",
		    MASK_BLK_IDX(req_prod), addr, pa, ma, nr_sectors,
		    pxr->xr_bqueue >> XEN_BSHIFT, pxr->xr_bqueue));

		/* segment = machine address with sector count in low bits */
		ring_req->buffer_and_sects[ring_req->nr_segments++] =
		    ma | nr_sectors;

		addr += PAGE_SIZE;
		pxr->xr_bqueue -= breq;
		pxr->xr_bn += nr_sectors;
		xr->xr_breq += breq;
		off = 0;	/* only the first segment can be unaligned */
		if (ring_req->nr_segments == MAX_BLK_SEGS)
			break;	/* descriptor full; caller queues another */
	}
	/* remember where the next child request must resume */
	pxr->xr_data = addr;

	req_prod++;
}
/*
 * cgd_cipher: encrypt or decrypt "len" bytes (a whole number of cipher
 * blocks) from srcv into dstv, one disk sector (secsize bytes) at a
 * time, using the "encblkno" IV method: the effective IV for each
 * sector is the encryption, under a zero IV, of that sector's block
 * number.  "dir" is CGD_CIPHER_ENCRYPT or CGD_CIPHER_DECRYPT; "blkno"
 * is the block number of the first sector and is incremented once per
 * sector processed.
 */
static void
cgd_cipher(struct cgd_softc *cs, void *dstv, void *srcv,
    size_t len, daddr_t blkno, size_t secsize, int dir)
{
	char		*dst = dstv;
	char		*src = srcv;
	cfunc_cipher	*cipher = cs->sc_cfuncs->cf_cipher;
	struct uio	dstuio;
	struct uio	srcuio;
	struct iovec	dstiov[2];
	struct iovec	srciov[2];
	size_t		blocksize = cs->sc_cdata.cf_blocksize;
	size_t		todo;
	char		sink[CGD_MAXBLOCKSIZE];		/* discarded output */
	char		zero_iv[CGD_MAXBLOCKSIZE];
	char		blkno_buf[CGD_MAXBLOCKSIZE];

	DPRINTF_FOLLOW(("cgd_cipher() dir=%d\n", dir));

	DIAGCONDPANIC(len % blocksize != 0,
	    ("cgd_cipher: len %% blocksize != 0"));

	/* ensure that sizeof(daddr_t) <= blocksize (for encblkno IVing) */
	DIAGCONDPANIC(sizeof(daddr_t) > blocksize,
	    ("cgd_cipher: sizeof(daddr_t) > blocksize"));

	memset(zero_iv, 0x0, blocksize);

	/*
	 * iovec 0 carries the block-number IV block (whose ciphertext
	 * lands in "sink" and is discarded); iovec 1 carries the sector
	 * data.  Chaining both through one CBC-style cipher call makes
	 * the encrypted block number act as the IV for the data.
	 */
	dstuio.uio_iov = dstiov;
	dstuio.uio_iovcnt = 2;

	srcuio.uio_iov = srciov;
	srcuio.uio_iovcnt = 2;

	dstiov[0].iov_base = sink;
	dstiov[0].iov_len = blocksize;
	srciov[0].iov_base = blkno_buf;
	srciov[0].iov_len = blocksize;

	for (; len > 0; len -= todo) {
		todo = MIN(len, secsize);	/* one sector per pass */

		dstiov[1].iov_base = dst;
		srciov[1].iov_base = src;
		dstiov[1].iov_len = todo;
		srciov[1].iov_len = todo;

		/* IV material: the block number, zero-padded to a block */
		memset(blkno_buf, 0x0, blocksize);
		blkno2blkno_buf(blkno_buf, blkno);
		if (dir == CGD_CIPHER_DECRYPT) {
			/*
			 * Decryption cannot chain the IV block through
			 * the data, so pre-encrypt the block number by
			 * itself (iovcnt 1) and substitute the result as
			 * the first block fed to the cipher below.
			 */
			dstuio.uio_iovcnt = 1;
			srcuio.uio_iovcnt = 1;
			IFDEBUG(CGDB_CRYPTO, hexprint("step 0: blkno_buf",
			    blkno_buf, blocksize));
			cipher(cs->sc_cdata.cf_priv, &dstuio, &srcuio,
			    zero_iv, CGD_CIPHER_ENCRYPT);
			memcpy(blkno_buf, sink, blocksize);
			dstuio.uio_iovcnt = 2;
			srcuio.uio_iovcnt = 2;
		}

		IFDEBUG(CGDB_CRYPTO, hexprint("step 1: blkno_buf",
		    blkno_buf, blocksize));
		cipher(cs->sc_cdata.cf_priv, &dstuio, &srcuio, zero_iv, dir);
		IFDEBUG(CGDB_CRYPTO, hexprint("step 2: sink",
		    sink, blocksize));

		dst += todo;
		src += todo;
		blkno++;
	}
}
/*
 * xbd_response_handler: consume completed requests from the shared Xen
 * block-interface ring.  Presumably invoked from the Xen event-channel
 * interrupt path (the unused "arg" and int return suggest a standard
 * interrupt handler) -- confirm against the establishing code.
 *
 * Each response carries the child request pointer in its "id" field.
 * The parent request (pxr) tracks outstanding bytes in xr_bdone; the
 * originating struct buf is completed only when the whole parent
 * transfer has drained.  Returns 0 unconditionally.
 */
static int
xbd_response_handler(void *arg)
{
	struct buf *bp;
	struct xbd_softc *xs;
	blk_ring_resp_entry_t *ring_resp;
	struct xbdreq *pxr, *xr;
	int i;

	/* Consume every response the backend has published so far. */
	for (i = resp_cons; i != blk_ring->resp_prod; i = BLK_RING_INC(i)) {
		ring_resp = &blk_ring->ring[MASK_BLK_IDX(i)].resp;
		/* "id" round-trips our request pointer through Xen. */
		xr = (struct xbdreq *)ring_resp->id;
		pxr = xr->xr_parent;

		DPRINTF(XBDB_IO, ("xbd_response_handler(%d): pxr %p xr %p "
		    "bdone %04lx breq %04lx\n", i, pxr, xr, pxr->xr_bdone,
		    xr->xr_breq));
		pxr->xr_bdone -= xr->xr_breq;
		DIAGCONDPANIC(pxr->xr_bdone < 0,
		    ("xbd_response_handler: pxr->xr_bdone < 0"));

		/* any non-zero status fails the whole parent transfer */
		if (__predict_false(ring_resp->status)) {
			pxr->xr_bp->b_flags |= B_ERROR;
			pxr->xr_bp->b_error = EIO;
		}

		if (xr != pxr) {
			/* child request: recycle it and unblock waiters */
			PUT_XBDREQ(xr);
			if (!SIMPLEQ_EMPTY(&xbdr_suspended))
				xbdresume();
		}

		if (pxr->xr_bdone == 0) {
			/* parent transfer fully drained: complete the buf */
			bp = pxr->xr_bp;
			xs = getxbd_softc(bp->b_dev);
			if (xs == NULL) { /* don't fail bp if we're shutdown */
				bp->b_flags |= B_ERROR;
				bp->b_error = EIO;
			}
			DPRINTF(XBDB_IO, ("xbd_response_handler(%d): "
			    "completed bp %p\n", i, bp));
			if (bp->b_flags & B_ERROR)
				bp->b_resid = bp->b_bcount;
			else
				bp->b_resid = 0;

			/* undo any bounce-buffer mapping set up at submit */
			if (pxr->xr_aligned)
				unmap_align(pxr);

			PUT_XBDREQ(pxr);
			if (xs) {
				disk_unbusy(&xs->sc_dksc.sc_dkdev,
				    (bp->b_bcount - bp->b_resid),
				    (bp->b_flags & B_READ));
#if NRND > 0
				/* feed completion into the entropy pool */
				rnd_add_uint32(&xs->rnd_source, bp->b_blkno);
#endif
			}
			biodone(bp);
			if (!SIMPLEQ_EMPTY(&xbdr_suspended))
				xbdresume();
			/*
			 * XXX possible lockup if this was the only
			 * active device and requests were held back in
			 * the queue.
			 */
			if (xs)
				dk_iodone(xs->sc_di, &xs->sc_dksc);
		}
	}
	resp_cons = i;
	/* check if xbdresume queued any requests */
	if (last_req_prod != req_prod)
		signal_requests_to_xen();
	return 0;
}