/*
 * Perform the actual I/O for a vn device buffer.
 *
 * Maps the buffer into kernel address space, then either routes the
 * request through the shadow file (when one is attached) or performs
 * direct file I/O against the backing vnode.  The buffer is always
 * unmapped before returning.  Returns 0 on success or an errno value.
 */
static int
vn_readwrite_io(struct vn_softc *vn, struct buf *bp, vfs_context_t ctx)
{
	caddr_t vaddr;
	char *base;
	int err = 0;

	/* A failed mapping leaves us no way to move data; treat as fatal. */
	if (buf_map(bp, &vaddr))
		panic("vn device: buf_map failed");
	base = (char *)vaddr;

	if (vn->sc_shadow_vp != NULL) {
		/* Shadow file attached: dispatch to the shadow I/O path. */
		err = (buf_flags(bp) & B_READ)
		    ? shadow_read(vn, bp, base, ctx)
		    : shadow_write(vn, bp, base, ctx);
	} else {
		user_ssize_t resid_left;

		/* No shadow: do plain file I/O at the block's byte offset. */
		err = file_io(vn->sc_vp, ctx,
		    (buf_flags(bp) & B_READ) ? UIO_READ : UIO_WRITE,
		    base,
		    (off_t)buf_blkno(bp) * vn->sc_secsize,
		    buf_resid(bp), &resid_left);
		buf_setresid(bp, resid_left);
	}

	buf_unmap(bp);
	return err;
}
static int vnop_strategy_9p(struct vnop_strategy_args *ap) { mount_t mp; struct buf *bp; node_9p *np; caddr_t addr; uio_t uio; int e, flags; TRACE(); bp = ap->a_bp; np = NTO9P(buf_vnode(bp)); flags = buf_flags(bp); uio = NULL; addr = NULL; mp = vnode_mount(buf_vnode(bp)); if (mp == NULL) return ENXIO; if ((e=buf_map(bp, &addr))) goto error; uio = uio_create(1, buf_blkno(bp) * vfs_statfs(mp)->f_bsize, UIO_SYSSPACE, ISSET(flags, B_READ)? UIO_READ: UIO_WRITE); if (uio == NULL) { e = ENOMEM; goto error; } uio_addiov(uio, CAST_USER_ADDR_T(addr), buf_count(bp)); if (ISSET(flags, B_READ)) { if((e=nread_9p(np, uio))) goto error; /* zero the rest of the page if we reached EOF */ if (uio_resid(uio) > 0) { bzero(addr+buf_count(bp)-uio_resid(uio), uio_resid(uio)); uio_update(uio, uio_resid(uio)); } } else { if ((e=nwrite_9p(np, uio))) goto error; } buf_setresid(bp, uio_resid(uio)); error: if (uio) uio_free(uio); if (addr) buf_unmap(bp); buf_seterror(bp, e); buf_biodone(bp); return e; }
/*
 * Map a buffer's pages into kernel virtual address space.
 *
 * Must be called at IPL_BIO.  If the buffer is currently unmapped
 * (b_data == NULL), a MAXPHYS-sized KVA slot is obtained — first from
 * the pre-allocated region, then by stealing the mapping of another
 * buffer on buf_valist, sleeping on buf_needva until one is available —
 * and every page of the buffer is entered into the kernel pmap.  If the
 * buffer is already mapped, it is merely pulled off the valist so its
 * KVA cannot be stolen.  Either way the buffer ends up counted as
 * busy-mapped and B_NOTMAPPED is cleared.
 */
void
buf_map(struct buf *bp)
{
	vaddr_t va;

	splassert(IPL_BIO);

	if (bp->b_data == NULL) {
		unsigned long i;

		/*
		 * First, just use the pre-allocated space until we run out.
		 */
		if (buf_kva_start < buf_kva_end) {
			va = buf_kva_start;
			buf_kva_start += MAXPHYS;
			bcstats.kvaslots_avail--;
		} else {
			struct buf *vbp;

			/*
			 * Find some buffer we can steal the space from.
			 */
			while ((vbp = TAILQ_FIRST(&buf_valist)) == NULL) {
				/* Nothing mapped to steal from; record the
				 * shortage and wait for a slot to free up. */
				buf_needva++;
				buf_nkvmsleep++;
				tsleep(&buf_needva, PRIBIO, "buf_needva", 0);
			}
			va = buf_unmap(vbp);
		}

		/* Enter every page of the buffer under the object lock,
		 * then flush the pmap once for the whole range. */
		mtx_enter(&bp->b_pobj->vmobjlock);
		for (i = 0; i < atop(bp->b_bufsize); i++) {
			struct vm_page *pg = uvm_pagelookup(bp->b_pobj,
			    bp->b_poffs + ptoa(i));
			KASSERT(pg != NULL);
			pmap_kenter_pa(va + ptoa(i), VM_PAGE_TO_PHYS(pg),
			    VM_PROT_READ|VM_PROT_WRITE);
		}
		mtx_leave(&bp->b_pobj->vmobjlock);
		pmap_update(pmap_kernel());
		bp->b_data = (caddr_t)va;
	} else {
		/* Already mapped: remove from the steal list so the
		 * mapping stays pinned while the buffer is busy. */
		TAILQ_REMOVE(&buf_valist, bp, b_valist);
		bcstats.kvaslots_avail--;
	}

	bcstats.busymapped++;
	CLR(bp->b_flags, B_NOTMAPPED);
}
/*
 * Core of the MacFUSE strategy routine: satisfy a buffer-cache I/O
 * request (read or write, VREG or VDIR) by chunking it into FUSE_READ/
 * FUSE_READDIR or FUSE_WRITE messages of at most data->iosize bytes.
 *
 * A valid filehandle of a compatible type is located (or created) first;
 * failure to obtain one completes the buffer with EIO.  On the read path
 * a short reply zero-fills the remainder of the buffer (a file hole).
 * All I/O paths complete the buffer via buf_biodone() before returning,
 * except the buf_map() failures, which return EFAULT directly.
 *
 * Returns 0 on success or an errno value; the same error is also set on
 * the buffer where applicable.
 */
__private_extern__
int
fuse_internal_strategy(vnode_t vp, buf_t bp)
{
    size_t biosize;
    size_t chunksize;
    size_t respsize;

    int mapped = FALSE;
    int mode;
    int op;
    int vtype = vnode_vtype(vp);

    int err = 0;

    caddr_t bufdat;
    off_t left;
    off_t offset;
    int32_t bflags = buf_flags(bp);

    fufh_type_t fufh_type;
    struct fuse_dispatcher fdi;
    struct fuse_data *data;
    struct fuse_vnode_data *fvdat = VTOFUD(vp);
    struct fuse_filehandle *fufh = NULL;
    mount_t mp = vnode_mount(vp);

    data = fuse_get_mpdata(mp);

    biosize = data->blocksize;

    /* Only regular files and directories go through this path. */
    if (!(vtype == VREG || vtype == VDIR)) {
        return ENOTSUP;
    }

    if (bflags & B_READ) {
        mode = FREAD;
        fufh_type = FUFH_RDONLY; /* FUFH_RDWR will also do */
    } else {
        mode = FWRITE;
        fufh_type = FUFH_WRONLY; /* FUFH_RDWR will also do */
    }

    /*
     * If the node is still being created, wait for the creator to
     * finish.  fuse_msleep() with PDROP releases createlock; the else
     * branch covers the race where creation completed between the
     * unlocked and locked checks.
     */
    if (fvdat->flag & FN_CREATING) {
        fuse_lck_mtx_lock(fvdat->createlock);
        if (fvdat->flag & FN_CREATING) {
            (void)fuse_msleep(fvdat->creator, fvdat->createlock,
                              PDROP | PINOD | PCATCH, "fuse_internal_strategy",
                              NULL);
        } else {
            fuse_lck_mtx_unlock(fvdat->createlock);
        }
    }

    /* Prefer the mode-specific handle; fall back to FUFH_RDWR. */
    fufh = &(fvdat->fufh[fufh_type]);
    if (!FUFH_IS_VALID(fufh)) {
        fufh_type = FUFH_RDWR;
        fufh = &(fvdat->fufh[fufh_type]);
        if (!FUFH_IS_VALID(fufh)) {
            fufh = NULL;
        } else {
            /* We've successfully fallen back to FUFH_RDWR. */
        }
    }

    if (!fufh) {
        /* No usable handle cached; open a new one. */
        if (mode == FREAD) {
            fufh_type = FUFH_RDONLY;
        } else {
            /* Writes may also need to read back, so ask for RDWR. */
            fufh_type = FUFH_RDWR;
        }

        /*
         * Lets NOT do the filehandle preflight check here.
         */
        err = fuse_filehandle_get(vp, NULL, fufh_type, 0 /* mode */);
        if (!err) {
            fufh = &(fvdat->fufh[fufh_type]);
            FUFH_AUX_INC(fufh);
            /* We've created a NEW fufh of type fufh_type. open_count is 1. */
        }
    } else { /* good fufh */
        FUSE_OSAddAtomic(1, (SInt32 *)&fuse_fh_reuse_count);
        /* We're using an existing fufh of type fufh_type. */
    }

    if (err) {
        /* A more typical error case. */
        if ((err == ENOTCONN) || fuse_isdeadfs(vp)) {
            buf_seterror(bp, EIO);
            buf_biodone(bp);
            return EIO;
        }

        IOLog("MacFUSE: strategy failed to get fh "
              "(vtype=%d, fufh_type=%d, err=%d)\n", vtype, fufh_type, err);

        if (!vfs_issynchronous(mp)) {
            IOLog("MacFUSE: asynchronous write failed!\n");
        }

        buf_seterror(bp, EIO);
        buf_biodone(bp);
        return EIO;
    }

    if (!fufh) {
        panic("MacFUSE: tried everything but still no fufh");
        /* NOTREACHED */
    }

#define B_INVAL 0x00040000 /* Does not contain valid info. */
#define B_ERROR 0x00080000 /* I/O error occurred. */

    if (bflags & B_INVAL) {
        IOLog("MacFUSE: buffer does not contain valid information\n");
    }

    if (bflags & B_ERROR) {
        /* Fixed typo in the log message ("occured"). */
        IOLog("MacFUSE: an I/O error has occurred\n");
    }

    if (buf_count(bp) == 0) {
        return 0;
    }

    fdisp_init(&fdi, 0);

    if (mode == FREAD) {

        struct fuse_read_in *fri;

        buf_setresid(bp, buf_count(bp));
        offset = (off_t)((off_t)buf_blkno(bp) * biosize);

        if (offset >= fvdat->filesize) {
            /* Trying to read at/after EOF? */
            if (offset != fvdat->filesize) {
                /* Trying to read after EOF? */
                buf_seterror(bp, EINVAL);
            }
            buf_biodone(bp);
            return 0;
        }

        /* Note that we just made sure that offset < fvdat->filesize. */
        if ((offset + buf_count(bp)) > fvdat->filesize) {
            /* Trimming read */
            buf_setcount(bp, (uint32_t)(fvdat->filesize - offset));
        }

        if (buf_map(bp, &bufdat)) {
            IOLog("MacFUSE: failed to map buffer in strategy\n");
            return EFAULT;
        } else {
            mapped = TRUE;
        }

        while (buf_resid(bp) > 0) {

            chunksize = min((size_t)buf_resid(bp), data->iosize);

            fdi.iosize = sizeof(*fri);

            op = FUSE_READ;
            if (vtype == VDIR) {
                op = FUSE_READDIR;
            }

            fdisp_make_vp(&fdi, op, vp, (vfs_context_t)0);

            fri = fdi.indata;
            fri->fh = fufh->fh_id;

            /*
             * Historical note:
             *
             * fri->offset = ((off_t)(buf_blkno(bp))) * biosize;
             *
             * This wasn't being incremented!?
             */
            fri->offset = offset;
            fri->size = (typeof(fri->size))chunksize;
            fdi.tick->tk_aw_type = FT_A_BUF;
            fdi.tick->tk_aw_bufdata = bufdat;

            if ((err = fdisp_wait_answ(&fdi))) {
                /* There was a problem with reading. */
                goto out;
            }

            respsize = fdi.tick->tk_aw_bufsize;

            /*
             * Note: the former "respsize < 0" sanity check was removed —
             * respsize is a size_t, so the comparison was always false
             * (dead code, flagged by -Wtautological-compare).
             */

            buf_setresid(bp, (uint32_t)(buf_resid(bp) - respsize));
            bufdat += respsize;
            offset += respsize;

            /* Did we hit EOF before being done? */
            if ((respsize == 0) && (buf_resid(bp) > 0)) {
                /*
                 * Historical note:
                 * If we don't get enough data, just fill the rest with zeros.
                 * In NFS context, this would mean a hole in the file.
                 */

                /* Zero-pad the incomplete buffer. */
                bzero(bufdat, buf_resid(bp));
                buf_setresid(bp, 0);
                break;
            }
        } /* while (buf_resid(bp) > 0) */

    } else {
        /* write */

        struct fuse_write_in  *fwi;
        struct fuse_write_out *fwo;
        int merr = 0;

        if (buf_map(bp, &bufdat)) {
            IOLog("MacFUSE: failed to map buffer in strategy\n");
            return EFAULT;
        } else {
            mapped = TRUE;
        }

        /* Write begin */

        buf_setresid(bp, buf_count(bp));
        offset = (off_t)((off_t)buf_blkno(bp) * biosize);

        /* XXX: TBD -- Check here for extension (writing past end) */

        left = buf_count(bp);

        while (left) {

            fdi.iosize = sizeof(*fwi);
            op = FUSE_WRITE;

            fdisp_make_vp(&fdi, op, vp, (vfs_context_t)0);

            chunksize = min((size_t)left, data->iosize);

            fwi = fdi.indata;
            fwi->fh = fufh->fh_id;
            fwi->offset = offset;
            fwi->size = (typeof(fwi->size))chunksize;

            fdi.tick->tk_ms_type = FT_M_BUF;
            fdi.tick->tk_ms_bufdata = bufdat;
            fdi.tick->tk_ms_bufsize = chunksize;

            /* About to write <chunksize> at <offset> */

            if ((err = fdisp_wait_answ(&fdi))) {
                merr = 1;
                break;
            }

            fwo = fdi.answ;

            /*
             * Sanity-check the daemon's reply with an unsigned
             * comparison.  The old form computed
             * "diff = chunksize - fwo->size" in size_t arithmetic and
             * then tested "diff < 0", which depended on an
             * implementation-defined unsigned-to-signed conversion.
             */
            if (fwo->size > chunksize) {
                err = EINVAL;
                break;
            }

            /*
             * A zero-byte "successful" write would make no progress and
             * spin this loop forever; treat it as an I/O error instead.
             */
            if (fwo->size == 0) {
                err = EIO;
                break;
            }

            left -= fwo->size;
            bufdat += fwo->size;
            offset += fwo->size;
            buf_setresid(bp, buf_resid(bp) - fwo->size);
        }

        if (merr) {
            goto out;
        }
    }

    if (fdi.tick) {
        fuse_ticket_drop(fdi.tick);
    } else {
        /* No ticket upon leaving */
    }

out:

    if (err) {
        buf_seterror(bp, err);
    }

    if (mapped == TRUE) {
        buf_unmap(bp);
    }

    buf_biodone(bp);

    return err;
}