static int
nread_9p(node_9p *np, uio_t uio)
{
    openfid_9p *op;
    uint32_t n, l, sz;
    char *p;
    int e;

    TRACE();
    op = &np->openfid[OREAD];
    if (op->fid == NOFID)
        op = &np->openfid[ORDWR];
    if (op->fid == NOFID)
        return EBADF;

    sz = np->iounit;
    if (sz == 0)
        sz = np->nmp->msize-IOHDRSZ;

    p = malloc_9p(sz);
    if (p == NULL)
        return ENOMEM;

    e = 0;
    while (uio_resid(uio) > 0) {
        n = MIN(uio_resid(uio), sz);
        if ((e=read_9p(np->nmp, op->fid, p, n, uio_offset(uio), &l)) || l==0)
            break;
        if ((e=uiomove(p, l, uio)))
            break;
    }
    free_9p(p);
    return e;
}
/*
 * gfs_readdir_init: initiate a generic readdir
 *   st        - a pointer to an uninitialized gfs_readdir_state_t structure
 *   name_max  - the directory's maximum file name length
 *   ureclen   - the exported file-space record length (1 for non-legacy FSs)
 *   uiop      - the uiop passed to readdir
 *   parent    - the parent directory's inode
 *   self      - this directory's inode
 *   flags     - flags from VOP_READDIR
 *
 * Returns 0 or a non-zero errno.
 *
 * Typical VOP_READDIR usage of gfs_readdir_*:
 *
 *	if ((error = gfs_readdir_init(...)) != 0)
 *		return (error);
 *	eof = 0;
 *	while ((error = gfs_readdir_pred(..., &voffset)) == 0) {
 *		if (!consumer_entry_at(voffset))
 *			voffset = consumer_next_entry(voffset);
 *		if (consumer_eof(voffset)) {
 *			eof = 1;
 *			break;
 *		}
 *		if ((error = gfs_readdir_emit(..., voffset,
 *		    consumer_ino(voffset), consumer_name(voffset))) != 0)
 *			break;
 *	}
 *	return (gfs_readdir_fini(..., error, eofp, eof));
 *
 * As you can see, a zero result from gfs_readdir_pred() or
 * gfs_readdir_emit() indicates that processing should continue,
 * whereas a non-zero result indicates that the loop should terminate.
 * Most consumers need do nothing more than let gfs_readdir_fini()
 * determine what the cause of failure was and return the appropriate
 * value.
 */
int
gfs_readdir_init(gfs_readdir_state_t *st, int name_max, int ureclen,
    uio_t *uiop, ino64_t parent, ino64_t self, int flags)
{
    size_t dirent_size;
    boolean_t extended = (flags & VNODE_READDIR_EXTENDED);

    if (uio_offset(uiop) < 0 || uio_resid(uiop) <= 0 ||
        (uio_offset(uiop) % ureclen) != 0)
        return (EINVAL);

    st->grd_ureclen = ureclen;
    st->grd_oresid = uio_resid(uiop);
    st->grd_namlen = name_max;
    dirent_size = DIRENT_RECLEN(st->grd_namlen, extended);
    st->grd_dirent = kmem_zalloc(dirent_size, KM_SLEEP);
    st->grd_parent = parent;
    st->grd_self = self;
    st->grd_flags = flags;

    return (0);
}
static int
vnop_read_9p(struct vnop_read_args *ap)
{
    node_9p *np;
    vnode_t vp;
    uio_t uio;
    int e;

    TRACE();
    vp = ap->a_vp;
    uio = ap->a_uio;
    np = NTO9P(vp);

    if (vnode_isdir(vp))
        return EISDIR;

    if (uio_offset(uio) < 0)
        return EINVAL;

    if (uio_resid(uio) == 0)
        return 0;

    nlock_9p(np, NODE_LCK_SHARED);
    if (vnode_isnocache(vp) || ISSET(ap->a_ioflag, IO_NOCACHE)) {
        if (ISSET(np->flags, NODE_MMAPPED))
            ubc_msync(vp, 0, ubc_getsize(vp), NULL, UBC_PUSHDIRTY|UBC_SYNC);
        else
            cluster_push(vp, IO_SYNC);
        ubc_msync(vp, uio_offset(uio), uio_offset(uio)+uio_resid(uio), NULL, UBC_INVALIDATE);
        e = nread_9p(np, uio);
    } else
        e = cluster_read(vp, uio, np->dir.length, ap->a_ioflag);
    nunlock_9p(np);
    return e;
}
int
zfs_vn_rdwr(enum uio_rw rw, struct vnode *vp, caddr_t base, ssize_t len,
    offset_t offset, enum uio_seg seg, int ioflag, rlim64_t ulimit,
    cred_t *cr, ssize_t *residp)
{
    uio_t *auio;
    int spacetype;
    int error = 0;
    vfs_context_t vctx;

    spacetype = UIO_SEG_IS_USER_SPACE(seg) ? UIO_USERSPACE32 : UIO_SYSSPACE;

    vctx = vfs_context_create((vfs_context_t)0);
    auio = uio_create(1, 0, spacetype, rw);
    uio_reset(auio, offset, spacetype, rw);
    uio_addiov(auio, (uint64_t)(uintptr_t)base, len);

    if (rw == UIO_READ) {
        error = VNOP_READ(vp, auio, ioflag, vctx);
    } else {
        error = VNOP_WRITE(vp, auio, ioflag, vctx);
    }

    if (residp) {
        *residp = uio_resid(auio);
    } else {
        if (uio_resid(auio) && error == 0)
            error = EIO;
    }

    uio_free(auio);
    vfs_context_rele(vctx);

    return (error);
}
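/*
 * A minimal usage sketch (not from the source above): how a caller might
 * pull one block of a file into a kernel buffer with zfs_vn_rdwr() and
 * treat a short read as an error.  The names read_block_example, vp, off
 * and buf are hypothetical; the vnode is assumed to be already held, and
 * RLIM64_INFINITY is assumed to be available from the port's compat headers.
 */
static int
read_block_example(struct vnode *vp, offset_t off, cred_t *cr)
{
    char buf[512];
    ssize_t resid = 0;
    int error;

    error = zfs_vn_rdwr(UIO_READ, vp, (caddr_t)buf, sizeof (buf), off,
        UIO_SYSSPACE, 0 /* ioflag */, RLIM64_INFINITY, cr, &resid);
    if (error == 0 && resid != 0)
        error = EIO;    /* fewer than sizeof (buf) bytes were available */
    return (error);
}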
static int
vnop_write_9p(struct vnop_write_args *ap)
{
    vnode_t vp;
    node_9p *np;
    uio_t uio;
    user_ssize_t resid;
    off_t eof, zh, zt, off;
    int e, flag;

    TRACE();
    vp = ap->a_vp;
    uio = ap->a_uio;
    np = NTO9P(vp);

    if (vnode_isdir(vp))
        return EISDIR;

    off = uio_offset(uio);
    if (off < 0)
        return EINVAL;

    resid = uio_resid(uio);
    if (resid == 0)
        return 0;

    flag = ap->a_ioflag;
    if (ISSET(flag, IO_APPEND)) {
        off = np->dir.length;
        uio_setoffset(uio, off);
    }

    nlock_9p(np, NODE_LCK_EXCLUSIVE);
    if (vnode_isnocache(vp) || ISSET(flag, IO_NOCACHE)) {
        ubc_msync(vp, uio_offset(uio), uio_offset(uio)+uio_resid(uio), NULL, UBC_PUSHDIRTY|UBC_SYNC);
        ubc_msync(vp, uio_offset(uio), uio_offset(uio)+uio_resid(uio), NULL, UBC_INVALIDATE);
        e = nwrite_9p(np, uio);
    } else {
        zh = zt = 0;
        eof = MAX(np->dir.length, resid+off);
        if (eof > np->dir.length) {
            if (off > np->dir.length) {
                zh = np->dir.length;
                SET(flag, IO_HEADZEROFILL);
            }
            zt = (eof + (PAGE_SIZE_64 - 1)) & ~PAGE_MASK_64;
            if (zt > eof) {
                zt = eof;
                SET(flag, IO_TAILZEROFILL);
            }
        }
        e = cluster_write(vp, uio, np->dir.length, eof, zh, zt, flag);
        if (e==0 && eof>np->dir.length) {
            np->dirtimer = 0;
            np->dir.length = eof;
            ubc_setsize(vp, eof);
        }
    }
    nunlock_9p(np);
    return e;
}
static int
vnread_shadow(struct vn_softc * vn, struct uio *uio, int ioflag,
    vfs_context_t ctx)
{
    u_int32_t blocksize = vn->sc_secsize;
    int error = 0;
    off_t offset;
    user_ssize_t resid;
    off_t orig_offset;
    user_ssize_t orig_resid;

    orig_resid = resid = uio_resid(uio);
    orig_offset = offset = uio_offset(uio);

    while (resid > 0) {
        u_int32_t remainder;
        u_int32_t this_block_number;
        u_int32_t this_block_count;
        off_t this_offset;
        user_ssize_t this_resid;
        struct vnode * vp;

        /* figure out which blocks to read */
        remainder = block_remainder(offset, blocksize);
        if (shadow_map_read(vn->sc_shadow_map,
            block_truncate(offset, blocksize),
            block_round(resid + remainder, blocksize),
            &this_block_number, &this_block_count)) {
            vp = vn->sc_shadow_vp;
        } else {
            vp = vn->sc_vp;
        }

        /* read the blocks (or parts thereof) */
        this_offset = (off_t)this_block_number * blocksize + remainder;
        uio_setoffset(uio, this_offset);
        this_resid = this_block_count * blocksize - remainder;
        if (this_resid > resid) {
            this_resid = resid;
        }
        uio_setresid(uio, this_resid);
        error = VNOP_READ(vp, uio, ioflag, ctx);
        if (error) {
            break;
        }

        /* figure out how much we actually read */
        this_resid -= uio_resid(uio);
        if (this_resid == 0) {
            printf("vn device: vnread_shadow zero length read\n");
            break;
        }
        resid -= this_resid;
        offset += this_resid;
    }
    uio_setresid(uio, resid);
    uio_setoffset(uio, offset);
    return (error);
}
/*
 * gfs_readdir_emit_int: internal routine to emit directory entry
 *
 *   st    - the current readdir state, which must have d_ino/ed_ino
 *           and d_name/ed_name set
 *   uiop  - caller-supplied uio pointer
 *   next  - the offset of the next entry
 */
static int
gfs_readdir_emit_int(gfs_readdir_state_t *st, uio_t *uiop, offset_t next,
    int *ncookies, u_long **cookies)
{
    int reclen, namlen;
    dirent64_t *dp;
    boolean_t extended = (st->grd_flags & VNODE_READDIR_EXTENDED);

    dp = st->grd_dirent;
    namlen = strlen(dp->d_name);
    reclen = DIRENT_RECLEN(namlen, extended);

    dprintf("trying to add '%s': extended %d isascii %d: next %lld\n",
        dp->d_name, st->grd_flags & VNODE_READDIR_EXTENDED,
        is_ascii_str(dp->d_name), next);

    if (reclen > uio_resid(uiop)) {
        /*
         * Error if no entries were returned yet
         */
        if (uio_resid(uiop) == st->grd_oresid)
            return (EINVAL);
        return (-1);
    }

    if (extended) {
        // d_fileno

        /* NOTE: d_seekoff is the offset for the *next* entry */
        //next = &(dp->d_seekoff);
        dp->d_seekoff = next;
        dp->d_type = DT_DIR;
        dp->d_namlen = namlen;
        dp->d_reclen = (ushort_t)reclen;
    } else {
        /* XXX: This can change in the future. */
        dp->d_type = DT_DIR;
        dp->d_namlen = namlen;
        dp->d_reclen = (ushort_t)reclen;
    }

    if (uiomove((caddr_t)st->grd_dirent, reclen, UIO_READ, uiop))
        return (EFAULT);

    uio_setoffset(uiop, next);

    if (*cookies != NULL) {
        **cookies = next;
        (*cookies)++;
        (*ncookies)--;
        KASSERT(*ncookies >= 0, ("ncookies=%d", *ncookies));
    }

    dprintf("Copied out %d bytes\n", reclen);

    return (0);
}
__private_extern__
int
fuse_internal_readdir(vnode_t vp, uio_t uio, vfs_context_t context,
    struct fuse_filehandle *fufh, struct fuse_iov *cookediov, int *numdirent)
{
    int err = 0;
    struct fuse_dispatcher fdi;
    struct fuse_read_in *fri;
    struct fuse_data *data;

    if (uio_resid(uio) == 0) {
        return 0;
    }

    fdisp_init(&fdi, 0);

    /* Note that we DO NOT have a UIO_SYSSPACE here (so no need for p2p I/O). */

    while (uio_resid(uio) > 0) {
        fdi.iosize = sizeof(*fri);
        fdisp_make_vp(&fdi, FUSE_READDIR, vp, context);
        fri = fdi.indata;
        fri->fh = fufh->fh_id;
        fri->offset = uio_offset(uio);
        data = fuse_get_mpdata(vnode_mount(vp));
        fri->size = (typeof(fri->size))min((size_t)uio_resid(uio), data->iosize);

        if ((err = fdisp_wait_answ(&fdi))) {
            goto out;
        }

        if ((err = fuse_internal_readdir_processdata(vp, uio, fri->size,
            fdi.answ, fdi.iosize, cookediov, numdirent))) {
            break;
        }
    }

/* done: */

    fuse_ticket_drop(fdi.tick);

out:
    return ((err == -1) ? 0 : err);
}
static int
vnop_strategy_9p(struct vnop_strategy_args *ap)
{
    mount_t mp;
    struct buf *bp;
    node_9p *np;
    caddr_t addr;
    uio_t uio;
    int e, flags;

    TRACE();
    bp = ap->a_bp;
    np = NTO9P(buf_vnode(bp));
    flags = buf_flags(bp);
    uio = NULL;
    addr = NULL;

    mp = vnode_mount(buf_vnode(bp));
    if (mp == NULL)
        return ENXIO;

    if ((e=buf_map(bp, &addr)))
        goto error;

    uio = uio_create(1, buf_blkno(bp) * vfs_statfs(mp)->f_bsize, UIO_SYSSPACE,
        ISSET(flags, B_READ)? UIO_READ: UIO_WRITE);
    if (uio == NULL) {
        e = ENOMEM;
        goto error;
    }

    uio_addiov(uio, CAST_USER_ADDR_T(addr), buf_count(bp));
    if (ISSET(flags, B_READ)) {
        if((e=nread_9p(np, uio)))
            goto error;
        /* zero the rest of the page if we reached EOF */
        if (uio_resid(uio) > 0) {
            bzero(addr+buf_count(bp)-uio_resid(uio), uio_resid(uio));
            uio_update(uio, uio_resid(uio));
        }
    } else {
        if ((e=nwrite_9p(np, uio)))
            goto error;
    }
    buf_setresid(bp, uio_resid(uio));
error:
    if (uio)
        uio_free(uio);
    if (addr)
        buf_unmap(bp);
    buf_seterror(bp, e);
    buf_biodone(bp);
    return e;
}
/*
 * Vnode op for read
 */
int
fifo_read(struct vnop_read_args *ap)
{
    struct uio *uio = ap->a_uio;
    struct socket *rso = ap->a_vp->v_fifoinfo->fi_readsock;
    user_ssize_t startresid;
    int error;
    int rflags;

#if DIAGNOSTIC
    if (uio->uio_rw != UIO_READ)
        panic("fifo_read mode");
#endif
    if (uio_resid(uio) == 0)
        return (0);

    rflags = (ap->a_ioflag & IO_NDELAY) ? MSG_NBIO : 0;

    startresid = uio_resid(uio);

    /* fifo conformance - if we have a reader open on the fifo but no
     * writers then we need to make sure we do not block.  We do that by
     * checking the receive buffer and if empty set error to EWOULDBLOCK.
     * If error is set to EWOULDBLOCK we skip the call into soreceive
     */
    error = 0;
    if (ap->a_vp->v_fifoinfo->fi_writers < 1) {
        socket_lock(rso, 1);
        error = (rso->so_rcv.sb_cc == 0) ? EWOULDBLOCK : 0;
        socket_unlock(rso, 1);
    }

    /* skip soreceive to avoid blocking when we have no writers */
    if (error != EWOULDBLOCK) {
        error = soreceive(rso, (struct sockaddr **)0, uio,
            (struct mbuf **)0, (struct mbuf **)0, &rflags);
        if (error == 0)
            lock_vnode_and_post(ap->a_vp, 0);
    } else {
        /* clear EWOULDBLOCK and return EOF (zero) */
        error = 0;
    }
    /*
     * Clear EOF indication after first such return.
     */
    if (uio_resid(uio) == startresid) {
        socket_lock(rso, 1);
        rso->so_state &= ~SS_CANTRCVMORE;
        socket_unlock(rso, 1);
    }
    return (error);
}
/*
 * Get next character written in by user from uio.
 */
int
uwritec(uio_t uio)
{
    int c = 0;

    if (uio_resid(uio) <= 0)
        return (-1);
again:
    if (uio->uio_iovcnt <= 0)
        panic("uwritec: non-positive iovcnt");

    if (uio_iov_len(uio) == 0) {
        uio_next_iov(uio);
        if (--uio->uio_iovcnt == 0)
            return (-1);
        goto again;
    }

    switch (uio->uio_segflg) {

    case UIO_USERSPACE32:
    case UIO_USERSPACE:
        c = fubyte(CAST_USER_ADDR_T(uio->uio_iovs.iov32p->iov_base));
        break;

    case UIO_USERSPACE64:
        c = fubyte((user_addr_t)uio->uio_iovs.iov64p->iov_base);
        break;

    case UIO_SYSSPACE32:
    case UIO_SYSSPACE:
        c = *((caddr_t)uio->uio_iovs.iov32p->iov_base) & 0377;
        break;

    case UIO_USERISPACE32:
    case UIO_USERISPACE:
        c = fuibyte(CAST_USER_ADDR_T(uio->uio_iovs.iov32p->iov_base));
        break;

    default:
        c = 0;  /* avoid uninitialized variable warning */
        panic("uwritec: bogus uio_segflg");
        break;
    }
    if (c < 0)
        return (-1);

    uio_iov_base_add(uio, 1);
    uio_iov_len_add(uio, -1);
    uio_setresid(uio, (uio_resid(uio) - 1));
    uio->uio_offset++;
    return (c);
}
int
readFile(char *file, uint8_t *buffer, off_t offset, user_size_t size)
{
    int res = EIO;

    vfs_context_t vfsContext = vfs_context_create(NULL);
    if (vfsContext == NULL) {
        return EIO;
    }

    vnode_t fileVnode = NULLVP;
    if (vnode_lookup(file, 0, &fileVnode, vfsContext) == 0) {
        uio_t uio = uio_create(1, offset, UIO_SYSSPACE, UIO_READ);
        if (uio == NULL)
            goto exit;

        if (uio_addiov(uio, CAST_USER_ADDR_T(buffer), size))
            goto exit;

        if (VNOP_READ(fileVnode, uio, 0, vfsContext))
            goto exit;

        if (uio_resid(uio))
            goto exit;

        res = 0;
    } else {
        vfs_context_rele(vfsContext);
        return ENOENT;
    }

exit:
    vnode_put(fileVnode);
    vfs_context_rele(vfsContext);
    return res;
}
/*
 * Give next character to user as result of read.
 */
int
ureadc(int c, struct uio *uio)
{
    if (uio_resid(uio) <= 0)
        panic("ureadc: non-positive resid");
    uio_update(uio, 0);
    if (uio->uio_iovcnt == 0)
        panic("ureadc: non-positive iovcnt");
    if (uio_curriovlen(uio) <= 0)
        panic("ureadc: non-positive iovlen");

    switch ((int) uio->uio_segflg) {

    case UIO_USERSPACE32:
    case UIO_USERSPACE:
    case UIO_USERISPACE32:
    case UIO_USERISPACE:
    case UIO_USERSPACE64:
    case UIO_USERISPACE64:
        if (subyte((user_addr_t)uio->uio_iovs.uiovp->iov_base, c) < 0)
            return (EFAULT);
        break;

    case UIO_SYSSPACE32:
    case UIO_SYSSPACE:
        *(CAST_DOWN(caddr_t, uio->uio_iovs.kiovp->iov_base)) = c;
        break;

    default:
        break;
    }
    uio_update(uio, 1);
    return (0);
}
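/*
 * A sketch of the typical character-at-a-time consumer of ureadc() (not
 * part of the source above): drain a driver-private queue into the
 * caller's buffer until it is full or the queue runs dry.  next_char()
 * is a hypothetical stand-in for whatever per-driver queue (clist, ring
 * buffer, ...) supplies data and is assumed to return -1 when empty.
 */
extern int next_char(void);    /* hypothetical driver-specific queue getter */

static int
drain_queue_example(struct uio *uio)
{
    int c, error = 0;

    while (uio_resid(uio) > 0) {
        if ((c = next_char()) < 0)
            break;                  /* queue empty */
        if ((error = ureadc(c, uio)) != 0)
            break;                  /* e.g. EFAULT from subyte() */
    }
    return (error);
}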
static int
mdevrw(dev_t dev, struct uio *uio, __unused int ioflag)
{
    int status;
    addr64_t mdata;
    int devid;
    enum uio_seg saveflag;

    devid = minor(dev);                     /* Get minor device number */

    if (devid > 16)
        return (ENXIO);                     /* Not valid */

    if (!(mdev[devid].mdFlags & mdInited))
        return (ENXIO);                     /* Have we actually been defined yet? */

    mdata = ((addr64_t)mdev[devid].mdBase << 12) + uio->uio_offset; /* Point to the area in "file" */

    saveflag = uio->uio_segflg;             /* Remember what the request is */
#if LP64_DEBUG
    if (UIO_IS_USER_SPACE(uio) == 0 && UIO_IS_SYS_SPACE(uio) == 0) {
        panic("mdevrw - invalid uio_segflg\n");
    }
#endif /* LP64_DEBUG */

    /* Make sure we are moving from physical ram if physical device */
    if (mdev[devid].mdFlags & mdPhys) {
        if (uio->uio_segflg == UIO_USERSPACE64)
            uio->uio_segflg = UIO_PHYS_USERSPACE64;
        else if (uio->uio_segflg == UIO_USERSPACE32)
            uio->uio_segflg = UIO_PHYS_USERSPACE32;
        else
            uio->uio_segflg = UIO_PHYS_USERSPACE;
    }

    status = uiomove64(mdata, uio_resid(uio), uio);     /* Move the data */

    uio->uio_segflg = saveflag;             /* Restore the flag */

    return (status);
}
/*
 * retrieve the first page of kernel binary at disk into a buffer
 * version that uses KPI VFS functions and a ripped uio_createwithbuffer() from XNU
 */
kern_return_t
get_mach_header(void *buffer, vnode_t vnode, vfs_context_t ctxt)
{
    int error = 0;

    uio_t uio = uio_create(1, 0, UIO_SYSSPACE, UIO_READ);
    if (uio == NULL) {
        return KERN_FAILURE;
    }
    // imitate the kernel and read a single page from the header
    error = uio_addiov(uio, CAST_USER_ADDR_T(buffer), HEADER_SIZE);
    if (error) {
        return error;
    }
    // read kernel vnode into the buffer
    error = VNOP_READ(vnode, uio, 0, ctxt);
    if (error) {
        return error;
    } else if (uio_resid(uio))
        return EINVAL;

    // process the header
    uint32_t magic = *(uint32_t*)buffer;
    if (magic != MH_MAGIC_64) {
        return KERN_FAILURE;
    }

    return KERN_SUCCESS;
}
/*
 * gfs_readdir_pred: readdir loop predicate
 *   voffp - a pointer in which the next virtual offset should be stored
 *
 * Returns a 0 on success, a non-zero errno on failure, or -1 if the
 * readdir loop should terminate.  A non-zero result (either errno or
 * -1) from this function is typically passed directly to
 * gfs_readdir_fini().
 */
int
gfs_readdir_pred(gfs_readdir_state_t *st, uio_t *uiop, offset_t *voffp,
    int *ncookies, u_long **cookies)
{
    offset_t off, voff;
    int error;

top:
    if (uio_resid(uiop) <= 0)
        return (-1);

    off = uio_offset(uiop) / st->grd_ureclen;
    voff = off - 2;
    if (off == 0) {
        if ((error = gfs_readdir_emit(st, uiop, voff, st->grd_self,
            ".", 0, ncookies, cookies)) == 0)
            goto top;
    } else if (off == 1) {
        dprintf("Sending out .. with id %d\n", st->grd_parent);
        if ((error = gfs_readdir_emit(st, uiop, voff, st->grd_parent,
            "..", 0, ncookies, cookies)) == 0)
            goto top;
    } else {
        *voffp = voff;
        return (0);
    }

    return (error);
}
// This function uses as many pmem_partial_read() calls as necessary,
// to copy uio->resid bytes of physical memory from the physical address, as
// specified in uio->offset to the buffer in the uio.
static kern_return_t
pmem_read_memory(struct uio *uio)
{
    while (uio_resid(uio) > 0) {
        // Try to read as many times as necessary until the uio is full.
        pmem_partial_read(uio, uio_offset(uio),
            uio_offset(uio) + uio_curriovlen(uio));
    }
    return KERN_SUCCESS;
}
/*
 * Give next character to user as result of read.
 */
int
ureadc(int c, struct uio *uio)
{
    if (uio_resid(uio) <= 0)
        panic("ureadc: non-positive resid");
again:
    if (uio->uio_iovcnt == 0)
        panic("ureadc: non-positive iovcnt");

    if (uio_iov_len(uio) <= 0) {
        uio->uio_iovcnt--;
        uio_next_iov(uio);
        goto again;
    }

    switch (uio->uio_segflg) {

    case UIO_USERSPACE32:
    case UIO_USERSPACE:
        if (subyte(CAST_USER_ADDR_T(uio->uio_iovs.iov32p->iov_base), c) < 0)
            return (EFAULT);
        break;

    case UIO_USERSPACE64:
        if (subyte((user_addr_t)uio->uio_iovs.iov64p->iov_base, c) < 0)
            return (EFAULT);
        break;

    case UIO_SYSSPACE32:
    case UIO_SYSSPACE:
        *((caddr_t)uio->uio_iovs.iov32p->iov_base) = c;
        break;

    case UIO_USERISPACE32:
    case UIO_USERISPACE:
        if (suibyte(CAST_USER_ADDR_T(uio->uio_iovs.iov32p->iov_base), c) < 0)
            return (EFAULT);
        break;

    default:
        break;
    }

    uio_iov_base_add(uio, 1);
    uio_iov_len_add(uio, -1);
    uio_setresid(uio, (uio_resid(uio) - 1));
    uio->uio_offset++;
    return (0);
}
/*
 * Gets the first page of kernel binary at disk into a buffer
 * Uses KPI VFS functions and a ripped uio_createwithbuffer() from XNU
 */
static kern_return_t
get_k_mh(void *buffer, vnode_t k_vnode, struct kernel_info *kinfo)
{
    uio_t uio = uio_create(1, 0, UIO_SYSSPACE, UIO_READ);
    if(uio == NULL) {
        return KERN_FAILURE;
    }

    /* imitate the kernel and read a single page from the header */
    if(uio_addiov(uio, CAST_USER_ADDR_T(buffer), HEADER_SIZE) != 0) {
        return KERN_FAILURE;
    }

    /* read kernel vnode into the buffer */
    if(VNOP_READ(k_vnode, uio, 0, NULL) != 0) {
        return KERN_FAILURE;
    } else if(uio_resid(uio)) {
        return KERN_FAILURE;
    }

    /* process the header */
    uint32_t magic = *(uint32_t *)buffer;
    if(magic == FAT_CIGAM) {
        struct fat_header *fh = (struct fat_header *)buffer;
        struct fat_arch *fa = (struct fat_arch *)(buffer + sizeof(struct fat_header));
        uint32_t file_off = 0;

        for(uint32_t i = 0; i < ntohl(fh->nfat_arch); i++) {
            if(sizeof(void *) == 8 && ntohl(fa->cputype) == CPU_TYPE_X86_64) {
                file_off = ntohl(fa->offset);
                break;
            } else if(sizeof(void *) == 4 && ntohl(fa->cputype) == CPU_TYPE_X86) {
                file_off = ntohl(fa->offset);
                break;
            }
            fa++;
        }

        /* read again */
        uio = uio_create(1, file_off, UIO_SYSSPACE, UIO_READ);
        uio_addiov(uio, CAST_USER_ADDR_T(buffer), HEADER_SIZE);
        VNOP_READ(k_vnode, uio, 0, NULL);

        kinfo->fat_offset = file_off;
    } else {
        kinfo->fat_offset = 0;
    }

    return KERN_SUCCESS;
}
int
fuse_internal_readdir(struct vnode *vp,
    struct uio *uio,
    struct fuse_filehandle *fufh,
    struct fuse_iov *cookediov)
{
    int err = 0;
    struct fuse_dispatcher fdi;
    struct fuse_read_in *fri;

    if (uio_resid(uio) == 0) {
        return 0;
    }
    fdisp_init(&fdi, 0);

    /*
     * Note that we DO NOT have a UIO_SYSSPACE here (so no need for p2p
     * I/O).
     */

    while (uio_resid(uio) > 0) {
        fdi.iosize = sizeof(*fri);
        fdisp_make_vp(&fdi, FUSE_READDIR, vp, NULL, NULL);
        fri = fdi.indata;
        fri->fh = fufh->fh_id;
        fri->offset = uio_offset(uio);
        fri->size = min(uio_resid(uio), FUSE_DEFAULT_IOSIZE);
        /* mp->max_read */

        if ((err = fdisp_wait_answ(&fdi))) {
            break;
        }
        if ((err = fuse_internal_readdir_processdata(uio, fri->size,
            fdi.answ, fdi.iosize, cookediov))) {
            break;
        }
    }

    fdisp_destroy(&fdi);
    return ((err == -1) ? 0 : err);
}
/*
 * Our version of vn_rdwr, here "vp" is not actually a vnode, but a ptr
 * to the node allocated in getf(). We use the "fp" part of the node to
 * be able to issue IO.
 * You must call getf() before calling spl_vn_rdwr().
 */
int
spl_vn_rdwr(enum uio_rw rw, struct vnode *vp, caddr_t base, ssize_t len,
    offset_t offset, enum uio_seg seg, int ioflag,
    rlim64_t ulimit,    /* meaningful only if rw is UIO_WRITE */
    cred_t *cr, ssize_t *residp)
{
    struct spl_fileproc *sfp = (struct spl_fileproc*)vp;
    uio_t *auio;
    int spacetype;
    int error = 0;
    vfs_context_t vctx;

    spacetype = UIO_SEG_IS_USER_SPACE(seg) ? UIO_USERSPACE32 : UIO_SYSSPACE;

    vctx = vfs_context_create((vfs_context_t)0);
    auio = uio_create(1, 0, spacetype, rw);
    uio_reset(auio, offset, spacetype, rw);
    uio_addiov(auio, (uint64_t)(uintptr_t)base, len);

    if (rw == UIO_READ) {
        error = fo_read(sfp->f_fp, auio, ioflag, vctx);
    } else {
        error = fo_write(sfp->f_fp, auio, ioflag, vctx);
        sfp->f_writes = 1;
    }

    if (residp) {
        *residp = uio_resid(auio);
    } else {
        if (uio_resid(auio) && error == 0)
            error = EIO;
    }

    uio_free(auio);
    vfs_context_rele(vctx);

    return (error);
}
static int
nwrite_9p(node_9p *np, uio_t uio)
{
    openfid_9p *op;
    user_ssize_t resid;
    uint32_t l, sz;
    off_t off;
    char *p;
    int n, e;

    TRACE();
    op = &np->openfid[OWRITE];
    if (op->fid == NOFID)
        op = &np->openfid[ORDWR];
    if (op->fid == NOFID)
        return EBADF;

    sz = np->iounit;
    if (sz == 0)
        sz = np->nmp->msize-IOHDRSZ;

    p = malloc_9p(sz);
    if (p == NULL)
        return ENOMEM;

    e = 0;
    while (uio_resid(uio) > 0) {
        l = 0;
        off = uio_offset(uio);
        resid = uio_resid(uio);
        n = MIN(resid, sz);
        if ((e=uiomove(p, n, uio)))
            break;

        if ((e=write_9p(np->nmp, op->fid, p, n, off, &l)))
            break;

        uio_setoffset(uio, off+l);
        uio_setresid(uio, resid-l);
    }
    free_9p(p);
    return e;
}
// This function uses as many pmem_partial_read() calls as necessary,
// to copy uio->resid bytes of physical memory from the physical address, as
// specified in uio->offset to the buffer in the uio.
static kern_return_t
pmem_read_memory(struct uio *uio)
{
    size_t read_bytes = 0;

    while (uio_resid(uio) > 0) {
        uio_update(uio, 0);
        // Try to read as many times as necessary until the uio is full.
        read_bytes = pmem_partial_read(uio, uio_offset(uio),
            uio_offset(uio) + uio_curriovlen(uio));
        uio_update(uio, read_bytes);
    }
    return KERN_SUCCESS;
}
int
zfs_sa_readlink(znode_t *zp, uio_t *uio)
{
    dmu_buf_t *db = sa_get_db(zp->z_sa_hdl);
    size_t bufsz;
    int error;

    bufsz = zp->z_size;
    if (bufsz + ZFS_OLD_ZNODE_PHYS_SIZE <= db->db_size) {
        error = uiomove((caddr_t)db->db_data + ZFS_OLD_ZNODE_PHYS_SIZE,
            MIN((size_t)bufsz, uio_resid(uio)), UIO_READ, uio);
    } else {
        dmu_buf_t *dbp;
        if ((error = dmu_buf_hold(zp->z_zfsvfs->z_os, zp->z_id,
            0, FTAG, &dbp, DMU_READ_NO_PREFETCH)) == 0) {
            error = uiomove(dbp->db_data,
                MIN((size_t)bufsz, uio_resid(uio)), UIO_READ, uio);
            dmu_buf_rele(dbp, FTAG);
        }
    }
    return (error);
}
int
fticket_pull(struct fuse_ticket *ftick, struct uio *uio)
{
    int err = 0;

    debug_printf("ftick=%p, uio=%p\n", ftick, uio);

    if (ftick->tk_aw_ohead.error) {
        return 0;
    }

    err = fuse_body_audit(ftick, uio_resid(uio));
    if (!err) {
        err = fticket_aw_pull_uio(ftick, uio);
    }

    return err;
}
/*
    struct vnop_readdir_args {
        struct vnode *a_vp;
        struct uio *a_uio;
        struct ucred *a_cred;
        int *a_eofflag;
        int *ncookies;
        u_long **a_cookies;
    };
*/
static int
fuse_vnop_readdir(struct vop_readdir_args *ap)
{
    struct vnode *vp = ap->a_vp;
    struct uio *uio = ap->a_uio;
    struct ucred *cred = ap->a_cred;
    struct fuse_filehandle *fufh = NULL;
    struct fuse_vnode_data *fvdat;
    struct fuse_iov cookediov;

    int err = 0;
    int freefufh = 0;

    FS_DEBUG2G("inode=%ju\n", (uintmax_t)VTOI(vp));

    if (fuse_isdeadfs(vp)) {
        return ENXIO;
    }
    if ( /* XXXIP ((uio_iovcnt(uio) > 1)) || */
        (uio_resid(uio) < sizeof(struct dirent))) {
        return EINVAL;
    }
    fvdat = VTOFUD(vp);

    if (!fuse_filehandle_valid(vp, FUFH_RDONLY)) {
        FS_DEBUG("calling readdir() before open()");
        err = fuse_filehandle_open(vp, FUFH_RDONLY, &fufh, NULL, cred);
        freefufh = 1;
    } else {
        err = fuse_filehandle_get(vp, FUFH_RDONLY, &fufh);
    }
    if (err) {
        return (err);
    }
#define DIRCOOKEDSIZE FUSE_DIRENT_ALIGN(FUSE_NAME_OFFSET + MAXNAMLEN + 1)
    fiov_init(&cookediov, DIRCOOKEDSIZE);

    err = fuse_internal_readdir(vp, uio, fufh, &cookediov);

    fiov_teardown(&cookediov);
    if (freefufh) {
        fuse_filehandle_close(vp, FUFH_RDONLY, NULL, cred);
    }
    return err;
}
/*
 * Function: receive_packet
 * Purpose:
 *   Return a received packet or an error if none available.
 */
static int
receive_packet(struct socket * so, void * pp, int psize, int * actual_size)
{
    uio_t auio;
    int error;
    int rcvflg;
    char uio_buf[ UIO_SIZEOF(1) ];

    auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ,
        &uio_buf[0], sizeof(uio_buf));
    uio_addiov(auio, CAST_USER_ADDR_T(pp), psize);
    rcvflg = MSG_WAITALL;

    error = soreceive(so, (struct sockaddr **) 0, auio, 0, 0, &rcvflg);
    *actual_size = psize - uio_resid(auio);
    return (error);
}
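/*
 * The UIO_SIZEOF()/uio_createwithbuffer() pattern above builds the uio in a
 * stack buffer, so no uio_free() is needed.  Below is a minimal sketch (not
 * from the source above) applying the same pattern to a kernel-space
 * VNOP_READ; read_with_stack_uio and its parameters are hypothetical, and
 * the vnode and context are assumed to be supplied by the caller with an
 * iocount held.
 */
static int
read_with_stack_uio(vnode_t vp, void *buf, int len, off_t off, vfs_context_t ctx)
{
    char uio_buf[ UIO_SIZEOF(1) ];
    uio_t auio;
    int error;

    auio = uio_createwithbuffer(1, off, UIO_SYSSPACE, UIO_READ,
        &uio_buf[0], sizeof(uio_buf));
    uio_addiov(auio, CAST_USER_ADDR_T(buf), len);

    error = VNOP_READ(vp, auio, 0 /* ioflag */, ctx);
    if (error == 0 && uio_resid(auio) != 0)
        error = EIO;    /* short read */
    return (error);
}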
int
st_readwrite(dev_t dev, struct uio *uio, int ioflag)
{
    IOSCSITape *st = IOSCSITape::devices[minor(dev)];
    IOMemoryDescriptor *dataBuffer = IOMemoryDescriptorFromUIO(uio);
    int status = ENOSYS;
    IOReturn opStatus = kIOReturnError;
    int lastRealizedBytes = 0;

    if (dataBuffer == 0)
        return ENOMEM;

    dataBuffer->prepare();
    opStatus = st->ReadWrite(dataBuffer, &lastRealizedBytes);
    dataBuffer->complete();
    dataBuffer->release();

    if (opStatus == kIOReturnSuccess) {
        uio_setresid(uio, uio_resid(uio) - lastRealizedBytes);

        if (st->blkno != -1) {
            if (st->IsFixedBlockSize())
                st->blkno += (lastRealizedBytes / st->blksize);
            else
                st->blkno++;
        }

        status = KERN_SUCCESS;
    } else if (st->sense_flags & SENSE_FILEMARK) {
        if (st->fileno != -1) {
            st->fileno++;
            st->blkno = 0;
        }

        status = KERN_SUCCESS;
    }

    return status;
}
/*
 * retrieve the whole linkedit segment into target buffer from kernel binary at disk
 * we keep this buffer until we don't need to solve symbols anymore
 */
static kern_return_t
get_kernel_linkedit(vnode_t kernel_vnode, vfs_context_t ctxt, kernel_info *kinfo)
{
    int error = 0;

    uio_t uio = uio_create(1, kinfo->linkedit_fileoff, UIO_SYSSPACE, UIO_READ);
    if (uio == NULL) {
        return KERN_FAILURE;
    }
    error = uio_addiov(uio, CAST_USER_ADDR_T(kinfo->linkedit_buf), kinfo->linkedit_size);
    if (error)
        return error;

    error = VNOP_READ(kernel_vnode, uio, 0, ctxt);
    if (error) {
        return error;
    } else if (uio_resid(uio)) {
        return EINVAL;
    }

    return KERN_SUCCESS;
}
static int
nullfs_read(struct vnop_read_args * ap)
{
    int error = EIO;

    struct vnode *vp, *lvp;

    NULLFSDEBUG("%s %p\n", __FUNCTION__, ap->a_vp);

    if (nullfs_checkspecialvp(ap->a_vp)) {
        return ENOTSUP; /* the special vnodes can't be read */
    }

    vp = ap->a_vp;
    lvp = NULLVPTOLOWERVP(vp);

    /*
     * First some house keeping
     */
    if (vnode_getwithvid(lvp, NULLVPTOLOWERVID(vp)) == 0) {
        if (!vnode_isreg(lvp) && !vnode_islnk(lvp)) {
            error = EPERM;
            goto end;
        }

        if (uio_resid(ap->a_uio) == 0) {
            error = 0;
            goto end;
        }

        /*
         * Now ask VM/UBC/VFS to do our bidding
         */
        error = VNOP_READ(lvp, ap->a_uio, ap->a_ioflag, ap->a_context);
        if (error) {
            NULLFSDEBUG("VNOP_READ failed: %d\n", error);
        }
    end:
        vnode_put(lvp);
    }
    return error;
}