int tmpfs_access(struct vop_access_args *v) { struct vnode *vp = v->a_vp; accmode_t accmode = v->a_accmode; struct ucred *cred = v->a_cred; int error; struct tmpfs_node *node; MPASS(VOP_ISLOCKED(vp)); node = VP_TO_TMPFS_NODE(vp); switch (vp->v_type) { case VDIR: /* FALLTHROUGH */ case VLNK: /* FALLTHROUGH */ case VREG: if (accmode & VWRITE && vp->v_mount->mnt_flag & MNT_RDONLY) { error = EROFS; goto out; } break; case VBLK: /* FALLTHROUGH */ case VCHR: /* FALLTHROUGH */ case VSOCK: /* FALLTHROUGH */ case VFIFO: break; default: error = EINVAL; goto out; } if (accmode & VWRITE && node->tn_flags & IMMUTABLE) { error = EPERM; goto out; } error = vaccess(vp->v_type, node->tn_mode, node->tn_uid, node->tn_gid, accmode, cred, NULL); out: MPASS(VOP_ISLOCKED(vp)); return error; }
/*
 * VOP_ADVLOCK: advisory byte-range locking, delegated to the generic
 * lf_advlock() implementation anchored at this node's lock list.
 */
static int
tmpfs_advlock(struct vop_advlock_args *ap)
{
	struct tmpfs_node *node = VP_TO_TMPFS_NODE(ap->a_vp);

	return (lf_advlock(ap, &node->tn_advlock, node->tn_size));
}
/*
 * VOP_WRITE for regular tmpfs files.
 *
 * Grows the backing object first when the write extends the file, copies
 * the data with uiomove_object(), and on a copy failure rolls the file
 * back to its previous size so callers never observe a partial extend.
 */
static int
tmpfs_write(struct vop_write_args *v)
{
	struct vnode *vp;
	struct uio *uio;
	struct tmpfs_node *node;
	off_t oldsize;
	int error, ioflag;
	boolean_t extended;

	vp = v->a_vp;
	uio = v->a_uio;
	ioflag = v->a_ioflag;
	error = 0;
	node = VP_TO_TMPFS_NODE(vp);
	/* Remembered so a failed write can be rolled back below. */
	oldsize = node->tn_size;

	if (uio->uio_offset < 0 || vp->v_type != VREG)
		return (EINVAL);
	if (uio->uio_resid == 0)
		return (0);
	/* O_APPEND: every write starts at the current end of file. */
	if (ioflag & IO_APPEND)
		uio->uio_offset = node->tn_size;
	/* Refuse writes past the per-mount maximum file size. */
	if (uio->uio_offset + uio->uio_resid >
	    VFS_TO_TMPFS(vp->v_mount)->tm_maxfilesize)
		return (EFBIG);
	/* Enforce the process RLIMIT_FSIZE resource limit. */
	if (vn_rlimit_fsize(vp, uio, uio->uio_td))
		return (EFBIG);
	extended = uio->uio_offset + uio->uio_resid > node->tn_size;
	if (extended) {
		/* Grow the node before copying so the pages exist. */
		error = tmpfs_reg_resize(vp, uio->uio_offset + uio->uio_resid,
		    FALSE);
		if (error != 0)
			goto out;
	}

	error = uiomove_object(node->tn_reg.tn_aobj, node->tn_size, uio);
	node->tn_status |= TMPFS_NODE_ACCESSED | TMPFS_NODE_MODIFIED |
	    (extended ? TMPFS_NODE_CHANGED : 0);
	/* A successful write by an unprivileged caller clears setuid/setgid. */
	if (node->tn_mode & (S_ISUID | S_ISGID)) {
		if (priv_check_cred(v->a_cred, PRIV_VFS_RETAINSUGID, 0))
			node->tn_mode &= ~(S_ISUID | S_ISGID);
	}
	/* Copy failed: undo any extension so size is unchanged on error. */
	if (error != 0)
		(void)tmpfs_reg_resize(vp, oldsize, TRUE);

out:
	MPASS(IMPLIES(error == 0, uio->uio_resid == 0));
	MPASS(IMPLIES(error != 0, oldsize == node->tn_size));

	return (error);
}
/*
 * Destroys the association between the vnode vp and the node it
 * references.
 */
void
tmpfs_free_vp(struct vnode *vp)
{
	struct tmpfs_node *node;

	node = VP_TO_TMPFS_NODE(vp);

	TMPFS_NODE_LOCK(node);
	KKASSERT(lockinuse(TMPFS_NODE_MTX(node)));
	/*
	 * Sever both directions of the vnode<->node link while holding the
	 * node lock so concurrent lookups see a consistent state.
	 */
	node->tn_vnode = NULL;
	vp->v_data = NULL;
	TMPFS_NODE_UNLOCK(node);
}
/*
 * VOP_CLOSE for a tmpfs-backed special file: flush the node's pending
 * timestamp updates, then hand the call off to the specfs close routine.
 */
int
tmpfs_spec_close(void *v)
{
	struct vop_close_args /* {
		struct vnode *a_vp;
		int a_fflag;
		kauth_cred_t a_cred;
	} */ *ap = v;

	tmpfs_update(VP_TO_TMPFS_NODE(ap->a_vp), NULL);

	return (spec_close(ap));
}
/*
 * VOP_REMOVE: unlink a non-directory entry from directory dvp.
 *
 * The directory entry is detached and freed here; the node itself is only
 * released once its vnode is reclaimed.  Both vnodes arrive locked.
 */
static int
tmpfs_remove(struct vop_remove_args *v)
{
	struct vnode *dvp = v->a_dvp;
	struct vnode *vp = v->a_vp;
	int error;
	struct tmpfs_dirent *de;
	struct tmpfs_mount *tmp;
	struct tmpfs_node *dnode;
	struct tmpfs_node *node;

	MPASS(VOP_ISLOCKED(dvp));
	MPASS(VOP_ISLOCKED(vp));

	/* Directories are removed with rmdir, never with unlink. */
	if (vp->v_type == VDIR) {
		error = EISDIR;
		goto out;
	}

	dnode = VP_TO_TMPFS_DIR(dvp);
	node = VP_TO_TMPFS_NODE(vp);
	tmp = VFS_TO_TMPFS(vp->v_mount);
	/* The lookup cannot fail: the caller resolved this name already. */
	de = tmpfs_dir_lookup(dnode, node, v->a_cnp);
	MPASS(de != NULL);

	/* Files marked as immutable or append-only cannot be deleted. */
	if ((node->tn_flags & (IMMUTABLE | APPEND | NOUNLINK)) ||
	    (dnode->tn_flags & APPEND)) {
		error = EPERM;
		goto out;
	}

	/* Remove the entry from the directory; as it is a file, we do not
	 * have to change the number of hard links of the directory. */
	tmpfs_dir_detach(dvp, de);
	/* Union mounts may ask for a whiteout to mask the removed name. */
	if (v->a_cnp->cn_flags & DOWHITEOUT)
		tmpfs_dir_whiteout_add(dvp, v->a_cnp);

	/* Free the directory entry we just deleted.  Note that the node
	 * referred by it will not be removed until the vnode is really
	 * reclaimed. */
	tmpfs_free_dirent(tmp, de, TRUE);

	node->tn_status |= TMPFS_NODE_ACCESSED | TMPFS_NODE_CHANGED;
	error = 0;

out:
	return error;
}
/*
 * VOP_SETATTR: apply the attributes in *vap that are not VNOVAL.
 *
 * Each attribute class is handled by its own tmpfs_ch*() helper; the first
 * failure short-circuits the rest.  Accumulated kqueue note flags are
 * delivered at the end regardless of error.
 */
int
tmpfs_setattr(struct vop_setattr_args *v)
{
	struct vnode *vp = v->a_vp;
	struct vattr *vap = v->a_vap;
	struct ucred *cred = v->a_cred;
	struct tmpfs_node *node = VP_TO_TMPFS_NODE(vp);
	int error = 0;
	int kflags = 0;

	/* File flags (immutable, append-only, ...). */
	if (error == 0 && (vap->va_flags != VNOVAL)) {
		error = tmpfs_chflags(vp, vap->va_flags, cred);
		kflags |= NOTE_ATTRIB;
	}

	/* Size change (truncate or extend). */
	if (error == 0 && (vap->va_size != VNOVAL)) {
		if (vap->va_size > node->tn_size)
			kflags |= NOTE_WRITE | NOTE_EXTEND;
		else
			kflags |= NOTE_WRITE;
		error = tmpfs_chsize(vp, vap->va_size, cred);
	}

	/* Ownership; either uid or gid alone may be changed. */
	if (error == 0 && (vap->va_uid != (uid_t)VNOVAL ||
			   vap->va_gid != (gid_t)VNOVAL)) {
		error = tmpfs_chown(vp, vap->va_uid, vap->va_gid, cred);
		kflags |= NOTE_ATTRIB;
	}

	/* Permission bits. */
	if (error == 0 && (vap->va_mode != (mode_t)VNOVAL)) {
		error = tmpfs_chmod(vp, vap->va_mode, cred);
		kflags |= NOTE_ATTRIB;
	}

	/* Access/modification timestamps (utimes and friends). */
	if (error == 0 && ((vap->va_atime.tv_sec != VNOVAL &&
	    vap->va_atime.tv_nsec != VNOVAL) ||
	    (vap->va_mtime.tv_sec != VNOVAL &&
	    vap->va_mtime.tv_nsec != VNOVAL) )) {
		error = tmpfs_chtimes(vp, &vap->va_atime, &vap->va_mtime,
				      vap->va_vaflags, cred);
		kflags |= NOTE_ATTRIB;
	}

	/* Update the node times.  We give preference to the error codes
	 * generated by this function rather than the ones that may arise
	 * from tmpfs_update. */
	tmpfs_update(vp);
	tmpfs_knote(vp, kflags);

	return error;
}
/*
 * VOP_WRITE for a tmpfs-backed special file: record the modification in
 * the node's timestamps, then let specfs perform the actual write.
 */
int
tmpfs_spec_write(void *v)
{
	struct vop_write_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		kauth_cred_t a_cred;
	} */ *ap = v;

	tmpfs_update(VP_TO_TMPFS_NODE(ap->a_vp), TMPFS_NODE_MODIFIED);

	return (spec_write(ap));
}
/*
 * VOP_WRITE for a tmpfs-backed FIFO: flag the node as modified, then
 * forward the call to the generic fifofs write operation.
 */
int
tmpfs_fifo_write(void *v)
{
	struct vop_write_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		kauth_cred_t a_cred;
	} */ *ap = v;
	vnode_t *fifovp = ap->a_vp;

	VP_TO_TMPFS_NODE(fifovp)->tn_status |= TMPFS_NODE_MODIFIED;

	return VOCALL(fifo_vnodeop_p, VOFFSET(vop_write), v);
}
/*
 * VOP_INACTIVE: recycle the vnode immediately when the node has no links
 * left; otherwise just refresh the cached mtime state.
 */
static int
tmpfs_inactive(struct vop_inactive_args *v)
{
	struct vnode *vp = v->a_vp;
	struct tmpfs_node *node = VP_TO_TMPFS_NODE(vp);

	if (node->tn_links == 0)
		vrecycle(vp);
	else
		tmpfs_check_mtime(vp);

	return (0);
}
/*
 * VOP_READ for a tmpfs-backed special file: note the access on the tmpfs
 * node, then let specfs perform the actual read.
 */
int
tmpfs_spec_read(void *v)
{
	struct vop_read_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *specvp = ap->a_vp;

	VP_TO_TMPFS_NODE(specvp)->tn_status |= TMPFS_NODE_ACCESSED;

	return (spec_read(ap));
}
/*
 * VOP_WRITE for a tmpfs-backed FIFO: mark the node modified and forward
 * the request to fifo_write().
 */
int
tmpfs_fifo_write(void *v)
{
	struct vop_write_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *fifovp = ap->a_vp;

	VP_TO_TMPFS_NODE(fifovp)->tn_status |= TMPFS_NODE_MODIFIED;

	return (fifo_write(v));
}
/*
 * VOP_LINK: create a new name (hard link) for the node behind vp inside
 * the directory dvp.  Fails with EMLINK at the link-count ceiling and
 * EPERM for immutable/append-only nodes.
 */
static int
tmpfs_link(struct vop_link_args *v)
{
	struct vnode *dvp = v->a_tdvp;
	struct vnode *vp = v->a_vp;
	struct componentname *cnp = v->a_cnp;
	int error;
	struct tmpfs_dirent *de;
	struct tmpfs_node *node;

	MPASS(VOP_ISLOCKED(dvp));
	MPASS(cnp->cn_flags & HASBUF);
	MPASS(dvp != vp); /* XXX When can this be false? */

	node = VP_TO_TMPFS_NODE(vp);

	/* Ensure that we do not overflow the maximum number of links imposed
	 * by the system. */
	MPASS(node->tn_links <= LINK_MAX);
	if (node->tn_links == LINK_MAX) {
		error = EMLINK;
		goto out;
	}

	/* We cannot create links of files marked immutable or append-only. */
	if (node->tn_flags & (IMMUTABLE | APPEND)) {
		error = EPERM;
		goto out;
	}

	/* Allocate a new directory entry to represent the node. */
	error = tmpfs_alloc_dirent(VFS_TO_TMPFS(vp->v_mount), node,
	    cnp->cn_nameptr, cnp->cn_namelen, &de);
	if (error != 0)
		goto out;

	/* Insert the new directory entry into the appropriate directory. */
	/* A whiteout covering this name must be removed first. */
	if (cnp->cn_flags & ISWHITEOUT)
		tmpfs_dir_whiteout_remove(dvp, cnp);
	tmpfs_dir_attach(dvp, de);

	/* vp link count has changed, so update node times. */
	node->tn_status |= TMPFS_NODE_CHANGED;
	tmpfs_update(vp);

	error = 0;

out:
	return error;
}
/*
 * VOP_VPTOFH: build an NFS-style file handle (id + generation) for the
 * node behind the given vnode.
 */
static int
tmpfs_vptofh(struct vop_vptofh_args *ap)
{
	struct tmpfs_node *node = VP_TO_TMPFS_NODE(ap->a_vp);
	struct tmpfs_fid *fid = (struct tmpfs_fid *)ap->a_fhp;

	fid->tf_len = sizeof(struct tmpfs_fid);
	fid->tf_id = node->tn_id;
	/* The generation number distinguishes recycled node ids. */
	fid->tf_gen = node->tn_gen;

	return (0);
}
/*
 * Change size of the given vnode.
 * Caller should execute tmpfs_update on vp after a successful execution.
 * The vnode must be locked on entry and remain locked on exit.
 */
int
tmpfs_chsize(struct vnode *vp, u_quad_t size, struct ucred *cred)
{
	struct tmpfs_node *node;
	int error;

	KKASSERT(vn_islocked(vp));

	node = VP_TO_TMPFS_NODE(vp);

	/* Decide whether this is a valid operation based on the file type. */
	switch (vp->v_type) {
	case VDIR:
		return EISDIR;
	case VREG:
		if (vp->v_mount->mnt_flag & MNT_RDONLY)
			return EROFS;
		break;
	case VBLK:
	case VCHR:
	case VFIFO:
		/* Allow modifications of special files even if in the file
		 * system is mounted read-only (we are not modifying the
		 * files themselves, but the objects they represent). */
		return 0;
	default:
		/* Anything else is unsupported. */
		return EOPNOTSUPP;
	}

	/* Immutable or append-only files cannot be modified, either. */
	if (node->tn_flags & (IMMUTABLE | APPEND))
		return EPERM;

	error = tmpfs_truncate(vp, size);

	/* tmpfs_truncate will raise the NOTE_EXTEND and NOTE_ATTRIB kevents
	 * for us, as will update tn_status; no need to do that here. */
	KKASSERT(vn_islocked(vp));

	return error;
}
/*
 * VOP_CLOSE: push pending timestamp updates, unless the node was deleted
 * (zero links), in which case it is about to disappear anyway.
 */
static int
tmpfs_close(struct vop_close_args *v)
{
	struct vnode *vp = v->a_vp;
	struct tmpfs_node *node = VP_TO_TMPFS_NODE(vp);

	if (node->tn_links > 0) {
		/* Update node times.  No need to do it if the node has
		 * been deleted, because it will vanish after we return. */
		tmpfs_update(vp);
	}

	return vop_stdclose(v);
}
static int tmpfs_inactive(struct vop_inactive_args *v) { struct vnode *vp = v->a_vp; struct tmpfs_node *node; MPASS(VOP_ISLOCKED(vp)); node = VP_TO_TMPFS_NODE(vp); if (node->tn_links == 0) vrecycle(vp); return 0; }
/*
 * The strategy function is typically only called when memory pressure
 * forces the system to attempt to pageout pages.  It can also be called
 * by [n]vtruncbuf() when a truncation cuts a page in half.  Normal write
 * operations
 */
static int
tmpfs_strategy(struct vop_strategy_args *ap)
{
	struct bio *bio = ap->a_bio;
	struct bio *nbio;
	struct buf *bp = bio->bio_buf;
	struct vnode *vp = ap->a_vp;
	struct tmpfs_node *node;
	vm_object_t uobj;
	vm_page_t m;
	int i;

	/* Only regular files have backing anonymous objects to page to. */
	if (vp->v_type != VREG) {
		bp->b_resid = bp->b_bcount;
		bp->b_flags |= B_ERROR | B_INVAL;
		bp->b_error = EINVAL;
		biodone(bio);
		return(0);
	}

	lwkt_gettoken(&vp->v_mount->mnt_token);
	node = VP_TO_TMPFS_NODE(vp);

	uobj = node->tn_reg.tn_aobj;

	/*
	 * Don't bother flushing to swap if there is no swap, just
	 * ensure that the pages are marked as needing a commit (still).
	 */
	if (bp->b_cmd == BUF_CMD_WRITE && vm_swap_size == 0) {
		for (i = 0; i < bp->b_xio.xio_npages; ++i) {
			m = bp->b_xio.xio_pages[i];
			vm_page_need_commit(m);
		}
		/* Report the write as fully completed. */
		bp->b_resid = 0;
		bp->b_error = 0;
		biodone(bio);
	} else {
		/*
		 * Swap is available (or this is a read): push a new bio
		 * layer and hand the I/O to the swap pager; completion
		 * is signalled via tmpfs_strategy_done.
		 */
		nbio = push_bio(bio);
		nbio->bio_done = tmpfs_strategy_done;
		nbio->bio_offset = bio->bio_offset;
		swap_pager_strategy(uobj, nbio);
	}
	lwkt_reltoken(&vp->v_mount->mnt_token);
	return 0;
}
/*
 * VOP_READ for regular tmpfs files: validate the request, note the
 * access, and copy data straight out of the backing anonymous object.
 */
static int
tmpfs_read(struct vop_read_args *v)
{
	struct vnode *vp = v->a_vp;
	struct uio *uio = v->a_uio;
	struct tmpfs_node *node;

	if (vp->v_type != VREG)
		return (EISDIR);
	if (uio->uio_offset < 0)
		return (EINVAL);

	node = VP_TO_TMPFS_NODE(vp);
	node->tn_status |= TMPFS_NODE_ACCESSED;

	return (uiomove_object(node->tn_reg.tn_aobj, node->tn_size, uio));
}
int tmpfs_access(struct vop_access_args *v) { struct vnode *vp = v->a_vp; int error; struct tmpfs_node *node; node = VP_TO_TMPFS_NODE(vp); switch (vp->v_type) { case VDIR: /* FALLTHROUGH */ case VLNK: /* FALLTHROUGH */ case VREG: if ((v->a_mode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) { error = EROFS; goto out; } break; case VBLK: /* FALLTHROUGH */ case VCHR: /* FALLTHROUGH */ case VSOCK: /* FALLTHROUGH */ case VFIFO: break; default: error = EINVAL; goto out; } if ((v->a_mode & VWRITE) && (node->tn_flags & IMMUTABLE)) { error = EPERM; goto out; } error = vop_helper_access(v, node->tn_uid, node->tn_gid, node->tn_mode, 0); out: return error; }
/*
 * VOP_READ for regular tmpfs files (mapped-read variant).
 *
 * Copies data out of the node's backing anonymous object in chunks via
 * tmpfs_mappedread() until the uio is satisfied, EOF is reached, or no
 * forward progress is made.
 */
static int
tmpfs_read(struct vop_read_args *v)
{
	struct vnode *vp = v->a_vp;
	struct uio *uio = v->a_uio;
	struct tmpfs_node *node;
	vm_object_t uobj;
	size_t len;
	int resid;
	int error = 0;

	node = VP_TO_TMPFS_NODE(vp);

	if (vp->v_type != VREG) {
		error = EISDIR;
		goto out;
	}
	if (uio->uio_offset < 0) {
		error = EINVAL;
		goto out;
	}

	node->tn_status |= TMPFS_NODE_ACCESSED;

	uobj = node->tn_reg.tn_aobj;
	while ((resid = uio->uio_resid) > 0) {
		error = 0;
		/* At or past EOF: nothing more to copy. */
		if (node->tn_size <= uio->uio_offset)
			break;
		/* Clamp the chunk to the remaining file contents. */
		len = MIN(node->tn_size - uio->uio_offset, resid);
		if (len == 0)
			break;
		error = tmpfs_mappedread(vp->v_object, uobj, len, uio);
		/*
		 * Stop on error or when uio_resid did not shrink (no
		 * progress), to avoid spinning forever.
		 */
		if ((error != 0) || (resid == uio->uio_resid))
			break;
	}

out:
	return error;
}
/* * fsync is usually a NOP, but we must take action when unmounting or * when recycling. */ static int tmpfs_fsync(struct vop_fsync_args *v) { struct tmpfs_node *node; struct vnode *vp = v->a_vp; node = VP_TO_TMPFS_NODE(vp); tmpfs_update(vp); if (vp->v_type == VREG) { if (vp->v_flag & VRECLAIMED) { if (node->tn_links == 0) tmpfs_truncate(vp, 0); else vfsync(v->a_vp, v->a_waitfor, 1, NULL, NULL); } } return 0; }
/*
 * VOP_INACTIVE (DragonFly): free a deleted node's data as soon as the
 * last reference goes away, rather than waiting for reclamation.
 */
static int
tmpfs_inactive(struct vop_inactive_args *v)
{
	struct vnode *vp = v->a_vp;
	struct tmpfs_node *node;
	struct mount *mp;

	mp = vp->v_mount;
	lwkt_gettoken(&mp->mnt_token);
	node = VP_TO_TMPFS_NODE(vp);

	/*
	 * Degenerate case
	 */
	if (node == NULL) {
		/* vnode already dissociated from any node; just recycle. */
		vrecycle(vp);
		lwkt_reltoken(&mp->mnt_token);
		return(0);
	}

	/*
	 * Get rid of unreferenced deleted vnodes sooner rather than
	 * later so the data memory can be recovered immediately.
	 *
	 * We must truncate the vnode to prevent the normal reclamation
	 * path from flushing the data for the removed file to disk.
	 */
	TMPFS_NODE_LOCK(node);
	if ((node->tn_vpstate & TMPFS_VNODE_ALLOCATING) == 0 &&
	    node->tn_links == 0) {
		/* Mark the node doomed before dropping the lock. */
		node->tn_vpstate = TMPFS_VNODE_DOOMED;
		TMPFS_NODE_UNLOCK(node);
		if (node->tn_type == VREG)
			tmpfs_truncate(vp, 0);
		vrecycle(vp);
	} else {
		TMPFS_NODE_UNLOCK(node);
	}
	lwkt_reltoken(&mp->mnt_token);

	return 0;
}
static int tmpfs_readlink(struct vop_readlink_args *v) { struct vnode *vp = v->a_vp; struct uio *uio = v->a_uio; int error; struct tmpfs_node *node; MPASS(uio->uio_offset == 0); MPASS(vp->v_type == VLNK); node = VP_TO_TMPFS_NODE(vp); error = uiomove(node->tn_link, MIN(node->tn_size, uio->uio_resid), uio); node->tn_status |= TMPFS_NODE_ACCESSED; return error; }
/*
 * VOP_KQFILTER for a tmpfs-backed FIFO: record the corresponding node
 * status bit for the filter being attached, then forward to fifofs.
 */
static int
tmpfs_fifo_kqfilter(struct vop_kqfilter_args *ap)
{
	struct tmpfs_node *node = VP_TO_TMPFS_NODE(ap->a_vp);
	short filter = ap->a_kn->kn_filter;

	if (filter == EVFILT_READ)
		node->tn_status |= TMPFS_NODE_ACCESSED;
	else if (filter == EVFILT_WRITE)
		node->tn_status |= TMPFS_NODE_MODIFIED;

	return fifo_specops.vop_kqfilter(ap);
}
/*
 * Change ownership of the given vnode.  At least one of uid or gid must
 * be different than VNOVAL.  If one is set to that value, the attribute
 * is unchanged.
 * Caller should execute tmpfs_update on vp after a successful execution.
 * The vnode must be locked on entry and remain locked on exit.
 */
int
tmpfs_chown(struct vnode *vp, uid_t uid, gid_t gid, struct ucred *cred)
{
	mode_t cur_mode;
	uid_t cur_uid;
	gid_t cur_gid;
	struct tmpfs_node *node;
	int error;

	KKASSERT(vn_islocked(vp));
	node = VP_TO_TMPFS_NODE(vp);

	/* Disallow this operation if the file system is mounted read-only. */
	if (vp->v_mount->mnt_flag & MNT_RDONLY)
		return EROFS;

	/* Immutable or append-only files cannot be modified, either. */
	if (node->tn_flags & (IMMUTABLE | APPEND))
		return EPERM;

	/*
	 * The helper performs the permission checks and may also clear
	 * setuid/setgid bits, so mode is passed in and out as well.
	 */
	cur_uid = node->tn_uid;
	cur_gid = node->tn_gid;
	cur_mode = node->tn_mode;
	error = vop_helper_chown(vp, uid, gid, cred,
				 &cur_uid, &cur_gid, &cur_mode);

	if (error == 0) {
		TMPFS_NODE_LOCK(node);
		/* Commit only when something actually changed. */
		if (cur_uid != node->tn_uid ||
		    cur_gid != node->tn_gid ||
		    cur_mode != node->tn_mode) {
			node->tn_uid = cur_uid;
			node->tn_gid = cur_gid;
			node->tn_mode = cur_mode;
			node->tn_status |= TMPFS_NODE_CHANGED;
		}
		TMPFS_NODE_UNLOCK(node);
	}

	return error;
}
int tmpfs_getattr(struct vop_getattr_args *v) { struct vnode *vp = v->a_vp; struct vattr *vap = v->a_vap; struct tmpfs_node *node; node = VP_TO_TMPFS_NODE(vp); lwkt_gettoken(&vp->v_mount->mnt_token); tmpfs_update(vp); vap->va_type = vp->v_type; vap->va_mode = node->tn_mode; vap->va_nlink = node->tn_links; vap->va_uid = node->tn_uid; vap->va_gid = node->tn_gid; vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0]; vap->va_fileid = node->tn_id; vap->va_size = node->tn_size; vap->va_blocksize = PAGE_SIZE; vap->va_atime.tv_sec = node->tn_atime; vap->va_atime.tv_nsec = node->tn_atimensec; vap->va_mtime.tv_sec = node->tn_mtime; vap->va_mtime.tv_nsec = node->tn_mtimensec; vap->va_ctime.tv_sec = node->tn_ctime; vap->va_ctime.tv_nsec = node->tn_ctimensec; vap->va_gen = node->tn_gen; vap->va_flags = node->tn_flags; if (vp->v_type == VBLK || vp->v_type == VCHR) { vap->va_rmajor = umajor(node->tn_rdev); vap->va_rminor = uminor(node->tn_rdev); } vap->va_bytes = round_page(node->tn_size); vap->va_filerev = 0; lwkt_reltoken(&vp->v_mount->mnt_token); return 0; }
static int tmpfs_print(struct vop_print_args *v) { struct vnode *vp = v->a_vp; struct tmpfs_node *node; node = VP_TO_TMPFS_NODE(vp); kprintf("tag VT_TMPFS, tmpfs_node %p, flags 0x%x, links %d\n", node, node->tn_flags, node->tn_links); kprintf("\tmode 0%o, owner %d, group %d, size %ju, status 0x%x\n", node->tn_mode, node->tn_uid, node->tn_gid, (uintmax_t)node->tn_size, node->tn_status); if (vp->v_type == VFIFO) fifo_printinfo(vp); kprintf("\n"); return 0; }
int tmpfs_getattr(struct vop_getattr_args *v) { struct vnode *vp = v->a_vp; struct vattr *vap = v->a_vap; vm_object_t obj; struct tmpfs_node *node; node = VP_TO_TMPFS_NODE(vp); tmpfs_update(vp); vap->va_type = vp->v_type; vap->va_mode = node->tn_mode; vap->va_nlink = node->tn_links; vap->va_uid = node->tn_uid; vap->va_gid = node->tn_gid; vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0]; vap->va_fileid = node->tn_id; vap->va_size = node->tn_size; vap->va_blocksize = PAGE_SIZE; vap->va_atime = node->tn_atime; vap->va_mtime = node->tn_mtime; vap->va_ctime = node->tn_ctime; vap->va_birthtime = node->tn_birthtime; vap->va_gen = node->tn_gen; vap->va_flags = node->tn_flags; vap->va_rdev = (vp->v_type == VBLK || vp->v_type == VCHR) ? node->tn_rdev : NODEV; if (vp->v_type == VREG) { obj = node->tn_reg.tn_aobj; vap->va_bytes = (u_quad_t)obj->resident_page_count * PAGE_SIZE; } else vap->va_bytes = node->tn_size; vap->va_filerev = 0; return 0; }
/*
 * VOP_RECLAIM: dissociate the vnode from its tmpfs node and, when the
 * node was deleted by the user, free the node itself.
 */
int
tmpfs_reclaim(struct vop_reclaim_args *v)
{
	struct vnode *vp = v->a_vp;
	struct tmpfs_mount *tmp;
	struct tmpfs_node *node;
	struct mount *mp;

	mp = vp->v_mount;
	lwkt_gettoken(&mp->mnt_token);
	node = VP_TO_TMPFS_NODE(vp);
	tmp = VFS_TO_TMPFS(vp->v_mount);
	KKASSERT(mp == tmp->tm_mount);

	/* Break the vnode<->node association first. */
	tmpfs_free_vp(vp);

	/*
	 * If the node referenced by this vnode was deleted by the
	 * user, we must free its associated data structures now that
	 * the vnode is being reclaimed.
	 *
	 * Directories have an extra link ref.
	 */
	TMPFS_NODE_LOCK(node);
	if ((node->tn_vpstate & TMPFS_VNODE_ALLOCATING) == 0 &&
	    node->tn_links == 0) {
		node->tn_vpstate = TMPFS_VNODE_DOOMED;
		/* NOTE: tmpfs_free_node consumes the node lock. */
		tmpfs_free_node(tmp, node);
		/* eats the lock */
	} else {
		TMPFS_NODE_UNLOCK(node);
	}
	lwkt_reltoken(&mp->mnt_token);

	KKASSERT(vp->v_data == NULL);
	return 0;
}