/*
 * Read the named xattr of @ip into @value (up to @size bytes).
 * Takes the znode's xattr lock as reader and delegates the actual
 * lookup to __zpl_xattr_get() under a held copy of the caller's
 * credentials.  Returns the helper's (negative errno) result.
 */
static int
zpl_xattr_get(struct inode *ip, const char *name, void *value, size_t size)
{
	znode_t *zp = ITOZ(ip);
	cred_t *cred = CRED();
	int rc;

	crhold(cred);
	rw_enter(&zp->z_xattr_lock, RW_READER);
	rc = __zpl_xattr_get(ip, name, value, size, cred);
	rw_exit(&zp->z_xattr_lock);
	crfree(cred);

	return (rc);
}
/*
 * Shared backend for the iter/aio write entry points: forwards the
 * iovec described by @iovp/@nr_segs/@count to zpl_write_common_iovec()
 * against the file's host inode, holding the caller's credentials for
 * the duration.  Returns bytes written or a negative errno.
 */
static ssize_t
zpl_iter_write_common(struct kiocb *kiocb, const struct iovec *iovp,
    unsigned long nr_segs, size_t count, uio_seg_t seg, size_t skip)
{
	struct file *filp = kiocb->ki_filp;
	cred_t *cred = CRED();
	ssize_t nwritten;

	crhold(cred);
	nwritten = zpl_write_common_iovec(filp->f_mapping->host, iovp, count,
	    nr_segs, &kiocb->ki_pos, seg, filp->f_flags, cred, skip);
	crfree(cred);

	return (nwritten);
}
/*
 * Rename a snapshot via the .zfs/snapshot control directory.
 * Delegates to zfsctl_snapdir_rename() with the caller's credentials;
 * the helper's positive errno is negated into Linux convention.
 */
int
zpl_snapdir_rename(struct inode *sdip, struct dentry *sdentry,
    struct inode *tdip, struct dentry *tdentry)
{
	cred_t *cred = CRED();
	int rc;

	crhold(cred);
	rc = -zfsctl_snapdir_rename(sdip, dname(sdentry),
	    tdip, dname(tdentry), cred, 0);
	ASSERT3S(rc, <=, 0);
	crfree(cred);

	return (rc);
}
/*
 * Get the "ucred" of a process.
 *
 * Snapshot p->p_cred under p_crlock, take a hold so it cannot be freed
 * once the lock is dropped, convert it with cred2ucred(), then release
 * the hold.  Caller owns the returned ucred_s.
 */
struct ucred_s *
pgetucred(proc_t *p)
{
	struct ucred_s *uc;
	cred_t *pcred;

	mutex_enter(&p->p_crlock);
	pcred = p->p_cred;
	crhold(pcred);
	mutex_exit(&p->p_crlock);

	uc = cred2ucred(pcred, p->p_pid, NULL, CRED());
	crfree(pcred);

	return (uc);
}
/*
 * When relabeling a process, consult the MAC policies for the maximum
 * permission allowed over each object type in its memory space, and
 * revoke access (in the least surprising ways we know) where required.
 * The process lock is not held on entry; it is taken only long enough
 * to grab a stable reference on the process ucred.
 */
void
mac_proc_vm_revoke(struct thread *td)
{
	struct ucred *ucred;

	PROC_LOCK(td->td_proc);
	ucred = crhold(td->td_proc->p_ucred);
	PROC_UNLOCK(td->td_proc);

	/* XXX freeze all other threads */
	mac_proc_vm_revoke_recurse(td, ucred,
	    &td->td_proc->p_vmspace->vm_map);
	/* XXX allow other threads to continue */

	crfree(ucred);
}
/*
 * fops->release hook: flush a pending dirty atime before handing the
 * final close off to zfs_close() under the caller's credentials.
 */
static int
zpl_release(struct inode *ip, struct file *filp)
{
	cred_t *cred = CRED();
	int rc;

	/* Persist a deferred atime update before the inode is released. */
	if (ITOZ(ip)->z_atime_dirty)
		zfs_mark_inode_dirty(ip);

	crhold(cred);
	rc = -zfs_close(ip, filp->f_flags, cred);
	crfree(cred);
	ASSERT3S(rc, <=, 0);

	return (rc);
}
/*
 * super_operations->sync_fs hook: sync the filesystem via zfs_sync(),
 * marked as an fstrans section to avoid memory-reclaim re-entry.
 */
static int
zpl_sync_fs(struct super_block *sb, int wait)
{
	fstrans_cookie_t fstrans;
	cred_t *cred = CRED();
	int rc;

	crhold(cred);
	fstrans = spl_fstrans_mark();
	rc = -zfs_sync(sb, wait, cred);
	spl_fstrans_unmark(fstrans);
	crfree(cred);
	ASSERT3S(rc, <=, 0);

	return (rc);
}
static int zpl_rmdir(struct inode * dir, struct dentry *dentry) { cred_t *cr = CRED(); int error; fstrans_cookie_t cookie; crhold(cr); cookie = spl_fstrans_mark(); error = -zfs_rmdir(dir, dname(dentry), NULL, cr, 0); spl_fstrans_unmark(cookie); crfree(cr); ASSERT3S(error, <=, 0); return (error); }
/*
 * export_operations->commit_metadata hook (used by knfsd): force the
 * inode's metadata stable by issuing a non-datasync zfs_fsync().
 */
static int
zpl_commit_metadata(struct inode *inode)
{
	cred_t *cred = CRED();
	fstrans_cookie_t fstrans;
	int rc;

	crhold(cred);
	fstrans = spl_fstrans_mark();
	rc = -zfs_fsync(inode, 0, cred);
	spl_fstrans_unmark(fstrans);
	crfree(cred);
	ASSERT3S(rc, <=, 0);

	return (rc);
}
/* * Linux 2.6.x - 2.6.34 API, * Through 2.6.34 the nfsd kernel server would pass a NULL 'file struct *' * to the fops->fsync() hook. For this reason, we must be careful not to * use filp unconditionally. */ static int zpl_fsync(struct file *filp, struct dentry *dentry, int datasync) { cred_t *cr = CRED(); int error; fstrans_cookie_t cookie; crhold(cr); cookie = spl_fstrans_mark(); error = -zfs_fsync(dentry->d_inode, datasync, cr); spl_fstrans_unmark(cookie); crfree(cr); ASSERT3S(error, <=, 0); return (error); }
/*
 * fops->iterate hook: emit directory entries into @ctx by way of
 * zfs_readdir(), resolving the inode with file_inode().
 */
static int
zpl_iterate(struct file *filp, struct dir_context *ctx)
{
	cred_t *cred = CRED();
	fstrans_cookie_t fstrans;
	int rc;

	crhold(cred);
	fstrans = spl_fstrans_mark();
	rc = -zfs_readdir(file_inode(filp), ctx, cred);
	spl_fstrans_unmark(fstrans);
	crfree(cred);
	ASSERT3S(rc, <=, 0);

	return (rc);
}
/*
 * NFS export hook: translate an opaque fid/inode-number cookie back
 * into a dentry.  get_dentry_from_fid() runs under the AFS global
 * lock with a referenced copy of the current credentials.
 */
static struct dentry *
afs_export_get_dentry(struct super_block *sb, void *inump)
{
	cred_t *credp = crref();
	struct dentry *dentry;

	AFS_GLOCK();
	dentry = get_dentry_from_fid(credp, inump);
	AFS_GUNLOCK();

	crfree(credp);
	return dentry;
}
/*
 * Set (or create) the named xattr on @ip, honoring the optional
 * XATTR_CREATE / XATTR_REPLACE flags.  The xattr is preferentially
 * stored as a system attribute (SA) when the pool and znode support
 * it, falling back to the hidden xattr directory otherwise.
 *
 * Returns 0 on success or a negative errno.
 *
 * Fixes relative to the previous version:
 *  - use the already-cached znode pointer for the lock rather than
 *    re-deriving it with ITOZ(ip) (consistency with zpl_xattr_get);
 *  - drop the redundant (error == -ENODATA) re-test: after the first
 *    check, a negative error can only be -ENODATA.
 */
static int
zpl_xattr_set(struct inode *ip, const char *name, const void *value,
    size_t size, int flags)
{
	znode_t *zp = ITOZ(ip);
	zfs_sb_t *zsb = ZTOZSB(zp);
	cred_t *cr = CRED();
	int error;

	crhold(cr);
	rw_enter(&zp->z_xattr_lock, RW_WRITER);

	/*
	 * Before setting the xattr check to see if it already exists.
	 * This is done to ensure the following optional flags are honored.
	 *
	 *   XATTR_CREATE:  fail if xattr already exists
	 *   XATTR_REPLACE: fail if xattr does not exist
	 */
	error = __zpl_xattr_get(ip, name, NULL, 0, cr);
	if (error < 0) {
		/* Any failure other than "not present" is fatal. */
		if (error != -ENODATA)
			goto out;
		/* Xattr missing: XATTR_REPLACE demands it exist. */
		if (flags & XATTR_REPLACE)
			goto out;
	} else {
		/* Xattr present: XATTR_CREATE demands it not exist. */
		error = -EEXIST;
		if (flags & XATTR_CREATE)
			goto out;
	}

	/* Preferentially store the xattr as a SA for better performance */
	if (zsb->z_use_sa && zsb->z_xattr_sa && zp->z_is_sa) {
		error = zpl_xattr_set_sa(ip, name, value, size, flags, cr);
		if (error == 0)
			goto out;
	}

	error = zpl_xattr_set_dir(ip, name, value, size, flags, cr);
out:
	rw_exit(&zp->z_xattr_lock);
	crfree(cr);
	ASSERT3S(error, <=, 0);

	return (error);
}
/* afs_root - stat the root of the file system. AFS global held on entry. */
static int
afs_root(struct super_block *afsp)
{
	afs_int32 code = 0;
	struct vrequest treq;
	struct vcache *tvp = 0;

	AFS_STATCNT(afs_root);
	if (afs_globalVp && (afs_globalVp->f.states & CStatd)) {
		/* Cached root vcache is still valid (CStatd set) -- reuse it. */
		tvp = afs_globalVp;
	} else {
		cred_t *credp = crref();

		/* Drop any stale cached root before fetching a fresh one. */
		if (afs_globalVp) {
			afs_PutVCache(afs_globalVp);
			afs_globalVp = NULL;
		}

		if (!(code = afs_InitReq(&treq, credp)) && !(code = afs_CheckInit())) {
			tvp = afs_GetVCache(&afs_rootFid, &treq, NULL, NULL);
			if (tvp) {
				struct inode *ip = AFSTOV(tvp);
				struct vattr vattr;

				/* Populate the Linux inode from the AFS attributes. */
				afs_getattr(tvp, &vattr, credp);
				afs_fill_inode(ip, &vattr);

				/* setup super_block and mount point inode. */
				afs_globalVp = tvp;
#if defined(HAVE_LINUX_D_MAKE_ROOT)
				afsp->s_root = d_make_root(ip);
#else
				afsp->s_root = d_alloc_root(ip);
#endif
#if !defined(STRUCT_SUPER_BLOCK_HAS_S_D_OP)
				afsp->s_root->d_op = &afs_dentry_operations;
#endif
			} else
				code = ENOENT;
		}
		crfree(credp);
	}

	afs_Trace2(afs_iclSetp, CM_TRACE_VFSROOT, ICL_TYPE_POINTER, afs_globalVp,
		   ICL_TYPE_INT32, code);
	/* NOTE(review): returns a positive errno (e.g. ENOENT), not negative. */
	return code;
}
/*
 * export_operations->get_parent hook: look up ".." relative to @child
 * and return (or allocate) a dentry aliasing the parent inode.
 * Returns an ERR_PTR on lookup failure.
 */
static struct dentry *
zpl_get_parent(struct dentry *child)
{
	cred_t *cred = CRED();
	struct inode *pip;
	int rc;

	crhold(cred);
	rc = -zfs_lookup(child->d_inode, "..", &pip, 0, cred, NULL, NULL);
	crfree(cred);
	ASSERT3S(rc, <=, 0);

	if (rc)
		return ERR_PTR(rc);

	return zpl_dentry_obtain_alias(pip);
}
static int zpl_rename(struct inode *sdip, struct dentry *sdentry, struct inode *tdip, struct dentry *tdentry) { cred_t *cr = CRED(); int error; fstrans_cookie_t cookie; crhold(cr); cookie = spl_fstrans_mark(); error = -zfs_rename(sdip, dname(sdentry), tdip, dname(tdentry), cr, 0); spl_fstrans_unmark(cookie); crfree(cr); ASSERT3S(error, <=, 0); return (error); }
/*
 * Resolve a symlink's target.  The signature is version-dependent: the
 * alternate (nameidata-based) prototype sits behind an #if that begins
 * above this chunk -- only the cookie-based variant is visible here,
 * hence the bare #endif below.  NOTE(review): 'nd' used in the
 * HAVE_FOLLOW_LINK_NAMEIDATA branch is presumably the parameter of the
 * hidden prototype -- confirm against the full file.
 */
const char *
zpl_follow_link(struct dentry *dentry, void **symlink_cookie)
#endif
{
	cred_t *cr = CRED();
	struct inode *ip = dentry->d_inode;
	struct iovec iov;
	uio_t uio;
	char *link;
	int error;
	fstrans_cookie_t cookie;

	crhold(cr);

	/* Target buffer: zeroed so the result is always NUL-terminated. */
	iov.iov_len = MAXPATHLEN;
	iov.iov_base = link = kmem_zalloc(MAXPATHLEN, KM_SLEEP);

	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_skip = 0;
	/* Read at most MAXPATHLEN-1 bytes, preserving the trailing NUL. */
	uio.uio_resid = (MAXPATHLEN - 1);
	uio.uio_segflg = UIO_SYSSPACE;

	cookie = spl_fstrans_mark();
	error = -zfs_readlink(ip, &uio, cr);
	spl_fstrans_unmark(cookie);

	/* On failure the buffer is never handed to the caller; free it. */
	if (error)
		kmem_free(link, MAXPATHLEN);

	crfree(cr);

#ifdef HAVE_FOLLOW_LINK_NAMEIDATA
	/* Older API: report result through nd_set_link(). */
	if (error)
		nd_set_link(nd, ERR_PTR(error));
	else
		nd_set_link(nd, link);

	return (NULL);
#else
	/* Newer API: hand the buffer back as the cookie; VFS frees later. */
	if (error)
		return (ERR_PTR(error));
	else
		return (*symlink_cookie = link);
#endif
}
static int zpl_iterate(struct file *filp, struct dir_context *ctx) { struct dentry *dentry = filp->f_path.dentry; cred_t *cr = CRED(); int error; fstrans_cookie_t cookie; crhold(cr); cookie = spl_fstrans_mark(); error = -zfs_readdir(dentry->d_inode, ctx, cr); spl_fstrans_unmark(cookie); crfree(cr); ASSERT3S(error, <=, 0); return (error); }
/* * Linux 2.6.35 - 3.0 API, * As of 2.6.35 the dentry argument to the fops->fsync() hook was deemed * redundant. The dentry is still accessible via filp->f_path.dentry, * and we are guaranteed that filp will never be NULL. */ static int zpl_fsync(struct file *filp, int datasync) { struct inode *inode = filp->f_mapping->host; cred_t *cr = CRED(); int error; fstrans_cookie_t cookie; crhold(cr); cookie = spl_fstrans_mark(); error = -zfs_fsync(inode, datasync, cr); spl_fstrans_unmark(cookie); crfree(cr); ASSERT3S(error, <=, 0); return (error); }
/*
 * Passively intercepts the thread switch function to raise the thread
 * from a user priority to a kernel priority, reducing syscall and trap
 * overhead for the case where no switch occurs.
 *
 * Also synchronizes td_ucred with p_ucred, giving syscalls, signal
 * handling, faults, and AST traps a stable read-only snapshot of the
 * process ucred on kernel entry.  The hold on the new cred is taken
 * before the old one is released.
 */
static __inline void
userenter(struct thread *curtd, struct proc *curp)
{
	struct ucred *oldcred;
	struct ucred *newcred;

	curtd->td_release = lwkt_passive_release;

	if (curtd->td_ucred != curp->p_ucred) {
		newcred = crhold(curp->p_ucred);
		oldcred = curtd->td_ucred;
		curtd->td_ucred = newcred;
		if (oldcred)
			crfree(oldcred);
	}
}
/*
 * Final teardown of a socket: return any reserved socket-buffer space
 * to the owning uid's resource accounting, detach an accept filter if
 * one is attached, drop the socket's credential reference, and free
 * the socket structure itself.
 */
static void
sodealloc(struct socket *so)
{
	/* Release reserved receive/send buffer space back to the uidinfo. */
	if (so->so_rcv.ssb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo, &so->so_rcv.ssb_hiwat,
		    0, RLIM_INFINITY);
	if (so->so_snd.ssb_hiwat)
		(void)chgsbsize(so->so_cred->cr_uidinfo, &so->so_snd.ssb_hiwat,
		    0, RLIM_INFINITY);
#ifdef INET
	/* remove accept filter if present */
	if (so->so_accf != NULL)
		do_setopt_accept_filter(so, NULL);
#endif /* INET */
	crfree(so->so_cred);
	kfree(so, M_SOCKET);
}
/*
 * fops->open hook: run the generic VFS open checks first, then let
 * zfs_open() validate mode/flags against the dataset.
 */
static int
zpl_open(struct inode *ip, struct file *filp)
{
	cred_t *cred = CRED();
	int rc;

	rc = generic_file_open(ip, filp);
	if (rc)
		return (rc);

	crhold(cred);
	rc = -zfs_open(ip, filp->f_mode, filp->f_flags, cred);
	crfree(cred);
	ASSERT3S(rc, <=, 0);

	return (rc);
}
/*
 * fops->read hook: read @len bytes at *@ppos into the user buffer
 * via zpl_read_common(), advancing *@ppos by the amount transferred.
 * Returns bytes read or a negative errno.
 */
static ssize_t
zpl_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
{
	cred_t *cred = CRED();
	ssize_t nread;

	crhold(cred);
	nread = zpl_read_common(filp->f_mapping->host, buf, len, *ppos,
	    UIO_USERSPACE, filp->f_flags, cred);
	crfree(cred);

	if (nread < 0)
		return (nread);

	*ppos += nread;
	return (nread);
}
/*
 * fops->write hook: write @len bytes from the user buffer at *@ppos
 * via zpl_write_common(), advancing *@ppos by the amount transferred.
 * Returns bytes written or a negative errno.
 */
static ssize_t
zpl_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos)
{
	cred_t *cred = CRED();
	ssize_t nwritten;

	crhold(cred);
	nwritten = zpl_write_common(filp->f_mapping->host, buf, len, *ppos,
	    UIO_USERSPACE, filp->f_flags, cred);
	crfree(cred);

	if (nwritten < 0)
		return (nwritten);

	*ppos += nwritten;
	return (nwritten);
}
/*
 * Common fallocate implementation: only hole punching with size
 * preservation (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE) is
 * supported; everything else returns -EOPNOTSUPP.
 *
 * Fixes relative to the previous version: the old code called
 * crhold(cr) before validating @offset/@len and before the
 * "offset beyond EOF" early return, leaking a credential reference
 * on both paths.  Validation now happens before the hold is taken,
 * and the early-out path releases the hold.
 */
long
zpl_fallocate_common(struct inode *ip, int mode, loff_t offset, loff_t len)
{
	int error = -EOPNOTSUPP;

#if defined(FALLOC_FL_PUNCH_HOLE) && defined(FALLOC_FL_KEEP_SIZE)
	cred_t *cr = CRED();
	flock64_t bf;
	loff_t olen;
	fstrans_cookie_t cookie;

	if (mode != (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return (error);

	/* Validate arguments before taking any references. */
	if (offset < 0 || len <= 0)
		return (-EINVAL);

	crhold(cr);

	spl_inode_lock(ip);
	olen = i_size_read(ip);

	/* Punching entirely beyond EOF is a no-op. */
	if (offset > olen) {
		spl_inode_unlock(ip);
		crfree(cr);
		return (0);
	}
	/* Clamp the range to the current file size. */
	if (offset + len > olen)
		len = olen - offset;

	bf.l_type = F_WRLCK;
	bf.l_whence = 0;
	bf.l_start = offset;
	bf.l_len = len;
	bf.l_pid = 0;

	cookie = spl_fstrans_mark();
	error = -zfs_space(ip, F_FREESP, &bf, FWRITE, offset, cr);
	spl_fstrans_unmark(cookie);
	spl_inode_unlock(ip);

	crfree(cr);
#endif /* defined(FALLOC_FL_PUNCH_HOLE) && defined(FALLOC_FL_KEEP_SIZE) */

	ASSERT3S(error, <=, 0);
	return (error);
}
/*
 * Tear down a reconnecting RPC client: destroy the wrapped client if
 * one exists, unregister and release any backchannel transport, drop
 * the cached credential, and free the private state and CLIENT shell.
 */
static void
clnt_reconnect_destroy(CLIENT *cl)
{
	struct rc_data *rcd = (struct rc_data *)cl->cl_private;
	SVCXPRT *bxprt;

	if (rcd->rc_client)
		CLNT_DESTROY(rcd->rc_client);

	if (rcd->rc_backchannel) {
		bxprt = (SVCXPRT *)rcd->rc_backchannel;
		xprt_unregister(bxprt);
		SVC_RELEASE(bxprt);
	}

	crfree(rcd->rc_ucred);
	mtx_destroy(&rcd->rc_lock);
	mem_free(rcd, sizeof(*rcd));
	mem_free(cl, sizeof (CLIENT));
}
/*
 * Linux 3.1 - 3.x API.
 * As of 3.1 the responsibility to call filemap_write_and_wait_range()
 * was pushed down into the .fsync() hook itself, and i_mutex is no
 * longer held by the caller; ZFS does not need that lock so it is not
 * taken here.
 */
static int
zpl_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = filp->f_mapping->host;
	cred_t *cred = CRED();
	int rc;

	/* Flush dirty pages in the range before syncing the log. */
	rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (rc)
		return (rc);

	crhold(cred);
	rc = -zfs_fsync(inode, datasync, cred);
	crfree(cred);
	ASSERT3S(rc, <=, 0);

	return (rc);
}
/*
 * Release a reference and free on the last one.
 */
static void
filemon_release(struct filemon *filemon)
{
	/*
	 * refcount_release() returns non-zero only when the last
	 * reference is dropped (see refcount(9)); otherwise there is
	 * nothing further to do.
	 */
	if (refcount_release(&filemon->refcnt) == 0)
		return;
	/*
	 * There are valid cases of releasing while locked, such as in
	 * filemon_untrack_processes, but none which are done where there
	 * is not at least 1 reference remaining.
	 */
	sx_assert(&filemon->lock, SA_UNLOCKED);
	/* The tracking cred is optional; drop it if one was captured. */
	if (filemon->cred != NULL)
		crfree(filemon->cred);
	sx_destroy(&filemon->lock);
	free(filemon, M_FILEMON);
}
/*
 * Detach the backing vnode from a vnd unit: close it, drop the saved
 * credential, and reset the softc to its unconfigured state.
 */
void
vndclear(struct vnd_softc *sc)
{
	struct vnode *vnp = sc->sc_vp;
	struct proc *pr = curproc;	/* XXX */

	DNPRINTF(VDB_FOLLOW, "vndclear(%p): vp %p\n", sc, vnp);

	if (vnp == NULL)
		panic("vndioctl: null vp");

	(void) vn_close(vnp, VNDRW(sc), sc->sc_cred, pr);
	crfree(sc->sc_cred);

	sc->sc_flags = 0;
	sc->sc_vp = NULL;
	sc->sc_cred = NULL;
	sc->sc_size = 0;
	memset(sc->sc_file, 0, sizeof(sc->sc_file));
}
/*
 * Reclaim an nfsnode so that it can be used for other purposes.
 */
int
nfs_reclaim(struct vop_reclaim_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	struct nfsdmap *dp, *dp2;

	/*
	 * If the NLM is running, give it a chance to abort pending
	 * locks.
	 */
	if (nfs_reclaim_p)
		nfs_reclaim_p(ap);

	/*
	 * Destroy the vm object and flush associated pages.
	 */
	vnode_destroy_vobject(vp);

	vfs_hash_remove(vp);

	/*
	 * Free up any directory cookie structures and
	 * large file handle structures that might be associated with
	 * this nfs node.
	 */
	if (vp->v_type == VDIR) {
		dp = LIST_FIRST(&np->n_cookies);
		while (dp) {
			/* Save the successor before freeing the current entry. */
			dp2 = dp;
			dp = LIST_NEXT(dp, ndm_list);
			free((caddr_t)dp2, M_NFSDIROFF);
		}
	}
	/* Drop the cached write credential, if one was retained. */
	if (np->n_writecred != NULL)
		crfree(np->n_writecred);
	/* Large handles are heap-allocated; small ones live inline. */
	if (np->n_fhsize > NFS_SMALLFH) {
		free((caddr_t)np->n_fhp, M_NFSBIGFH);
	}
	mtx_destroy(&np->n_mtx);
	uma_zfree(nfsnode_zone, vp->v_data);
	vp->v_data = NULL;
	return (0);
}