/*
 * Clean a vnode of filesystem-specific data and prepare it for reuse.
 */
STATIC int
vn_reclaim(
	struct vnode	*vp)
{
	int		error;

	XFS_STATS_INC(vn_reclaim);
	vn_trace_entry(vp, "vn_reclaim", (inst_t *)__return_address);

	/*
	 * Only make the VOP_RECLAIM call if there are behaviors
	 * to call.
	 */
	if (vp->v_fbhv) {
		VOP_RECLAIM(vp, error);
		if (error)
			return -error;
	}
	ASSERT(vp->v_fbhv == NULL);

	VN_LOCK(vp);
	vp->v_flag &= (VRECLM|VWAIT);
	VN_UNLOCK(vp, 0);

	vp->v_type = VNON;
	vp->v_fbhv = NULL;

#ifdef XFS_VNODE_TRACE
	ktrace_free(vp->v_trace);
	vp->v_trace = NULL;
#endif

	return 0;
}
/*
 * Look up an inode by number in the given file system.
 * The inode is looked up in the cache held in each AG.
 * If the inode is found in the cache, initialise the vfs inode
 * if necessary.
 *
 * If it is not in core, read it in from the file system's device,
 * add it to the cache and initialise the vfs inode.
 *
 * The inode is locked according to the value of the lock_flags parameter.
 * This flag parameter indicates how and if the inode's IO lock and inode lock
 * should be taken.
 *
 * mp -- the mount point structure for the current file system.  It points
 *       to the inode hash table.
 * tp -- a pointer to the current transaction if there is one.  This is
 *       simply passed through to the xfs_iread() call.
 * ino -- the number of the inode desired.  This is the unique identifier
 *        within the file system for the inode being requested.
 * lock_flags -- flags indicating how to lock the inode.  See the comment
 *               for xfs_ilock() for a list of valid values.
 */
int
xfs_iget(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_ino_t	ino,
	uint		flags,
	uint		lock_flags,
	xfs_inode_t	**ipp)
{
	xfs_inode_t	*ip;
	int		error;
	xfs_perag_t	*pag;
	xfs_agino_t	agino;

	/* reject inode numbers outside existing AGs */
	if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
		return EINVAL;

	/* get the perag structure and ensure that it's inode capable */
	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
	agino = XFS_INO_TO_AGINO(mp, ino);

again:
	error = 0;
	rcu_read_lock();
	ip = radix_tree_lookup(&pag->pag_ici_root, agino);

	if (ip) {
		error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
		if (error)
			goto out_error_or_again;
	} else {
		rcu_read_unlock();
		XFS_STATS_INC(xs_ig_missed);

		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
							flags, lock_flags);
		if (error)
			goto out_error_or_again;
	}
	xfs_perag_put(pag);

	*ipp = ip;

	/*
	 * If we have a real type for an on-disk inode, we can set ops(&unlock)
	 * now.  If it's a new inode being created, xfs_ialloc will handle it.
	 */
	if (xfs_iflags_test(ip, XFS_INEW) && ip->i_d.di_mode != 0)
		xfs_setup_inode(ip);
	return 0;

out_error_or_again:
	if (error == EAGAIN) {
		delay(1);
		goto again;
	}
	xfs_perag_put(pag);
	return error;
}
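/*
 * Illustrative caller sketch for xfs_iget() above -- not part of this
 * source, and xfs_example_use_iget is a made-up name.  It assumes the
 * classic IRELE() reference-release macro of this era (later kernels
 * spell it xfs_irele()): look the inode up with a lock, use it, then
 * unlock and drop the reference.
 */
STATIC int
xfs_example_use_iget(
	xfs_mount_t	*mp,
	xfs_ino_t	ino)
{
	xfs_inode_t	*ip;
	int		error;

	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip);
	if (error)
		return error;

	/* ... inspect ip->i_d here while holding the shared lock ... */

	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	IRELE(ip);
	return 0;
}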
/*
 * Read a directory.
 */
int
xfs_readdir(
	xfs_inode_t	*dp,
	void		*dirent,
	size_t		bufsize,
	xfs_off_t	*offset,
	filldir_t	filldir)
{
	int		rval;		/* return value */
	int		v;		/* type-checking value */

	trace_xfs_readdir(dp);

	if (XFS_FORCED_SHUTDOWN(dp->i_mount))
		return XFS_ERROR(EIO);

	ASSERT(S_ISDIR(dp->i_d.di_mode));
	XFS_STATS_INC(xs_dir_getdents);

	if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL)
		rval = xfs_dir2_sf_getdents(dp, dirent, offset, filldir);
	else if ((rval = xfs_dir2_isblock(NULL, dp, &v)))
		;
	else if (v)
		rval = xfs_dir2_block_getdents(dp, dirent, offset, filldir);
	else
		rval = xfs_dir2_leaf_getdents(dp, dirent, bufsize, offset,
					      filldir);
	return rval;
}
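/*
 * The dispatch above mirrors the three on-disk directory formats:
 * small directories live "shortform" inline in the inode literal
 * area, grow into a single data block, and from there into multi-block
 * leaf form -- hence the three getdents paths selected by di_format
 * and xfs_dir2_isblock().
 */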
STATIC ssize_t
xfs_file_read_iter(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct inode		*inode = file_inode(iocb->ki_filp);
	struct xfs_mount	*mp = XFS_I(inode)->i_mount;
	ssize_t			ret = 0;

	XFS_STATS_INC(mp, xs_read_calls);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (IS_DAX(inode))
		ret = xfs_file_dax_read(iocb, to);
	else if (iocb->ki_flags & IOCB_DIRECT)
		ret = xfs_file_dio_aio_read(iocb, to);
	else
		ret = xfs_file_buffered_aio_read(iocb, to);

	if (ret > 0)
		XFS_STATS_ADD(mp, xs_read_bytes, ret);
	return ret;
}
STATIC ssize_t
xfs_file_splice_read(
	struct file		*infilp,
	loff_t			*ppos,
	struct pipe_inode_info	*pipe,
	size_t			count,
	unsigned int		flags)
{
	struct xfs_inode	*ip = XFS_I(infilp->f_mapping->host);
	int			ioflags = 0;
	ssize_t			ret;

	XFS_STATS_INC(xs_read_calls);

	if (infilp->f_mode & FMODE_NOCMTIME)
		ioflags |= IO_INVIS;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);

	trace_xfs_file_splice_read(ip, count, *ppos, ioflags);

	ret = generic_file_splice_read(infilp, ppos, pipe, count, flags);
	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
	return ret;
}
STATIC void
xfs_qm_dqput_final(
	struct xfs_dquot	*dqp)
{
	struct xfs_quotainfo	*qi = dqp->q_mount->m_quotainfo;
	struct xfs_dquot	*gdqp;

	trace_xfs_dqput_free(dqp);

	mutex_lock(&qi->qi_lru_lock);
	if (list_empty(&dqp->q_lru)) {
		list_add_tail(&dqp->q_lru, &qi->qi_lru_list);
		qi->qi_lru_count++;
		XFS_STATS_INC(xs_qm_dquot_unused);
	}
	mutex_unlock(&qi->qi_lru_lock);

	/*
	 * If we just added a udquot to the freelist, then we want to release
	 * the gdquot reference that it (probably) has.  Otherwise it'll keep
	 * the gdquot from getting reclaimed.
	 */
	gdqp = dqp->q_gdquot;
	if (gdqp) {
		xfs_dqlock(gdqp);
		dqp->q_gdquot = NULL;
	}
	xfs_dqunlock(dqp);

	/*
	 * If we had a group quota hint, release it now.
	 */
	if (gdqp)
		xfs_qm_dqput(gdqp);
}
/*
 * Get a reference on a vnode.
 */
vnode_t *
vn_get(
	struct vnode	*vp,
	vmap_t		*vmap)
{
	struct inode	*inode;

	XFS_STATS_INC(vn_get);

	inode = LINVFS_GET_IP(vp);
	if (inode->i_state & I_FREEING)
		return NULL;

	inode = VFS_GET_INODE(vmap->v_vfsp, vmap->v_ino, IGET_NOALLOC);
	if (!inode)	/* Inode not present */
		return NULL;

	/* We do not want to create new inodes via vn_get,
	 * returning NULL here is OK.
	 */
	if (inode->i_state & I_NEW) {
		vn_mark_bad(vp);
		unlock_new_inode(inode);
		iput(inode);
		return NULL;
	}

	vn_trace_exit(vp, "vn_get", (inst_t *)__return_address);
	return vp;
}
int
xfs_attr_list_int(
	xfs_attr_list_context_t	*context)
{
	int			error;
	xfs_inode_t		*dp = context->dp;
	uint			lock_mode;

	XFS_STATS_INC(dp->i_mount, xs_attr_list);

	if (XFS_FORCED_SHUTDOWN(dp->i_mount))
		return -EIO;

	/*
	 * Decide on what work routines to call based on the inode size.
	 */
	lock_mode = xfs_ilock_attr_map_shared(dp);
	if (!xfs_inode_hasattr(dp)) {
		error = 0;
	} else if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
		error = xfs_attr_shortform_list(context);
	} else if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) {
		error = xfs_attr_leaf_list(context);
	} else {
		error = xfs_attr_node_list(context);
	}
	xfs_iunlock(dp, lock_mode);
	return error;
}
ssize_t
xfs_splice_write(
	bhv_desc_t		*bdp,
	struct pipe_inode_info	*pipe,
	struct file		*outfilp,
	loff_t			*ppos,
	size_t			count,
	int			flags,
	int			ioflags,
	cred_t			*credp)
{
	xfs_inode_t		*ip = XFS_BHVTOI(bdp);
	xfs_mount_t		*mp = ip->i_mount;
	ssize_t			ret;
	struct inode		*inode = outfilp->f_mapping->host;
	xfs_fsize_t		isize;

	XFS_STATS_INC(xs_write_calls);
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	xfs_ilock(ip, XFS_IOLOCK_EXCL);

	if (DM_EVENT_ENABLED(BHV_TO_VNODE(bdp)->v_vfsp, ip, DM_EVENT_WRITE) &&
	    (!(ioflags & IO_INVIS))) {
		bhv_vrwlock_t locktype = VRWLOCK_WRITE;
		int error;

		error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, BHV_TO_VNODE(bdp),
					*ppos, count,
					FILP_DELAY_FLAG(outfilp), &locktype);
		if (error) {
			xfs_iunlock(ip, XFS_IOLOCK_EXCL);
			return -error;
		}
	}
	xfs_rw_enter_trace(XFS_SPLICE_WRITE_ENTER, &ip->i_iocore,
			   pipe, count, *ppos, ioflags);
	ret = generic_file_splice_write(pipe, outfilp, ppos, count, flags);
	if (ret > 0)
		XFS_STATS_ADD(xs_write_bytes, ret);

	isize = i_size_read(inode);
	if (unlikely(ret < 0 && ret != -EFAULT && *ppos > isize))
		*ppos = isize;

	if (*ppos > ip->i_d.di_size) {
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		if (*ppos > ip->i_d.di_size) {
			ip->i_d.di_size = *ppos;
			i_size_write(inode, *ppos);
			ip->i_update_core = 1;
			ip->i_update_size = 1;
		}
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	}
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	return ret;
}
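/*
 * The size update at the end of xfs_splice_write() is a double-checked
 * pattern: *ppos is tested once without the inode lock so the common
 * (non-extending) case stays cheap, then re-tested under XFS_ILOCK_EXCL
 * before di_size is moved, since another writer may have extended the
 * file in between the two tests.
 */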
int
xfs_iget(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_ino_t	ino,
	uint		flags,
	uint		lock_flags,
	xfs_inode_t	**ipp)
{
	xfs_inode_t	*ip;
	int		error;
	xfs_perag_t	*pag;
	xfs_agino_t	agino;

	ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0);

	if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
		return EINVAL;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
	agino = XFS_INO_TO_AGINO(mp, ino);

again:
	error = 0;
	rcu_read_lock();
	ip = radix_tree_lookup(&pag->pag_ici_root, agino);

	if (ip) {
		error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
		if (error)
			goto out_error_or_again;
	} else {
		rcu_read_unlock();
		XFS_STATS_INC(xs_ig_missed);

		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
							flags, lock_flags);
		if (error)
			goto out_error_or_again;
	}
	xfs_perag_put(pag);

	*ipp = ip;

	if (xfs_iflags_test(ip, XFS_INEW) && ip->i_d.di_mode != 0)
		xfs_setup_inode(ip);
	return 0;

out_error_or_again:
	if (error == EAGAIN) {
		delay(1);
		goto again;
	}
	xfs_perag_put(pag);
	return error;
}
/*
 * Remove an entry from a directory.
 */
int
xfs_dir_removename(
	xfs_trans_t	*tp,
	xfs_inode_t	*dp,
	struct xfs_name	*name,
	xfs_ino_t	ino,
	xfs_fsblock_t	*first,		/* bmap's firstblock */
	xfs_bmap_free_t	*flist,		/* bmap's freeblock list */
	xfs_extlen_t	total)		/* bmap's total block count */
{
	struct xfs_da_args *args;
	int		rval;
	int		v;		/* type-checking value */

	ASSERT(S_ISDIR(dp->i_d.di_mode));
	XFS_STATS_INC(xs_dir_remove);

	args = kmem_zalloc(sizeof(*args), KM_SLEEP | KM_NOFS);
	if (!args)
		return -ENOMEM;

	args->geo = dp->i_mount->m_dir_geo;
	args->name = name->name;
	args->namelen = name->len;
	args->filetype = name->type;
	args->hashval = dp->i_mount->m_dirnameops->hashname(name);
	args->inumber = ino;
	args->dp = dp;
	args->firstblock = first;
	args->flist = flist;
	args->total = total;
	args->whichfork = XFS_DATA_FORK;
	args->trans = tp;

	if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL) {
		rval = xfs_dir2_sf_removename(args);
		goto out_free;
	}

	rval = xfs_dir2_isblock(args, &v);
	if (rval)
		goto out_free;
	if (v) {
		rval = xfs_dir2_block_removename(args);
		goto out_free;
	}

	rval = xfs_dir2_isleaf(args, &v);
	if (rval)
		goto out_free;
	if (v)
		rval = xfs_dir2_leaf_removename(args);
	else
		rval = xfs_dir2_node_removename(args);
out_free:
	kmem_free(args);
	return rval;
}
/*
 * Implement readdir.
 */
static int					/* error */
xfs_dir_getdents(xfs_trans_t *trans, xfs_inode_t *dp, uio_t *uio, int *eofp)
{
	dirent_t	*dbp;
	int		locklen = 0, alignment, retval, is32;
	xfs_dir_put_t	put;
	int		error;

	XFS_STATS_INC(xs_dir_getdents);
	ASSERT((dp->i_d.di_mode & IFMT) == IFDIR);

	/*
	 * If our caller has given us a single contiguous memory buffer,
	 * just work directly within that buffer.  If it's in user memory,
	 * lock it down first.
	 */
	is32 = ABI_IS_IRIX5(GETDENTS_ABI(get_current_abi(), uio));
	alignment = (is32 ? sizeof(xfs32_off_t) : sizeof(xfs_off_t)) - 1;
	if ((uio->uio_iovcnt == 1) &&
#if CELL_CAPABLE
	    !KT_CUR_ISXTHREAD() &&
#endif
	    (((__psint_t)uio->uio_iov[0].iov_base & alignment) == 0) &&
	    ((uio->uio_iov[0].iov_len & alignment) == 0)) {
		dbp = NULL;
		if (uio->uio_segflag == UIO_SYSSPACE) {
			put = xfs_dir_put_dirent64_direct;
		} else {
			put = is32 ? xfs_dir_put_dirent32_direct
				   : xfs_dir_put_dirent64_direct;
		}
	} else {
		dbp = kmem_alloc(sizeof(*dbp) + MAXNAMELEN, KM_SLEEP);
		put = is32 ? xfs_dir_put_dirent32_uio
			   : xfs_dir_put_dirent64_uio;
	}

	/*
	 * Decide on what work routines to call based on the inode size.
	 */
	*eofp = 0;
	if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL) {
		retval = xfs_dir_shortform_getdents(dp, uio, eofp, dbp, put);
	} else if (xfs_bmap_one_block(dp, XFS_DATA_FORK)) {
		retval = xfs_dir_leaf_getdents(trans, dp, uio, eofp, dbp, put);
	} else {
		retval = xfs_dir_node_getdents(trans, dp, uio, eofp, dbp, put);
	}
	if (dbp != NULL)
		kmem_free(dbp, sizeof(*dbp) + MAXNAMELEN);
	return(retval);
}
/* Allocate and initialize everything we need for an incore dquot. */
STATIC struct xfs_dquot *
xfs_dquot_alloc(
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	uint			type)
{
	struct xfs_dquot	*dqp;

	dqp = kmem_zone_zalloc(xfs_qm_dqzone, KM_SLEEP);

	dqp->dq_flags = type;
	dqp->q_core.d_id = cpu_to_be32(id);
	dqp->q_mount = mp;
	INIT_LIST_HEAD(&dqp->q_lru);
	mutex_init(&dqp->q_qlock);
	init_waitqueue_head(&dqp->q_pinwait);
	dqp->q_fileoffset = (xfs_fileoff_t)id / mp->m_quotainfo->qi_dqperchunk;
	/*
	 * Offset of dquot in the (fixed sized) dquot chunk.
	 */
	dqp->q_bufoffset = (id % mp->m_quotainfo->qi_dqperchunk) *
			sizeof(xfs_dqblk_t);

	/*
	 * Because we want to use a counting completion, complete
	 * the flush completion once to allow a single access to
	 * the flush completion without blocking.
	 */
	init_completion(&dqp->q_flush);
	complete(&dqp->q_flush);

	/*
	 * Make sure group quotas have a different lock class than user
	 * quotas.
	 */
	switch (type) {
	case XFS_DQ_USER:
		/* uses the default lock class */
		break;
	case XFS_DQ_GROUP:
		lockdep_set_class(&dqp->q_qlock, &xfs_dquot_group_class);
		break;
	case XFS_DQ_PROJ:
		lockdep_set_class(&dqp->q_qlock, &xfs_dquot_project_class);
		break;
	default:
		ASSERT(0);
		break;
	}

	xfs_qm_dquot_logitem_init(dqp);

	XFS_STATS_INC(mp, xs_qm_dquot);
	return dqp;
}
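/*
 * Worked example of the placement arithmetic above, assuming (purely
 * for illustration) that qi_dqperchunk == 30: dquot id 100 gives
 * q_fileoffset = 100 / 30 = 3 (the fourth chunk of the quota file)
 * and q_bufoffset = (100 % 30) * sizeof(xfs_dqblk_t), i.e. the 11th
 * dquot slot within that chunk.
 */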
ssize_t
xfs_sendfile(
	bhv_desc_t	*bdp,
	struct file	*filp,
	loff_t		*offset,
	int		ioflags,
	size_t		count,
	read_actor_t	actor,
	void		*target,
	cred_t		*credp)
{
	ssize_t		ret;
	xfs_fsize_t	n;
	xfs_inode_t	*ip;
	xfs_mount_t	*mp;
	vnode_t		*vp;

	ip = XFS_BHVTOI(bdp);
	vp = BHV_TO_VNODE(bdp);
	mp = ip->i_mount;
	vn_trace_entry(vp, "xfs_sendfile", (inst_t *)__return_address);

	XFS_STATS_INC(xs_read_calls);

	n = XFS_MAXIOFFSET(mp) - *offset;
	if ((n <= 0) || (count == 0))
		return 0;

	if (n < count)
		count = n;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	xfs_ilock(ip, XFS_IOLOCK_SHARED);

	if (DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ) &&
	    (!(ioflags & IO_INVIS))) {
		vrwlock_t locktype = VRWLOCK_READ;
		int error;

		error = XFS_SEND_DATA(mp, DM_EVENT_READ, BHV_TO_VNODE(bdp),
				      *offset, count,
				      FILP_DELAY_FLAG(filp), &locktype);
		if (error) {
			xfs_iunlock(ip, XFS_IOLOCK_SHARED);
			return -error;
		}
	}
	ret = generic_file_sendfile(filp, offset, count, actor, target);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	/* only account bytes actually sent; ret may be a negative errno */
	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	xfs_ichgtime(ip, XFS_ICHGTIME_ACC);
	return ret;
}
STATIC ssize_t
xfs_file_aio_write(
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned long		nr_segs,
	loff_t			pos)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	size_t			ocount = 0;

	XFS_STATS_INC(xs_write_calls);

	BUG_ON(iocb->ki_pos != pos);

	ret = generic_segment_checks(iovp, &nr_segs, &ocount, VERIFY_READ);
	if (ret)
		return ret;

	if (ocount == 0)
		return 0;

	sb_start_write(inode->i_sb);

	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		ret = -EIO;
		goto out;
	}

	if (unlikely(file->f_flags & O_DIRECT))
		ret = xfs_file_dio_aio_write(iocb, iovp, nr_segs, pos, ocount);
	else
		ret = xfs_file_buffered_aio_write(iocb, iovp, nr_segs, pos,
						  ocount);

	if (ret > 0) {
		ssize_t err;

		XFS_STATS_ADD(xs_write_bytes, ret);

		/* Handle various SYNC-type writes */
		err = generic_write_sync(file, pos, ret);
		if (err < 0)
			ret = err;
	}

out:
	sb_end_write(inode->i_sb);
	return ret;
}
STATIC ssize_t
xfs_file_splice_from_socket(
	struct file	*file,
	struct socket	*sock,
	loff_t __user	*ppos,
	size_t		count)
{
	struct inode	*inode = file->f_mapping->host;
	struct xfs_inode *ip = XFS_I(inode);
	ssize_t		ret;
	xfs_fsize_t	isize, new_size;

	XFS_STATS_INC(xs_write_calls);
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	xfs_ilock(ip, XFS_IOLOCK_EXCL);

	new_size = *ppos + count;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (new_size > ip->i_size)
		ip->i_new_size = new_size;
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	/*
	 * xfs_rw_enter_trace(XFS_SPLICE_WRITE_ENTER, ip,
	 *		      pipe, count, *ppos, ioflags);
	 */
	ret = generic_splice_from_socket(file, sock, ppos, count);
	if (ret > 0)
		XFS_STATS_ADD(xs_write_bytes, ret);

	isize = i_size_read(inode);
	if (unlikely(ret < 0 && ret != -EFAULT && *ppos > isize))
		*ppos = isize;

	if (*ppos > ip->i_size) {
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		if (*ppos > ip->i_size)
			ip->i_size = *ppos;
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	}

	if (ip->i_new_size) {
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		ip->i_new_size = 0;
		if (ip->i_d.di_size > ip->i_size)
			ip->i_d.di_size = ip->i_size;
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	}
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	return ret;
}
/*
 * Add a reference to a referenced vnode.
 */
bhv_vnode_t *
vn_hold(
	bhv_vnode_t	*vp)
{
	struct inode	*inode;

	XFS_STATS_INC(vn_hold);

	inode = igrab(vn_to_inode(vp));
	ASSERT(inode);

	return vp;
}
int
xfs_dir_lookup(
	xfs_trans_t	*tp,
	xfs_inode_t	*dp,
	struct xfs_name	*name,
	xfs_ino_t	*inum,	  /* out: inode number */
	struct xfs_name *ci_name) /* out: actual name if CI match */
{
	xfs_da_args_t	args;
	int		rval;
	int		v;	/* type-checking value */

	ASSERT(S_ISDIR(dp->i_d.di_mode));
	XFS_STATS_INC(xs_dir_lookup);

	memset(&args, 0, sizeof(xfs_da_args_t));
	args.name = name->name;
	args.namelen = name->len;
	args.filetype = name->type;
	args.hashval = dp->i_mount->m_dirnameops->hashname(name);
	args.dp = dp;
	args.whichfork = XFS_DATA_FORK;
	args.trans = tp;
	args.op_flags = XFS_DA_OP_OKNOENT;
	if (ci_name)
		args.op_flags |= XFS_DA_OP_CILOOKUP;

	if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL)
		rval = xfs_dir2_sf_lookup(&args);
	else if ((rval = xfs_dir2_isblock(tp, dp, &v)))
		return rval;
	else if (v)
		rval = xfs_dir2_block_lookup(&args);
	else if ((rval = xfs_dir2_isleaf(tp, dp, &v)))
		return rval;
	else if (v)
		rval = xfs_dir2_leaf_lookup(&args);
	else
		rval = xfs_dir2_node_lookup(&args);

	if (rval == EEXIST)
		rval = 0;
	if (!rval) {
		*inum = args.inumber;
		if (ci_name) {
			ci_name->name = args.value;
			ci_name->len = args.valuelen;
		}
	}
	return rval;
}
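/*
 * Caller sketch for the case-insensitive path above (illustrative
 * only; xfs_example_ci_lookup is a made-up name).  The internal lookup
 * routines signal "name found" as EEXIST, which xfs_dir_lookup()
 * squashes to 0, and a CI match hands back an allocated copy of the
 * exact on-disk spelling in ci_name, which the caller owns and must
 * free.
 */
STATIC int
xfs_example_ci_lookup(
	xfs_inode_t	*dp,
	struct xfs_name	*name,
	xfs_ino_t	*inum)
{
	struct xfs_name	ci_name = { NULL, 0, 0 };
	int		error;

	error = xfs_dir_lookup(NULL, dp, name, inum, &ci_name);
	if (!error && ci_name.name)
		kmem_free(ci_name.name);	/* done with the copy */
	return error;
}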
STATIC ssize_t
xfs_file_aio_write(
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned long		nr_segs,
	loff_t			pos)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	size_t			ocount = 0;

	XFS_STATS_INC(xs_write_calls);

	BUG_ON(iocb->ki_pos != pos);

	ret = generic_segment_checks(iovp, &nr_segs, &ocount, VERIFY_READ);
	if (ret)
		return ret;

	if (ocount == 0)
		return 0;

	xfs_wait_for_freeze(ip->i_mount, SB_FREEZE_WRITE);

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	if (unlikely(file->f_flags & O_DIRECT))
		ret = xfs_file_dio_aio_write(iocb, iovp, nr_segs, pos, ocount);
	else
		ret = xfs_file_buffered_aio_write(iocb, iovp, nr_segs, pos,
						  ocount);

	if (ret > 0) {
		ssize_t err;

		XFS_STATS_ADD(xs_write_bytes, ret);

		err = generic_write_sync(file, pos, ret);
		if (err < 0)
			ret = err;
	}

	return ret;
}
/*
 * Add a reference to a referenced vnode.
 */
struct vnode *
vn_hold(
	struct vnode	*vp)
{
	struct inode	*inode;

	XFS_STATS_INC(vn_hold);

	VN_LOCK(vp);
	inode = igrab(LINVFS_GET_IP(vp));
	ASSERT(inode);
	VN_UNLOCK(vp, 0);

	return vp;
}
/* Enter a name in a directory. */
int
xfs_dir_createname(
	xfs_trans_t		*tp,
	xfs_inode_t		*dp,
	struct xfs_name		*name,
	xfs_ino_t		inum,	/* new entry inode number */
	xfs_fsblock_t		*first,	/* bmap's firstblock */
	xfs_bmap_free_t		*flist,	/* bmap's freeblock list */
	xfs_extlen_t		total)	/* bmap's total block count */
{
	xfs_da_args_t		args;
	int			rval;
	int			v;	/* type-checking value */

	ASSERT(S_ISDIR(dp->i_d.di_mode));
	if ((rval = xfs_dir_ino_validate(tp->t_mountp, inum)))
		return rval;
	XFS_STATS_INC(xs_dir_create);

	memset(&args, 0, sizeof(xfs_da_args_t));
	args.name = name->name;
	args.namelen = name->len;
	args.filetype = name->type;
	args.hashval = dp->i_mount->m_dirnameops->hashname(name);
	args.inumber = inum;
	args.dp = dp;
	args.firstblock = first;
	args.flist = flist;
	args.total = total;
	args.whichfork = XFS_DATA_FORK;
	args.trans = tp;
	args.op_flags = XFS_DA_OP_ADDNAME | XFS_DA_OP_OKNOENT;

	if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL)
		rval = xfs_dir2_sf_addname(&args);
	else if ((rval = xfs_dir2_isblock(tp, dp, &v)))
		return rval;
	else if (v)
		rval = xfs_dir2_block_addname(&args);
	else if ((rval = xfs_dir2_isleaf(tp, dp, &v)))
		return rval;
	else if (v)
		rval = xfs_dir2_leaf_addname(&args);
	else
		rval = xfs_dir2_node_addname(&args);
	return rval;
}
/* Enter a name in a directory. */
int
xfs_dir_createname(
	xfs_trans_t	*tp,
	xfs_inode_t	*dp,
	char		*name,
	int		namelen,
	xfs_ino_t	inum,		/* new entry inode number */
	xfs_fsblock_t	*first,		/* bmap's firstblock */
	xfs_bmap_free_t	*flist,		/* bmap's freeblock list */
	xfs_extlen_t	total)		/* bmap's total block count */
{
	xfs_da_args_t	args;
	int		rval;
	int		v;		/* type-checking value */

	ASSERT((dp->i_d.di_mode & S_IFMT) == S_IFDIR);
	if ((rval = xfs_dir_ino_validate(tp->t_mountp, inum)))
		return rval;
	XFS_STATS_INC(xs_dir_create);

	args.name = name;
	args.namelen = namelen;
	args.hashval = xfs_da_hashname(name, namelen);
	args.inumber = inum;
	args.dp = dp;
	args.firstblock = first;
	args.flist = flist;
	args.total = total;
	args.whichfork = XFS_DATA_FORK;
	args.trans = tp;
	args.justcheck = 0;
	args.addname = args.oknoent = 1;

	if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL)
		rval = xfs_dir2_sf_addname(&args);
	else if ((rval = xfs_dir2_isblock(tp, dp, &v)))
		return rval;
	else if (v)
		rval = xfs_dir2_block_addname(&args);
	else if ((rval = xfs_dir2_isleaf(tp, dp, &v)))
		return rval;
	else if (v)
		rval = xfs_dir2_leaf_addname(&args);
	else
		rval = xfs_dir2_node_addname(&args);
	return rval;
}
/*
 * Lookup a name in a directory, give back the inode number.
 */
int
xfs_dir_lookup(
	xfs_trans_t	*tp,
	xfs_inode_t	*dp,
	char		*name,
	int		namelen,
	xfs_ino_t	*inum)		/* out: inode number */
{
	xfs_da_args_t	args;
	int		rval;
	int		v;		/* type-checking value */

	ASSERT((dp->i_d.di_mode & S_IFMT) == S_IFDIR);
	XFS_STATS_INC(xs_dir_lookup);

	args.name = name;
	args.namelen = namelen;
	args.hashval = xfs_da_hashname(name, namelen);
	args.inumber = 0;
	args.dp = dp;
	args.firstblock = NULL;
	args.flist = NULL;
	args.total = 0;
	args.whichfork = XFS_DATA_FORK;
	args.trans = tp;
	args.justcheck = args.addname = 0;
	args.oknoent = 1;

	if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL)
		rval = xfs_dir2_sf_lookup(&args);
	else if ((rval = xfs_dir2_isblock(tp, dp, &v)))
		return rval;
	else if (v)
		rval = xfs_dir2_block_lookup(&args);
	else if ((rval = xfs_dir2_isleaf(tp, dp, &v)))
		return rval;
	else if (v)
		rval = xfs_dir2_leaf_lookup(&args);
	else
		rval = xfs_dir2_node_lookup(&args);
	if (rval == EEXIST)
		rval = 0;
	if (rval == 0)
		*inum = args.inumber;
	return rval;
}
/*
 * Allocate and initialise an xfs_inode.
 */
struct xfs_inode *
xfs_inode_alloc(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;

	/*
	 * if this didn't occur in transactions, we could use
	 * KM_MAYFAIL and return NULL here on ENOMEM. Set the
	 * code up to do this anyway.
	 */
	ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);
	if (!ip)
		return NULL;
	if (inode_init_always(mp->m_super, VFS_I(ip))) {
		kmem_zone_free(xfs_inode_zone, ip);
		return NULL;
	}

	/* VFS doesn't initialise i_mode! */
	VFS_I(ip)->i_mode = 0;

	XFS_STATS_INC(mp, vn_active);
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(!xfs_isiflocked(ip));
	ASSERT(ip->i_ino == 0);

	mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);

	/* initialise the xfs inode */
	ip->i_ino = ino;
	ip->i_mount = mp;
	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
	ip->i_afp = NULL;
	ip->i_cowfp = NULL;
	ip->i_cnextents = 0;
	ip->i_cformat = XFS_DINODE_FMT_EXTENTS;
	memset(&ip->i_df, 0, sizeof(xfs_ifork_t));
	ip->i_flags = 0;
	ip->i_delayed_blks = 0;
	memset(&ip->i_d, 0, sizeof(ip->i_d));

	return ip;
}
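/*
 * Note on the NULL check after kmem_zone_alloc() above: as the leading
 * comment says, KM_SLEEP allocations retry until they succeed, so the
 * allocation cannot actually return NULL here.  The check is defensive,
 * kept so the function could switch to KM_MAYFAIL without other
 * changes.
 */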
STATIC ssize_t
xfs_file_write_iter(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	size_t			ocount = iov_iter_count(from);

	XFS_STATS_INC(ip->i_mount, xs_write_calls);

	if (ocount == 0)
		return 0;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	if (IS_DAX(inode))
		ret = xfs_file_dax_write(iocb, from);
	else if (iocb->ki_flags & IOCB_DIRECT) {
		/*
		 * Allow a directio write to fall back to a buffered
		 * write *only* in the case that we're doing a reflink
		 * CoW.  In all other directio scenarios we do not
		 * allow an operation to fall back to buffered mode.
		 */
		ret = xfs_file_dio_aio_write(iocb, from);
		if (ret == -EREMCHG)
			goto buffered;
	} else {
buffered:
		ret = xfs_file_buffered_aio_write(iocb, from);
	}

	if (ret > 0) {
		XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);

		/* Handle various SYNC-type writes */
		ret = generic_write_sync(iocb, ret);
	}
	return ret;
}
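/*
 * -EREMCHG never reaches userspace here: it serves purely as an
 * in-kernel sentinel from the direct I/O path meaning "this write hit
 * a shared (reflink) extent, redo it through the page cache", and the
 * buffered retry replaces it with a real byte count or error.
 */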
/*
 * Call VOP_INACTIVE on last reference.
 */
void
vn_rele(
	struct vnode	*vp)
{
	int		vcnt;
	int		cache;

	XFS_STATS_INC(vn_rele);

	VN_LOCK(vp);

	vn_trace_entry(vp, "vn_rele", (inst_t *)__return_address);
	vcnt = vn_count(vp);

	/*
	 * Since we always get called from put_inode we know
	 * that i_count won't be decremented after we
	 * return.
	 */
	if (!vcnt) {
		/*
		 * As soon as we turn this on, no one can find us in vn_get
		 * until we turn off VINACT or VRECLM
		 */
		vp->v_flag |= VINACT;
		VN_UNLOCK(vp, 0);

		/*
		 * Do not make the VOP_INACTIVE call if there
		 * are no behaviors attached to the vnode to call.
		 */
		if (vp->v_fbhv)
			VOP_INACTIVE(vp, NULL, cache);

		VN_LOCK(vp);
		if (vp->v_flag & VWAIT)
			sv_broadcast(vptosync(vp));

		vp->v_flag &= ~(VINACT|VWAIT|VRECLM|VMODIFIED);
	}

	VN_UNLOCK(vp, 0);

	vn_trace_exit(vp, "vn_rele", (inst_t *)__return_address);
}
/*
 * Release a reference to the dquot (decrement ref-count) and unlock it.
 *
 * If there is a group quota attached to this dquot, carefully release that
 * too without tripping over deadlocks'n'stuff.
 */
void
xfs_qm_dqput(
	struct xfs_dquot	*dqp)
{
	ASSERT(dqp->q_nrefs > 0);
	ASSERT(XFS_DQ_IS_LOCKED(dqp));

	trace_xfs_dqput(dqp);

	if (--dqp->q_nrefs == 0) {
		struct xfs_quotainfo	*qi = dqp->q_mount->m_quotainfo;
		trace_xfs_dqput_free(dqp);

		if (list_lru_add(&qi->qi_lru, &dqp->q_lru))
			XFS_STATS_INC(xs_qm_dquot_unused);
	}
	xfs_dqunlock(dqp);
}
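/*
 * list_lru_add() returns true only when the item was actually added,
 * i.e. it was not already on an LRU list, so the xs_qm_dquot_unused
 * counter above is bumped exactly once per dquot going idle rather
 * than once per xfs_qm_dqput() call.
 */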
STATIC ssize_t
xfs_file_splice_read(
	struct file		*infilp,
	loff_t			*ppos,
	struct pipe_inode_info	*pipe,
	size_t			count,
	unsigned int		flags)
{
	struct xfs_inode	*ip = XFS_I(infilp->f_mapping->host);
	struct xfs_mount	*mp = ip->i_mount;
	int			ioflags = 0;
	ssize_t			ret;

	XFS_STATS_INC(xs_read_calls);

	if (infilp->f_mode & FMODE_NOCMTIME)
		ioflags |= IO_INVIS;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	xfs_ilock(ip, XFS_IOLOCK_SHARED);

	if (DM_EVENT_ENABLED(ip, DM_EVENT_READ) && !(ioflags & IO_INVIS)) {
		int iolock = XFS_IOLOCK_SHARED;
		int error;

		error = XFS_SEND_DATA(mp, DM_EVENT_READ, ip, *ppos, count,
					FILP_DELAY_FLAG(infilp), &iolock);
		if (error) {
			xfs_iunlock(ip, XFS_IOLOCK_SHARED);
			return -error;
		}
	}

	trace_xfs_file_splice_read(ip, count, *ppos, ioflags);

	ret = generic_file_splice_read(infilp, ppos, pipe, count, flags);
	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return ret;
}
static int					/* error */
xfs_dir_lookup(xfs_trans_t *trans, xfs_inode_t *dp, char *name, int namelen,
	       xfs_ino_t *inum)
{
	xfs_da_args_t	args;
	int		retval;

	ASSERT((dp->i_d.di_mode & IFMT) == IFDIR);
	if (namelen >= MAXNAMELEN) {
		return(XFS_ERROR(EINVAL));
	}

	XFS_STATS_INC(xs_dir_lookup);

	/*
	 * Fill in the arg structure for this request.
	 */
	args.name = name;
	args.namelen = namelen;
	args.hashval = xfs_da_hashname(name, namelen);
	args.inumber = 0;
	args.dp = dp;
	args.firstblock = NULL;
	args.flist = NULL;
	args.total = 0;
	args.whichfork = XFS_DATA_FORK;
	args.trans = trans;
	args.justcheck = args.addname = 0;
	args.oknoent = 1;

	/*
	 * Decide on what work routines to call based on the inode size.
	 */
	if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL) {
		retval = xfs_dir_shortform_lookup(&args);
	} else if (xfs_bmap_one_block(dp, XFS_DATA_FORK)) {
		retval = xfs_dir_leaf_lookup(&args);
	} else {
		retval = xfs_dir_node_lookup(&args);
	}
	if (retval == EEXIST)
		retval = 0;
	*inum = args.inumber;
	return(retval);
}
/*
 * Read a directory.
 */
int
xfs_dir_getdents(
	xfs_trans_t	*tp,
	xfs_inode_t	*dp,
	uio_t		*uio,		/* caller's buffer control */
	int		*eofp)		/* out: eof reached */
{
	int		alignment;	/* alignment required for ABI */
	xfs_dirent_t	*dbp;		/* malloc'ed buffer */
	xfs_dir2_put_t	put;		/* entry formatting routine */
	int		rval;		/* return value */
	int		v;		/* type-checking value */

	ASSERT((dp->i_d.di_mode & S_IFMT) == S_IFDIR);
	XFS_STATS_INC(xs_dir_getdents);

	/*
	 * If our caller has given us a single contiguous aligned memory
	 * buffer, just work directly within that buffer.  If it's in
	 * user memory, lock it down first.
	 */
	alignment = sizeof(xfs_off_t) - 1;
	if ((uio->uio_iovcnt == 1) &&
	    (((__psint_t)uio->uio_iov[0].iov_base & alignment) == 0) &&
	    ((uio->uio_iov[0].iov_len & alignment) == 0)) {
		dbp = NULL;
		put = xfs_dir2_put_dirent64_direct;
	} else {
		dbp = kmem_alloc(sizeof(*dbp) + MAXNAMELEN, KM_SLEEP);
		put = xfs_dir2_put_dirent64_uio;
	}

	*eofp = 0;
	if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL)
		rval = xfs_dir2_sf_getdents(dp, uio, eofp, dbp, put);
	else if ((rval = xfs_dir2_isblock(tp, dp, &v)))
		;
	else if (v)
		rval = xfs_dir2_block_getdents(tp, dp, uio, eofp, dbp, put);
	else
		rval = xfs_dir2_leaf_getdents(tp, dp, uio, eofp, dbp, put);
	if (dbp != NULL)
		kmem_free(dbp, sizeof(*dbp) + MAXNAMELEN);
	return rval;
}
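/*
 * Worked example of the alignment test above: with an 8-byte
 * xfs_off_t, alignment == 7 (binary 111), so "base & alignment" is
 * zero exactly when the low three bits of the address are clear,
 * i.e. the buffer is 8-byte aligned.  An iov_base of 0x1004 fails
 * (0x1004 & 7 == 4) and forces the bounce-buffer path through dbp.
 */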