/*
 * Update on-disk file size now that data has been written to disk.
 */
STATIC int
xfs_setfilesize(
	struct xfs_ioend	*ioend)
{
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	struct xfs_trans	*tp = ioend->io_append_trans;
	xfs_fsize_t		isize;

	/*
	 * The transaction may have been allocated in the I/O submission thread,
	 * thus we need to mark ourselves as being in a transaction manually.
	 * Similarly for freeze protection.
	 */
	current_set_flags_nested(&tp->t_pflags, PF_FSTRANS);
	rwsem_acquire_read(&VFS_I(ip)->i_sb->s_writers.lock_map[SB_FREEZE_FS-1],
			   0, 1, _THIS_IP_);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	isize = xfs_new_eof(ip, ioend->io_offset + ioend->io_size);
	if (!isize) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		xfs_trans_cancel(tp, 0);
		return 0;
	}

	trace_xfs_setfilesize(ip, ioend->io_offset, ioend->io_size);

	ip->i_d.di_size = isize;
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	return xfs_trans_commit(tp, 0);
}
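/*
 * Illustrative sketch (not XFS code): the rwsem_acquire_read() above only
 * re-establishes lockdep ownership that the submitting thread gave up; no
 * semaphore is actually taken at this point.  A minimal, hypothetical
 * hand-off of a plain rw_semaphore between a submitter and a completion
 * worker could look like this (sem, submit_io and complete_io are made-up
 * names; the macro arguments follow the era of the code above):
 */
#include <linux/kernel.h>
#include <linux/lockdep.h>
#include <linux/rwsem.h>

static DECLARE_RWSEM(sem);		/* hypothetical lock held across the I/O */

static void submit_io(void)
{
	down_read(&sem);		/* really take the lock */
	/* Hand ownership to the completion thread: tell lockdep we let go. */
	rwsem_release(&sem.dep_map, 1, _THIS_IP_);
	/* ... queue the work item that will run complete_io() ... */
}

static void complete_io(void)
{
	/* Re-register this thread as the lockdep owner (trylock-style). */
	rwsem_acquire_read(&sem.dep_map, 0, 1, _THIS_IP_);
	/* ... completion work ... */
	up_read(&sem);			/* really drop the lock */
}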
/*
 * lock for reading
 */
void __sched down_read(struct rw_semaphore *sem)
{
	might_sleep();
	rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);

	LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
}
void down_read_nested(struct rw_semaphore *sem, int subclass)
{
	might_sleep();
	rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);

	LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
}
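/*
 * Illustrative sketch (hypothetical type and names): the _nested variant is
 * for read-acquiring two rwsems that share one lock class, e.g. a parent and
 * a child object.  Annotating the inner acquisition with a distinct subclass
 * keeps lockdep from reporting a false recursive-locking deadlock.
 */
#include <linux/lockdep.h>
#include <linux/rwsem.h>

struct node {
	struct rw_semaphore	lock;	/* every node's lock is the same class */
};

static void read_pair(struct node *parent, struct node *child)
{
	down_read(&parent->lock);
	/* Same lock class as parent->lock: tell lockdep this nesting is fine. */
	down_read_nested(&child->lock, SINGLE_DEPTH_NESTING);

	/* ... read both objects ... */

	up_read(&child->lock);
	up_read(&parent->lock);
}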
/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
int down_read_trylock(struct rw_semaphore *sem)
{
	int ret = __down_read_trylock(sem);

	if (ret == 1)
		rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);
	return ret;
}
int down_read_trylock_nested(struct rw_semaphore *sem, int subclass)
{
	int ret = __down_read_trylock(sem);

	if (ret == 1)
		rwsem_acquire_read(&sem->dep_map, subclass, 1, _RET_IP_);
	return ret;
}
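/*
 * Illustrative sketch (hypothetical lock and data): the trylock return
 * convention is 1 for "read lock taken", 0 for "contended", so a typical
 * caller backs off instead of sleeping.
 */
#include <linux/errno.h>
#include <linux/rwsem.h>

static DECLARE_RWSEM(cfg_lock);		/* hypothetical configuration lock */
static int cfg_value;

static int peek_cfg_value(int *out)
{
	if (!down_read_trylock(&cfg_lock))
		return -EBUSY;		/* contended: let the caller retry later */

	*out = cfg_value;
	up_read(&cfg_lock);
	return 0;
}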
static void __rt_down_read(struct rw_semaphore *rwsem, int subclass)
{
	struct rt_mutex *lock = &rwsem->lock;

	rwsem_acquire_read(&rwsem->dep_map, subclass, 0, _RET_IP_);

	if (rt_mutex_real_owner(lock) != current)
		rt_mutex_lock(&rwsem->lock);
	rwsem->read_depth++;
}
/**
 * kernfs_get_active - get an active reference to kernfs_node
 * @kn: kernfs_node to get an active reference to
 *
 * Get an active reference of @kn.  This function is a noop if @kn
 * is NULL.
 *
 * RETURNS:
 * Pointer to @kn on success, NULL on failure.
 */
struct kernfs_node *kernfs_get_active(struct kernfs_node *kn)
{
	if (unlikely(!kn))
		return NULL;

	if (!atomic_inc_unless_negative(&kn->active))
		return NULL;

	if (kn->flags & KERNFS_LOCKDEP)
		rwsem_acquire_read(&kn->dep_map, 0, 1, _RET_IP_);
	return kn;
}
/**
 * sysfs_get_active - get an active reference to sysfs_dirent
 * @sd: sysfs_dirent to get an active reference to
 *
 * Get an active reference of @sd.  This function is a noop if @sd
 * is NULL.
 *
 * RETURNS:
 * Pointer to @sd on success, NULL on failure.
 */
struct sysfs_dirent *sysfs_get_active(struct sysfs_dirent *sd)
{
	if (unlikely(!sd))
		return NULL;

	if (!atomic_inc_unless_negative(&sd->s_active))
		return NULL;

	if (likely(!sysfs_ignore_lockdep(sd)))
		rwsem_acquire_read(&sd->dep_map, 0, 1, _RET_IP_);
	return sd;
}
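/*
 * Illustrative sketch (hypothetical caller, not a real sysfs entry point;
 * assumed to live alongside the sysfs internals above): an active reference
 * pins the node against removal, so callers check the NULL return and pair
 * every successful get with sysfs_put_active(), which drops the reference
 * and the matching lockdep annotation.
 */
static int example_read_node(struct sysfs_dirent *sd, char *buf, size_t len)
{
	int ret;

	if (!sysfs_get_active(sd))
		return -ENODEV;		/* node is being removed */

	ret = snprintf(buf, len, "illustrative payload\n");

	sysfs_put_active(sd);
	return ret;
}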
STATIC int
xfs_setfilesize_ioend(
	struct xfs_ioend	*ioend)
{
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	struct xfs_trans	*tp = ioend->io_append_trans;

	/*
	 * The transaction may have been allocated in the I/O submission thread,
	 * thus we need to mark ourselves as being in a transaction manually.
	 * Similarly for freeze protection.
	 */
	current_set_flags_nested(&tp->t_pflags, PF_FSTRANS);
	rwsem_acquire_read(&VFS_I(ip)->i_sb->s_writers.lock_map[SB_FREEZE_FS-1],
			   0, 1, _THIS_IP_);

	return xfs_setfilesize(ip, tp, ioend->io_offset, ioend->io_size);
}
static void __rt_down_read(struct rw_semaphore *rwsem, int subclass)
{
	unsigned long flags;

	rwsem_acquire_read(&rwsem->dep_map, subclass, 0, _RET_IP_);

	/*
	 * Read locks within the write lock succeed.
	 */
	spin_lock_irqsave(&rwsem->lock.wait_lock, flags);

	if (rt_mutex_real_owner(&rwsem->lock) == current) {
		spin_unlock_irqrestore(&rwsem->lock.wait_lock, flags);
		rwsem->read_depth++;
		return;
	}
	spin_unlock_irqrestore(&rwsem->lock.wait_lock, flags);
	rt_mutex_lock(&rwsem->lock);
}
int fastcall rt_down_read_trylock(struct rw_semaphore *rwsem)
{
	unsigned long flags;
	int ret;

	/*
	 * Read locks within the self-held write lock succeed.
	 */
	spin_lock_irqsave(&rwsem->lock.wait_lock, flags);
	if (rt_mutex_real_owner(&rwsem->lock) == current) {
		spin_unlock_irqrestore(&rwsem->lock.wait_lock, flags);
		rwsem_acquire_read(&rwsem->dep_map, 0, 1, _RET_IP_);
		rwsem->read_depth++;
		return 1;
	}
	spin_unlock_irqrestore(&rwsem->lock.wait_lock, flags);

	ret = rt_mutex_trylock(&rwsem->lock);
	if (ret)
		rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_);
	return ret;
}
/**
 * sysfs_get_active - get an active reference to sysfs_dirent
 * @sd: sysfs_dirent to get an active reference to
 *
 * Get an active reference of @sd.  This function is a noop if @sd
 * is NULL.
 *
 * RETURNS:
 * Pointer to @sd on success, NULL on failure.
 */
struct sysfs_dirent *sysfs_get_active(struct sysfs_dirent *sd)
{
	if (unlikely(!sd))
		return NULL;

	while (1) {
		int v, t;

		v = atomic_read(&sd->s_active);
		if (unlikely(v < 0))
			return NULL;

		t = atomic_cmpxchg(&sd->s_active, v, v + 1);
		if (likely(t == v)) {
			rwsem_acquire_read(&sd->dep_map, 0, 1, _RET_IP_);
			return sd;
		}
		if (t < 0)
			return NULL;

		cpu_relax();
	}
}