/** * Helper function that if necessary adjusts file size (inode->i_size), when * position at the offset \a pos is accessed. File size can be arbitrary stale * on a Lustre client, but client at least knows KMS. If accessed area is * inside [0, KMS], set file size to KMS, otherwise glimpse file size. * * Locking: cl_isize_lock is used to serialize changes to inode size and to * protect consistency between inode size and cl_object * attributes. cl_object_size_lock() protects consistency between cl_attr's of * top-object and sub-objects. */ static int vvp_prep_size(const struct lu_env *env, struct cl_object *obj, struct cl_io *io, loff_t start, size_t count, int *exceed) { struct cl_attr *attr = vvp_env_thread_attr(env); struct inode *inode = vvp_object_inode(obj); loff_t pos = start + count - 1; loff_t kms; int result; /* * Consistency guarantees: following possibilities exist for the * relation between region being accessed and real file size at this * moment: * * (A): the region is completely inside of the file; * * (B-x): x bytes of region are inside of the file, the rest is * outside; * * (C): the region is completely outside of the file. * * This classification is stable under DLM lock already acquired by * the caller, because to change the class, other client has to take * DLM lock conflicting with our lock. Also, any updates to ->i_size * by other threads on this client are serialized by * ll_inode_size_lock(). This guarantees that short reads are handled * correctly in the face of concurrent writes and truncates. */ vvp_object_size_lock(obj); result = cl_object_attr_get(env, obj, attr); if (result == 0) { kms = attr->cat_kms; if (pos > kms) { /* * A glimpse is necessary to determine whether we * return a short read (B) or some zeroes at the end * of the buffer (C) */ vvp_object_size_unlock(obj); result = cl_glimpse_lock(env, io, inode, obj, 0); if (result == 0 && exceed) { /* If objective page index exceed end-of-file * page index, return directly. 
Do not expect * kernel will check such case correctly. * linux-2.6.18-128.1.1 miss to do that. * --bug 17336 */ loff_t size = i_size_read(inode); loff_t cur_index = start >> PAGE_SHIFT; loff_t size_index = (size - 1) >> PAGE_SHIFT; if ((size == 0 && cur_index != 0) || size_index < cur_index) *exceed = 1; }
/**
 * Refresh the file size of \a inode by taking a glimpse lock on its
 * cl_object, retrying the whole cl_io when a restart is requested.
 *
 * \param inode  inode whose size information is to be refreshed
 * \param agl    passed straight through to cl_glimpse_lock(); when
 *               non-zero (async glimpse lock mode — presumably; confirm
 *               against callers), -EWOULDBLOCK does NOT force a restart
 *
 * \retval 0 on success; result of cl_io_get()/cl_io_init()/
 *         cl_glimpse_lock() otherwise (<= 0 from cl_io_get() is
 *         returned directly)
 */
int cl_glimpse_size0(struct inode *inode, int agl)
{
	/*
	 * We don't need ast_flags argument to cl_glimpse_size(), because
	 * osc_lock_enqueue() takes care of the possible deadlock that said
	 * argument was introduced to avoid.
	 */
	/*
	 * XXX but note that ll_file_seek() passes LDLM_FL_BLOCK_NOWAIT to
	 * cl_glimpse_size(), which doesn't make sense: glimpse locks are not
	 * blocking anyway.
	 */
	struct lu_env *env = NULL;
	struct cl_io *io = NULL;
	__u16 refcheck;
	int retried = 0;
	int result;

	ENTRY;

	result = cl_io_get(inode, &env, &io, &refcheck);
	if (result <= 0)
		RETURN(result);

	do {
		/*
		 * Tell lower layers how many attempts have been made so
		 * far; ci_ndelay requests a no-delay enqueue (grounded by
		 * the -EWOULDBLOCK handling below).
		 */
		io->ci_ndelay_tried = retried++;
		io->ci_ndelay = io->ci_verify_layout = 1;
		result = cl_io_init(env, io, CIT_GLIMPSE, io->ci_obj);
		if (result > 0) {
			/*
			 * nothing to do for this io. This currently happens
			 * when stripe sub-object's are not yet created.
			 */
			result = io->ci_result;
		} else if (result == 0) {
			result = cl_glimpse_lock(env, io, inode, io->ci_obj,
						 agl);
			/*
			 * A non-AGL attempt that would block is retried by
			 * restarting the whole cl_io from scratch.
			 */
			if (!agl && result == -EWOULDBLOCK)
				io->ci_need_restart = 1;
		}

		/* Fault-injection point used by tests to delay the glimpse. */
		OBD_FAIL_TIMEOUT(OBD_FAIL_GLIMPSE_DELAY, 2);
		/* Finalize this pass before a possible restart. */
		cl_io_fini(env, io);
	} while (unlikely(io->ci_need_restart));

	cl_env_put(env, &refcheck);

	RETURN(result);
}