static struct dentry *ocfs2_get_dentry(struct super_block *sb, struct ocfs2_inode_handle *handle) { struct inode *inode; struct dentry *result; mlog_entry("(0x%p, 0x%p)\n", sb, handle); if (handle->ih_blkno == 0) { mlog_errno(-ESTALE); return ERR_PTR(-ESTALE); } inode = ocfs2_iget(OCFS2_SB(sb), handle->ih_blkno, 0, 0); if (IS_ERR(inode)) return (void *)inode; if (handle->ih_generation != inode->i_generation) { iput(inode); return ERR_PTR(-ESTALE); } result = d_alloc_anon(inode); if (!result) { iput(inode); mlog_errno(-ENOMEM); return ERR_PTR(-ENOMEM); } result->d_op = &ocfs2_dentry_ops; mlog_exit_ptr(result); return result; }
static int ocfs2_file_open(struct inode *inode, struct file *file) { int status; int mode = file->f_flags; struct ocfs2_inode_info *oi = OCFS2_I(inode); mlog_entry("(0x%p, 0x%p, '%.*s')\n", inode, file, file->f_path.dentry->d_name.len, file->f_path.dentry->d_name.name); spin_lock(&oi->ip_lock); /* Check that the inode hasn't been wiped from disk by another * node. If it hasn't then we're safe as long as we hold the * spin lock until our increment of open count. */ if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_DELETED) { spin_unlock(&oi->ip_lock); status = -ENOENT; goto leave; } if (mode & O_DIRECT) oi->ip_flags |= OCFS2_INODE_OPEN_DIRECT; oi->ip_open_count++; spin_unlock(&oi->ip_lock); status = 0; leave: mlog_exit(status); return status; }
/*
 * NOTE: this should always be called with parent dir i_mutex taken.
 */
int ocfs2_find_files_on_disk(const char *name,
			     int namelen,
			     u64 *blkno,
			     struct inode *inode,
			     struct buffer_head **dirent_bh,
			     struct ocfs2_dir_entry **dirent)
{
	int status = -ENOENT;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	mlog_entry("(osb=%p, parent=%"MLFu64", name='%.*s', blkno=%p, "
		   "inode=%p)\n",
		   osb, OCFS2_I(inode)->ip_blkno, namelen, name, blkno, inode);

	*dirent_bh = ocfs2_find_entry(name, namelen, inode, dirent);
	if (*dirent_bh && *dirent) {
		/* Found it: hand back the on-disk inode block number. */
		*blkno = le64_to_cpu((*dirent)->inode);
		status = 0;
	} else {
		/* Lookup failed: return no entry and release any buffer. */
		*dirent = NULL;
		if (*dirent_bh) {
			brelse(*dirent_bh);
			*dirent_bh = NULL;
		}
	}

	mlog_exit(status);
	return status;
}
static struct page *ocfs2_nopage(struct vm_area_struct * area, unsigned long address, int *type) { struct page *page = NOPAGE_SIGBUS; sigset_t blocked, oldset; int ret; mlog_entry("(area=%p, address=%lu, type=%p)\n", area, address, type); /* The best way to deal with signals in this path is * to block them upfront, rather than allowing the * locking paths to return -ERESTARTSYS. */ sigfillset(&blocked); /* We should technically never get a bad ret return * from sigprocmask */ ret = sigprocmask(SIG_BLOCK, &blocked, &oldset); if (ret < 0) { mlog_errno(ret); goto out; } page = filemap_nopage(area, address, type); ret = sigprocmask(SIG_SETMASK, &oldset, NULL); if (ret < 0) mlog_errno(ret); out: mlog_exit_ptr(page); return page; }
/* splice-read: take and immediately drop the meta lock to force a
 * cluster-coherent view, then defer to the generic implementation. */
static ssize_t ocfs2_file_splice_read(struct file *in,
				      loff_t *ppos,
				      struct pipe_inode_info *pipe,
				      size_t len,
				      unsigned int flags)
{
	int ret = 0;
	struct inode *inode = in->f_path.dentry->d_inode;

	mlog_entry("(0x%p, 0x%p, %u, '%.*s')\n", in, pipe,
		   (unsigned int)len,
		   in->f_path.dentry->d_name.len,
		   in->f_path.dentry->d_name.name);

	/*
	 * See the comment in ocfs2_file_aio_read()
	 */
	ret = ocfs2_meta_lock(inode, NULL, 0);
	if (ret >= 0) {
		ocfs2_meta_unlock(inode, 0);
		ret = generic_file_splice_read(in, ppos, pipe, len, flags);
	} else {
		mlog_errno(ret);
	}

	mlog_exit(ret);
	return ret;
}
void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) { mlog_entry("%.*s\n", res->lockname.len, res->lockname.name); assert_spin_locked(&dlm->spinlock); assert_spin_locked(&res->spinlock); if (__dlm_lockres_unused(res)){ if (list_empty(&res->purge)) { mlog(0, "putting lockres %.*s:%p onto purge list\n", res->lockname.len, res->lockname.name, res); res->last_used = jiffies; dlm_lockres_get(res); list_add_tail(&res->purge, &dlm->purge_list); dlm->purge_count++; } } else if (!list_empty(&res->purge)) { mlog(0, "removing lockres %.*s:%p from purge list, owner=%u\n", res->lockname.len, res->lockname.name, res, res->owner); list_del_init(&res->purge); dlm_lockres_put(res); dlm->purge_count--; } }
/*
 * NOTE: this should always be called with parent dir i_mutex taken.
 */
int ocfs2_find_files_on_disk(const char *name,
			     int namelen,
			     u64 *blkno,
			     struct inode *inode,
			     struct buffer_head **dirent_bh,
			     struct ocfs2_dir_entry **dirent)
{
	int status = -ENOENT;

	mlog_entry("(name=%.*s, blkno=%p, inode=%p, dirent_bh=%p, dirent=%p)\n",
		   namelen, name, blkno, inode, dirent_bh, dirent);

	*dirent_bh = ocfs2_find_entry(name, namelen, inode, dirent);
	if (*dirent_bh && *dirent) {
		/* Found: report the entry's on-disk inode block number. */
		*blkno = le64_to_cpu((*dirent)->inode);
		status = 0;
	} else {
		/* Not found: make sure the caller sees neither a stale
		 * dirent pointer nor an unreleased buffer head. */
		*dirent = NULL;
		if (*dirent_bh) {
			brelse(*dirent_bh);
			*dirent_bh = NULL;
		}
	}

	mlog_exit(status);
	return status;
}
/* * initialize the new inode, but don't do anything that would cause * us to sleep. * return 0 on success, 1 on failure */ static int ocfs2_init_locked_inode(struct inode *inode, void *opaque) { struct ocfs2_find_inode_args *args = opaque; static struct lock_class_key ocfs2_quota_ip_alloc_sem_key, ocfs2_file_ip_alloc_sem_key; mlog_entry("inode = %p, opaque = %p\n", inode, opaque); inode->i_ino = args->fi_ino; OCFS2_I(inode)->ip_blkno = args->fi_blkno; if (args->fi_sysfile_type != 0) lockdep_set_class(&inode->i_mutex, &ocfs2_sysfile_lock_key[args->fi_sysfile_type]); if (args->fi_sysfile_type == USER_QUOTA_SYSTEM_INODE || args->fi_sysfile_type == GROUP_QUOTA_SYSTEM_INODE || args->fi_sysfile_type == LOCAL_USER_QUOTA_SYSTEM_INODE || args->fi_sysfile_type == LOCAL_GROUP_QUOTA_SYSTEM_INODE) lockdep_set_class(&OCFS2_I(inode)->ip_alloc_sem, &ocfs2_quota_ip_alloc_sem_key); else lockdep_set_class(&OCFS2_I(inode)->ip_alloc_sem, &ocfs2_file_ip_alloc_sem_key); mlog_exit(0); return 0; }
static int ocfs2_fault(struct vm_area_struct *area, struct vm_fault *vmf) { sigset_t blocked, oldset; int error, ret; mlog_entry("(area=%p, page offset=%lu)\n", area, vmf->pgoff); /* The best way to deal with signals in this path is * to block them upfront, rather than allowing the * locking paths to return -ERESTARTSYS. */ sigfillset(&blocked); /* We should technically never get a bad ret return * from sigprocmask */ error = sigprocmask(SIG_BLOCK, &blocked, &oldset); if (ret < 0) { mlog_errno(error); ret = VM_FAULT_SIGBUS; goto out; } ret = filemap_fault(area, vmf); error = sigprocmask(SIG_SETMASK, &oldset, NULL); if (error < 0) mlog_errno(error); out: mlog_exit_ptr(vmf->page); return ret; }
/* Read one page of file data, taking the cluster meta and data locks
 * around the generic block read. 'unlock' tracks whether we still own
 * the page lock at exit: block_read_full_page() and the
 * AOP_TRUNCATED_PAGE paths hand it off. */
static int ocfs2_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	loff_t start = (loff_t)page->index << PAGE_CACHE_SHIFT;
	int ret, unlock = 1;

	mlog_entry("(0x%p, %lu)\n", file, (page ? page->index : 0));

	ret = ocfs2_meta_lock_with_page(inode, NULL, 0, page);
	if (ret != 0) {
		/* AOP_TRUNCATED_PAGE means the helper already dropped
		 * the page lock; don't unlock it again below. */
		if (ret == AOP_TRUNCATED_PAGE)
			unlock = 0;
		mlog_errno(ret);
		goto out;
	}

	/* A blocked trylock signals a racing truncate/extent change;
	 * ask the caller to retry the whole fault. */
	if (down_read_trylock(&OCFS2_I(inode)->ip_alloc_sem) == 0) {
		ret = AOP_TRUNCATED_PAGE;
		goto out_meta_unlock;
	}

	/*
	 * i_size might have just been updated as we grabbed the meta lock. We
	 * might now be discovering a truncate that hit on another node.
	 * block_read_full_page->get_block freaks out if it is asked to read
	 * beyond the end of a file, so we check here. Callers
	 * (generic_file_read, vm_ops->fault) are clever enough to check i_size
	 * and notice that the page they just read isn't needed.
	 *
	 * XXX sys_readahead() seems to get that wrong?
	 */
	if (start >= i_size_read(inode)) {
		zero_user_page(page, 0, PAGE_SIZE, KM_USER0);
		SetPageUptodate(page);
		ret = 0;
		goto out_alloc;
	}

	ret = ocfs2_data_lock_with_page(inode, 0, page);
	if (ret != 0) {
		if (ret == AOP_TRUNCATED_PAGE)
			unlock = 0;
		mlog_errno(ret);
		goto out_alloc;
	}

	/* block_read_full_page() always unlocks the page itself. */
	ret = block_read_full_page(page, ocfs2_get_block);
	unlock = 0;

	ocfs2_data_unlock(inode, 0);
out_alloc:
	up_read(&OCFS2_I(inode)->ip_alloc_sem);
out_meta_unlock:
	ocfs2_meta_unlock(inode, 0);
out:
	if (unlock)
		unlock_page(page);
	mlog_exit(ret);
	return ret;
}
/* Build an NFS file handle for a dentry: 3 words (blkno hi/lo +
 * generation) for the inode, plus 3 more for the parent when a
 * connectable handle is requested for a non-directory. Returns the
 * fileid type (1 or 2), or 255 when the buffer is too small. */
static int ocfs2_encode_fh(struct dentry *dentry, u32 *fh_in, int *max_len,
			   int connectable)
{
	struct inode *inode = dentry->d_inode;
	int len = *max_len;
	int type = 1;
	u64 blkno;
	u32 generation;
	__le32 *fh = (__force __le32 *) fh_in;

	mlog_entry("(0x%p, '%.*s', 0x%p, %d, %d)\n", dentry,
		   dentry->d_name.len, dentry->d_name.name,
		   fh, len, connectable);

	/* Plain handle needs 3 words; connectable needs 6. */
	if (len < 3 || (connectable && len < 6)) {
		mlog(ML_ERROR, "fh buffer is too small for encoding\n");
		type = 255;
		goto bail;
	}

	blkno = OCFS2_I(inode)->ip_blkno;
	generation = inode->i_generation;

	mlog(0, "Encoding fh: blkno: %llu, generation: %u\n",
	     (unsigned long long)blkno, generation);

	/* Words 0-2: split 64-bit block number plus generation. */
	len = 3;
	fh[0] = cpu_to_le32((u32)(blkno >> 32));
	fh[1] = cpu_to_le32((u32)(blkno & 0xffffffff));
	fh[2] = cpu_to_le32(generation);

	if (connectable && !S_ISDIR(inode->i_mode)) {
		struct inode *parent;

		/* d_lock keeps d_parent stable while we read it. */
		spin_lock(&dentry->d_lock);

		parent = dentry->d_parent->d_inode;
		blkno = OCFS2_I(parent)->ip_blkno;
		generation = parent->i_generation;

		/* Words 3-5: same layout for the parent directory. */
		fh[3] = cpu_to_le32((u32)(blkno >> 32));
		fh[4] = cpu_to_le32((u32)(blkno & 0xffffffff));
		fh[5] = cpu_to_le32(generation);

		spin_unlock(&dentry->d_lock);

		len = 6;
		type = 2;

		mlog(0, "Encoding parent: blkno: %llu, generation: %u\n",
		     (unsigned long long)blkno, generation);
	}

	*max_len = len;

bail:
	mlog_exit(type);
	return type;
}
/* Declare intent to modify 'bh' within the running transaction,
 * routing to the matching JBD access call. Serialized against buffer
 * I/O on this inode via ip_io_mutex. */
int ocfs2_journal_access(handle_t *handle,
			 struct inode *inode,
			 struct buffer_head *bh,
			 int type)
{
	int status;

	BUG_ON(!inode);
	BUG_ON(!handle);
	BUG_ON(!bh);

	mlog_entry("bh->b_blocknr=%llu, type=%d (\"%s\"), bh->b_size = %zu\n",
		   (unsigned long long)bh->b_blocknr, type,
		   (type == OCFS2_JOURNAL_ACCESS_CREATE) ?
		   "OCFS2_JOURNAL_ACCESS_CREATE" :
		   "OCFS2_JOURNAL_ACCESS_WRITE",
		   bh->b_size);

	/* we can safely remove this assertion after testing. */
	if (!buffer_uptodate(bh)) {
		mlog(ML_ERROR, "giving me a buffer that's not uptodate!\n");
		mlog(ML_ERROR, "b_blocknr=%llu\n",
		     (unsigned long long)bh->b_blocknr);
		BUG();
	}

	/* Set the current transaction information on the inode so
	 * that the locking code knows whether it can drop its locks
	 * on this inode or not. We're protected from the commit
	 * thread updating the current transaction id until
	 * ocfs2_commit_trans() because ocfs2_start_trans() took
	 * j_trans_barrier for us. */
	ocfs2_set_inode_lock_trans(OCFS2_SB(inode->i_sb)->journal, inode);

	mutex_lock(&OCFS2_I(inode)->ip_io_mutex);
	switch (type) {
	case OCFS2_JOURNAL_ACCESS_CREATE:
	case OCFS2_JOURNAL_ACCESS_WRITE:
		status = journal_get_write_access(handle, bh);
		break;

	case OCFS2_JOURNAL_ACCESS_UNDO:
		status = journal_get_undo_access(handle, bh);
		break;

	default:
		status = -EINVAL;
		/* Fixed typo in the error message ("Uknown"). */
		mlog(ML_ERROR, "Unknown access type!\n");
	}
	mutex_unlock(&OCFS2_I(inode)->ip_io_mutex);

	if (status < 0)
		mlog(ML_ERROR, "Error %d getting %d access to buffer!\n",
		     status, type);

	mlog_exit(status);
	return status;
}
/* Search the local alloc window's bitmap for a run of 'numbits'
 * contiguous free (zero) bits. Returns the index of the first bit of
 * the run, or -1 if no such run exists. */
static int ocfs2_local_alloc_find_clear_bits(struct ocfs2_super *osb,
					     struct ocfs2_dinode *alloc,
					     u32 numbits)
{
	int numfound, bitoff, left, startoff, lastzero;
	void *bitmap = NULL;

	mlog_entry("(numbits wanted = %u)\n", numbits);

	/* An empty window can never satisfy the request. */
	if (!alloc->id1.bitmap1.i_total) {
		mlog(0, "No bits in my window!\n");
		bitoff = -1;
		goto bail;
	}

	bitmap = OCFS2_LOCAL_ALLOC(alloc)->la_bitmap;

	numfound = bitoff = startoff = 0;
	lastzero = -1;
	left = le32_to_cpu(alloc->id1.bitmap1.i_total);
	/* 'startoff' is the next expected position of the current run;
	 * a zero found anywhere else restarts the run at that bit.
	 * NOTE(review): 'lastzero' is set but never read here. */
	while ((bitoff = ocfs2_find_next_zero_bit(bitmap, left, startoff)) != -1) {
		if (bitoff == left) {
			/* mlog(0, "bitoff (%d) == left", bitoff); */
			break;
		}
		/* mlog(0, "Found a zero: bitoff = %d, startoff = %d, "
		   "numfound = %d\n", bitoff, startoff, numfound);*/

		/* Ok, we found a zero bit... is it contig. or do we
		 * start over?*/
		if (bitoff == startoff) {
			/* we found a zero */
			numfound++;
			startoff++;
		} else {
			/* got a zero after some ones */
			numfound = 1;
			startoff = bitoff+1;
		}
		/* we got everything we needed */
		if (numfound == numbits) {
			/* mlog(0, "Found it all!\n"); */
			break;
		}
	}

	mlog(0, "Exiting loop, bitoff = %d, numfound = %d\n", bitoff,
	     numfound);

	/* On success, back up to the first bit of the run. */
	if (numfound == numbits)
		bitoff = startoff - numfound;
	else
		bitoff = -1;

bail:
	mlog_exit(bitoff);
	return bitoff;
}
static struct dentry *ocfs2_get_parent(struct dentry *child) { int status; u64 blkno; struct dentry *parent; struct inode *inode; struct inode *dir = child->d_inode; struct buffer_head *dirent_bh = NULL; struct ocfs2_dir_entry *dirent; mlog_entry("(0x%p, '%.*s')\n", child, child->d_name.len, child->d_name.name); mlog(0, "find parent of directory %llu\n", (unsigned long long)OCFS2_I(dir)->ip_blkno); status = ocfs2_meta_lock(dir, NULL, 0); if (status < 0) { if (status != -ENOENT) mlog_errno(status); parent = ERR_PTR(status); goto bail; } status = ocfs2_find_files_on_disk("..", 2, &blkno, dir, &dirent_bh, &dirent); if (status < 0) { parent = ERR_PTR(-ENOENT); goto bail_unlock; } inode = ocfs2_iget(OCFS2_SB(dir->i_sb), blkno, 0); if (IS_ERR(inode)) { mlog(ML_ERROR, "Unable to create inode %llu\n", (unsigned long long)blkno); parent = ERR_PTR(-EACCES); goto bail_unlock; } parent = d_alloc_anon(inode); if (!parent) { iput(inode); parent = ERR_PTR(-ENOMEM); } parent->d_op = &ocfs2_dentry_ops; bail_unlock: ocfs2_meta_unlock(dir, 0); if (dirent_bh) brelse(dirent_bh); bail: mlog_exit_ptr(parent); return parent; }
/* Locking wrapper for __dlm_lockres_calc_usage(): acquires
 * dlm->spinlock then res->spinlock, in that order, as the inner
 * function's assertions require. */
void dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
			    struct dlm_lock_resource *res)
{
	mlog_entry("%.*s\n", res->lockname.len, res->lockname.name);

	spin_lock(&dlm->spinlock);
	spin_lock(&res->spinlock);

	__dlm_lockres_calc_usage(dlm, res);

	spin_unlock(&res->spinlock);
	spin_unlock(&dlm->spinlock);
}
/* Look up (or read in) the inode at disk block 'blkno'. Returns the
 * inode, or an ERR_PTR: -EINVAL for a zero block number, -ENOMEM when
 * iget5_locked() fails, -ESTALE when the on-disk inode is bad. */
struct inode *ocfs2_iget(struct ocfs2_super *osb, u64 blkno, unsigned flags,
			 int sysfile_type)
{
	struct inode *inode = NULL;
	struct super_block *sb = osb->sb;
	struct ocfs2_find_inode_args args;

	mlog_entry("(blkno = %llu)\n", (unsigned long long)blkno);

	/* Ok. By now we've either got the offsets passed to us by the
	 * caller, or we just pulled them off the bh. Lets do some
	 * sanity checks to make sure they're OK. */
	if (blkno == 0) {
		inode = ERR_PTR(-EINVAL);
		mlog_errno(PTR_ERR(inode));
		goto bail;
	}

	args.fi_blkno = blkno;
	args.fi_flags = flags;
	args.fi_ino = ino_from_blkno(sb, blkno);
	args.fi_sysfile_type = sysfile_type;

	inode = iget5_locked(sb, args.fi_ino, ocfs2_find_actor,
			     ocfs2_init_locked_inode, &args);
	/* inode was *not* in the inode cache. 2.6.x requires
	 * us to do our own read_inode call and unlock it
	 * afterwards. */
	if (inode && inode->i_state & I_NEW) {
		mlog(0, "Inode was not in inode cache, reading it.\n");
		ocfs2_read_locked_inode(inode, &args);
		unlock_new_inode(inode);
	}
	if (inode == NULL) {
		inode = ERR_PTR(-ENOMEM);
		mlog_errno(PTR_ERR(inode));
		goto bail;
	}
	if (is_bad_inode(inode)) {
		/* The disk read failed or validation rejected it;
		 * drop the reference and report -ESTALE. */
		iput(inode);
		inode = ERR_PTR(-ESTALE);
		goto bail;
	}

bail:
	if (!IS_ERR(inode)) {
		mlog(0, "returning inode with number %llu\n",
		     (unsigned long long)OCFS2_I(inode)->ip_blkno);
		/* NOTE(review): the exit trace fires only on success,
		 * leaving mlog_entry unbalanced on error returns. */
		mlog_exit_ptr(inode);
	}

	return inode;
}
/*
 * initialize the new inode, but don't do anything that would cause
 * us to sleep.
 * return 0 on success, 1 on failure
 */
static int ocfs2_init_locked_inode(struct inode *inode, void *opaque)
{
	struct ocfs2_find_inode_args *fia = opaque;

	mlog_entry("inode = %p, opaque = %p\n", inode, opaque);

	/* Record the inode number and on-disk block number carried in
	 * by the find-inode arguments. */
	inode->i_ino = fia->fi_ino;
	OCFS2_I(inode)->ip_blkno = fia->fi_blkno;

	mlog_exit(0);
	return 0;
}
/* Note: Because we don't support holes, our allocation has
 * already happened (allocation writes zeros to the file data)
 * so we don't have to worry about ordered writes in
 * ocfs2_writepage.
 *
 * ->writepage is called during the process of invalidating the page cache
 * during blocked lock processing. It can't block on any cluster locks
 * to during block mapping. It's relying on the fact that the block
 * mapping can't have disappeared under the dirty pages that it is
 * being asked to write back.
 */
static int ocfs2_writepage(struct page *page, struct writeback_control *wbc)
{
	int status;

	mlog_entry("(0x%p)\n", page);

	status = block_write_full_page(page, ocfs2_get_block, wbc);

	mlog_exit(status);
	return status;
}
/* Strip the setuid (and conditionally setgid) bits from an inode on
 * write, journalling the mode change into the on-disk dinode. */
static int ocfs2_write_remove_suid(struct inode *inode)
{
	int ret;
	struct buffer_head *bh = NULL;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	handle_t *handle;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_dinode *di;

	mlog_entry("(Inode %llu, mode 0%o)\n",
		   (unsigned long long)oi->ip_blkno, inode->i_mode);

	/* NOTE(review): assumes ocfs2_start_trans() returns NULL on
	 * failure — confirm it does not return ERR_PTR in this tree. */
	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (handle == NULL) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_read_block(osb, oi->ip_blkno, &bh, OCFS2_BH_CACHED, inode);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_trans;
	}

	ret = ocfs2_journal_access(handle, inode, bh,
				   OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_bh;
	}

	/* Always clear setuid; clear setgid only when group-execute is
	 * also set. */
	inode->i_mode &= ~S_ISUID;
	if ((inode->i_mode & S_ISGID) && (inode->i_mode & S_IXGRP))
		inode->i_mode &= ~S_ISGID;

	/* Mirror the new mode into the on-disk inode. */
	di = (struct ocfs2_dinode *) bh->b_data;
	di->i_mode = cpu_to_le16(inode->i_mode);

	ret = ocfs2_journal_dirty(handle, bh);
	if (ret < 0)
		mlog_errno(ret);
out_bh:
	brelse(bh);
out_trans:
	ocfs2_commit_trans(osb, handle);
out:
	mlog_exit(ret);
	return ret;
}
/* Page-fault handler: run filemap_fault() with signals masked so the
 * cluster locking underneath cannot be interrupted. */
static int ocfs2_fault(struct vm_area_struct *area, struct vm_fault *vmf)
{
	sigset_t saved_mask;
	int status;

	mlog_entry("(area=%p, page offset=%lu)\n", area, vmf->pgoff);

	ocfs2_block_signals(&saved_mask);
	status = filemap_fault(area, vmf);
	ocfs2_unblock_signals(&saved_mask);

	mlog_exit_ptr(vmf->page);
	return status;
}
/* * initialize the new inode, but don't do anything that would cause * us to sleep. * return 0 on success, 1 on failure */ static int ocfs2_init_locked_inode(struct inode *inode, void *opaque) { struct ocfs2_find_inode_args *args = opaque; mlog_entry("inode = %p, opaque = %p\n", inode, opaque); inode->i_ino = args->fi_ino; OCFS2_I(inode)->ip_blkno = args->fi_blkno; if (args->fi_sysfile_type != 0) lockdep_set_class(&inode->i_mutex, &ocfs2_sysfile_lock_key[args->fi_sysfile_type]); mlog_exit(0); return 0; }
/* Synchronously write a single, non-journalled buffer to disk and wait
 * for it to complete. Serialized against other buffer I/O on the inode
 * via ip_io_mutex. */
int ocfs2_write_block(struct ocfs2_super *osb, struct buffer_head *bh,
		      struct inode *inode)
{
	int ret = 0;

	mlog_entry("(bh->b_blocknr = %llu, inode=%p)\n",
		   (unsigned long long)bh->b_blocknr, inode);

	BUG_ON(bh->b_blocknr < OCFS2_SUPER_BLOCK_BLKNO);
	BUG_ON(buffer_jbd(bh));

	/* No need to check for a soft readonly file system here. non
	 * journalled writes are only ever done on system files which
	 * can get modified during recovery even if read-only. */
	if (ocfs2_is_hard_readonly(osb)) {
		ret = -EROFS;
		goto out;
	}

	mutex_lock(&OCFS2_I(inode)->ip_io_mutex);

	lock_buffer(bh);
	set_buffer_uptodate(bh);

	/* remove from dirty list before I/O. */
	clear_buffer_dirty(bh);

	get_bh(bh); /* for end_buffer_write_sync() */
	bh->b_end_io = end_buffer_write_sync;
	submit_bh(WRITE, bh);

	wait_on_buffer(bh);

	if (buffer_uptodate(bh)) {
		/* I/O succeeded: record cluster-wide uptodate state. */
		ocfs2_set_buffer_uptodate(inode, bh);
	} else {
		/* We don't need to remove the clustered uptodate
		 * information for this bh as it's not marked locally
		 * uptodate. */
		ret = -EIO;
		put_bh(bh);
	}

	mutex_unlock(&OCFS2_I(inode)->ip_io_mutex);
out:
	mlog_exit(ret);
	return ret;
}
/*
 * Updates a disk inode from a
 * struct inode.
 * Only takes ip_lock.
 */
int ocfs2_mark_inode_dirty(handle_t *handle,
			   struct inode *inode,
			   struct buffer_head *bh)
{
	int status;
	struct ocfs2_dinode *fe = (struct ocfs2_dinode *) bh->b_data;

	mlog_entry("(inode %llu)\n",
		   (unsigned long long)OCFS2_I(inode)->ip_blkno);

	status = ocfs2_journal_access(handle, inode, bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto leave;
	}

	/* ip_lock guards the ocfs2-private fields copied below. */
	spin_lock(&OCFS2_I(inode)->ip_lock);
	fe->i_clusters = cpu_to_le32(OCFS2_I(inode)->ip_clusters);
	ocfs2_get_inode_flags(OCFS2_I(inode));
	fe->i_attr = cpu_to_le32(OCFS2_I(inode)->ip_attr);
	fe->i_dyn_features = cpu_to_le16(OCFS2_I(inode)->ip_dyn_features);
	spin_unlock(&OCFS2_I(inode)->ip_lock);

	/* The remaining fields come straight from the VFS inode. */
	fe->i_size = cpu_to_le64(i_size_read(inode));
	fe->i_links_count = cpu_to_le16(inode->i_nlink);
	fe->i_uid = cpu_to_le32(inode->i_uid);
	fe->i_gid = cpu_to_le32(inode->i_gid);
	fe->i_mode = cpu_to_le16(inode->i_mode);
	fe->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
	fe->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
	fe->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	fe->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	fe->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
	fe->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);

	status = ocfs2_journal_dirty(handle, bh);
	if (status < 0)
		mlog_errno(status);

	status = 0;
leave:
	mlog_exit(status);
	return status;
}
/* Mark a journalled buffer dirty within the running transaction. */
int ocfs2_journal_dirty(handle_t *handle,
			struct buffer_head *bh)
{
	int ret;

	mlog_entry("(bh->b_blocknr=%llu)\n",
		   (unsigned long long)bh->b_blocknr);

	ret = journal_dirty_metadata(handle, bh);
	if (ret < 0)
		mlog(ML_ERROR, "Could not dirty metadata buffer. "
		     "(bh->b_blocknr=%llu)\n",
		     (unsigned long long)bh->b_blocknr);

	mlog_exit(ret);
	return ret;
}
/* Drop one open reference on the inode; the last closer clears the
 * O_DIRECT hint. */
static int ocfs2_file_release(struct inode *inode, struct file *file)
{
	struct ocfs2_inode_info *oi = OCFS2_I(inode);

	mlog_entry("(0x%p, 0x%p, '%.*s')\n", inode, file,
		   file->f_path.dentry->d_name.len,
		   file->f_path.dentry->d_name.name);

	spin_lock(&oi->ip_lock);
	oi->ip_open_count--;
	if (oi->ip_open_count == 0)
		oi->ip_flags &= ~OCFS2_INODE_OPEN_DIRECT;
	spin_unlock(&oi->ip_lock);

	mlog_exit(0);
	return 0;
}
/* ->bmap: map a logical file block to its physical disk block. Returns
 * 0 on error or when no mapping exists (e.g. inline data). */
static sector_t ocfs2_bmap(struct address_space *mapping, sector_t block)
{
	sector_t status;
	u64 p_blkno = 0;
	int err = 0;
	struct inode *inode = mapping->host;

	mlog_entry("(block = %llu)\n", (unsigned long long)block);

	/* We don't need to lock journal system files, since they aren't
	 * accessed concurrently from multiple nodes.
	 */
	if (!INODE_JOURNAL(inode)) {
		err = ocfs2_inode_lock(inode, NULL, 0);
		if (err) {
			if (err != -ENOENT)
				mlog_errno(err);
			goto bail;
		}
		down_read(&OCFS2_I(inode)->ip_alloc_sem);
	}

	/* Inline-data files have no block mapping; leave p_blkno at 0. */
	if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL))
		err = ocfs2_extent_map_get_blocks(inode, block, &p_blkno, NULL,
						  NULL);

	/* Drop the locks in the opposite order they were taken. */
	if (!INODE_JOURNAL(inode)) {
		up_read(&OCFS2_I(inode)->ip_alloc_sem);
		ocfs2_inode_unlock(inode, 0);
	}

	if (err) {
		mlog(ML_ERROR, "get_blocks() failed, block = %llu\n",
		     (unsigned long long)block);
		mlog_errno(err);
		goto bail;
	}

bail:
	/* bmap reports 0 (no mapping) on any error. */
	status = err ? 0 : p_blkno;

	mlog_exit((int)status);
	return status;
}
/* Decode an NFS file handle (3 words for the inode, optionally 3 more
 * for the parent — see ocfs2_encode_fh) and resolve it to a dentry. */
static struct dentry *ocfs2_decode_fh(struct super_block *sb, u32 *fh_in,
				      int fh_len, int fileid_type,
				      int (*acceptable)(void *context,
							struct dentry *de),
				      void *context)
{
	struct ocfs2_inode_handle handle, parent;
	struct dentry *ret = NULL;
	__le32 *fh = (__force __le32 *) fh_in;

	mlog_entry("(0x%p, 0x%p, %d, %d, 0x%p, 0x%p)\n",
		   sb, fh, fh_len, fileid_type, acceptable, context);

	/* Reject short handles and unknown fileid types; type 1 is
	 * inode-only, type 2 carries the parent as well. */
	if (fh_len < 3 || fileid_type > 2)
		goto bail;

	if (fileid_type == 2) {
		if (fh_len < 6)
			goto bail;

		parent.ih_blkno = (u64)le32_to_cpu(fh[3]) << 32;
		parent.ih_blkno |= (u64)le32_to_cpu(fh[4]);
		parent.ih_generation = le32_to_cpu(fh[5]);

		mlog(0, "Decoding parent: blkno: %llu, generation: %u\n",
		     (unsigned long long)parent.ih_blkno,
		     parent.ih_generation);
	}

	handle.ih_blkno = (u64)le32_to_cpu(fh[0]) << 32;
	handle.ih_blkno |= (u64)le32_to_cpu(fh[1]);
	handle.ih_generation = le32_to_cpu(fh[2]);

	/* Fixed copy-paste in the trace: this path decodes a handle. */
	mlog(0, "Decoding fh: blkno: %llu, generation: %u\n",
	     (unsigned long long)handle.ih_blkno, handle.ih_generation);

	ret = ocfs2_export_ops.find_exported_dentry(sb, &handle, &parent,
						    acceptable, context);

bail:
	mlog_exit_ptr(ret);
	return ret;
}
/* iget5 comparison callback: an inode matches when its on-disk block
 * number equals the one we are searching for. Returns 1 on match. */
static int ocfs2_find_actor(struct inode *inode, void *opaque)
{
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	struct ocfs2_find_inode_args *args;
	int match = 0;

	mlog_entry("(0x%p, %lu, 0x%p)\n", inode, inode->i_ino, opaque);

	args = opaque;

	mlog_bug_on_msg(!inode, "No inode in find actor!\n");

	if (oi->ip_blkno == args->fi_blkno)
		match = 1;

	mlog_exit(match);
	return match;
}
static struct dentry *ocfs2_get_parent(struct dentry *child) { int status; u64 blkno; struct dentry *parent; struct inode *dir = child->d_inode; mlog_entry("(0x%p, '%.*s')\n", child, child->d_name.len, child->d_name.name); mlog(0, "find parent of directory %llu\n", (unsigned long long)OCFS2_I(dir)->ip_blkno); status = ocfs2_inode_lock(dir, NULL, 0); if (status < 0) { if (status != -ENOENT) mlog_errno(status); parent = ERR_PTR(status); goto bail; } status = ocfs2_lookup_ino_from_name(dir, "..", 2, &blkno); if (status < 0) { parent = ERR_PTR(-ENOENT); goto bail_unlock; } parent = d_obtain_alias(ocfs2_iget(OCFS2_SB(dir->i_sb), blkno, 0, 0)); if (!IS_ERR(parent)) parent->d_op = &ocfs2_dentry_ops; bail_unlock: ocfs2_inode_unlock(dir, 0); bail: mlog_exit_ptr(parent); return parent; }
/* Page-fault handler: mask signals around filemap_fault() so the
 * cluster locking underneath cannot return -ERESTARTSYS. */
static int ocfs2_fault(struct vm_area_struct *area, struct vm_fault *vmf)
{
	sigset_t full, saved;
	int rc, status;

	mlog_entry("(area=%p, page offset=%lu)\n", area, vmf->pgoff);

	rc = ocfs2_vm_op_block_sigs(&full, &saved);
	if (rc < 0) {
		mlog_errno(rc);
		status = VM_FAULT_SIGBUS;
		goto out;
	}

	status = filemap_fault(area, vmf);

	rc = ocfs2_vm_op_unblock_sigs(&saved);
	if (rc < 0)
		mlog_errno(rc);
out:
	mlog_exit_ptr(vmf->page);
	return status;
}