/*
 * Return the system-file inode for (type, slot), taking a new reference.
 * Inodes of array-cacheable types are cached in osb->system_inodes and
 * seeded on first lookup; the array holds its own reference in addition
 * to the one returned to the caller.  Caller must iput() the result.
 */
struct inode *ocfs2_get_system_file_inode(struct ocfs2_super *osb, int type,
					  u32 slot)
{
	struct inode *inode = NULL;
	struct inode **arr = NULL;

	/* avoid the lookup if cached in local system file array */
	if (is_in_system_inode_array(osb, type, slot))
		arr = &(osb->system_inodes[type]);

	if (arr && ((inode = *arr) != NULL)) {
		/* get a ref in addition to the array ref */
		inode = igrab(inode);
		/* the array's reference keeps the inode live, so igrab()
		 * can never return NULL here */
		BUG_ON(!inode);

		return inode;
	}

	/* this gets one ref thru iget */
	inode = _ocfs2_get_system_file_inode(osb, type, slot);

	/* add one more if putting into array for first time */
	if (arr && inode) {
		*arr = igrab(inode);
		BUG_ON(!*arr);
	}
	return inode;
}
/*
 * If we're not journaling and this is a just-created file, we have to
 * sync our parent directory (if it was freshly created) since
 * otherwise it will only be written by writeback, leaving a huge
 * window during which a crash may lose the file.  This may apply for
 * the parent directory's parent as well, and so on recursively, if
 * they are also freshly created.
 */
static int ext4_sync_parent(struct inode *inode)
{
	struct writeback_control wbc;
	struct dentry *dentry = NULL;
	struct inode *next;
	int ret = 0;

	if (!ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY))
		return 0;
	/* take a reference so the iput() at loop exit is balanced */
	inode = igrab(inode);
	/* walk up the directory chain while each ancestor is new */
	while (ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY)) {
		ext4_clear_inode_state(inode, EXT4_STATE_NEWENTRY);
		dentry = d_find_any_alias(inode);
		if (!dentry)
			break;
		next = igrab(dentry->d_parent->d_inode);
		dput(dentry);
		if (!next)
			break;
		/* drop ref on the child, continue with the parent */
		iput(inode);
		inode = next;
		ret = sync_mapping_buffers(inode->i_mapping);
		if (ret)
			break;
		memset(&wbc, 0, sizeof(wbc));
		wbc.sync_mode = WB_SYNC_ALL;
		wbc.nr_to_write = 0;         /* only write out the inode */
		ret = sync_inode(inode, &wbc);
		if (ret)
			break;
	}
	iput(inode);
	return ret;
}
/*
 * If we're not journaling and this is a just-created file, we have to
 * sync our parent directory (if it was freshly created) since
 * otherwise it will only be written by writeback, leaving a huge
 * window during which a crash may lose the file.  This may apply for
 * the parent directory's parent as well, and so on recursively, if
 * they are also freshly created.
 */
static int ext4_sync_parent(struct inode *inode)
{
	struct dentry *dentry = NULL;
	struct inode *next;
	int ret = 0;

	if (!ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY))
		return 0;
	/* take a reference so the iput() at loop exit is balanced */
	inode = igrab(inode);
	/* walk up the directory chain while each ancestor is new */
	while (ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY)) {
		ext4_clear_inode_state(inode, EXT4_STATE_NEWENTRY);
		dentry = d_find_any_alias(inode);
		if (!dentry)
			break;
		next = igrab(dentry->d_parent->d_inode);
		dput(dentry);
		if (!next)
			break;
		/* drop ref on the child, continue with the parent */
		iput(inode);
		inode = next;
		ret = sync_mapping_buffers(inode->i_mapping);
		if (ret)
			break;
		/* flush just the inode's metadata, not its data pages */
		ret = sync_inode_metadata(inode, 1);
		if (ret)
			break;
	}
	iput(inode);
	return ret;
}
/* like interpose above, but for an already existing dentry */
void unionfs_reinterpose(struct dentry *dentry)
{
	struct dentry *lower_dentry;
	struct inode *inode;
	int bindex, bstart, bend;

	verify_locked(dentry);

	/* This is pre-allocated inode */
	inode = dentry->d_inode;

	bstart = dbstart(dentry);
	bend = dbend(dentry);
	/* fill in any lower-branch inode slots not yet populated */
	for (bindex = bstart; bindex <= bend; bindex++) {
		lower_dentry = unionfs_lower_dentry_idx(dentry, bindex);
		if (!lower_dentry)
			continue;

		if (!lower_dentry->d_inode)
			continue;
		/* already interposed for this branch; skip */
		if (unionfs_lower_inode_idx(inode, bindex))
			continue;
		/* igrab() pins the lower inode for the slot's lifetime */
		unionfs_set_lower_inode_idx(inode, bindex,
					    igrab(lower_dentry->d_inode));
	}
	/* keep the inode's branch bounds in sync with the dentry's */
	ibstart(inode) = dbstart(dentry);
	ibend(inode) = dbend(dentry);
}
/*
 * Create a hard link @dentry in directory @dir to the inode behind
 * @old_dentry.  Returns 0 or a negative errno.
 */
STATIC int
xfs_vn_link(
	struct dentry	*old_dentry,
	struct inode	*dir,
	struct dentry	*dentry)
{
	struct inode	*inode;	/* inode of guy being linked to */
	struct xfs_name	name;
	int		error;

	inode = old_dentry->d_inode;
	xfs_dentry_to_name(&name, dentry);

	/*
	 * Pin the inode for d_instantiate() below; the reference is
	 * dropped via iput() on failure.
	 * NOTE(review): the igrab() return value is ignored — it can be
	 * NULL if the inode is being freed; confirm old_dentry's own
	 * reference makes that impossible here.
	 */
	igrab(inode);
	error = xfs_link(XFS_I(dir), XFS_I(inode), &name);
	if (unlikely(error)) {
		iput(inode);
		return -error;
	}

	xfs_iflags_set(XFS_I(dir), XFS_IMODIFIED);
	xfs_validate_fields(inode);
	d_instantiate(dentry, inode);
	return 0;
}
/*
 * Mark @inode dirty for segment construction, accounting @nr_dirty newly
 * dirtied blocks.  Returns 0 on success, or -EINVAL if the inode is being
 * freed (igrab() failed) and so cannot be queued.
 */
int nilfs_set_file_dirty(struct inode *inode, unsigned nr_dirty)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

	atomic_add(nr_dirty, &nilfs->ns_ndirtyblks);

	/* already marked dirty: nothing more to do */
	if (test_and_set_bit(NILFS_I_DIRTY, &ii->i_state))
		return 0;

	spin_lock(&nilfs->ns_inode_lock);
	if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
	    !test_bit(NILFS_I_BUSY, &ii->i_state)) {
		/*
		 * First time on the dirty list: pin the inode so it
		 * survives until the segment constructor processes it.
		 */
		if (list_empty(&ii->i_dirty) && igrab(inode) == NULL) {
			nilfs_warning(inode->i_sb, __func__,
				      "cannot get inode (ino=%lu)\n",
				      inode->i_ino);
			spin_unlock(&nilfs->ns_inode_lock);
			return -EINVAL;
		}
		list_move_tail(&ii->i_dirty, &nilfs->ns_dirty_files);
		set_bit(NILFS_I_QUEUED, &ii->i_state);
	}
	spin_unlock(&nilfs->ns_inode_lock);
	return 0;
}
/* must be called with pag_ici_lock held and releases it */
/*
 * Decide whether @ip is safe to sync: returns 0 with an extra VFS
 * reference taken (caller must IRELE), or EFSCORRUPTED/ENOENT when the
 * inode should be skipped.  The pag_ici_lock is always dropped on return.
 */
int
xfs_sync_inode_valid(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag)
{
	struct inode		*inode = VFS_I(ip);
	int			error = EFSCORRUPTED;

	/* nothing to sync during shutdown */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		goto out_unlock;

	/* avoid new or reclaimable inodes. Leave for reclaim code to flush */
	error = ENOENT;
	if (xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM))
		goto out_unlock;

	/* If we can't grab the inode, it must on it's way to reclaim. */
	if (!igrab(inode))
		goto out_unlock;

	if (is_bad_inode(inode)) {
		/* drop the reference we just took */
		IRELE(ip);
		goto out_unlock;
	}

	/* inode is valid */
	error = 0;

out_unlock:
	read_unlock(&pag->pag_ici_lock);
	return error;
}
static inline void user_dlm_grab_inode_ref(struct user_lock_res *lockres) { struct inode *inode; inode = user_dlm_inode_from_user_lockres(lockres); if (!igrab(inode)) BUG(); }
/*
 * Translate a user-supplied path into an XFS filesystem handle and copy
 * it to the user's buffer @hanp, storing the handle size through @hlenp.
 * Returns 0 on success or a positive errno.
 *
 * Fixes: the temporary 'name' buffer was leaked when copy_from_user()
 * faulted and when path_init()/path_walk() failed (the old code returned
 * before kmem_free()).
 */
int
dm_path_to_fshdl(
	char		*path,		/* any path name */
	void		*hanp,		/* user's data buffer */
	size_t		*hlenp)		/* set to size of data copied */
{
	/* REFERENCED */
	dm_fsreg_t	*fsrp;
	xfs_handle_t	handle;
	vnode_t		*vp;
	size_t		hlen;
	int		error;
	int		lc;		/* lock cookie */
	struct nameidata nd;
	struct inode	*inode;
	size_t		len;
	char		*name;

	/* XXX get things straightened out so getname() works here? */
	len = strnlen_user(path, 2000);
	name = kmem_alloc(len, KM_SLEEP);
	if (copy_from_user(name, path, len)) {
		kmem_free(name, len);	/* fix: was leaked on fault */
		return(EFAULT);
	}

	error = 0;
	if (path_init(name, LOOKUP_POSITIVE|LOOKUP_FOLLOW, &nd))
		error = path_walk(name, &nd);
	kmem_free(name, len);		/* fix: free on the error path too */
	if (error)
		return error;

	ASSERT(nd.dentry);
	ASSERT(nd.dentry->d_inode);

	/* pin the inode across path_release() */
	inode = igrab(nd.dentry->d_inode);
	path_release(&nd);

	/* we need the vnode */
	vp = LINVFS_GET_VP(inode);

	if (!vp || !vp->v_vfsp->vfs_altfsid) {
		/* we're not in XFS anymore, Toto */
		iput(inode);
		return EINVAL;
	}

	error = dm_vp_to_handle(vp, &handle);
	iput(inode);	/* was VN_RELE(vp); */

	if (error)
		return(error);

	if ((fsrp = dm_find_fsreg_and_lock((fsid_t*)&handle.ha_fsid, &lc)) == NULL)
		return(EBADF);
	mutex_spinunlock(&fsrp->fr_lock, lc);

	hlen = FSHSIZE;
	if (copy_to_user(hanp, &handle, (int)hlen))
		return(EFAULT);
	return(put_user(hlen, hlenp));
}
/*
 * Insert a write request into an inode
 */
static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int error;

	/* reserve radix-tree nodes up front so the insert below can't fail */
	error = radix_tree_preload(GFP_NOFS);
	if (error != 0)
		goto out;

	/* Lock the request! */
	nfs_lock_request_dontget(req);

	spin_lock(&inode->i_lock);
	error = radix_tree_insert(&nfsi->nfs_page_tree, req->wb_index, req);
	BUG_ON(error);
	if (!nfsi->npages) {
		/* first page: pin the inode while writes are outstanding */
		igrab(inode);
		if (nfs_have_delegation(inode, FMODE_WRITE))
			nfsi->change_attr++;
	}
	SetPagePrivate(req->wb_page);
	set_page_private(req->wb_page, (unsigned long)req);
	nfsi->npages++;
	kref_get(&req->wb_kref);
	radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index,
				NFS_PAGE_TAG_LOCKED);
	spin_unlock(&inode->i_lock);
	radix_tree_preload_end();
out:
	return error;
}
/*
 * Look up an entry in a directory.
 *
 * NOTE: '.' and '..' are handled as special cases because
 *	no directory entries are actually stored for them.  If this is
 *	the root of a filesystem, then '.zfs' is also treated as a
 *	special pseudo-directory.
 */
int
zfs_dirlook(znode_t *dzp, char *name, struct inode **ipp, int flags,
    int *deflg, pathname_t *rpnp)
{
	zfs_dirlock_t *dl;
	znode_t *zp;
	int error = 0;
	uint64_t parent;

	if (name[0] == 0 || (name[0] == '.' && name[1] == 0)) {
		/* empty name or "." refers to the directory itself */
		*ipp = ZTOI(dzp);
		igrab(*ipp);
	} else if (name[0] == '.' && name[1] == '.' && name[2] == 0) {
		zfs_sb_t *zsb = ZTOZSB(dzp);

		/*
		 * If we are a snapshot mounted under .zfs, return
		 * the vp for the snapshot directory.
		 */
		if ((error = sa_lookup(dzp->z_sa_hdl, SA_ZPL_PARENT(zsb),
		    &parent, sizeof (parent))) != 0)
			return (error);
#ifdef HAVE_SNAPSHOT
		if (parent == dzp->z_id && zsb->z_parent != zsb) {
			error = zfsctl_root_lookup(zsb->z_parent->z_ctldir,
			    "snapshot", ipp, NULL, 0, NULL, kcred,
			    NULL, NULL, NULL);
			return (error);
		}
#endif /* HAVE_SNAPSHOT */
		/* hold z_parent_lock so the parent link can't change under us */
		rw_enter(&dzp->z_parent_lock, RW_READER);
		error = zfs_zget(zsb, parent, &zp);
		if (error == 0)
			*ipp = ZTOI(zp);
		rw_exit(&dzp->z_parent_lock);
#ifdef HAVE_SNAPSHOT
	} else if (zfs_has_ctldir(dzp) && strcmp(name, ZFS_CTLDIR_NAME) == 0) {
		*ipp = zfsctl_root(dzp);
#endif /* HAVE_SNAPSHOT */
	} else {
		int zf;

		/* regular entry: must exist; allow shared dirlocks */
		zf = ZEXISTS | ZSHARED;
		if (flags & FIGNORECASE)
			zf |= ZCILOOK;

		error = zfs_dirent_lock(&dl, dzp, name, &zp, zf, deflg, rpnp);
		if (error == 0) {
			*ipp = ZTOI(zp);
			zfs_dirent_unlock(dl);
			dzp->z_zn_prefetch = B_TRUE; /* enable prefetching */
		}
		/* zfs_dirent_lock() already filled rpnp; avoid re-copy below */
		rpnp = NULL;
	}

	if ((flags & FIGNORECASE) && rpnp && !error)
		(void) strlcpy(rpnp->pn_buf, name, rpnp->pn_bufsize);

	return (error);
}
/*
 * Allocate and initialize a new dlmfs inode under @parent for @dentry.
 * Only regular files and directories are supported.  Returns NULL if
 * inode allocation fails.
 */
static struct inode *dlmfs_get_inode(struct inode *parent,
				     struct dentry *dentry,
				     int mode)
{
	struct super_block *sb = parent->i_sb;
	struct inode * inode = new_inode(sb);
	struct dlmfs_inode_private *ip;

	if (!inode)
		return NULL;

	inode->i_ino = get_next_ino();
	inode->i_mode = mode;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_mapping->backing_dev_info = &dlmfs_backing_dev_info;
	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;

	ip = DLMFS_I(inode);
	/* child locks live on the same cluster connection as the parent */
	ip->ip_conn = DLMFS_I(parent)->ip_conn;

	switch (mode & S_IFMT) {
	default:
		/* for now we don't support anything other than
		 * directories and regular files. */
		BUG();
		break;
	case S_IFREG:
		inode->i_op = &dlmfs_file_inode_operations;
		inode->i_fop = &dlmfs_file_operations;

		i_size_write(inode,  DLM_LVB_LEN);

		user_dlm_lock_res_init(&ip->ip_lockres, dentry);

		/* released at clear_inode time, this insures that we
		 * get to drop the dlm reference on each lock *before*
		 * we call the unregister code for releasing parent
		 * directories. */
		ip->ip_parent = igrab(parent);
		BUG_ON(!ip->ip_parent);
		break;
	case S_IFDIR:
		inode->i_op = &dlmfs_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;

		/* directory inodes start off with i_nlink ==
		 * 2 (for "." entry) */
		inc_nlink(inode);
		break;
	}

	/* honor setgid directories: inherit group and, for dirs, S_ISGID */
	if (parent->i_mode & S_ISGID) {
		inode->i_gid = parent->i_gid;
		if (S_ISDIR(mode))
			inode->i_mode |= S_ISGID;
	}

	return inode;
}
/*
 * Given a root znode, retrieve the associated .zfs directory.
 * Add a hold to the vnode and return it.
 */
struct inode *
zfsctl_root(znode_t *zp)
{
	struct inode *ctldir;

	ASSERT(zfs_has_ctldir(zp));
	ctldir = ZTOZSB(zp)->z_ctldir;
	igrab(ctldir);
	return (ctldir);
}
/*
 * VFS ->link hook: create hard link @dentry in @dir to the inode behind
 * @old_dentry.  Returns 0 or a negative errno.
 */
static int
zpl_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
{
	cred_t *cr = CRED();
	struct inode *ip = old_dentry->d_inode;
	int error;

	if (ip->i_nlink >= ZFS_LINK_MAX)
		return -EMLINK;

	crhold(cr);
	ip->i_ctime = CURRENT_TIME_SEC;
	/* pin the inode for d_instantiate(); iput() on failure below */
	igrab(ip); /* Use ihold() if available */

	/* zfs_link() returns a positive errno; negate for the VFS */
	error = -zfs_link(dir, ip, dname(dentry), cr);
	if (error) {
		iput(ip);
		goto out;
	}

	d_instantiate(dentry, ip);
out:
	crfree(cr);
	ASSERT3S(error, <=, 0);

	return (error);
}
/*
 * Mark @inode dirty for segment construction, accounting @nr_dirty newly
 * dirtied blocks.  Returns 0 on success, or -EINVAL when the inode is
 * being freed and cannot be queued.
 */
int nilfs_set_file_dirty(struct inode *inode, unsigned nr_dirty)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

	atomic_add(nr_dirty, &nilfs->ns_ndirtyblks);

	/* already marked dirty: nothing more to do */
	if (test_and_set_bit(NILFS_I_DIRTY, &ii->i_state))
		return 0;

	spin_lock(&nilfs->ns_inode_lock);
	if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
	    !test_bit(NILFS_I_BUSY, &ii->i_state)) {
		/* Because this routine may race with nilfs_dispose_list(),
		   we have to check NILFS_I_QUEUED here, too. */
		if (list_empty(&ii->i_dirty) && igrab(inode) == NULL) {
			/* This will happen when somebody is freeing
			   this inode. */
			nilfs_warning(inode->i_sb, __func__,
				      "cannot get inode (ino=%lu)\n",
				      inode->i_ino);
			spin_unlock(&nilfs->ns_inode_lock);
			return -EINVAL; /* NILFS_I_DIRTY may remain for
					   freeing inode */
		}
		list_move_tail(&ii->i_dirty, &nilfs->ns_dirty_files);
		set_bit(NILFS_I_QUEUED, &ii->i_state);
	}
	spin_unlock(&nilfs->ns_inode_lock);
	return 0;
}
/*
 * VFS ->link hook: create hard link @dentry in @dir to the inode behind
 * @old_dentry, with filesystem-transaction marking around the ZFS call.
 * Returns 0 or a negative errno.
 */
static int
zpl_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
{
	cred_t *cr = CRED();
	struct inode *ip = old_dentry->d_inode;
	int error;
	fstrans_cookie_t cookie;

	if (ip->i_nlink >= ZFS_LINK_MAX)
		return (-EMLINK);

	crhold(cr);
	ip->i_ctime = CURRENT_TIME_SEC;
	/* pin the inode for d_instantiate(); released on failure below */
	igrab(ip); /* Use ihold() if available */

	cookie = spl_fstrans_mark();
	/* zfs_link() returns a positive errno; negate for the VFS */
	error = -zfs_link(dir, ip, dname(dentry), cr);
	if (error) {
		/* VN_RELE is the ZFS compatibility wrapper that drops the
		 * reference taken above */
		VN_RELE(ip);
		goto out;
	}

	d_instantiate(dentry, ip);
out:
	spl_fstrans_unmark(cookie);
	crfree(cr);
	ASSERT3S(error, <=, 0);

	return (error);
}
/**
 * write_page_by_ent - ask entd thread to flush this page as part of slum
 * @page: page to be written
 * @wbc: writeback control passed to reiser4_writepage
 *
 * Creates a request, puts it on entd list of requests, wakeups entd if
 * necessary, waits until entd completes with the request.
 */
int write_page_by_ent(struct page *page, struct writeback_control *wbc)
{
	struct super_block *sb;
	struct inode *inode;
	entd_context *ent;
	struct wbq rq;

	assert("", PageLocked(page));
	assert("", page->mapping != NULL);

	sb = page->mapping->host->i_sb;
	ent = get_entd_context(sb);
	assert("", ent && ent->done == 0);

	/*
	 * we are going to unlock page and ask ent thread to write the
	 * page. Re-dirty page before unlocking so that if ent thread fails to
	 * write it - it will remain dirty
	 */
	set_page_dirty_notag(page);

	/*
	 * pin inode in memory, unlock page, entd_flush will iput. We can not
	 * iput here becasue we can not allow delete_inode to be called here
	 */
	inode = igrab(page->mapping->host);
	unlock_page(page);
	if (inode == NULL)
		/* inode is getting freed */
		return 0;

	/* init wbq */
	INIT_LIST_HEAD(&rq.link);
	rq.magic = WBQ_MAGIC;
	rq.wbc = wbc;
	rq.page = page;
	rq.mapping = inode->i_mapping;
	rq.node = NULL;
	rq.written = 0;
	init_completion(&rq.completion);

	/* add request to entd's list of writepage requests */
	spin_lock(&ent->guard);
	ent->nr_todo_reqs++;
	list_add_tail(&rq.link, &ent->todo_list);
	/* wake entd only when it was previously idle */
	if (ent->nr_todo_reqs == 1)
		wake_up_process(ent->tsk);

	spin_unlock(&ent->guard);

	/* wait until entd finishes */
	wait_for_completion(&rq.completion);

	if (rq.written)
		/* Eventually ENTD has written the page to disk. */
		return 0;
	return 0;
}
/*
 * Debug wrapper around igrab(): bump the iget counters, log the call
 * site (@line, @file), then forward to igrab().
 */
struct inode *unionfs_igrab(struct inode *inode, int line, char *file)
{
	int icount = 0;

	atomic_inc(&unionfs_iget_counter);
	if (inode) {
		atomic_inc(&unionfs_igets_outstanding);
		icount = atomic_read(&inode->i_count);
	}
	printk("IR:%d:%d:%d:%p:%d:%s\n",
	       atomic_read(&unionfs_iget_counter),
	       atomic_read(&unionfs_igets_outstanding),
	       icount, inode, line, file);
	return igrab(inode);
}
/*
 * SETLAYOUT ioctl: validate the user-supplied file layout and send a
 * CEPH_MDS_OP_SETLAYOUT request to the MDS.  Returns 0 or a negative errno.
 */
static long ceph_ioctl_set_layout(struct file *file, void __user *arg)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct inode *parent_inode = file->f_dentry->d_parent->d_inode;
	struct ceph_mds_client *mdsc = &ceph_sb_to_client(inode->i_sb)->mdsc;
	struct ceph_mds_request *req;
	struct ceph_ioctl_layout l;
	int err, i;

	/* copy and validate */
	if (copy_from_user(&l, arg, sizeof(l)))
		return -EFAULT;

	/* object size and stripe unit must be page-aligned, stripe unit
	 * non-zero, and object size a multiple of stripe unit */
	if ((l.object_size & ~PAGE_MASK) ||
	    (l.stripe_unit & ~PAGE_MASK) ||
	    !l.stripe_unit ||
	    (l.object_size &&
	     (unsigned)l.object_size % (unsigned)l.stripe_unit))
		return -EINVAL;

	/* make sure it's a valid data pool */
	if (l.data_pool > 0) {
		mutex_lock(&mdsc->mutex);
		err = -EINVAL;
		for (i = 0; i < mdsc->mdsmap->m_num_data_pg_pools; i++)
			if (mdsc->mdsmap->m_data_pg_pools[i] == l.data_pool) {
				err = 0;
				break;
			}
		mutex_unlock(&mdsc->mutex);
		if (err)
			return err;
	}

	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETLAYOUT,
				       USE_AUTH_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);
	/* the request owns a reference on the inode until it is put */
	req->r_inode = igrab(inode);
	req->r_inode_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_EXCL;

	req->r_args.setlayout.layout.fl_stripe_unit =
		cpu_to_le32(l.stripe_unit);
	req->r_args.setlayout.layout.fl_stripe_count =
		cpu_to_le32(l.stripe_count);
	req->r_args.setlayout.layout.fl_object_size =
		cpu_to_le32(l.object_size);
	req->r_args.setlayout.layout.fl_pg_pool = cpu_to_le32(l.data_pool);
	req->r_args.setlayout.layout.fl_pg_preferred =
		cpu_to_le32(l.preferred_osd);

	err = ceph_mdsc_do_request(mdsc, parent_inode, req);
	ceph_mdsc_put_request(req);
	return err;
}
/* set lower inode ptr and update bstart & bend if necessary */ static void __set_inode(struct dentry *upper, struct dentry *lower, int bindex) { unionfs_set_lower_inode_idx(upper->d_inode, bindex, igrab(lower->d_inode)); if (likely(ibstart(upper->d_inode) > bindex)) ibstart(upper->d_inode) = bindex; if (likely(ibend(upper->d_inode) < bindex)) ibend(upper->d_inode) = bindex; }
/*
 * Record a size-change llog record for @inode under @ioepoch, unless a
 * record for the same or a newer epoch has already been written.  The
 * first record for an inode allocates per-inode filter data and pins the
 * inode via igrab().  Returns 0 on success or a negative errno.
 */
int filter_log_sz_change(struct llog_handle *cathandle,
                         struct ll_fid *mds_fid,
                         __u32 ioepoch,
                         struct llog_cookie *logcookie,
                         struct inode *inode)
{
        struct llog_size_change_rec *lsc;
        int rc;
        struct ost_filterdata *ofd;
        ENTRY;

        LOCK_INODE_MUTEX(inode);
        ofd = INODE_PRIVATE_DATA(inode);

        if (ofd && ofd->ofd_epoch >= ioepoch) {
                /* already logged for this epoch; a strictly older epoch
                 * from the client is an error worth reporting */
                if (ofd->ofd_epoch > ioepoch)
                        CERROR("client sent old epoch %d for obj ino %ld\n",
                               ioepoch, inode->i_ino);
                UNLOCK_INODE_MUTEX(inode);
                RETURN(0);
        }

        if (ofd && ofd->ofd_epoch < ioepoch) {
                ofd->ofd_epoch = ioepoch;
        } else if (!ofd) {
                OBD_ALLOC(ofd, sizeof(*ofd));
                if (!ofd)
                        GOTO(out, rc = -ENOMEM);
                /* pin the inode for as long as the private data lives */
                igrab(inode);
                INODE_PRIVATE_DATA(inode) = ofd;
                ofd->ofd_epoch = ioepoch;
        }
        /* the decision to write a record is now made, unlock */
        UNLOCK_INODE_MUTEX(inode);

        OBD_ALLOC(lsc, sizeof(*lsc));
        if (lsc == NULL)
                RETURN(-ENOMEM);
        lsc->lsc_hdr.lrh_len = lsc->lsc_tail.lrt_len = sizeof(*lsc);
        lsc->lsc_hdr.lrh_type =  OST_SZ_REC;
        lsc->lsc_fid = *mds_fid;
        lsc->lsc_ioepoch = ioepoch;

        rc = llog_cat_add_rec(cathandle, &lsc->lsc_hdr, logcookie, NULL);
        OBD_FREE(lsc, sizeof(*lsc));

        /* llog_cat_add_rec() returning the cookie size means success */
        if (rc > 0) {
                LASSERT(rc == sizeof(*logcookie));
                rc = 0;
        }

        out:
        RETURN(rc);
}
/*
 * convert connectable fh to dentry
 */
static struct dentry *__cfh_to_dentry(struct super_block *sb,
				      struct ceph_nfs_confh *cfh)
{
	struct ceph_mds_client *mdsc = ceph_sb_to_client(sb)->mdsc;
	struct inode *inode;
	struct dentry *dentry;
	struct ceph_vino vino;
	int err;

	dout("__cfh_to_dentry %llx (%llx/%x)\n",
	     cfh->ino, cfh->parent_ino, cfh->parent_name_hash);

	vino.ino = cfh->ino;
	vino.snap = CEPH_NOSNAP;
	inode = ceph_find_inode(sb, vino);
	if (!inode) {
		/* not in cache: ask the MDS to look the inode up by its
		 * parent ino + name hash */
		struct ceph_mds_request *req;

		req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LOOKUPHASH,
					       USE_ANY_MDS);
		if (IS_ERR(req))
			return ERR_CAST(req);

		req->r_ino1 = vino;
		req->r_ino2.ino = cfh->parent_ino;
		req->r_ino2.snap = CEPH_NOSNAP;
		/* name hash is transmitted as a decimal string in path2
		 * NOTE(review): kmalloc() result is not checked — confirm
		 * GFP_NOFS failure cannot happen or is handled downstream */
		req->r_path2 = kmalloc(16, GFP_NOFS);
		snprintf(req->r_path2, 16, "%d", cfh->parent_name_hash);
		req->r_num_caps = 1;
		err = ceph_mdsc_do_request(mdsc, NULL, req);
		inode = req->r_target_inode;
		if (inode)
			/* take our own ref before the request drops its */
			igrab(inode);
		ceph_mdsc_put_request(req);
		if (!inode)
			return ERR_PTR(err ? err : -ESTALE);
	}

	dentry = d_obtain_alias(inode);
	if (IS_ERR(dentry)) {
		pr_err("cfh_to_dentry %llx -- inode %p but ENOMEM\n",
		       cfh->ino, inode);
		iput(inode);
		return dentry;
	}
	err = ceph_init_dentry(dentry);
	if (err < 0) {
		iput(inode);
		return ERR_PTR(err);
	}
	dout("__cfh_to_dentry %llx %p dentry %p\n", cfh->ino, inode, dentry);
	return dentry;
}
/* * Write an mmapped page to the server. */ int nfs_writepage(struct page *page) { struct inode *inode = page->mapping->host; unsigned long end_index; unsigned offset = PAGE_CACHE_SIZE; int inode_referenced = 0; int err; /* * Note: We need to ensure that we have a reference to the inode * if we are to do asynchronous writes. If not, waiting * in nfs_wait_on_request() may deadlock with clear_inode(). * * If igrab() fails here, then it is in any case safe to * call nfs_wb_page(), since there will be no pending writes. */ if (igrab(inode) != 0) inode_referenced = 1; end_index = inode->i_size >> PAGE_CACHE_SHIFT; /* Ensure we've flushed out any previous writes */ nfs_wb_page(inode,page); /* easy case */ if (page->index < end_index) goto do_it; /* things got complicated... */ offset = inode->i_size & (PAGE_CACHE_SIZE-1); /* OK, are we completely out? */ err = -EIO; if (page->index >= end_index+1 || !offset) goto out; do_it: lock_kernel(); if (NFS_SERVER(inode)->wsize >= PAGE_CACHE_SIZE && !IS_SYNC(inode) && inode_referenced) { err = nfs_writepage_async(NULL, inode, page, 0, offset); if (err >= 0) err = 0; } else { err = nfs_writepage_sync(NULL, inode, page, 0, offset); if (err == offset) err = 0; } unlock_kernel(); out: UnlockPage(page); if (inode_referenced) iput(inode); return err; }
/*
 * Create a shadow directory for @kobj's sysfs directory: a second dentry
 * (named "_SHADOW_") instantiated against the same shadowed inode.
 * Returns the new sysfs_dirent or an ERR_PTR.
 */
struct sysfs_dirent *sysfs_create_shadow_dir(struct kobject *kobj)
{
	struct sysfs_dirent *parent_sd = kobj->sd->s_parent;
	struct dentry *dir, *parent, *shadow;
	struct inode *inode;
	struct sysfs_dirent *sd;
	struct sysfs_addrm_cxt acxt;

	dir = sysfs_get_dentry(kobj->sd);
	if (IS_ERR(dir)) {
		sd = (void *)dir;
		goto out;
	}
	parent = dir->d_parent;

	inode = dir->d_inode;
	sd = ERR_PTR(-EINVAL);
	/* only inodes marked as shadowed may grow shadow dentries */
	if (!sysfs_is_shadowed_inode(inode))
		goto out_dput;

	shadow = d_alloc(parent, &dir->d_name);
	if (!shadow)
		goto nomem;

	sd = sysfs_new_dirent("_SHADOW_", inode->i_mode, SYSFS_DIR);
	if (!sd)
		goto nomem;
	sd->s_elem.dir.kobj = kobj;

	sysfs_addrm_start(&acxt, parent_sd);

	/* add but don't link into children list */
	sysfs_add_one(&acxt, sd);

	/* attach and instantiate dentry */
	sysfs_attach_dentry(sd, shadow);
	/* igrab() gives the shadow dentry its own inode reference */
	d_instantiate(shadow, igrab(inode));
	inc_nlink(inode);	/* tj: synchronization? */

	sysfs_addrm_finish(&acxt);

	dget(shadow);		/* Extra count - pin the dentry in core */

	goto out_dput;

 nomem:
	dput(shadow);
	sd = ERR_PTR(-ENOMEM);
 out_dput:
	dput(dir);
 out:
	return sd;
}
/*
 * When called with struct file pointer set to NULL, there is no way we could
 * update file->private_data, but getting it stuck on openFileList provides a
 * way to access it from cifs_fill_filedata and thereby set file->private_data
 * from cifs_open.
 */
struct cifsFileInfo *
cifs_new_fileinfo(struct inode *newinode, __u16 fileHandle,
		  struct file *file, struct vfsmount *mnt, unsigned int oflags)
{
	int oplock = 0;
	struct cifsFileInfo *pCifsFile;
	struct cifsInodeInfo *pCifsInode;
	struct cifs_sb_info *cifs_sb = CIFS_SB(mnt->mnt_sb);

	pCifsFile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (pCifsFile == NULL)
		return pCifsFile;

	if (oplockEnabled)
		oplock = REQ_OPLOCK;

	pCifsFile->netfid = fileHandle;
	pCifsFile->pid = current->tgid;
	/* the fileinfo holds its own reference on the inode */
	pCifsFile->pInode = igrab(newinode);
	pCifsFile->mnt = mnt;
	pCifsFile->pfile = file;
	pCifsFile->invalidHandle = false;
	pCifsFile->closePend = false;
	mutex_init(&pCifsFile->fh_mutex);
	mutex_init(&pCifsFile->lock_mutex);
	INIT_LIST_HEAD(&pCifsFile->llist);
	atomic_set(&pCifsFile->count, 1);
	slow_work_init(&pCifsFile->oplock_break, &cifs_oplock_break_ops);

	write_lock(&GlobalSMBSeslock);
	list_add(&pCifsFile->tlist, &cifs_sb->tcon->openFileList);
	pCifsInode = CIFS_I(newinode);
	if (pCifsInode) {
		/* if readable file instance put first in list*/
		if (oflags & FMODE_READ)
			list_add(&pCifsFile->flist, &pCifsInode->openFileList);
		else
			list_add_tail(&pCifsFile->flist,
				      &pCifsInode->openFileList);

		/* record granted oplock level on the inode */
		if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
			pCifsInode->clientCanCacheAll = true;
			pCifsInode->clientCanCacheRead = true;
			cFYI(1, "Exclusive Oplock inode %p", newinode);
		} else if ((oplock & 0xF) == OPLOCK_READ)
			pCifsInode->clientCanCacheRead = true;
	}
	write_unlock(&GlobalSMBSeslock);

	/* NOTE(review): file is dereferenced unconditionally here, yet the
	 * header comment says file may be NULL — confirm callers */
	file->private_data = pCifsFile;

	return pCifsFile;
}
/*
 * Add a reference to a referenced vnode.
 */
bhv_vnode_t *
vn_hold(
	bhv_vnode_t	*vp)
{
	struct inode	*ip;

	XFS_STATS_INC(vn_hold);

	ip = igrab(vn_to_inode(vp));
	/* caller guarantees the vnode is already referenced, so igrab()
	 * must succeed */
	ASSERT(ip);

	return vp;
}
/*
 * Populate @inode's per-branch lower-inode slots from @dentry's lower
 * dentries, then initialize its ops and copy attributes from the first
 * (leftmost) lower branch.
 */
static void unionfs_fill_inode(struct dentry *dentry,
			       struct inode *inode)
{
	struct inode *lower_inode;
	struct dentry *lower_dentry;
	int bindex, bstart, bend;

	bstart = dbstart(dentry);
	bend = dbend(dentry);
	for (bindex = bstart; bindex <= bend; bindex++) {
		lower_dentry = unionfs_lower_dentry_idx(dentry, bindex);
		if (!lower_dentry) {
			unionfs_set_lower_inode_idx(inode, bindex, NULL);
			continue;
		}

		/* Initialize the lower inode to the new lower inode. */
		if (!lower_dentry->d_inode)
			continue;

		/* igrab() pins the lower inode for the slot's lifetime */
		unionfs_set_lower_inode_idx(inode, bindex,
					    igrab(lower_dentry->d_inode));
	}

	ibstart(inode) = dbstart(dentry);
	ibend(inode) = dbend(dentry);

	/* Use attributes from the first branch. */
	lower_inode = unionfs_lower_inode(inode);

	/* Use different set of inode ops for symlinks & directories */
	if (S_ISLNK(lower_inode->i_mode))
		inode->i_op = &unionfs_symlink_iops;
	else if (S_ISDIR(lower_inode->i_mode))
		inode->i_op = &unionfs_dir_iops;

	/* Use different set of file ops for directories */
	if (S_ISDIR(lower_inode->i_mode))
		inode->i_fop = &unionfs_dir_fops;

	/* properly initialize special inodes */
	if (S_ISBLK(lower_inode->i_mode) || S_ISCHR(lower_inode->i_mode) ||
	    S_ISFIFO(lower_inode->i_mode) || S_ISSOCK(lower_inode->i_mode))
		init_special_inode(inode, lower_inode->i_mode,
				   lower_inode->i_rdev);

	/* all well, copy inode attributes */
	unionfs_copy_attr_all(inode, lower_inode);
	fsstack_copy_inode_size(inode, lower_inode);
}
/*
 * Find a parent directory inode number for @inode via one of its dentry
 * aliases.
 *
 * NOTE(review): this function appears truncated in this copy — the
 * spin_lock(&inode->i_lock) taken below is never released, and there is
 * no return statement or closing brace for the function.  Confirm
 * against the original source before relying on this snippet.
 */
static int get_parent_ino(struct inode *inode, nid_t *pino)
{
	struct dentry *dentry;

	/* pin the inode while we walk its alias list */
	inode = igrab(inode);
	/* Alex - the following is equivalent to:
	   dentry = d_find_any_alias(inode); */
	dentry = NULL;
	spin_lock(&inode->i_lock);
	if (!list_empty(&inode->i_dentry)) {
		dentry = list_first_entry(&inode->i_dentry,
					  struct dentry, d_alias);
		dget(dentry);
	}
/*
 * If we're not journaling and this is a just-created file, we have to
 * sync our parent directory (if it was freshly created) since
 * otherwise it will only be written by writeback, leaving a huge
 * window during which a crash may lose the file.  This may apply for
 * the parent directory's parent as well, and so on recursively, if
 * they are also freshly created.
 */
static int ext4_sync_parent(struct inode *inode)
{
	struct dentry *dentry = NULL;
	struct inode *next;
	int ret = 0;

	if (!ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY))
		return 0;
	/* take a reference so the iput() at loop exit is balanced */
	inode = igrab(inode);
	/* walk up the directory chain while each ancestor is new */
	while (ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY)) {
		ext4_clear_inode_state(inode, EXT4_STATE_NEWENTRY);
		dentry = d_find_any_alias(inode);
		if (!dentry)
			break;
		next = igrab(d_inode(dentry->d_parent));
		dput(dentry);
		if (!next)
			break;
		/* drop ref on the child, continue with the parent */
		iput(inode);
		inode = next;
		/*
		 * The directory inode may have gone through rmdir by now. But
		 * the inode itself and its blocks are still allocated (we hold
		 * a reference to the inode so it didn't go through
		 * ext4_evict_inode()) and so we are safe to flush metadata
		 * blocks and the inode.
		 */
		ret = sync_mapping_buffers(inode->i_mapping);
		if (ret)
			break;
		ret = sync_inode_metadata(inode, 1);
		if (ret)
			break;
	}
	iput(inode);
	return ret;
}
/*
 * Add a reference to a referenced vnode.
 */
struct vnode *
vn_hold(
	struct vnode	*vp)
{
	struct inode	*ip;

	XFS_STATS_INC(vn_hold);

	VN_LOCK(vp);
	ip = igrab(LINVFS_GET_IP(vp));
	/* caller guarantees the vnode is already referenced, so igrab()
	 * must succeed */
	ASSERT(ip);
	VN_UNLOCK(vp, 0);

	return vp;
}