static void loadpkgdata(char *file, char *pkg, char *data, int len) { char *p, *ep, *prefix, *name, *def; Import *x; file = strdup(file); p = data; ep = data + len; while(parsepkgdata(file, pkg, &p, ep, &prefix, &name, &def) > 0) { x = ilookup(name); if(x->prefix == nil) { x->prefix = prefix; x->def = def; x->file = file; } else if(strcmp(x->prefix, prefix) != 0) { fprint(2, "%s: conflicting definitions for %s\n", argv0, name); fprint(2, "%s:\t%s %s ...\n", x->file, x->prefix, name); fprint(2, "%s:\t%s %s ...\n", file, prefix, name); nerrors++; } else if(strcmp(x->def, def) != 0) { fprint(2, "%s: conflicting definitions for %s\n", argv0, name); fprint(2, "%s:\t%s %s %s\n", x->file, x->prefix, name, x->def); fprint(2, "%s:\t%s %s %s\n", file, prefix, name, def); nerrors++; } } }
/* drop all shared dentries from other superblocks */
void sdcardfs_drop_sb_icache(struct super_block *sb, unsigned long ino)
{
	/* ilookup() only finds an already-cached inode and returns it with
	 * a reference held, which we must iput() on every path below. */
	struct inode *inode = ilookup(sb, ino);
	struct dentry *dentry, *dir_dentry;

	if (!inode)
		return;

	dentry = d_find_any_alias(inode);
	if (!dentry) {
		/* no alias to drop; just release the ilookup() reference */
		iput(inode);
		return;
	}

	/* lock the parent directory first, then the inode itself */
	dir_dentry = lock_parent(dentry);
	mutex_lock(&inode->i_mutex);
	/* resync our link count from the lower (backing) inode */
	set_nlink(inode, sdcardfs_lower_inode(inode)->i_nlink);
	d_drop(dentry);
	dont_mount(dentry);
	mutex_unlock(&inode->i_mutex);
	/* We don't d_delete() NFS sillyrenamed files--they still exist. */
	if (!(dentry->d_flags & DCACHE_NFSFS_RENAMED)) {
		fsnotify_link_count(inode);
		d_delete(dentry);
	}
	unlock_dir(dir_dentry);
	dput(dentry);	/* reference from d_find_any_alias() */
	iput(inode);	/* reference from ilookup() */
}
/*
 * Handle the "is this branch inode busy" query: look up the aufs inode
 * named by arg->ino, and if arg->bindex falls inside its branch range,
 * report the backing branch inode number via arg->h_ino (0 = invalid).
 * Requires CAP_SYS_ADMIN; returns 0 or a negative errno.
 */
static int au_ibusy(struct super_block *sb, struct aufs_ibusy __user *arg)
{
	int err;
	aufs_bindex_t bstart, bend;
	struct aufs_ibusy ibusy;
	struct inode *inode, *h_inode;

	err = -EPERM;
	if (unlikely(!capable(CAP_SYS_ADMIN)))
		goto out;

	/* copy the request in, and verify up-front that we will be able
	 * to write the answer back to arg->h_ino */
	err = copy_from_user(&ibusy, arg, sizeof(ibusy));
	if (!err)
		err = !access_ok(VERIFY_WRITE, &arg->h_ino, sizeof(arg->h_ino));
	if (unlikely(err)) {
		err = -EFAULT;
		AuTraceErr(err);
		goto out;
	}

	err = -EINVAL;
	si_read_lock(sb, AuLock_FLUSH);
	if (unlikely(ibusy.bindex < 0 || ibusy.bindex > au_sbend(sb)))
		goto out_unlock;

	err = 0;
	ibusy.h_ino = 0; /* invalid */
	/* ilookup() only finds cached inodes; a miss means "not busy" */
	inode = ilookup(sb, ibusy.ino);
	if (!inode || inode->i_ino == AUFS_ROOT_INO || is_bad_inode(inode))
		goto out_unlock;

	ii_read_lock_child(inode);
	bstart = au_ibstart(inode);
	bend = au_ibend(inode);
	if (bstart <= ibusy.bindex && ibusy.bindex <= bend) {
		h_inode = au_h_iptr(inode, ibusy.bindex);
		if (h_inode && au_test_ibusy(inode, bstart, bend))
			ibusy.h_ino = h_inode->i_ino;
	}
	ii_read_unlock(inode);
	iput(inode);	/* drop the ilookup() reference */

out_unlock:
	si_read_unlock(sb);
	/* err == 0 covers both the found and not-found cases; h_ino tells
	 * the caller which one it was */
	if (!err) {
		err = __put_user(ibusy.h_ino, &arg->h_ino);
		if (unlikely(err)) {
			err = -EFAULT;
			AuTraceErr(err);
		}
	}
out:
	return err;
}
/*
 * Lookup the inode with given id, it will be allocated if needed.
 * Retries because our own allocation may lose the race against a
 * concurrent zfsctl_inode_alloc(); in that case the winner's inode
 * shows up in the cache on the next ilookup().
 */
static struct inode *
zfsctl_inode_lookup(zfs_sb_t *zsb, uint64_t id,
    const struct file_operations *fops, const struct inode_operations *ops)
{
	struct inode *ip;

	for (;;) {
		/* fast path: already cached */
		ip = ilookup(zsb->z_sb, (unsigned long)id);
		if (ip != NULL)
			break;

		/* May fail due to concurrent zfsctl_inode_alloc() */
		ip = zfsctl_inode_alloc(zsb, id, fops, ops);
		if (ip != NULL)
			break;
	}

	return (ip);
}
/*
 * NFS-export decode helper: find a connected dentry for @ino, preferring
 * one whose parent directory is @dir_ino.  Returns NULL when nothing is
 * cached, ERR_PTR(-ESTALE) when the inode or dentry is from an old
 * filesystem generation, or a referenced dentry on success.
 */
static struct dentry *decode_by_ino(struct super_block *sb, ino_t ino,
				    ino_t dir_ino)
{
	struct dentry *dentry, *d;
	struct inode *inode;
	au_gen_t sigen;

	LKTRTrace("i%lu, diri%lu\n", (unsigned long)ino, (unsigned long)dir_ino);

	dentry = NULL;
	/* only a cache hit is useful here; do not read the inode in */
	inode = ilookup(sb, ino);
	if (!inode)
		goto out;

	dentry = ERR_PTR(-ESTALE);
	sigen = au_sigen(sb);
	/* reject dead/stale inodes and ones from an older su generation */
	if (unlikely(is_bad_inode(inode)
		     || IS_DEADDIR(inode)
		     || sigen != au_iigen(inode)))
		goto out_iput;

	dentry = NULL;
	if (!dir_ino || S_ISDIR(inode->i_mode))
		/* directories have a single alias; any alias will do */
		dentry = d_find_alias(inode);
	else {
		/* pick the alias whose parent matches dir_ino, skipping
		 * anonymous (disconnected) dentries */
		spin_lock(&dcache_lock);
		list_for_each_entry(d, &inode->i_dentry, d_alias)
			if (!au_test_anon(d)
			    && d->d_parent->d_inode->i_ino == dir_ino) {
				dentry = dget_locked(d);
				break;
			}
		spin_unlock(&dcache_lock);
	}
	/* the dentry itself may also be stale relative to the inode */
	if (unlikely(dentry && sigen != au_digen(dentry))) {
		dput(dentry);
		dentry = ERR_PTR(-ESTALE);
	}

out_iput:
	iput(inode);	/* drop the ilookup() reference */
out:
	AuTraceErrPtr(dentry);
	return dentry;
}
/*
 * Write back every dirty backing-store entry: for each entry marked
 * SVFS_BS_DIRTY whose inode is still in the inode cache, commit its
 * data and release the reference taken by ilookup().
 *
 * Reworked from the original `if/else continue;` form, where the
 * trailing iput() misleadingly looked unconditional; guard clauses make
 * the reference lifetime obvious.  Behavior is unchanged.
 */
void svfs_backing_store_write_dirty(struct svfs_super_block *ssb)
{
	struct backing_store_entry *bse = ssb->bse;
	struct inode *inode;
	int i;

	for (i = 0; i < ssb->bs_size; i++, bse++) {
		if (!(bse->state & SVFS_BS_DIRTY))
			continue;

		/* ilookup() only finds already-cached inodes; entries
		 * with no cached inode are simply skipped */
		inode = ilookup(ssb->sb, i);
		if (!inode)
			continue;

		/* this is the valid inode, do the data commit */
		svfs_backing_store_commit_bse(inode);
		iput(inode);	/* drop the ilookup() reference */
	}
}
static struct dentry * zfsctl_lookup(struct inode *dir,struct dentry *dentry, struct nameidata *nd) { struct inode *inode = NULL; if (dentry->d_name.len >= MAXNAMELEN) { return ERR_PTR(-ENAMETOOLONG); } if (strcmp(dentry->d_name.name, ZFS_SNAPDIR_NAME) == 0) { inode = ilookup(dir->i_sb, LZFS_ZFSCTL_INO_SNAPDIR); if(!inode) { return NULL; } return d_splice_alias(inode, dentry); } else { return d_splice_alias(NULL, dentry); } }
static int sf_remount_fs(struct super_block *sb, int *flags, char *data) { #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 23) struct sf_glob_info *sf_g; struct vbsf_mount_info_new *info; struct sf_inode_info *sf_i; struct inode *iroot; SHFLFSOBJINFO fsinfo; int err; printk(KERN_DEBUG "ENTER: sf_remount_fs\n"); sf_g = GET_GLOB_INFO(sb); BUG_ON(!sf_g); BUG_ON(data[0] != 0); info = (struct vbsf_mount_info_new *)data; BUG_ON( info->signature[0] != VBSF_MOUNT_SIGNATURE_BYTE_0 || info->signature[1] != VBSF_MOUNT_SIGNATURE_BYTE_1 || info->signature[2] != VBSF_MOUNT_SIGNATURE_BYTE_2); sf_g->uid = info->uid; sf_g->gid = info->gid; sf_g->ttl = info->ttl; sf_g->dmode = info->dmode; sf_g->fmode = info->fmode; sf_g->dmask = info->dmask; sf_g->fmask = info->fmask; iroot = ilookup(sb, 0); if (!iroot) { printk(KERN_DEBUG "can't find root inode\n"); return -ENOSYS; } sf_i = GET_INODE_INFO(iroot); err = sf_stat(__func__, sf_g, sf_i->path, &fsinfo, 0); BUG_ON(err != 0); sf_init_inode(sf_g, iroot, &fsinfo); /*unlock_new_inode(iroot);*/ printk(KERN_DEBUG "LEAVE: sf_remount_fs\n"); return 0; #else return -ENOSYS; #endif }
/*
 * Handle mount -o remount for vboxsf: re-apply the (optional) mount
 * options blob and refresh the root inode's attributes from the host.
 * Returns 0 on success, -ENOSYS when unsupported or the root inode is
 * not cached.
 */
static int sf_remount_fs(struct super_block *sb, int *flags, char *data)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 23)
	struct sf_glob_info *sf_g;
	struct sf_inode_info *sf_i;
	struct inode *iroot;
	SHFLFSOBJINFO fsinfo;
	int err;

	sf_g = GET_GLOB_INFO(sb);
	BUG_ON(!sf_g);
	/* options are optional on remount; only apply a blob that carries
	 * the vbsf mount-info signature */
	if (data && data[0] != 0) {
		struct vbsf_mount_info_new *info =
			(struct vbsf_mount_info_new *)data;
		if (   info->signature[0] == VBSF_MOUNT_SIGNATURE_BYTE_0
		    && info->signature[1] == VBSF_MOUNT_SIGNATURE_BYTE_1
		    && info->signature[2] == VBSF_MOUNT_SIGNATURE_BYTE_2) {
			sf_g->uid = info->uid;
			sf_g->gid = info->gid;
			sf_g->ttl = info->ttl;
			sf_g->dmode = info->dmode;
			sf_g->fmode = info->fmode;
			sf_g->dmask = info->dmask;
			sf_g->fmask = info->fmask;
		}
	}

	/* root inode of vboxsf lives at ino 0; only a cache hit is usable */
	iroot = ilookup(sb, 0);
	if (!iroot)
		return -ENOSYS;

	sf_i = GET_INODE_INFO(iroot);
	err = sf_stat(__func__, sf_g, sf_i->path, &fsinfo, 0);
	BUG_ON(err != 0);
	/* re-initialize the root inode from the freshly fetched host info */
	sf_init_inode(sf_g, iroot, &fsinfo);
	/*unlock_new_inode(iroot);*/
	return 0;
#else
	return -ENOSYS;
#endif
}
/*
 * Get a reference on a vnode.
 * Returns vp with a new inode reference held (taken via ilookup), or
 * NULL when the underlying inode is being freed or is no longer hashed.
 */
vnode_t *
vn_get(
	struct vnode	*vp,
	vmap_t		*vmap)
{
	struct inode	*inode;

	XFS_STATS_INC(vn_get);
	inode = LINVFS_GET_IP(vp);
	/* quick check: don't bother if the inode is already being freed */
	if (inode->i_state & I_FREEING)
		return NULL;

	/* ilookup() both re-validates that the inode is still hashed and
	 * takes the reference we hand back to the caller; NOTE(review):
	 * that reference is presumably dropped via the matching vnode
	 * release path — confirm against callers */
	inode = ilookup(vmap->v_vfsp->vfs_super, vmap->v_ino);
	if (!inode)	/* Inode not present */
		return NULL;

	vn_trace_exit(vp, "vn_get", (inst_t *)__return_address);
	return vp;
}
/**
 * sysfs_drop_dentry - drop dentry for the specified sysfs_dirent
 * @sd: target sysfs_dirent
 *
 * Drop dentry for @sd.  @sd must have been unlinked from its
 * parent on entry to this function such that it can't be looked
 * up anymore.
 *
 * @sd->s_dentry which is protected with sysfs_assoc_lock points
 * to the currently associated dentry but we're not holding a
 * reference to it and racing with dput().  Grab dcache_lock and
 * verify dentry before dropping it.  If @sd->s_dentry is NULL or
 * dput() beats us, no need to bother.
 */
static void sysfs_drop_dentry(struct sysfs_dirent *sd)
{
	struct dentry *dentry = NULL;
	struct inode *inode;

	/* We're not holding a reference to ->s_dentry dentry but the
	 * field will stay valid as long as sysfs_assoc_lock is held.
	 */
	spin_lock(&sysfs_assoc_lock);
	spin_lock(&dcache_lock);

	/* drop dentry if it's there and dput() didn't kill it yet */
	if (sd->s_dentry && sd->s_dentry->d_inode) {
		dentry = dget_locked(sd->s_dentry);
		spin_lock(&dentry->d_lock);
		__d_drop(dentry);
		spin_unlock(&dentry->d_lock);
	}

	spin_unlock(&dcache_lock);
	spin_unlock(&sysfs_assoc_lock);

	/* dentries for shadowed inodes are pinned, unpin */
	if (dentry && sysfs_is_shadowed_inode(dentry->d_inode))
		dput(dentry);
	/* balances dget_locked() above; dput(NULL) is a no-op when we
	 * never grabbed a dentry */
	dput(dentry);

	/* adjust nlink and update timestamp */
	inode = ilookup(sysfs_sb, sd->s_ino);
	if (inode) {
		mutex_lock(&inode->i_mutex);

		inode->i_ctime = CURRENT_TIME;
		drop_nlink(inode);
		/* directories lose a second link here — presumably the
		 * self-referencing entry; confirm against sysfs dir code */
		if (sysfs_type(sd) == SYSFS_DIR)
			drop_nlink(inode);

		mutex_unlock(&inode->i_mutex);

		iput(inode);	/* drop the ilookup() reference */
	}
}
/*
 * Fetch the jffs2_inode_info for @inum on behalf of the garbage
 * collector.  Returns the inode info with a reference held, NULL when
 * the inode is absent (or the caller should retry after the inocache
 * makes progress), or an ERR_PTR on failure.
 */
struct jffs2_inode_info *jffs2_gc_fetch_inode(struct jffs2_sb_info *c,
					      int inum, int nlink)
{
	struct inode *inode;
	struct jffs2_inode_cache *ic;

	if (!nlink) {
		/* The inode has zero nlink but its nodes weren't yet marked
		   obsolete. This has to be because we're still waiting for
		   the final (close() and) iput() to happen.

		   There's a possibility that the final iput() could have
		   happened while we were contemplating. In order to ensure
		   that we don't cause a new read_inode() (which would fail)
		   for the inode in question, we use ilookup() in this case
		   instead of iget().

		   The nlink can't _become_ zero at this point because we're
		   holding the alloc_sem, and jffs2_do_unlink() would also
		   need that while decrementing nlink on any inode.
		*/
		inode = ilookup(OFNI_BS_2SFFJ(c), inum);
		if (!inode) {
			D1(printk(KERN_DEBUG "ilookup() failed for ino #%u; inode is probably deleted.\n",
				  inum));

			spin_lock(&c->inocache_lock);
			ic = jffs2_get_ino_cache(c, inum);
			if (!ic) {
				/* completely gone: nothing left to GC */
				D1(printk(KERN_DEBUG "Inode cache for ino #%u is gone.\n", inum));
				spin_unlock(&c->inocache_lock);
				return NULL;
			}
			if (ic->state != INO_STATE_CHECKEDABSENT) {
				/* Wait for progress. Don't just loop */
				D1(printk(KERN_DEBUG "Waiting for ino #%u in state %d\n",
					  ic->ino, ic->state));
				sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
			} else {
				spin_unlock(&c->inocache_lock);
			}

			return NULL;
		}
	} else {
		/* Inode has links to it still; they're not going away because
		   jffs2_do_unlink() would need the alloc_sem and we have it.
		   Just iget() it, and if read_inode() is necessary that's OK.
		*/
		inode = iget(OFNI_BS_2SFFJ(c), inum);
		if (!inode)
			return ERR_PTR(-ENOMEM);
	}
	if (is_bad_inode(inode)) {
		printk(KERN_NOTICE "Eep. read_inode() failed for ino #%u. nlink %d\n",
		       inum, nlink);
		/* NB. This will happen again. We need to do something appropriate here. */
		iput(inode);
		return ERR_PTR(-EIO);
	}

	return JFFS2_INODE_INFO(inode);
}
/*
 * Look up (or create) the znode for object @obj_num in @zsb, returning
 * it in *zpp with a hold taken.  Returns 0 on success or a (positive)
 * error: ENOENT when the object is unlinked or gone, EINVAL for a
 * non-znode object.
 */
int
zfs_zget(zfs_sb_t *zsb, uint64_t obj_num, znode_t **zpp)
{
	dmu_object_info_t doi;
	dmu_buf_t *db;
	znode_t *zp;
	int err;
	sa_handle_t *hdl;
	struct inode *ip;

	*zpp = NULL;

again:
	/* peek at the inode cache first; ip may be NULL (iput(NULL) below
	 * is then a no-op on the error paths) */
	ip = ilookup(zsb->z_sb, obj_num);

	ZFS_OBJ_HOLD_ENTER(zsb, obj_num);

	err = sa_buf_hold(zsb->z_os, obj_num, NULL, &db);
	if (err) {
		ZFS_OBJ_HOLD_EXIT(zsb, obj_num);
		iput(ip);
		return (err);
	}

	dmu_object_info_from_db(db, &doi);
	/* the bonus buffer must be an SA or a (large-enough) legacy znode */
	if (doi.doi_bonus_type != DMU_OT_SA &&
	    (doi.doi_bonus_type != DMU_OT_ZNODE ||
	    (doi.doi_bonus_type == DMU_OT_ZNODE &&
	    doi.doi_bonus_size < sizeof (znode_phys_t)))) {
		sa_buf_rele(db, NULL);
		ZFS_OBJ_HOLD_EXIT(zsb, obj_num);
		iput(ip);
		return (SET_ERROR(EINVAL));
	}

	hdl = dmu_buf_get_user(db);
	if (hdl != NULL) {
		if (ip == NULL) {
			/*
			 * ilookup returned NULL, which means
			 * the znode is dying - but the SA handle isn't
			 * quite dead yet, we need to drop any locks
			 * we're holding, re-schedule the task and try again.
			 */
			sa_buf_rele(db, NULL);
			ZFS_OBJ_HOLD_EXIT(zsb, obj_num);
			schedule();
			goto again;
		}

		zp = sa_get_userdata(hdl);

		/*
		 * Since "SA" does immediate eviction we
		 * should never find a sa handle that doesn't
		 * know about the znode.
		 */

		ASSERT3P(zp, !=, NULL);

		mutex_enter(&zp->z_lock);
		ASSERT3U(zp->z_id, ==, obj_num);
		if (zp->z_unlinked) {
			err = SET_ERROR(ENOENT);
		} else {
			/* hand the caller its own hold on the znode */
			igrab(ZTOI(zp));
			*zpp = zp;
			err = 0;
		}
		sa_buf_rele(db, NULL);
		mutex_exit(&zp->z_lock);
		ZFS_OBJ_HOLD_EXIT(zsb, obj_num);
		iput(ip);	/* drop the ilookup() reference */
		return (err);
	}

	ASSERT3P(ip, ==, NULL);

	/*
	 * Not found create new znode/vnode but only if file exists.
	 *
	 * There is a small window where zfs_vget() could
	 * find this object while a file create is still in
	 * progress.  This is checked for in zfs_znode_alloc()
	 *
	 * if zfs_znode_alloc() fails it will drop the hold on the
	 * bonus buffer.
	 */
	zp = zfs_znode_alloc(zsb, db, doi.doi_data_block_size,
	    doi.doi_bonus_type, obj_num, NULL, NULL);
	if (zp == NULL) {
		err = SET_ERROR(ENOENT);
	} else {
		*zpp = zp;
	}
	ZFS_OBJ_HOLD_EXIT(zsb, obj_num);
	return (err);
}