static struct regmap *meson_map_resource(struct meson_pinctrl *pc,
					 struct device_node *node, char *name)
{
	struct resource res;
	void __iomem *base;
	int i;

	i = of_property_match_string(node, "reg-names", name);
	if (of_address_to_resource(node, i, &res))
		return ERR_PTR(-ENOENT);

	base = devm_ioremap_resource(pc->dev, &res);
	if (IS_ERR(base))
		return ERR_CAST(base);

	meson_regmap_config.max_register = resource_size(&res) - 4;
	meson_regmap_config.name = devm_kasprintf(pc->dev, GFP_KERNEL,
						  "%s-%s", node->name, name);
	if (!meson_regmap_config.name)
		return ERR_PTR(-ENOMEM);

	return devm_regmap_init_mmio(pc->dev, base, &meson_regmap_config);
}
static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
					u64 ino, u32 generation)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *inode;

	if (check_nid_range(sbi, ino))
		return ERR_PTR(-ESTALE);

	/*
	 * f2fs_iget isn't quite right if the inode is currently unallocated!
	 * However f2fs_iget currently does appropriate checks to handle stale
	 * inodes so everything is OK.
	 */
	inode = f2fs_iget(sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	if (unlikely(generation && inode->i_generation != generation)) {
		/* we didn't find the right inode.. */
		iput(inode);
		return ERR_PTR(-ESTALE);
	}
	return inode;
}
static struct drm_gem_object *vgem_prime_import_sg_table(struct drm_device *dev,
			struct dma_buf_attachment *attach, struct sg_table *sg)
{
	struct drm_vgem_gem_object *obj;
	int npages;

	obj = __vgem_gem_create(dev, attach->dmabuf->size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	npages = PAGE_ALIGN(attach->dmabuf->size) / PAGE_SIZE;

	obj->table = sg;
	obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!obj->pages) {
		__vgem_gem_destroy(obj);
		return ERR_PTR(-ENOMEM);
	}

	obj->pages_pin_count++; /* perma-pinned */
	drm_prime_sg_to_page_addr_arrays(obj->table, obj->pages, NULL, npages);
	return &obj->base;
}
static struct cifs_ntsd *get_cifs_acl_by_path(struct cifs_sb_info *cifs_sb,
		const char *path, u32 *pacllen)
{
	struct cifs_ntsd *pntsd = NULL;
	int oplock = 0;
	unsigned int xid;
	int rc, create_options = 0;
	__u16 fid;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);

	if (IS_ERR(tlink))
		return ERR_CAST(tlink);

	tcon = tlink_tcon(tlink);
	xid = get_xid();

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	rc = CIFSSMBOpen(xid, tcon, path, FILE_OPEN, READ_CONTROL,
			 create_options, &fid, &oplock, NULL,
			 cifs_sb->local_nls,
			 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
	if (!rc) {
		rc = CIFSSMBGetCIFSACL(xid, tcon, fid, &pntsd, pacllen);
		CIFSSMBClose(xid, tcon, fid);
	}

	cifs_put_tlink(tlink);
	free_xid(xid);

	cFYI(1, "%s: rc = %d ACL len %d", __func__, rc, *pacllen);
	if (rc)
		return ERR_PTR(rc);
	return pntsd;
}
static struct dentry *
isofs_export_iget(struct super_block *sb,
		  unsigned long block,
		  unsigned long offset,
		  __u32 generation)
{
	struct inode *inode;
	struct dentry *result;

	if (block == 0)
		return ERR_PTR(-ESTALE);
	inode = isofs_iget(sb, block, offset);
	if (IS_ERR(inode))
		return ERR_CAST(inode);
	if (generation && inode->i_generation != generation) {
		iput(inode);
		return ERR_PTR(-ESTALE);
	}
	result = d_alloc_anon(inode);
	if (!result) {
		iput(inode);
		return ERR_PTR(-ENOMEM);
	}
	return result;
}
static struct dst_entry *__xfrm4_dst_lookup(struct net *net, struct flowi4 *fl4,
					    int tos, int oif,
					    const xfrm_address_t *saddr,
					    const xfrm_address_t *daddr,
					    u32 mark)
{
	struct rtable *rt;

	memset(fl4, 0, sizeof(*fl4));
	fl4->daddr = daddr->a4;
	fl4->flowi4_tos = tos;
	fl4->flowi4_oif = l3mdev_master_ifindex_by_index(net, oif);
	fl4->flowi4_mark = mark;
	if (saddr)
		fl4->saddr = saddr->a4;

	fl4->flowi4_flags = FLOWI_FLAG_SKIP_NH_OIF;

	rt = __ip_route_output_key(net, fl4);
	if (!IS_ERR(rt))
		return &rt->dst;

	return ERR_CAST(rt);
}
static struct crypto_instance *pcrypt_alloc_aead(struct rtattr **tb,
						 u32 type, u32 mask)
{
	struct crypto_instance *inst;
	struct crypto_alg *alg;

	alg = crypto_get_attr_alg(tb, type, (mask & CRYPTO_ALG_TYPE_MASK));
	if (IS_ERR(alg))
		return ERR_CAST(alg);

	inst = pcrypt_alloc_instance(alg);
	if (IS_ERR(inst))
		goto out_put_alg;

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
	inst->alg.cra_type = &crypto_aead_type;

	inst->alg.cra_aead.ivsize = alg->cra_aead.ivsize;
	inst->alg.cra_aead.geniv = alg->cra_aead.geniv;
	inst->alg.cra_aead.maxauthsize = alg->cra_aead.maxauthsize;

	inst->alg.cra_ctxsize = sizeof(struct pcrypt_aead_ctx);

	inst->alg.cra_init = pcrypt_aead_init_tfm;
	inst->alg.cra_exit = pcrypt_aead_exit_tfm;

	inst->alg.cra_aead.setkey = pcrypt_aead_setkey;
	inst->alg.cra_aead.setauthsize = pcrypt_aead_setauthsize;
	inst->alg.cra_aead.encrypt = pcrypt_aead_encrypt;
	inst->alg.cra_aead.decrypt = pcrypt_aead_decrypt;
	inst->alg.cra_aead.givencrypt = pcrypt_aead_givencrypt;

out_put_alg:
	crypto_mod_put(alg);
	return inst;
}
/* Returns a dentry corresponding to a specific extended attribute file
 * for the inode. If flags allow, the file is created. Otherwise, a
 * valid or negative dentry, or an error is returned. */
static struct dentry *xattr_lookup(struct inode *inode, const char *name,
				   int flags)
{
	struct dentry *xadir, *xafile;
	int err = 0;

	xadir = open_xa_dir(inode, flags);
	if (IS_ERR(xadir))
		return ERR_CAST(xadir);

	mutex_lock_nested(&xadir->d_inode->i_mutex, I_MUTEX_XATTR);
	xafile = lookup_one_len(name, xadir, strlen(name));
	if (IS_ERR(xafile)) {
		err = PTR_ERR(xafile);
		goto out;
	}

	if (xafile->d_inode && (flags & XATTR_CREATE))
		err = -EEXIST;

	if (!xafile->d_inode) {
		err = -ENODATA;
		if (xattr_may_create(flags))
			err = xattr_create(xadir->d_inode, xafile,
					   0700|S_IFREG);
	}

	if (err)
		dput(xafile);
out:
	mutex_unlock(&xadir->d_inode->i_mutex);
	dput(xadir);
	if (err)
		return ERR_PTR(err);
	return xafile;
}
static void
add_weight_vector(grn_ctx *ctx,
                  grn_obj *column,
                  grn_obj *value,
                  grn_obj *vector)
{
  unsigned int i, n;
  grn_obj weight_buffer;

  n = GRN_UINT32_VALUE(value);
  GRN_UINT32_INIT(&weight_buffer, 0);
  for (i = 0; i < n; i += 2) {
    grn_rc rc;
    grn_obj *key, *weight;

    key = value + 1 + i;
    weight = key + 1;

    GRN_BULK_REWIND(&weight_buffer);
    rc = grn_obj_cast(ctx, weight, &weight_buffer, GRN_TRUE);
    if (rc != GRN_SUCCESS) {
      grn_obj *range;
      range = grn_ctx_at(ctx, weight_buffer.header.domain);
      ERR_CAST(column, range, weight);
      grn_obj_unlink(ctx, range);
      break;
    }

    grn_vector_add_element(ctx, vector,
                           GRN_BULK_HEAD(key),
                           GRN_BULK_VSIZE(key),
                           GRN_UINT32_VALUE(&weight_buffer),
                           key->header.domain);
  }
  GRN_OBJ_FIN(ctx, &weight_buffer);
}
static struct dentry *
cifs_do_mount(struct file_system_type *fs_type,
	      int flags, const char *dev_name, void *data)
{
	int rc;
	struct super_block *sb;

	sb = sget(fs_type, NULL, set_anon_super, NULL);

	cFYI(1, "Devname: %s flags: %d ", dev_name, flags);

	if (IS_ERR(sb))
		return ERR_CAST(sb);

	sb->s_flags = flags;

	rc = cifs_read_super(sb, data, dev_name, flags & MS_SILENT ? 1 : 0);
	if (rc) {
		deactivate_locked_super(sb);
		return ERR_PTR(rc);
	}
	sb->s_flags |= MS_ACTIVE;
	return dget(sb->s_root);
}
static struct da8xx_usb0_clk48 *
da8xx_cfgchip_register_usb0_clk48(struct device *dev,
				  struct regmap *regmap)
{
	const char * const parent_names[] = { "usb_refclkin", "pll0_auxclk" };
	struct clk *fck_clk;
	struct da8xx_usb0_clk48 *usb0;
	struct clk_init_data init;
	int ret;

	fck_clk = devm_clk_get(dev, "fck");
	if (IS_ERR(fck_clk)) {
		if (PTR_ERR(fck_clk) != -EPROBE_DEFER)
			dev_err(dev, "Missing fck clock\n");
		return ERR_CAST(fck_clk);
	}

	usb0 = devm_kzalloc(dev, sizeof(*usb0), GFP_KERNEL);
	if (!usb0)
		return ERR_PTR(-ENOMEM);

	init.name = "usb0_clk48";
	init.ops = &da8xx_usb0_clk48_ops;
	init.parent_names = parent_names;
	init.num_parents = 2;

	usb0->hw.init = &init;
	usb0->fck = fck_clk;
	usb0->regmap = regmap;

	ret = devm_clk_hw_register(dev, &usb0->hw);
	if (ret < 0)
		return ERR_PTR(ret);

	return usb0;
}
static int link_dinode(struct gfs2_inode *dip, const struct qstr *name, struct gfs2_inode *ip, struct gfs2_diradd *da) { struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode); struct gfs2_alloc_parms ap = { .target = da->nr_blocks, }; int error; if (da->nr_blocks) { error = gfs2_quota_lock_check(dip); if (error) goto fail_quota_locks; error = gfs2_inplace_reserve(dip, &ap); if (error) goto fail_quota_locks; error = gfs2_trans_begin(sdp, gfs2_trans_da_blks(dip, da, 2), 0); if (error) goto fail_ipreserv; } else { error = gfs2_trans_begin(sdp, RES_LEAF + 2 * RES_DINODE, 0); if (error) goto fail_quota_locks; } error = gfs2_dir_add(&dip->i_inode, name, ip, da); if (error) goto fail_end_trans; fail_end_trans: gfs2_trans_end(sdp); fail_ipreserv: gfs2_inplace_release(dip); fail_quota_locks: gfs2_quota_unlock(dip); return error; } static int gfs2_initxattrs(struct inode *inode, const struct xattr *xattr_array, void *fs_info) { const struct xattr *xattr; int err = 0; for (xattr = xattr_array; xattr->name != NULL; xattr++) { err = __gfs2_xattr_set(inode, xattr->name, xattr->value, xattr->value_len, 0, GFS2_EATYPE_SECURITY); if (err < 0) break; } return err; } static int gfs2_security_init(struct gfs2_inode *dip, struct gfs2_inode *ip, const struct qstr *qstr) { return security_inode_init_security(&ip->i_inode, &dip->i_inode, qstr, &gfs2_initxattrs, NULL); } /** * gfs2_create_inode - Create a new inode * @dir: The parent directory * @dentry: The new dentry * @file: If non-NULL, the file which is being opened * @mode: The permissions on the new inode * @dev: For device nodes, this is the device number * @symname: For symlinks, this is the link destination * @size: The initial size of the inode (ignored for directories) * * Returns: 0 on success, or error code */ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry, struct file *file, umode_t mode, dev_t dev, const char *symname, unsigned int size, int excl, int *opened) { const struct qstr *name = &dentry->d_name; struct posix_acl *default_acl, *acl; struct gfs2_holder ghs[2]; struct inode *inode = NULL; struct gfs2_inode *dip = GFS2_I(dir), *ip; struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode); struct gfs2_glock *io_gl; struct dentry *d; int error; u32 aflags = 0; struct gfs2_diradd da = { .bh = NULL, }; if (!name->len || name->len > GFS2_FNAMESIZE) return -ENAMETOOLONG; error = gfs2_rs_alloc(dip); if (error) return error; error = gfs2_rindex_update(sdp); if (error) return error; error = gfs2_glock_nq_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs); if (error) goto fail; error = create_ok(dip, name, mode); if (error) goto fail_gunlock; inode = gfs2_dir_search(dir, &dentry->d_name, !S_ISREG(mode) || excl); error = PTR_ERR(inode); if (!IS_ERR(inode)) { d = d_splice_alias(inode, dentry); error = PTR_ERR(d); if (IS_ERR(d)) { inode = ERR_CAST(d); goto fail_gunlock; } error = 0; if (file) { if (S_ISREG(inode->i_mode)) { WARN_ON(d != NULL); error = finish_open(file, dentry, gfs2_open_common, opened); } else { error = finish_no_open(file, d); } } else { dput(d); } gfs2_glock_dq_uninit(ghs); return error; } else if (error != -ENOENT) { goto fail_gunlock; } error = gfs2_diradd_alloc_required(dir, name, &da); if (error < 0) goto fail_gunlock; inode = new_inode(sdp->sd_vfs); error = -ENOMEM; if (!inode) goto fail_gunlock; error = posix_acl_create(dir, &mode, &default_acl, &acl); if (error) goto fail_free_vfs_inode; ip = GFS2_I(inode); error = gfs2_rs_alloc(ip); if (error) goto fail_free_acls; inode->i_mode = mode; set_nlink(inode, S_ISDIR(mode) ? 
2 : 1); inode->i_rdev = dev; inode->i_size = size; inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; gfs2_set_inode_blocks(inode, 1); munge_mode_uid_gid(dip, inode); ip->i_goal = dip->i_goal; ip->i_diskflags = 0; ip->i_eattr = 0; ip->i_height = 0; ip->i_depth = 0; ip->i_entries = 0; switch(mode & S_IFMT) { case S_IFREG: if ((dip->i_diskflags & GFS2_DIF_INHERIT_JDATA) || gfs2_tune_get(sdp, gt_new_files_jdata)) ip->i_diskflags |= GFS2_DIF_JDATA; gfs2_set_aops(inode); break; case S_IFDIR: ip->i_diskflags |= (dip->i_diskflags & GFS2_DIF_INHERIT_JDATA); ip->i_diskflags |= GFS2_DIF_JDATA; ip->i_entries = 2; break; } gfs2_set_inode_flags(inode); if ((GFS2_I(sdp->sd_root_dir->d_inode) == dip) || (dip->i_diskflags & GFS2_DIF_TOPDIR)) aflags |= GFS2_AF_ORLOV; error = alloc_dinode(ip, aflags); if (error) goto fail_free_inode; error = gfs2_glock_get(sdp, ip->i_no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl); if (error) goto fail_free_inode; ip->i_gl->gl_object = ip; error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, ghs + 1); if (error) goto fail_free_inode; error = gfs2_trans_begin(sdp, RES_DINODE, 0); if (error) goto fail_gunlock2; init_dinode(dip, ip, symname); gfs2_trans_end(sdp); error = gfs2_glock_get(sdp, ip->i_no_addr, &gfs2_iopen_glops, CREATE, &io_gl); if (error) goto fail_gunlock2; error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh); if (error) goto fail_gunlock2; ip->i_iopen_gh.gh_gl->gl_object = ip; gfs2_glock_put(io_gl); gfs2_set_iop(inode); insert_inode_hash(inode); if (default_acl) { error = gfs2_set_acl(inode, default_acl, ACL_TYPE_DEFAULT); posix_acl_release(default_acl); } if (acl) { if (!error) error = gfs2_set_acl(inode, acl, ACL_TYPE_ACCESS); posix_acl_release(acl); } if (error) goto fail_gunlock3; error = gfs2_security_init(dip, ip, name); if (error) goto fail_gunlock3; error = link_dinode(dip, name, ip, &da); if (error) goto fail_gunlock3; mark_inode_dirty(inode); d_instantiate(dentry, inode); if (file) { *opened |= FILE_CREATED; error = finish_open(file, dentry, gfs2_open_common, opened); } gfs2_glock_dq_uninit(ghs); gfs2_glock_dq_uninit(ghs + 1); return error; fail_gunlock3: gfs2_glock_dq_uninit(ghs + 1); if (ip->i_gl) gfs2_glock_put(ip->i_gl); goto fail_gunlock; fail_gunlock2: gfs2_glock_dq_uninit(ghs + 1); fail_free_inode: if (ip->i_gl) gfs2_glock_put(ip->i_gl); gfs2_rs_delete(ip, NULL); fail_free_acls: if (default_acl) posix_acl_release(default_acl); if (acl) posix_acl_release(acl); fail_free_vfs_inode: free_inode_nonrcu(inode); inode = NULL; fail_gunlock: gfs2_dir_no_add(&da); gfs2_glock_dq_uninit(ghs); if (inode && !IS_ERR(inode)) { clear_nlink(inode); mark_inode_dirty(inode); set_bit(GIF_ALLOC_FAILED, &GFS2_I(inode)->i_flags); iput(inode); } fail: return error; } /** * gfs2_create - Create a file * @dir: The directory in which to create the file * @dentry: The dentry of the new file * @mode: The mode of the new file * * Returns: errno */ static int gfs2_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl) { return gfs2_create_inode(dir, dentry, NULL, S_IFREG | mode, 0, NULL, 0, excl, NULL); } /** * __gfs2_lookup - Look up a filename in a directory and return its inode * @dir: The directory inode * @dentry: The dentry of the new inode * @file: File to be opened * @opened: atomic_open flags * * * Returns: errno */ static struct dentry *__gfs2_lookup(struct inode *dir, struct dentry *dentry, struct file *file, int *opened) { struct inode *inode; struct dentry *d; struct gfs2_holder gh; struct gfs2_glock 
*gl; int error; inode = gfs2_lookupi(dir, &dentry->d_name, 0); if (!inode) return NULL; if (IS_ERR(inode)) return ERR_CAST(inode); gl = GFS2_I(inode)->i_gl; error = gfs2_glock_nq_init(gl, LM_ST_SHARED, LM_FLAG_ANY, &gh); if (error) { iput(inode); return ERR_PTR(error); } d = d_splice_alias(inode, dentry); if (IS_ERR(d)) { gfs2_glock_dq_uninit(&gh); return d; } if (file && S_ISREG(inode->i_mode)) error = finish_open(file, dentry, gfs2_open_common, opened); gfs2_glock_dq_uninit(&gh); if (error) { dput(d); return ERR_PTR(error); } return d; } static struct dentry *gfs2_lookup(struct inode *dir, struct dentry *dentry, unsigned flags) { return __gfs2_lookup(dir, dentry, NULL, NULL); } /** * gfs2_link - Link to a file * @old_dentry: The inode to link * @dir: Add link to this directory * @dentry: The name of the link * * Link the inode in "old_dentry" into the directory "dir" with the * name in "dentry". * * Returns: errno */ static int gfs2_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) { struct gfs2_inode *dip = GFS2_I(dir); struct gfs2_sbd *sdp = GFS2_SB(dir); struct inode *inode = old_dentry->d_inode; struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_holder ghs[2]; struct buffer_head *dibh; struct gfs2_diradd da = { .bh = NULL, }; int error; if (S_ISDIR(inode->i_mode)) return -EPERM; error = gfs2_rs_alloc(dip); if (error) return error; gfs2_holder_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs); gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + 1); error = gfs2_glock_nq(ghs); /* parent */ if (error) goto out_parent; error = gfs2_glock_nq(ghs + 1); /* child */ if (error) goto out_child; error = -ENOENT; if (inode->i_nlink == 0) goto out_gunlock; error = gfs2_permission(dir, MAY_WRITE | MAY_EXEC); if (error) goto out_gunlock; error = gfs2_dir_check(dir, &dentry->d_name, NULL); switch (error) { case -ENOENT: break; case 0: error = -EEXIST; default: goto out_gunlock; } error = -EINVAL; if (!dip->i_inode.i_nlink) goto out_gunlock; error = -EFBIG; if (dip->i_entries == (u32)-1) goto out_gunlock; error = -EPERM; if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) goto out_gunlock; error = -EINVAL; if (!ip->i_inode.i_nlink) goto out_gunlock; error = -EMLINK; if (ip->i_inode.i_nlink == (u32)-1) goto out_gunlock; error = gfs2_diradd_alloc_required(dir, &dentry->d_name, &da); if (error < 0) goto out_gunlock; if (da.nr_blocks) { struct gfs2_alloc_parms ap = { .target = da.nr_blocks, }; error = gfs2_quota_lock_check(dip); if (error) goto out_gunlock; error = gfs2_inplace_reserve(dip, &ap); if (error) goto out_gunlock_q; error = gfs2_trans_begin(sdp, gfs2_trans_da_blks(dip, &da, 2), 0); if (error) goto out_ipres; } else { error = gfs2_trans_begin(sdp, 2 * RES_DINODE + RES_LEAF, 0); if (error) goto out_ipres; } error = gfs2_meta_inode_buffer(ip, &dibh); if (error) goto out_end_trans; error = gfs2_dir_add(dir, &dentry->d_name, ip, &da); if (error) goto out_brelse; gfs2_trans_add_meta(ip->i_gl, dibh); inc_nlink(&ip->i_inode); ip->i_inode.i_ctime = CURRENT_TIME; ihold(inode); d_instantiate(dentry, inode); mark_inode_dirty(inode); out_brelse: brelse(dibh); out_end_trans: gfs2_trans_end(sdp); out_ipres: if (da.nr_blocks) gfs2_inplace_release(dip); out_gunlock_q: if (da.nr_blocks) gfs2_quota_unlock(dip); out_gunlock: gfs2_dir_no_add(&da); gfs2_glock_dq(ghs + 1); out_child: gfs2_glock_dq(ghs); out_parent: gfs2_holder_uninit(ghs); gfs2_holder_uninit(ghs + 1); return error; } /* * gfs2_unlink_ok - check to see that a inode is still in a directory * @dip: the directory * @name: the name of the file * 
@ip: the inode * * Assumes that the lock on (at least) @dip is held. * * Returns: 0 if the parent/child relationship is correct, errno if it isn't */ static int gfs2_unlink_ok(struct gfs2_inode *dip, const struct qstr *name, const struct gfs2_inode *ip) { int error; if (IS_IMMUTABLE(&ip->i_inode) || IS_APPEND(&ip->i_inode)) return -EPERM; if ((dip->i_inode.i_mode & S_ISVTX) && !uid_eq(dip->i_inode.i_uid, current_fsuid()) && !uid_eq(ip->i_inode.i_uid, current_fsuid()) && !capable(CAP_FOWNER)) return -EPERM; if (IS_APPEND(&dip->i_inode)) return -EPERM; error = gfs2_permission(&dip->i_inode, MAY_WRITE | MAY_EXEC); if (error) return error; error = gfs2_dir_check(&dip->i_inode, name, ip); if (error) return error; return 0; }
static struct crypto_instance *crypto_rfc3686_alloc(struct rtattr **tb) { struct crypto_attr_type *algt; struct crypto_instance *inst; struct crypto_alg *alg; struct crypto_skcipher_spawn *spawn; const char *cipher_name; int err; algt = crypto_get_attr_type(tb); if (IS_ERR(algt)) return ERR_CAST(algt); if ((algt->type ^ CRYPTO_ALG_TYPE_BLKCIPHER) & algt->mask) return ERR_PTR(-EINVAL); cipher_name = crypto_attr_alg_name(tb[1]); if (IS_ERR(cipher_name)) return ERR_CAST(cipher_name); inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); if (!inst) return ERR_PTR(-ENOMEM); spawn = crypto_instance_ctx(inst); crypto_set_skcipher_spawn(spawn, inst); err = crypto_grab_skcipher(spawn, cipher_name, 0, crypto_requires_sync(algt->type, algt->mask)); if (err) goto err_free_inst; alg = crypto_skcipher_spawn_alg(spawn); /* We only support 16-byte blocks. */ err = -EINVAL; if (alg->cra_ablkcipher.ivsize != CTR_RFC3686_BLOCK_SIZE) goto err_drop_spawn; /* Not a stream cipher? */ if (alg->cra_blocksize != 1) goto err_drop_spawn; err = -ENAMETOOLONG; if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, "rfc3686(%s)", alg->cra_name) >= CRYPTO_MAX_ALG_NAME) goto err_drop_spawn; if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, "rfc3686(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) goto err_drop_spawn; inst->alg.cra_priority = alg->cra_priority; inst->alg.cra_blocksize = 1; inst->alg.cra_alignmask = alg->cra_alignmask; inst->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | (alg->cra_flags & CRYPTO_ALG_ASYNC); inst->alg.cra_type = &crypto_ablkcipher_type; inst->alg.cra_ablkcipher.ivsize = CTR_RFC3686_IV_SIZE; inst->alg.cra_ablkcipher.min_keysize = alg->cra_ablkcipher.min_keysize + CTR_RFC3686_NONCE_SIZE; inst->alg.cra_ablkcipher.max_keysize = alg->cra_ablkcipher.max_keysize + CTR_RFC3686_NONCE_SIZE; inst->alg.cra_ablkcipher.geniv = "seqiv"; inst->alg.cra_ablkcipher.setkey = crypto_rfc3686_setkey; inst->alg.cra_ablkcipher.encrypt = crypto_rfc3686_crypt; inst->alg.cra_ablkcipher.decrypt = crypto_rfc3686_crypt; inst->alg.cra_ctxsize = sizeof(struct crypto_rfc3686_ctx); inst->alg.cra_init = crypto_rfc3686_init_tfm; inst->alg.cra_exit = crypto_rfc3686_exit_tfm; return inst; err_drop_spawn: crypto_drop_skcipher(spawn); err_free_inst: kfree(inst); return ERR_PTR(err); }
struct dentry *f2fs_get_parent(struct dentry *child) { struct qstr dotdot = {.len = 2, .name = ".."}; unsigned long ino = f2fs_inode_by_name(child->d_inode, &dotdot); if (!ino) return ERR_PTR(-ENOENT); return d_obtain_alias(f2fs_iget(child->d_inode->i_sb, ino)); } static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) { struct inode *inode = NULL; struct f2fs_dir_entry *de; struct page *page; if (dentry->d_name.len > F2FS_NAME_LEN) return ERR_PTR(-ENAMETOOLONG); de = f2fs_find_entry(dir, &dentry->d_name, &page); if (de) { nid_t ino = le32_to_cpu(de->ino); kunmap(page); f2fs_put_page(page, 0); inode = f2fs_iget(dir->i_sb, ino); if (IS_ERR(inode)) return ERR_CAST(inode); stat_inc_inline_inode(inode); } return d_splice_alias(inode, dentry); } static int f2fs_unlink(struct inode *dir, struct dentry *dentry) { struct f2fs_sb_info *sbi = F2FS_I_SB(dir); struct inode *inode = dentry->d_inode; struct f2fs_dir_entry *de; struct page *page; int err = -ENOENT; trace_f2fs_unlink_enter(dir, dentry); f2fs_balance_fs(sbi); de = f2fs_find_entry(dir, &dentry->d_name, &page); if (!de) goto fail; f2fs_lock_op(sbi); err = acquire_orphan_inode(sbi); if (err) { f2fs_unlock_op(sbi); kunmap(page); f2fs_put_page(page, 0); goto fail; } f2fs_delete_entry(de, page, inode); f2fs_unlock_op(sbi); /* In order to evict this inode, we set it dirty */ mark_inode_dirty(inode); fail: trace_f2fs_unlink_exit(inode, err); return err; } static int f2fs_symlink(struct inode *dir, struct dentry *dentry, const char *symname) { struct f2fs_sb_info *sbi = F2FS_I_SB(dir); struct inode *inode; size_t symlen = strlen(symname) + 1; int err; f2fs_balance_fs(sbi); inode = f2fs_new_inode(dir, S_IFLNK | S_IRWXUGO); if (IS_ERR(inode)) return PTR_ERR(inode); inode->i_op = &f2fs_symlink_inode_operations; inode->i_mapping->a_ops = &f2fs_dblock_aops; f2fs_lock_op(sbi); err = f2fs_add_link(dentry, inode); f2fs_unlock_op(sbi); if (err) goto out; err = page_symlink(inode, symname, symlen); alloc_nid_done(sbi, inode->i_ino); d_instantiate(dentry, inode); unlock_new_inode(inode); return err; out: clear_nlink(inode); iget_failed(inode); alloc_nid_failed(sbi, inode->i_ino); return err; } static int f2fs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) { struct f2fs_sb_info *sbi = F2FS_I_SB(dir); struct inode *inode; int err; f2fs_balance_fs(sbi); inode = f2fs_new_inode(dir, S_IFDIR | mode); if (IS_ERR(inode)) return PTR_ERR(inode); inode->i_op = &f2fs_dir_inode_operations; inode->i_fop = &f2fs_dir_operations; inode->i_mapping->a_ops = &f2fs_dblock_aops; mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_ZERO); set_inode_flag(F2FS_I(inode), FI_INC_LINK); f2fs_lock_op(sbi); err = f2fs_add_link(dentry, inode); f2fs_unlock_op(sbi); if (err) goto out_fail; alloc_nid_done(sbi, inode->i_ino); d_instantiate(dentry, inode); unlock_new_inode(inode); return 0; out_fail: clear_inode_flag(F2FS_I(inode), FI_INC_LINK); clear_nlink(inode); iget_failed(inode); alloc_nid_failed(sbi, inode->i_ino); return err; } static int f2fs_rmdir(struct inode *dir, struct dentry *dentry) { struct inode *inode = dentry->d_inode; if (f2fs_empty_dir(inode)) return f2fs_unlink(dir, dentry); return -ENOTEMPTY; } static int f2fs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t rdev) { struct f2fs_sb_info *sbi = F2FS_I_SB(dir); struct inode *inode; int err = 0; if (!new_valid_dev(rdev)) return -EINVAL; f2fs_balance_fs(sbi); inode = f2fs_new_inode(dir, mode); if (IS_ERR(inode)) return PTR_ERR(inode); 
init_special_inode(inode, inode->i_mode, rdev); inode->i_op = &f2fs_special_inode_operations; f2fs_lock_op(sbi); err = f2fs_add_link(dentry, inode); f2fs_unlock_op(sbi); if (err) goto out; alloc_nid_done(sbi, inode->i_ino); d_instantiate(dentry, inode); unlock_new_inode(inode); return 0; out: clear_nlink(inode); iget_failed(inode); alloc_nid_failed(sbi, inode->i_ino); return err; } static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) { struct f2fs_sb_info *sbi = F2FS_I_SB(old_dir); struct inode *old_inode = old_dentry->d_inode; struct inode *new_inode = new_dentry->d_inode; struct page *old_dir_page; struct page *old_page, *new_page; struct f2fs_dir_entry *old_dir_entry = NULL; struct f2fs_dir_entry *old_entry; struct f2fs_dir_entry *new_entry; int err = -ENOENT; f2fs_balance_fs(sbi); old_entry = f2fs_find_entry(old_dir, &old_dentry->d_name, &old_page); if (!old_entry) goto out; if (S_ISDIR(old_inode->i_mode)) { err = -EIO; old_dir_entry = f2fs_parent_dir(old_inode, &old_dir_page); if (!old_dir_entry) goto out_old; } if (new_inode) { err = -ENOTEMPTY; if (old_dir_entry && !f2fs_empty_dir(new_inode)) goto out_dir; err = -ENOENT; new_entry = f2fs_find_entry(new_dir, &new_dentry->d_name, &new_page); if (!new_entry) goto out_dir; f2fs_lock_op(sbi); err = acquire_orphan_inode(sbi); if (err) goto put_out_dir; if (update_dent_inode(old_inode, &new_dentry->d_name)) { release_orphan_inode(sbi); goto put_out_dir; } f2fs_set_link(new_dir, new_entry, new_page, old_inode); new_inode->i_ctime = CURRENT_TIME; down_write(&F2FS_I(new_inode)->i_sem); if (old_dir_entry) drop_nlink(new_inode); drop_nlink(new_inode); up_write(&F2FS_I(new_inode)->i_sem); mark_inode_dirty(new_inode); if (!new_inode->i_nlink) add_orphan_inode(sbi, new_inode->i_ino); else release_orphan_inode(sbi); update_inode_page(old_inode); update_inode_page(new_inode); } else { f2fs_lock_op(sbi); err = f2fs_add_link(new_dentry, old_inode); if (err) { f2fs_unlock_op(sbi); goto out_dir; } if (old_dir_entry) { inc_nlink(new_dir); update_inode_page(new_dir); } } down_write(&F2FS_I(old_inode)->i_sem); file_lost_pino(old_inode); up_write(&F2FS_I(old_inode)->i_sem); old_inode->i_ctime = CURRENT_TIME; mark_inode_dirty(old_inode); f2fs_delete_entry(old_entry, old_page, NULL); if (old_dir_entry) { if (old_dir != new_dir) { f2fs_set_link(old_inode, old_dir_entry, old_dir_page, new_dir); update_inode_page(old_inode); } else { kunmap(old_dir_page); f2fs_put_page(old_dir_page, 0); } drop_nlink(old_dir); mark_inode_dirty(old_dir); update_inode_page(old_dir); } f2fs_unlock_op(sbi); return 0; put_out_dir: f2fs_unlock_op(sbi); kunmap(new_page); f2fs_put_page(new_page, 0); out_dir: if (old_dir_entry) { kunmap(old_dir_page); f2fs_put_page(old_dir_page, 0); } out_old: kunmap(old_page); f2fs_put_page(old_page, 0); out: return err; } const struct inode_operations f2fs_dir_inode_operations = { .create = f2fs_create, .lookup = f2fs_lookup, .link = f2fs_link, .unlink = f2fs_unlink, .symlink = f2fs_symlink, .mkdir = f2fs_mkdir, .rmdir = f2fs_rmdir, .mknod = f2fs_mknod, .rename = f2fs_rename, .getattr = f2fs_getattr, .setattr = f2fs_setattr, .get_acl = f2fs_get_acl, #ifdef CONFIG_F2FS_FS_XATTR .setxattr = generic_setxattr, .getxattr = generic_getxattr, .listxattr = f2fs_listxattr, .removexattr = generic_removexattr, #endif }; const struct inode_operations f2fs_symlink_inode_operations = { .readlink = generic_readlink, .follow_link = page_follow_link_light, .put_link = page_put_link, .getattr = 
f2fs_getattr, .setattr = f2fs_setattr, #ifdef CONFIG_F2FS_FS_XATTR .setxattr = generic_setxattr, .getxattr = generic_getxattr, .listxattr = f2fs_listxattr, .removexattr = generic_removexattr, #endif }; const struct inode_operations f2fs_special_inode_operations = { .getattr = f2fs_getattr, .setattr = f2fs_setattr, .get_acl = f2fs_get_acl, #ifdef CONFIG_F2FS_FS_XATTR .setxattr = generic_setxattr, .getxattr = generic_getxattr, .listxattr = f2fs_listxattr, .removexattr = generic_removexattr, #endif };
static int recover_dentry(struct inode *inode, struct page *ipage, struct list_head *dir_list) { struct f2fs_inode *raw_inode = F2FS_INODE(ipage); nid_t pino = le32_to_cpu(raw_inode->i_pino); struct f2fs_dir_entry *de; struct fscrypt_name fname; struct page *page; struct inode *dir, *einode; struct fsync_inode_entry *entry; int err = 0; char *name; entry = get_fsync_inode(dir_list, pino); if (!entry) { entry = add_fsync_inode(F2FS_I_SB(inode), dir_list, pino, false); if (IS_ERR(entry)) { dir = ERR_CAST(entry); err = PTR_ERR(entry); goto out; } } dir = entry->inode; memset(&fname, 0, sizeof(struct fscrypt_name)); fname.disk_name.len = le32_to_cpu(raw_inode->i_namelen); fname.disk_name.name = raw_inode->i_name; if (unlikely(fname.disk_name.len > F2FS_NAME_LEN)) { WARN_ON(1); err = -ENAMETOOLONG; goto out; } retry: de = __f2fs_find_entry(dir, &fname, &page); if (de && inode->i_ino == le32_to_cpu(de->ino)) goto out_put; if (de) { einode = f2fs_iget_retry(inode->i_sb, le32_to_cpu(de->ino)); if (IS_ERR(einode)) { WARN_ON(1); err = PTR_ERR(einode); if (err == -ENOENT) err = -EEXIST; goto out_put; } err = dquot_initialize(einode); if (err) { iput(einode); goto out_put; } err = f2fs_acquire_orphan_inode(F2FS_I_SB(inode)); if (err) { iput(einode); goto out_put; } f2fs_delete_entry(de, page, dir, einode); iput(einode); goto retry; } else if (IS_ERR(page)) { err = PTR_ERR(page); } else { err = f2fs_add_dentry(dir, &fname, inode, inode->i_ino, inode->i_mode); } if (err == -ENOMEM) goto retry; goto out; out_put: f2fs_put_page(page, 0); out: if (file_enc_name(inode)) name = "<encrypted>"; else name = raw_inode->i_name; f2fs_msg(inode->i_sb, KERN_NOTICE, "%s: ino = %x, name = %s, dir = %lx, err = %d", __func__, ino_of_node(ipage), name, IS_ERR(dir) ? 0 : dir->i_ino, err); return err; }
/* * do a lookup in a directory * - just returns the FID the dentry name maps to if found */ static int afs_do_lookup(struct inode *dir, struct dentry *dentry, struct afs_fid *fid, struct key *key) { struct afs_super_info *as = dir->i_sb->s_fs_info; struct afs_lookup_cookie cookie = { .ctx.actor = afs_lookup_filldir, .name = dentry->d_name, .fid.vid = as->volume->vid }; int ret; _enter("{%lu},%p{%pd},", dir->i_ino, dentry, dentry); /* search the directory */ ret = afs_dir_iterate(dir, &cookie.ctx, key); if (ret < 0) { _leave(" = %d [iter]", ret); return ret; } ret = -ENOENT; if (!cookie.found) { _leave(" = -ENOENT [not found]"); return -ENOENT; } *fid = cookie.fid; _leave(" = 0 { vn=%u u=%u }", fid->vnode, fid->unique); return 0; } /* * Try to auto mount the mountpoint with pseudo directory, if the autocell * operation is setted. */ static struct inode *afs_try_auto_mntpt( int ret, struct dentry *dentry, struct inode *dir, struct key *key, struct afs_fid *fid) { const char *devname = dentry->d_name.name; struct afs_vnode *vnode = AFS_FS_I(dir); struct inode *inode; _enter("%d, %p{%pd}, {%x:%u}, %p", ret, dentry, dentry, vnode->fid.vid, vnode->fid.vnode, key); if (ret != -ENOENT || !test_bit(AFS_VNODE_AUTOCELL, &vnode->flags)) goto out; inode = afs_iget_autocell(dir, devname, strlen(devname), key); if (IS_ERR(inode)) { ret = PTR_ERR(inode); goto out; } *fid = AFS_FS_I(inode)->fid; _leave("= %p", inode); return inode; out: _leave("= %d", ret); return ERR_PTR(ret); } /* * look up an entry in a directory */ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) { struct afs_vnode *vnode; struct afs_fid fid; struct inode *inode; struct key *key; int ret; vnode = AFS_FS_I(dir); _enter("{%x:%u},%p{%pd},", vnode->fid.vid, vnode->fid.vnode, dentry, dentry); ASSERTCMP(d_inode(dentry), ==, NULL); if (dentry->d_name.len >= AFSNAMEMAX) { _leave(" = -ENAMETOOLONG"); return ERR_PTR(-ENAMETOOLONG); } if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) { _leave(" = -ESTALE"); return ERR_PTR(-ESTALE); } key = afs_request_key(vnode->volume->cell); if (IS_ERR(key)) { _leave(" = %ld [key]", PTR_ERR(key)); return ERR_CAST(key); } ret = afs_validate(vnode, key); if (ret < 0) { key_put(key); _leave(" = %d [val]", ret); return ERR_PTR(ret); } ret = afs_do_lookup(dir, dentry, &fid, key); if (ret < 0) { inode = afs_try_auto_mntpt(ret, dentry, dir, key, &fid); if (!IS_ERR(inode)) { key_put(key); goto success; } ret = PTR_ERR(inode); key_put(key); if (ret == -ENOENT) { d_add(dentry, NULL); _leave(" = NULL [negative]"); return NULL; } _leave(" = %d [do]", ret); return ERR_PTR(ret); } dentry->d_fsdata = (void *)(unsigned long) vnode->status.data_version; /* instantiate the dentry */ inode = afs_iget(dir->i_sb, key, &fid, NULL, NULL); key_put(key); if (IS_ERR(inode)) { _leave(" = %ld", PTR_ERR(inode)); return ERR_CAST(inode); } success: d_add(dentry, inode); _leave(" = 0 { vn=%u u=%u } -> { ino=%lu v=%u }", fid.vnode, fid.unique, d_inode(dentry)->i_ino, d_inode(dentry)->i_generation); return NULL; } /* * check that a dentry lookup hit has found a valid entry * - NOTE! 
the hit can be a negative hit too, so we can't assume we have an * inode */ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags) { struct afs_vnode *vnode, *dir; struct afs_fid uninitialized_var(fid); struct dentry *parent; struct key *key; void *dir_version; int ret; if (flags & LOOKUP_RCU) return -ECHILD; vnode = AFS_FS_I(d_inode(dentry)); if (d_really_is_positive(dentry)) _enter("{v={%x:%u} n=%pd fl=%lx},", vnode->fid.vid, vnode->fid.vnode, dentry, vnode->flags); else _enter("{neg n=%pd}", dentry); key = afs_request_key(AFS_FS_S(dentry->d_sb)->volume->cell); if (IS_ERR(key)) key = NULL; /* lock down the parent dentry so we can peer at it */ parent = dget_parent(dentry); dir = AFS_FS_I(d_inode(parent)); /* validate the parent directory */ if (test_bit(AFS_VNODE_MODIFIED, &dir->flags)) afs_validate(dir, key); if (test_bit(AFS_VNODE_DELETED, &dir->flags)) { _debug("%pd: parent dir deleted", dentry); goto out_bad; } dir_version = (void *) (unsigned long) dir->status.data_version; if (dentry->d_fsdata == dir_version) goto out_valid; /* the dir contents are unchanged */ _debug("dir modified"); /* search the directory for this vnode */ ret = afs_do_lookup(&dir->vfs_inode, dentry, &fid, key); switch (ret) { case 0: /* the filename maps to something */ if (d_really_is_negative(dentry)) goto out_bad; if (is_bad_inode(d_inode(dentry))) { printk("kAFS: afs_d_revalidate: %pd2 has bad inode\n", dentry); goto out_bad; } /* if the vnode ID has changed, then the dirent points to a * different file */ if (fid.vnode != vnode->fid.vnode) { _debug("%pd: dirent changed [%u != %u]", dentry, fid.vnode, vnode->fid.vnode); goto not_found; } /* if the vnode ID uniqifier has changed, then the file has * been deleted and replaced, and the original vnode ID has * been reused */ if (fid.unique != vnode->fid.unique) { _debug("%pd: file deleted (uq %u -> %u I:%u)", dentry, fid.unique, vnode->fid.unique, d_inode(dentry)->i_generation); spin_lock(&vnode->lock); set_bit(AFS_VNODE_DELETED, &vnode->flags); spin_unlock(&vnode->lock); goto not_found; } goto out_valid; case -ENOENT: /* the filename is unknown */ _debug("%pd: dirent not found", dentry); if (d_really_is_positive(dentry)) goto not_found; goto out_valid; default: _debug("failed to iterate dir %pd: %d", parent, ret); goto out_bad; } out_valid: dentry->d_fsdata = dir_version; dput(parent); key_put(key); _leave(" = 1 [valid]"); return 1; /* the dirent, if it exists, now points to a different vnode */ not_found: spin_lock(&dentry->d_lock); dentry->d_flags |= DCACHE_NFSFS_RENAMED; spin_unlock(&dentry->d_lock); out_bad: _debug("dropping dentry %pd2", dentry); dput(parent); key_put(key); _leave(" = 0 [bad]"); return 0; } /* * allow the VFS to enquire as to whether a dentry should be unhashed (mustn't * sleep) * - called from dput() when d_count is going to 0. * - return 1 to request dentry be unhashed, 0 otherwise */ static int afs_d_delete(const struct dentry *dentry) { _enter("%pd", dentry); if (dentry->d_flags & DCACHE_NFSFS_RENAMED) goto zap; if (d_really_is_positive(dentry) && (test_bit(AFS_VNODE_DELETED, &AFS_FS_I(d_inode(dentry))->flags) || test_bit(AFS_VNODE_PSEUDODIR, &AFS_FS_I(d_inode(dentry))->flags))) goto zap; _leave(" = 0 [keep]"); return 0; zap: _leave(" = 1 [zap]"); return 1; }
/* * look up an entry in a directory */ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) { struct afs_vnode *vnode; struct afs_fid fid; struct inode *inode; struct key *key; int ret; vnode = AFS_FS_I(dir); _enter("{%x:%u},%p{%s},", vnode->fid.vid, vnode->fid.vnode, dentry, dentry->d_name.name); ASSERTCMP(dentry->d_inode, ==, NULL); if (dentry->d_name.len >= AFSNAMEMAX) { _leave(" = -ENAMETOOLONG"); return ERR_PTR(-ENAMETOOLONG); } if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) { _leave(" = -ESTALE"); return ERR_PTR(-ESTALE); } key = afs_request_key(vnode->volume->cell); if (IS_ERR(key)) { _leave(" = %ld [key]", PTR_ERR(key)); return ERR_CAST(key); } ret = afs_validate(vnode, key); if (ret < 0) { key_put(key); _leave(" = %d [val]", ret); return ERR_PTR(ret); } ret = afs_do_lookup(dir, dentry, &fid, key); if (ret < 0) { inode = afs_try_auto_mntpt(ret, dentry, dir, key, &fid); if (!IS_ERR(inode)) { key_put(key); goto success; } ret = PTR_ERR(inode); key_put(key); if (ret == -ENOENT) { d_add(dentry, NULL); _leave(" = NULL [negative]"); return NULL; } _leave(" = %d [do]", ret); return ERR_PTR(ret); } dentry->d_fsdata = (void *)(unsigned long) vnode->status.data_version; /* instantiate the dentry */ inode = afs_iget(dir->i_sb, key, &fid, NULL, NULL); key_put(key); if (IS_ERR(inode)) { _leave(" = %ld", PTR_ERR(inode)); return ERR_CAST(inode); } success: d_add(dentry, inode); _leave(" = 0 { vn=%u u=%u } -> { ino=%lu v=%u }", fid.vnode, fid.unique, dentry->d_inode->i_ino, dentry->d_inode->i_generation); return NULL; }
static struct crypto_instance *crypto_ccm_alloc_common(struct rtattr **tb, const char *full_name, const char *ctr_name, const char *cipher_name) { struct crypto_attr_type *algt; struct crypto_instance *inst; struct crypto_alg *ctr; struct crypto_alg *cipher; struct ccm_instance_ctx *ictx; int err; algt = crypto_get_attr_type(tb); if (IS_ERR(algt)) return ERR_CAST(algt); if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) return ERR_PTR(-EINVAL); cipher = crypto_alg_mod_lookup(cipher_name, CRYPTO_ALG_TYPE_CIPHER, CRYPTO_ALG_TYPE_MASK); if (IS_ERR(cipher)) return ERR_CAST(cipher); err = -EINVAL; if (cipher->cra_blocksize != 16) goto out_put_cipher; inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL); err = -ENOMEM; if (!inst) goto out_put_cipher; ictx = crypto_instance_ctx(inst); err = crypto_init_spawn(&ictx->cipher, cipher, inst, CRYPTO_ALG_TYPE_MASK); if (err) goto err_free_inst; crypto_set_skcipher_spawn(&ictx->ctr, inst); err = crypto_grab_skcipher(&ictx->ctr, ctr_name, 0, crypto_requires_sync(algt->type, algt->mask)); if (err) goto err_drop_cipher; ctr = crypto_skcipher_spawn_alg(&ictx->ctr); /* Not a stream cipher? */ err = -EINVAL; if (ctr->cra_blocksize != 1) goto err_drop_ctr; /* We want the real thing! */ if (ctr->cra_ablkcipher.ivsize != 16) goto err_drop_ctr; err = -ENAMETOOLONG; if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, "ccm_base(%s,%s)", ctr->cra_driver_name, cipher->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) goto err_drop_ctr; memcpy(inst->alg.cra_name, full_name, CRYPTO_MAX_ALG_NAME); inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD; inst->alg.cra_flags |= ctr->cra_flags & CRYPTO_ALG_ASYNC; inst->alg.cra_priority = cipher->cra_priority + ctr->cra_priority; inst->alg.cra_blocksize = 1; inst->alg.cra_alignmask = cipher->cra_alignmask | ctr->cra_alignmask | (__alignof__(u32) - 1); inst->alg.cra_type = &crypto_aead_type; inst->alg.cra_aead.ivsize = 16; inst->alg.cra_aead.maxauthsize = 16; inst->alg.cra_ctxsize = sizeof(struct crypto_ccm_ctx); inst->alg.cra_init = crypto_ccm_init_tfm; inst->alg.cra_exit = crypto_ccm_exit_tfm; inst->alg.cra_aead.setkey = crypto_ccm_setkey; inst->alg.cra_aead.setauthsize = crypto_ccm_setauthsize; inst->alg.cra_aead.encrypt = crypto_ccm_encrypt; inst->alg.cra_aead.decrypt = crypto_ccm_decrypt; out: crypto_mod_put(cipher); return inst; err_drop_ctr: crypto_drop_skcipher(&ictx->ctr); err_drop_cipher: crypto_drop_spawn(&ictx->cipher); err_free_inst: kfree(inst); out_put_cipher: inst = ERR_PTR(err); goto out; }
/* * Create a vfsmount that we can automount */ static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt) { struct dfs_info3_param *referrals = NULL; unsigned int num_referrals = 0; struct cifs_sb_info *cifs_sb; struct cifs_ses *ses; char *full_path; unsigned int xid; int i; int rc; struct vfsmount *mnt; struct tcon_link *tlink; cifs_dbg(FYI, "in %s\n", __func__); BUG_ON(IS_ROOT(mntpt)); /* * The MSDFS spec states that paths in DFS referral requests and * responses must be prefixed by a single '\' character instead of * the double backslashes usually used in the UNC. This function * gives us the latter, so we must adjust the result. */ mnt = ERR_PTR(-ENOMEM); full_path = build_path_from_dentry(mntpt); if (full_path == NULL) goto cdda_exit; cifs_sb = CIFS_SB(d_inode(mntpt)->i_sb); tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) { mnt = ERR_CAST(tlink); goto free_full_path; } ses = tlink_tcon(tlink)->ses; xid = get_xid(); rc = get_dfs_path(xid, ses, full_path + 1, cifs_sb->local_nls, &num_referrals, &referrals, cifs_remap(cifs_sb)); free_xid(xid); cifs_put_tlink(tlink); mnt = ERR_PTR(-ENOENT); for (i = 0; i < num_referrals; i++) { int len; dump_referral(referrals + i); /* connect to a node */ len = strlen(referrals[i].node_name); if (len < 2) { cifs_dbg(VFS, "%s: Net Address path too short: %s\n", __func__, referrals[i].node_name); mnt = ERR_PTR(-EINVAL); break; } mnt = cifs_dfs_do_refmount(cifs_sb, full_path, referrals + i); cifs_dbg(FYI, "%s: cifs_dfs_do_refmount:%s , mnt:%p\n", __func__, referrals[i].node_name, mnt); if (!IS_ERR(mnt)) goto success; } /* no valid submounts were found; return error from get_dfs_path() by * preference */ if (rc != 0) mnt = ERR_PTR(rc); success: free_dfs_info_array(referrals, num_referrals); free_full_path: kfree(full_path); cdda_exit: cifs_dbg(FYI, "leaving %s\n" , __func__); return mnt; }
static struct crypto_instance *crypto_gcm_alloc_common(struct rtattr **tb, const char *full_name, const char *ctr_name, const char *ghash_name) { struct crypto_attr_type *algt; struct crypto_instance *inst; struct crypto_alg *ctr; struct crypto_alg *ghash_alg; struct ahash_alg *ghash_ahash_alg; struct gcm_instance_ctx *ctx; int err; algt = crypto_get_attr_type(tb); if (IS_ERR(algt)) return ERR_CAST(algt); if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask) return ERR_PTR(-EINVAL); ghash_alg = crypto_find_alg(ghash_name, &crypto_ahash_type, CRYPTO_ALG_TYPE_HASH, CRYPTO_ALG_TYPE_AHASH_MASK | crypto_requires_sync(algt->type, algt->mask)); if (IS_ERR(ghash_alg)) return ERR_CAST(ghash_alg); err = -ENOMEM; inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); if (!inst) goto out_put_ghash; ctx = crypto_instance_ctx(inst); ghash_ahash_alg = container_of(ghash_alg, struct ahash_alg, halg.base); err = crypto_init_ahash_spawn(&ctx->ghash, &ghash_ahash_alg->halg, inst); if (err) goto err_free_inst; crypto_set_skcipher_spawn(&ctx->ctr, inst); err = crypto_grab_skcipher(&ctx->ctr, ctr_name, 0, crypto_requires_sync(algt->type, algt->mask)); if (err) goto err_drop_ghash; ctr = crypto_skcipher_spawn_alg(&ctx->ctr); /* We only support 16-byte blocks. */ if (ctr->cra_ablkcipher.ivsize != 16) goto out_put_ctr; /* Not a stream cipher? */ err = -EINVAL; if (ctr->cra_blocksize != 1) goto out_put_ctr; err = -ENAMETOOLONG; if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, "gcm_base(%s,%s)", ctr->cra_driver_name, ghash_alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) goto out_put_ctr; memcpy(inst->alg.cra_name, full_name, CRYPTO_MAX_ALG_NAME); inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD; inst->alg.cra_flags |= ctr->cra_flags & CRYPTO_ALG_ASYNC; inst->alg.cra_priority = ctr->cra_priority; inst->alg.cra_blocksize = 1; inst->alg.cra_alignmask = ctr->cra_alignmask | (__alignof__(u64) - 1); inst->alg.cra_type = &crypto_aead_type; inst->alg.cra_aead.ivsize = 16; inst->alg.cra_aead.maxauthsize = 16; inst->alg.cra_ctxsize = sizeof(struct crypto_gcm_ctx); inst->alg.cra_init = crypto_gcm_init_tfm; inst->alg.cra_exit = crypto_gcm_exit_tfm; inst->alg.cra_aead.setkey = crypto_gcm_setkey; inst->alg.cra_aead.setauthsize = crypto_gcm_setauthsize; inst->alg.cra_aead.encrypt = crypto_gcm_encrypt; inst->alg.cra_aead.decrypt = crypto_gcm_decrypt; out: crypto_mod_put(ghash_alg); return inst; out_put_ctr: crypto_drop_skcipher(&ctx->ctr); err_drop_ghash: crypto_drop_ahash(&ctx->ghash); err_free_inst: kfree(inst); out_put_ghash: inst = ERR_PTR(err); goto out; }
static struct dentry * nilfs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { struct nilfs_super_data sd; struct super_block *s; fmode_t mode = FMODE_READ | FMODE_EXCL; struct dentry *root_dentry; int err, s_new = false; if (!(flags & MS_RDONLY)) mode |= FMODE_WRITE; sd.bdev = blkdev_get_by_path(dev_name, mode, fs_type); if (IS_ERR(sd.bdev)) return ERR_CAST(sd.bdev); sd.cno = 0; sd.flags = flags; if (nilfs_identify((char *)data, &sd)) { err = -EINVAL; goto failed; } /* * once the super is inserted into the list by sget, s_umount * will protect the lockfs code from trying to start a snapshot * while we are mounting */ mutex_lock(&sd.bdev->bd_fsfreeze_mutex); if (sd.bdev->bd_fsfreeze_count > 0) { mutex_unlock(&sd.bdev->bd_fsfreeze_mutex); err = -EBUSY; goto failed; } s = sget(fs_type, nilfs_test_bdev_super, nilfs_set_bdev_super, sd.bdev); mutex_unlock(&sd.bdev->bd_fsfreeze_mutex); if (IS_ERR(s)) { err = PTR_ERR(s); goto failed; } if (!s->s_root) { char b[BDEVNAME_SIZE]; s_new = true; /* New superblock instance created */ s->s_flags = flags; s->s_mode = mode; strlcpy(s->s_id, bdevname(sd.bdev, b), sizeof(s->s_id)); sb_set_blocksize(s, block_size(sd.bdev)); err = nilfs_fill_super(s, data, flags & MS_SILENT ? 1 : 0); if (err) goto failed_super; s->s_flags |= MS_ACTIVE; } else if (!sd.cno) { int busy = false; if (nilfs_tree_was_touched(s->s_root)) { busy = nilfs_try_to_shrink_tree(s->s_root); if (busy && (flags ^ s->s_flags) & MS_RDONLY) { printk(KERN_ERR "NILFS: the device already " "has a %s mount.\n", (s->s_flags & MS_RDONLY) ? "read-only" : "read/write"); err = -EBUSY; goto failed_super; } } if (!busy) { /* * Try remount to setup mount states if the current * tree is not mounted and only snapshots use this sb. */ err = nilfs_remount(s, &flags, data); if (err) goto failed_super; } } if (sd.cno) { err = nilfs_attach_snapshot(s, sd.cno, &root_dentry); if (err) goto failed_super; } else { root_dentry = dget(s->s_root); } if (!s_new) blkdev_put(sd.bdev, mode); return root_dentry; failed_super: deactivate_locked_super(s); failed: if (!s_new) blkdev_put(sd.bdev, mode); return ERR_PTR(err); }
static int crypt(struct blkcipher_desc *d, struct blkcipher_walk *w, struct priv *ctx, void (*fn)(struct crypto_tfm *, u8 *, const u8 *)) { int err; unsigned int avail; const int bs = LRW_BLOCK_SIZE; struct sinfo s = { .tfm = crypto_cipher_tfm(ctx->child), .fn = fn }; be128 *iv; u8 *wsrc; u8 *wdst; err = blkcipher_walk_virt(d, w); if (!(avail = w->nbytes)) return err; wsrc = w->src.virt.addr; wdst = w->dst.virt.addr; /* calculate first value of T */ iv = (be128 *)w->iv; s.t = *iv; /* T <- I*Key2 */ gf128mul_64k_bbe(&s.t, ctx->table.table); goto first; for (;;) { do { /* T <- I*Key2, using the optimization * discussed in the specification */ be128_xor(&s.t, &s.t, &ctx->table.mulinc[get_index128(iv)]); inc(iv); first: lrw_round(&s, wdst, wsrc); wsrc += bs; wdst += bs; } while ((avail -= bs) >= bs); err = blkcipher_walk_done(d, w, avail); if (!(avail = w->nbytes)) break; wsrc = w->src.virt.addr; wdst = w->dst.virt.addr; } return err; } static int encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct priv *ctx = crypto_blkcipher_ctx(desc->tfm); struct blkcipher_walk w; blkcipher_walk_init(&w, dst, src, nbytes); return crypt(desc, &w, ctx, crypto_cipher_alg(ctx->child)->cia_encrypt); } static int decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct priv *ctx = crypto_blkcipher_ctx(desc->tfm); struct blkcipher_walk w; blkcipher_walk_init(&w, dst, src, nbytes); return crypt(desc, &w, ctx, crypto_cipher_alg(ctx->child)->cia_decrypt); } int lrw_crypt(struct blkcipher_desc *desc, struct scatterlist *sdst, struct scatterlist *ssrc, unsigned int nbytes, struct lrw_crypt_req *req) { const unsigned int bsize = LRW_BLOCK_SIZE; const unsigned int max_blks = req->tbuflen / bsize; struct lrw_table_ctx *ctx = req->table_ctx; struct blkcipher_walk walk; unsigned int nblocks; be128 *iv, *src, *dst, *t; be128 *t_buf = req->tbuf; int err, i; BUG_ON(max_blks < 1); blkcipher_walk_init(&walk, sdst, ssrc, nbytes); err = blkcipher_walk_virt(desc, &walk); nbytes = walk.nbytes; if (!nbytes) return err; nblocks = min(walk.nbytes / bsize, max_blks); src = (be128 *)walk.src.virt.addr; dst = (be128 *)walk.dst.virt.addr; /* calculate first value of T */ iv = (be128 *)walk.iv; t_buf[0] = *iv; /* T <- I*Key2 */ gf128mul_64k_bbe(&t_buf[0], ctx->table); i = 0; goto first; for (;;) { do { for (i = 0; i < nblocks; i++) { /* T <- I*Key2, using the optimization * discussed in the specification */ be128_xor(&t_buf[i], t, &ctx->mulinc[get_index128(iv)]); inc(iv); first: t = &t_buf[i]; /* PP <- T xor P */ be128_xor(dst + i, t, src + i); } /* CC <- E(Key2,PP) */ req->crypt_fn(req->crypt_ctx, (u8 *)dst, nblocks * bsize); /* C <- T xor CC */ for (i = 0; i < nblocks; i++) be128_xor(dst + i, dst + i, &t_buf[i]); src += nblocks; dst += nblocks; nbytes -= nblocks * bsize; nblocks = min(nbytes / bsize, max_blks); } while (nblocks > 0); err = blkcipher_walk_done(desc, &walk, nbytes); nbytes = walk.nbytes; if (!nbytes) break; nblocks = min(nbytes / bsize, max_blks); src = (be128 *)walk.src.virt.addr; dst = (be128 *)walk.dst.virt.addr; } return err; } EXPORT_SYMBOL_GPL(lrw_crypt); static int init_tfm(struct crypto_tfm *tfm) { struct crypto_cipher *cipher; struct crypto_instance *inst = (void *)tfm->__crt_alg; struct crypto_spawn *spawn = crypto_instance_ctx(inst); struct priv *ctx = crypto_tfm_ctx(tfm); u32 *flags = &tfm->crt_flags; cipher = crypto_spawn_cipher(spawn); if (IS_ERR(cipher)) return PTR_ERR(cipher); if 
(crypto_cipher_blocksize(cipher) != LRW_BLOCK_SIZE) { *flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN; crypto_free_cipher(cipher); return -EINVAL; } ctx->child = cipher; return 0; } static void exit_tfm(struct crypto_tfm *tfm) { struct priv *ctx = crypto_tfm_ctx(tfm); lrw_free_table(&ctx->table); crypto_free_cipher(ctx->child); } static struct crypto_instance *alloc(struct rtattr **tb) { struct crypto_instance *inst; struct crypto_alg *alg; int err; err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER); if (err) return ERR_PTR(err); alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, CRYPTO_ALG_TYPE_MASK); if (IS_ERR(alg)) return ERR_CAST(alg); inst = crypto_alloc_instance("lrw", alg); if (IS_ERR(inst)) goto out_put_alg; inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER; inst->alg.cra_priority = alg->cra_priority; inst->alg.cra_blocksize = alg->cra_blocksize; if (alg->cra_alignmask < 7) inst->alg.cra_alignmask = 7; else inst->alg.cra_alignmask = alg->cra_alignmask; inst->alg.cra_type = &crypto_blkcipher_type; if (!(alg->cra_blocksize % 4)) inst->alg.cra_alignmask |= 3; inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize; inst->alg.cra_blkcipher.min_keysize = alg->cra_cipher.cia_min_keysize + alg->cra_blocksize; inst->alg.cra_blkcipher.max_keysize = alg->cra_cipher.cia_max_keysize + alg->cra_blocksize; inst->alg.cra_ctxsize = sizeof(struct priv); inst->alg.cra_init = init_tfm; inst->alg.cra_exit = exit_tfm; inst->alg.cra_blkcipher.setkey = setkey; inst->alg.cra_blkcipher.encrypt = encrypt; inst->alg.cra_blkcipher.decrypt = decrypt; out_put_alg: crypto_mod_put(alg); return inst; } static void free(struct crypto_instance *inst) { crypto_drop_spawn(crypto_instance_ctx(inst)); kfree(inst); }
static struct dentry * cifs_do_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { int rc; struct super_block *sb; struct cifs_sb_info *cifs_sb; struct smb_vol *volume_info; struct cifs_mnt_data mnt_data; struct dentry *root; cFYI(1, "Devname: %s flags: %d ", dev_name, flags); rc = cifs_setup_volume_info(&volume_info, (char *)data, dev_name); if (rc) return ERR_PTR(rc); cifs_sb = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL); if (cifs_sb == NULL) { root = ERR_PTR(-ENOMEM); goto out; } cifs_setup_cifs_sb(volume_info, cifs_sb); mnt_data.vol = volume_info; mnt_data.cifs_sb = cifs_sb; mnt_data.flags = flags; sb = sget(fs_type, cifs_match_super, set_anon_super, &mnt_data); if (IS_ERR(sb)) { root = ERR_CAST(sb); goto out_cifs_sb; } if (sb->s_fs_info) { cFYI(1, "Use existing superblock"); goto out_shared; } /* * Copy mount params for use in submounts. Better to do * the copy here and deal with the error before cleanup gets * complicated post-mount. */ cifs_sb->mountdata = kstrndup(data, PAGE_SIZE, GFP_KERNEL); if (cifs_sb->mountdata == NULL) { root = ERR_PTR(-ENOMEM); goto out_super; } sb->s_flags = flags; /* BB should we make this contingent on mount parm? */ sb->s_flags |= MS_NODIRATIME | MS_NOATIME; sb->s_fs_info = cifs_sb; rc = cifs_read_super(sb, volume_info, dev_name, flags & MS_SILENT ? 1 : 0); if (rc) { root = ERR_PTR(rc); goto out_super; } sb->s_flags |= MS_ACTIVE; root = cifs_get_root(volume_info, sb); if (root == NULL) goto out_super; cFYI(1, "dentry root is: %p", root); goto out; out_shared: root = cifs_get_root(volume_info, sb); if (root) cFYI(1, "dentry root is: %p", root); goto out; out_super: kfree(cifs_sb->mountdata); deactivate_locked_super(sb); out_cifs_sb: unload_nls(cifs_sb->local_nls); kfree(cifs_sb); out: cifs_cleanup_volume_info(&volume_info); return root; }
/* This function is surprisingly simple. The trick is understanding
 * that "child" is always a directory. So, to find its parent, you
 * simply need to find its ".." entry, normalize its block and offset,
 * and return the underlying inode. See the comments for
 * isofs_normalize_block_and_offset(). */
static struct dentry *isofs_export_get_parent(struct dentry *child)
{
	unsigned long parent_block = 0;
	unsigned long parent_offset = 0;
	struct inode *child_inode = child->d_inode;
	struct iso_inode_info *e_child_inode = ISOFS_I(child_inode);
	struct inode *parent_inode = NULL;
	struct iso_directory_record *de = NULL;
	struct buffer_head *bh = NULL;
	struct dentry *rv = NULL;

	/* "child" must always be a directory. */
	if (!S_ISDIR(child_inode->i_mode)) {
		printk(KERN_ERR "isofs: isofs_export_get_parent(): "
		       "child is not a directory!\n");
		rv = ERR_PTR(-EACCES);
		goto out;
	}

	/* It is an invariant that the directory offset is zero. If
	 * it is not zero, it means the directory failed to be
	 * normalized for some reason. */
	if (e_child_inode->i_iget5_offset != 0) {
		printk(KERN_ERR "isofs: isofs_export_get_parent(): "
		       "child directory not normalized!\n");
		rv = ERR_PTR(-EACCES);
		goto out;
	}

	/* The child inode has been normalized such that its
	 * i_iget5_block value points to the "." entry. Fortunately,
	 * the ".." entry is located in the same block. */
	parent_block = e_child_inode->i_iget5_block;

	/* Get the block in question. */
	bh = sb_bread(child_inode->i_sb, parent_block);
	if (bh == NULL) {
		rv = ERR_PTR(-EACCES);
		goto out;
	}

	/* This is the "." entry. */
	de = (struct iso_directory_record *)bh->b_data;

	/* The ".." entry is always the second entry. */
	parent_offset = (unsigned long)isonum_711(de->length);
	de = (struct iso_directory_record *)(bh->b_data + parent_offset);

	/* Verify it is in fact the ".." entry. */
	if ((isonum_711(de->name_len) != 1) || (de->name[0] != 1)) {
		printk(KERN_ERR "isofs: Unable to find the \"..\" "
		       "directory for NFS.\n");
		rv = ERR_PTR(-EACCES);
		goto out;
	}

	/* Normalize */
	isofs_normalize_block_and_offset(de, &parent_block, &parent_offset);

	/* Get the inode. */
	parent_inode = isofs_iget(child_inode->i_sb,
				  parent_block, parent_offset);
	if (IS_ERR(parent_inode)) {
		rv = ERR_CAST(parent_inode);
		if (rv != ERR_PTR(-ENOMEM))
			rv = ERR_PTR(-EACCES);
		goto out;
	}

	/* Allocate the dentry. */
	rv = d_alloc_anon(parent_inode);
	if (rv == NULL) {
		rv = ERR_PTR(-ENOMEM);
		goto out;
	}

out:
	if (bh)
		brelse(bh);
	return rv;
}
static struct crypto_instance *crypto_rfc4309_alloc(struct rtattr **tb)
{
	struct crypto_attr_type *algt;
	struct crypto_instance *inst;
	struct crypto_aead_spawn *spawn;
	struct crypto_alg *alg;
	const char *ccm_name;
	int err;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return ERR_CAST(algt);

	if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & algt->mask)
		return ERR_PTR(-EINVAL);

	ccm_name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(ccm_name))
		return ERR_CAST(ccm_name);

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return ERR_PTR(-ENOMEM);

	spawn = crypto_instance_ctx(inst);
	crypto_set_aead_spawn(spawn, inst);
	err = crypto_grab_aead(spawn, ccm_name, 0,
			       crypto_requires_sync(algt->type, algt->mask));
	if (err)
		goto out_free_inst;

	alg = crypto_aead_spawn_alg(spawn);

	err = -EINVAL;

	/* We only support 16-byte blocks. */
	if (alg->cra_aead.ivsize != 16)
		goto out_drop_alg;

	/* Not a stream cipher? */
	if (alg->cra_blocksize != 1)
		goto out_drop_alg;

	err = -ENAMETOOLONG;
	if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME,
		     "rfc4309(%s)", alg->cra_name) >= CRYPTO_MAX_ALG_NAME ||
	    snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "rfc4309(%s)", alg->cra_driver_name) >=
	    CRYPTO_MAX_ALG_NAME)
		goto out_drop_alg;

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD;
	inst->alg.cra_flags |= alg->cra_flags & CRYPTO_ALG_ASYNC;
	inst->alg.cra_priority = alg->cra_priority;
	inst->alg.cra_blocksize = 1;
	inst->alg.cra_alignmask = alg->cra_alignmask;
	inst->alg.cra_type = &crypto_nivaead_type;

	inst->alg.cra_aead.ivsize = 8;
	inst->alg.cra_aead.maxauthsize = 16;

	inst->alg.cra_ctxsize = sizeof(struct crypto_rfc4309_ctx);

	inst->alg.cra_init = crypto_rfc4309_init_tfm;
	inst->alg.cra_exit = crypto_rfc4309_exit_tfm;

	inst->alg.cra_aead.setkey = crypto_rfc4309_setkey;
	inst->alg.cra_aead.setauthsize = crypto_rfc4309_setauthsize;
	inst->alg.cra_aead.encrypt = crypto_rfc4309_encrypt;
	inst->alg.cra_aead.decrypt = crypto_rfc4309_decrypt;

	inst->alg.cra_aead.geniv = "seqiv";

out:
	return inst;

out_drop_alg:
	crypto_drop_aead(spawn);
out_free_inst:
	kfree(inst);
	inst = ERR_PTR(err);
	goto out;
}
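/*
 * Illustrative usage sketch (not part of the code above): once the rfc4309
 * template is registered, a kernel user would typically instantiate it
 * through the regular AEAD API. The algorithm string "rfc4309(ccm(aes))",
 * the 19-byte key (a 16-byte AES key followed by the 3-byte implicit nonce
 * that crypto_rfc4309_setkey() strips off) and the error handling below are
 * assumptions chosen for the sketch, not taken from this file.
 */
static int rfc4309_usage_sketch(void)
{
	struct crypto_aead *tfm;
	static const u8 key[19];	/* zeroed demo key: AES-128 key + 3-byte salt */
	int err;

	tfm = crypto_alloc_aead("rfc4309(ccm(aes))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);	/* template or underlying cipher unavailable */

	err = crypto_aead_setkey(tfm, key, sizeof(key));
	if (!err)
		err = crypto_aead_setauthsize(tfm, 16);	/* ICV length allowed by the template */

	crypto_free_aead(tfm);
	return err;
}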
struct simd_aead_alg *simd_aead_create_compat(const char *algname,
					      const char *drvname,
					      const char *basename)
{
	struct simd_aead_alg *salg;
	struct crypto_aead *tfm;
	struct aead_alg *ialg;
	struct aead_alg *alg;
	int err;

	tfm = crypto_alloc_aead(basename, CRYPTO_ALG_INTERNAL,
				CRYPTO_ALG_INTERNAL | CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);

	ialg = crypto_aead_alg(tfm);

	salg = kzalloc(sizeof(*salg), GFP_KERNEL);
	if (!salg) {
		salg = ERR_PTR(-ENOMEM);
		goto out_put_tfm;
	}

	salg->ialg_name = basename;
	alg = &salg->alg;

	err = -ENAMETOOLONG;
	if (snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", algname) >=
	    CRYPTO_MAX_ALG_NAME)
		goto out_free_salg;

	if (snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		     drvname) >= CRYPTO_MAX_ALG_NAME)
		goto out_free_salg;

	alg->base.cra_flags = CRYPTO_ALG_ASYNC;
	alg->base.cra_priority = ialg->base.cra_priority;
	alg->base.cra_blocksize = ialg->base.cra_blocksize;
	alg->base.cra_alignmask = ialg->base.cra_alignmask;
	alg->base.cra_module = ialg->base.cra_module;
	alg->base.cra_ctxsize = sizeof(struct simd_aead_ctx);

	alg->ivsize = ialg->ivsize;
	alg->maxauthsize = ialg->maxauthsize;
	alg->chunksize = ialg->chunksize;

	alg->init = simd_aead_init;
	alg->exit = simd_aead_exit;

	alg->setkey = simd_aead_setkey;
	alg->setauthsize = simd_aead_setauthsize;
	alg->encrypt = simd_aead_encrypt;
	alg->decrypt = simd_aead_decrypt;

	err = crypto_register_aead(alg);
	if (err)
		goto out_free_salg;

out_put_tfm:
	crypto_free_aead(tfm);
	return salg;

out_free_salg:
	kfree(salg);
	salg = ERR_PTR(err);
	goto out_put_tfm;
}
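/*
 * Illustrative usage sketch (assumed, not from this file): a driver that has
 * registered an internal, CRYPTO_ALG_INTERNAL-only implementation could wrap
 * it in an async SIMD front end with simd_aead_create_compat(). The names
 * "gcm(aes)", "gcm-aes-demo-simd" and "__gcm-aes-demo" are hypothetical; the
 * internal algorithm is assumed to have been registered elsewhere, and
 * teardown of the wrapper is omitted.
 */
static struct simd_aead_alg *demo_simd_gcm;

static int demo_register_simd_wrapper(void)
{
	demo_simd_gcm = simd_aead_create_compat("gcm(aes)", "gcm-aes-demo-simd",
						"__gcm-aes-demo");
	if (IS_ERR(demo_simd_gcm))
		return PTR_ERR(demo_simd_gcm);
	return 0;
}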
/*
 * Attempt to resolve an object name (dentry->d_name), parent handle, and
 * fsid into a handle for the object.
 */
static struct dentry *orangefs_lookup(struct inode *dir, struct dentry *dentry,
				       unsigned int flags)
{
	struct orangefs_inode_s *parent = ORANGEFS_I(dir);
	struct orangefs_kernel_op_s *new_op;
	struct inode *inode;
	struct dentry *res;
	int ret = -EINVAL;

	/*
	 * in theory we could skip a lookup here (if the intent is to
	 * create) in order to avoid a potentially failed lookup, but
	 * leaving it in can skip a valid lookup and try to create a file
	 * that already exists (e.g. the vfs already handles checking for
	 * -EEXIST on O_EXCL opens, which is broken if we skip this lookup
	 * in the create path)
	 */
	gossip_debug(GOSSIP_NAME_DEBUG, "%s called on %s\n",
		     __func__, dentry->d_name.name);

	if (dentry->d_name.len > (ORANGEFS_NAME_MAX - 1))
		return ERR_PTR(-ENAMETOOLONG);

	new_op = op_alloc(ORANGEFS_VFS_OP_LOOKUP);
	if (!new_op)
		return ERR_PTR(-ENOMEM);

	new_op->upcall.req.lookup.sym_follow = ORANGEFS_LOOKUP_LINK_NO_FOLLOW;

	gossip_debug(GOSSIP_NAME_DEBUG, "%s:%s:%d using parent %pU\n",
		     __FILE__, __func__, __LINE__,
		     &parent->refn.khandle);
	new_op->upcall.req.lookup.parent_refn = parent->refn;

	strncpy(new_op->upcall.req.lookup.d_name, dentry->d_name.name,
		ORANGEFS_NAME_MAX);

	gossip_debug(GOSSIP_NAME_DEBUG,
		     "%s: doing lookup on %s under %pU,%d\n",
		     __func__,
		     new_op->upcall.req.lookup.d_name,
		     &new_op->upcall.req.lookup.parent_refn.khandle,
		     new_op->upcall.req.lookup.parent_refn.fs_id);

	ret = service_operation(new_op, __func__, get_interruptible_flag(dir));

	gossip_debug(GOSSIP_NAME_DEBUG,
		     "Lookup Got %pU, fsid %d (ret=%d)\n",
		     &new_op->downcall.resp.lookup.refn.khandle,
		     new_op->downcall.resp.lookup.refn.fs_id,
		     ret);

	if (ret < 0) {
		if (ret == -ENOENT) {
			/*
			 * if no inode was found, add a negative dentry to
			 * dcache anyway; if we don't, we don't hold expected
			 * lookup semantics and we most noticeably break
			 * during directory renames.
			 *
			 * however, if the operation failed or exited, do not
			 * add the dentry (e.g. in the case that a touch is
			 * issued on a file that already exists that was
			 * interrupted during this lookup -- no need to add
			 * another negative dentry for an existing file)
			 */
			gossip_debug(GOSSIP_NAME_DEBUG,
				     "orangefs_lookup: Adding *negative* dentry "
				     "%p for %s\n",
				     dentry,
				     dentry->d_name.name);

			d_add(dentry, NULL);
			res = NULL;
			goto out;
		}

		/* must be a non-recoverable error */
		res = ERR_PTR(ret);
		goto out;
	}

	dentry->d_time = jiffies + dcache_timeout_msecs*HZ/1000;

	inode = orangefs_iget(dir->i_sb, &new_op->downcall.resp.lookup.refn);
	if (IS_ERR(inode)) {
		gossip_debug(GOSSIP_NAME_DEBUG,
			     "error %ld from iget\n", PTR_ERR(inode));
		res = ERR_CAST(inode);
		goto out;
	}

	ORANGEFS_I(inode)->getattr_time = jiffies - 1;

	gossip_debug(GOSSIP_NAME_DEBUG,
		     "%s:%s:%d Found good inode [%lu] with count [%d]\n",
		     __FILE__, __func__, __LINE__,
		     inode->i_ino,
		     (int)atomic_read(&inode->i_count));

	/* update dentry/inode pair into dcache */
	res = d_splice_alias(inode, dentry);

	gossip_debug(GOSSIP_NAME_DEBUG,
		     "Lookup success (inode ct = %d)\n",
		     (int)atomic_read(&inode->i_count));
out:
	op_release(new_op);
	return res;
}
static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt)
{
	struct dfs_info3_param *referrals = NULL;
	unsigned int num_referrals = 0;
	struct cifs_sb_info *cifs_sb;
	struct cifs_ses *ses;
	char *full_path;
	int xid, i;
	int rc;
	struct vfsmount *mnt;
	struct tcon_link *tlink;

	cFYI(1, "in %s", __func__);
	BUG_ON(IS_ROOT(mntpt));

	mnt = ERR_PTR(-ENOMEM);
	full_path = build_path_from_dentry(mntpt);
	if (full_path == NULL)
		goto cdda_exit;

	cifs_sb = CIFS_SB(mntpt->d_inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		mnt = ERR_CAST(tlink);
		goto free_full_path;
	}
	ses = tlink_tcon(tlink)->ses;

	xid = GetXid();
	rc = get_dfs_path(xid, ses, full_path + 1, cifs_sb->local_nls,
			  &num_referrals, &referrals,
			  cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
	FreeXid(xid);

	cifs_put_tlink(tlink);

	mnt = ERR_PTR(-ENOENT);
	for (i = 0; i < num_referrals; i++) {
		int len;

		dump_referral(referrals + i);
		len = strlen(referrals[i].node_name);
		if (len < 2) {
			cERROR(1, "%s: Net Address path too short: %s",
			       __func__, referrals[i].node_name);
			mnt = ERR_PTR(-EINVAL);
			break;
		}
		mnt = cifs_dfs_do_refmount(cifs_sb, full_path, referrals + i);
		cFYI(1, "%s: cifs_dfs_do_refmount:%s , mnt:%p", __func__,
		     referrals[i].node_name, mnt);
		if (!IS_ERR(mnt))
			goto success;
	}

	if (rc != 0)
		mnt = ERR_PTR(rc);

success:
	free_dfs_info_array(referrals, num_referrals);
free_full_path:
	kfree(full_path);
cdda_exit:
	cFYI(1, "leaving %s", __func__);
	return mnt;
}
/*
 * Export operations
 */
static struct dentry *nilfs_get_parent(struct dentry *child)
{
	unsigned long ino;
	struct inode *inode;
	struct qstr dotdot = {.name = "..", .len = 2};
	struct nilfs_root *root;

	ino = nilfs_inode_by_name(child->d_inode, &dotdot);
	if (!ino)
		return ERR_PTR(-ENOENT);

	root = NILFS_I(child->d_inode)->i_root;

	inode = nilfs_iget(child->d_inode->i_sb, root, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	return d_obtain_alias(inode);
}

static struct dentry *nilfs_get_dentry(struct super_block *sb, u64 cno,
				       u64 ino, u32 gen)
{
	struct nilfs_root *root;
	struct inode *inode;

	if (ino < NILFS_FIRST_INO(sb) && ino != NILFS_ROOT_INO)
		return ERR_PTR(-ESTALE);

	root = nilfs_lookup_root(sb->s_fs_info, cno);
	if (!root)
		return ERR_PTR(-ESTALE);

	inode = nilfs_iget(sb, root, ino);
	nilfs_put_root(root);

	if (IS_ERR(inode))
		return ERR_CAST(inode);
	if (gen && inode->i_generation != gen) {
		iput(inode);
		return ERR_PTR(-ESTALE);
	}
	return d_obtain_alias(inode);
}

static struct dentry *nilfs_fh_to_dentry(struct super_block *sb, struct fid *fh,
					 int fh_len, int fh_type)
{
	struct nilfs_fid *fid = (struct nilfs_fid *)fh;

	if ((fh_len != NILFS_FID_SIZE_NON_CONNECTABLE &&
	     fh_len != NILFS_FID_SIZE_CONNECTABLE) ||
	    (fh_type != FILEID_NILFS_WITH_PARENT &&
	     fh_type != FILEID_NILFS_WITHOUT_PARENT))
		return NULL;

	return nilfs_get_dentry(sb, fid->cno, fid->ino, fid->gen);
}

static struct dentry *nilfs_fh_to_parent(struct super_block *sb, struct fid *fh,
					 int fh_len, int fh_type)
{
	struct nilfs_fid *fid = (struct nilfs_fid *)fh;

	if (fh_len != NILFS_FID_SIZE_CONNECTABLE ||
	    fh_type != FILEID_NILFS_WITH_PARENT)
		return NULL;

	return nilfs_get_dentry(sb, fid->cno, fid->parent_ino, fid->parent_gen);
}

static int nilfs_encode_fh(struct dentry *dentry, __u32 *fh, int *lenp,
			   int connectable)
{
	struct nilfs_fid *fid = (struct nilfs_fid *)fh;
	struct inode *inode = dentry->d_inode;
	struct nilfs_root *root = NILFS_I(inode)->i_root;
	int type;

	if (*lenp < NILFS_FID_SIZE_NON_CONNECTABLE ||
	    (connectable && *lenp < NILFS_FID_SIZE_CONNECTABLE))
		return 255;

	fid->cno = root->cno;
	fid->ino = inode->i_ino;
	fid->gen = inode->i_generation;

	if (connectable && !S_ISDIR(inode->i_mode)) {
		struct inode *parent;

		spin_lock(&dentry->d_lock);
		parent = dentry->d_parent->d_inode;
		fid->parent_ino = parent->i_ino;
		fid->parent_gen = parent->i_generation;
		spin_unlock(&dentry->d_lock);

		type = FILEID_NILFS_WITH_PARENT;
		*lenp = NILFS_FID_SIZE_CONNECTABLE;
	} else {
		type = FILEID_NILFS_WITHOUT_PARENT;
		*lenp = NILFS_FID_SIZE_NON_CONNECTABLE;
	}

	return type;
}

const struct inode_operations nilfs_dir_inode_operations = {
	.create		= nilfs_create,
	.lookup		= nilfs_lookup,
	.link		= nilfs_link,
	.unlink		= nilfs_unlink,
	.symlink	= nilfs_symlink,
	.mkdir		= nilfs_mkdir,
	.rmdir		= nilfs_rmdir,
	.mknod		= nilfs_mknod,
	.rename		= nilfs_rename,
	.setattr	= nilfs_setattr,
	.permission	= nilfs_permission,
	.fiemap		= nilfs_fiemap,
};

const struct inode_operations nilfs_special_inode_operations = {
	.setattr	= nilfs_setattr,
	.permission	= nilfs_permission,
};

const struct inode_operations nilfs_symlink_inode_operations = {
	.readlink	= generic_readlink,
	.follow_link	= page_follow_link_light,
	.put_link	= page_put_link,
	.permission	= nilfs_permission,
};

const struct export_operations nilfs_export_ops = {
	.encode_fh	= nilfs_encode_fh,
	.fh_to_dentry	= nilfs_fh_to_dentry,
	.fh_to_parent	= nilfs_fh_to_parent,
	.get_parent	= nilfs_get_parent,
};
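/*
 * Illustrative sketch (assumed, not from this file): the file-handle layout
 * implied by nilfs_encode_fh() and the two decode helpers above. The field
 * order, types and size constants are guesses made for the sketch; the real
 * struct nilfs_fid lives in the nilfs2 headers. The point is that a
 * connectable handle additionally carries the parent's inode number and
 * generation, which is what nilfs_fh_to_parent() reads back.
 */
struct nilfs_fid_sketch {
	u64 cno;	/* checkpoint number the inode belongs to */
	u64 ino;	/* inode number */
	u32 gen;	/* inode generation */

	u32 parent_gen;	/* present only in connectable (with-parent) handles */
	u64 parent_ino;
};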
static struct dentry *
cifs_do_mount(struct file_system_type *fs_type,
	      int flags, const char *dev_name, void *data)
{
	int rc;
	struct super_block *sb;
	struct cifs_sb_info *cifs_sb;
	struct smb_vol *volume_info;
	struct cifs_mnt_data mnt_data;
	struct dentry *root;

	cifs_dbg(FYI, "Devname: %s flags: %d\n", dev_name, flags);

	volume_info = cifs_get_volume_info((char *)data, dev_name);
	if (IS_ERR(volume_info))
		return ERR_CAST(volume_info);

	cifs_sb = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
	if (cifs_sb == NULL) {
		root = ERR_PTR(-ENOMEM);
		goto out_nls;
	}

	cifs_sb->mountdata = kstrndup(data, PAGE_SIZE, GFP_KERNEL);
	if (cifs_sb->mountdata == NULL) {
		root = ERR_PTR(-ENOMEM);
		goto out_cifs_sb;
	}

	cifs_setup_cifs_sb(volume_info, cifs_sb);

	rc = cifs_mount(cifs_sb, volume_info);
	if (rc) {
		if (!(flags & MS_SILENT))
			cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
				 rc);
		root = ERR_PTR(rc);
		goto out_mountdata;
	}

	mnt_data.vol = volume_info;
	mnt_data.cifs_sb = cifs_sb;
	mnt_data.flags = flags;

	/* BB should we make this contingent on mount parm? */
	flags |= MS_NODIRATIME | MS_NOATIME;

	sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
	if (IS_ERR(sb)) {
		root = ERR_CAST(sb);
		cifs_umount(cifs_sb);
		goto out;
	}

	if (sb->s_root) {
		cifs_dbg(FYI, "Use existing superblock\n");
		cifs_umount(cifs_sb);
	} else {
		rc = cifs_read_super(sb);
		if (rc) {
			root = ERR_PTR(rc);
			goto out_super;
		}

		sb->s_flags |= MS_ACTIVE;
	}

	root = cifs_get_root(volume_info, sb);
	if (IS_ERR(root))
		goto out_super;

	cifs_dbg(FYI, "dentry root is: %p\n", root);
	goto out;

out_super:
	deactivate_locked_super(sb);
out:
	cifs_cleanup_volume_info(volume_info);
	return root;

out_mountdata:
	kfree(cifs_sb->mountdata);
out_cifs_sb:
	kfree(cifs_sb);
out_nls:
	unload_nls(volume_info->local_nls);
	goto out;
}
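/*
 * Illustrative sketch (assumed helpers, not from this file): the
 * ERR_PTR()/IS_ERR()/ERR_CAST() idiom that every function above relies on.
 * A callee encodes a negative errno into a pointer with ERR_PTR(), the
 * caller detects it with IS_ERR(), and ERR_CAST() forwards the same encoded
 * error when the caller itself returns a pointer of a different type.
 * struct demo_obj, struct demo_handle, demo_open() and demo_wrap() are made
 * up for this sketch.
 */
struct demo_obj { int id; };
struct demo_handle { struct demo_obj *obj; };

static struct demo_obj *demo_open(int id)
{
	struct demo_obj *obj;

	if (id < 0)
		return ERR_PTR(-EINVAL);	/* errno encoded in the pointer */

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);
	obj->id = id;
	return obj;
}

static struct demo_handle *demo_wrap(int id)
{
	struct demo_handle *h;
	struct demo_obj *obj = demo_open(id);

	if (IS_ERR(obj))
		return ERR_CAST(obj);	/* same errno, different pointer type */

	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h) {
		kfree(obj);
		return ERR_PTR(-ENOMEM);
	}
	h->obj = obj;
	return h;
}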