Example 1
/*
 * Marshal credentials.
 * Maybe we should keep a cached credential for performance reasons.
 */
static __be32 *
unx_marshal(struct rpc_task *task, __be32 *p)
{
	struct rpc_clnt	*clnt = task->tk_client;
	struct unx_cred	*cred = container_of(task->tk_rqstp->rq_cred, struct unx_cred, uc_base);
	__be32		*base, *hold;
	int		i;

	*p++ = htonl(RPC_AUTH_UNIX);
	base = p++;
	*p++ = htonl(jiffies/HZ);

	/*
	 * Copy the UTS nodename captured when the client was created.
	 */
	p = xdr_encode_array(p, clnt->cl_nodename, clnt->cl_nodelen);

	*p++ = htonl((u32) from_kuid(&init_user_ns, cred->uc_uid));
	*p++ = htonl((u32) from_kgid(&init_user_ns, cred->uc_gid));
	hold = p++;
	for (i = 0; i < 16 && gid_valid(cred->uc_gids[i]); i++)
		*p++ = htonl((u32) from_kgid(&init_user_ns, cred->uc_gids[i]));
	*hold = htonl(p - hold - 1);		/* gid array length */
	*base = htonl((p - base - 1) << 2);	/* cred length */

	*p++ = htonl(RPC_AUTH_NULL);
	*p++ = htonl(0);

	return p;
}
Example 2
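/*
 * Pack a setattr reintegration record: the caller's fsuid/fsgid and
 * capabilities plus the attributes carried in op_data, with all IDs
 * converted to plain values in the initial user namespace.
 */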
static void mdc_setattr_pack_rec(struct mdt_rec_setattr *rec,
				 struct md_op_data *op_data)
{
	rec->sa_opcode  = REINT_SETATTR;
	rec->sa_fsuid   = from_kuid(&init_user_ns, current_fsuid());
	rec->sa_fsgid   = from_kgid(&init_user_ns, current_fsgid());
	rec->sa_cap     = cfs_curproc_cap_pack();
	rec->sa_suppgid = -1;

	rec->sa_fid    = op_data->op_fid1;
	rec->sa_valid  = attr_pack(op_data->op_attr.ia_valid);
	rec->sa_mode   = op_data->op_attr.ia_mode;
	rec->sa_uid    = from_kuid(&init_user_ns, op_data->op_attr.ia_uid);
	rec->sa_gid    = from_kgid(&init_user_ns, op_data->op_attr.ia_gid);
	rec->sa_size   = op_data->op_attr.ia_size;
	rec->sa_blocks = op_data->op_attr_blocks;
	rec->sa_atime  = LTIME_S(op_data->op_attr.ia_atime);
	rec->sa_mtime  = LTIME_S(op_data->op_attr.ia_mtime);
	rec->sa_ctime  = LTIME_S(op_data->op_attr.ia_ctime);
	rec->sa_attr_flags = ((struct ll_iattr *)&op_data->op_attr)->ia_attr_flags;
	if ((op_data->op_attr.ia_valid & ATTR_GID) &&
	    in_group_p(op_data->op_attr.ia_gid))
		rec->sa_suppgid =
			from_kgid(&init_user_ns, op_data->op_attr.ia_gid);
	else
		rec->sa_suppgid = op_data->op_suppgids[0];

	rec->sa_bias = op_data->op_bias;
}
Example 3
/* a simple shell-metzner sort */
void lustre_groups_sort(struct group_info *group_info)
{
	int base, max, stride;
	int gidsetsize = group_info->ngroups;

	for (stride = 1; stride < gidsetsize; stride = 3 * stride + 1)
		; /* nothing */
	stride /= 3;

	while (stride) {
		max = gidsetsize - stride;
		for (base = 0; base < max; base++) {
			int left = base;
			int right = left + stride;
			gid_t tmp = from_kgid(&init_user_ns,
					      CFS_GROUP_AT(group_info, right));

			while (left >= 0 &&
			       tmp < from_kgid(&init_user_ns,
					       CFS_GROUP_AT(group_info, left))) {
				CFS_GROUP_AT(group_info, right) =
					CFS_GROUP_AT(group_info, left);
				right = left;
				left -= stride;
			}
			CFS_GROUP_AT(group_info, right) =
						make_kgid(&init_user_ns, tmp);
		}
		stride /= 3;
	}
}
Example 4
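/*
 * Fill the common mdt_body credential fields (uid/gid, fsuid/fsgid,
 * capabilities) from the current task, mapped through init_user_ns.
 */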
static void __mdc_pack_body(struct mdt_body *b, __u32 suppgid)
{
	LASSERT (b != NULL);

	b->suppgid = suppgid;
	b->uid = from_kuid(&init_user_ns, current_uid());
	b->gid = from_kgid(&init_user_ns, current_gid());
	b->fsuid = from_kuid(&init_user_ns, current_fsuid());
	b->fsgid = from_kgid(&init_user_ns, current_fsgid());
	b->capability = cfs_curproc_cap_pack();
}
Example 5
/*
 * Marshal credentials.
 * Maybe we should keep a cached credential for performance reasons.
 */
static int
unx_marshal(struct rpc_task *task, struct xdr_stream *xdr)
{
	struct rpc_clnt	*clnt = task->tk_client;
	struct rpc_cred	*cred = task->tk_rqstp->rq_cred;
	__be32		*p, *cred_len, *gidarr_len;
	int		i;
	struct group_info *gi = cred->cr_cred->group_info;

	/* Credential */

	p = xdr_reserve_space(xdr, 3 * sizeof(*p));
	if (!p)
		goto marshal_failed;
	*p++ = rpc_auth_unix;
	cred_len = p++;
	*p++ = xdr_zero;	/* stamp */
	if (xdr_stream_encode_opaque(xdr, clnt->cl_nodename,
				     clnt->cl_nodelen) < 0)
		goto marshal_failed;
	p = xdr_reserve_space(xdr, 3 * sizeof(*p));
	if (!p)
		goto marshal_failed;
	*p++ = cpu_to_be32(from_kuid(&init_user_ns, cred->cr_cred->fsuid));
	*p++ = cpu_to_be32(from_kgid(&init_user_ns, cred->cr_cred->fsgid));

	gidarr_len = p++;
	if (gi)
		for (i = 0; i < UNX_NGROUPS && i < gi->ngroups; i++)
			*p++ = cpu_to_be32(from_kgid(&init_user_ns,
						     gi->gid[i]));
	*gidarr_len = cpu_to_be32(p - gidarr_len - 1);
	*cred_len = cpu_to_be32((p - cred_len - 1) << 2);
	p = xdr_reserve_space(xdr, (p - gidarr_len - 1) << 2);
	if (!p)
		goto marshal_failed;

	/* Verifier */

	p = xdr_reserve_space(xdr, 2 * sizeof(*p));
	if (!p)
		goto marshal_failed;
	*p++ = rpc_auth_null;
	*p   = xdr_zero;

	return 0;

marshal_failed:
	return -EMSGSIZE;
}
Example 6
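/*
 * Allocate an AUTH_UNIX credential from the generic auth_cred, copying at
 * most NFS_NGROUPS supplementary groups and terminating the array with
 * INVALID_GID when fewer groups are present.
 */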
static struct rpc_cred *
unx_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags, gfp_t gfp)
{
	struct unx_cred	*cred;
	unsigned int groups = 0;
	unsigned int i;

	dprintk("RPC:       allocating UNIX cred for uid %d gid %d\n",
			from_kuid(&init_user_ns, acred->uid),
			from_kgid(&init_user_ns, acred->gid));

	if (!(cred = kmalloc(sizeof(*cred), gfp)))
		return ERR_PTR(-ENOMEM);

	rpcauth_init_cred(&cred->uc_base, acred, auth, &unix_credops);
	cred->uc_base.cr_flags = 1UL << RPCAUTH_CRED_UPTODATE;

	if (acred->group_info != NULL)
		groups = acred->group_info->ngroups;
	if (groups > NFS_NGROUPS)
		groups = NFS_NGROUPS;

	cred->uc_gid = acred->gid;
	for (i = 0; i < groups; i++)
		cred->uc_gids[i] = GROUP_AT(acred->group_info, i);
	if (i < NFS_NGROUPS)
		cred->uc_gids[i] = INVALID_GID;

	return &cred->uc_base;
}
Example 7
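/*
 * Encode an NFSv3 fattr3 from a kstat. The owner IDs are converted from
 * kuid_t/kgid_t to numeric on-the-wire IDs via the initial user namespace,
 * and symlink sizes are clamped to NFS3_MAXPATHLEN.
 */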
static __be32 *
encode_fattr3(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp,
	      struct kstat *stat)
{
	struct timespec ts;
	*p++ = htonl(nfs3_ftypes[(stat->mode & S_IFMT) >> 12]);
	*p++ = htonl((u32) (stat->mode & S_IALLUGO));
	*p++ = htonl((u32) stat->nlink);
	*p++ = htonl((u32) from_kuid(&init_user_ns, stat->uid));
	*p++ = htonl((u32) from_kgid(&init_user_ns, stat->gid));
	if (S_ISLNK(stat->mode) && stat->size > NFS3_MAXPATHLEN) {
		p = xdr_encode_hyper(p, (u64) NFS3_MAXPATHLEN);
	} else {
		p = xdr_encode_hyper(p, (u64) stat->size);
	}
	p = xdr_encode_hyper(p, ((u64)stat->blocks) << 9);
	*p++ = htonl((u32) MAJOR(stat->rdev));
	*p++ = htonl((u32) MINOR(stat->rdev));
	p = encode_fsid(p, fhp);
	p = xdr_encode_hyper(p, stat->ino);
	ts = timespec64_to_timespec(stat->atime);
	p = encode_time3(p, &ts);
	ts = timespec64_to_timespec(stat->mtime);
	p = encode_time3(p, &ts);
	ts = timespec64_to_timespec(stat->ctime);
	p = encode_time3(p, &ts);

	return p;
}
Example 8
/*
 * Convert acl struct to xattr value.
 */
static void *ocfs2_acl_to_xattr(const struct posix_acl *acl, size_t *size)
{
	struct ocfs2_acl_entry *entry = NULL;
	char *ocfs2_acl;
	size_t n;

	*size = acl->a_count * sizeof(struct posix_acl_entry);

	ocfs2_acl = kmalloc(*size, GFP_NOFS);
	if (!ocfs2_acl)
		return ERR_PTR(-ENOMEM);

	entry = (struct ocfs2_acl_entry *)ocfs2_acl;
	for (n = 0; n < acl->a_count; n++, entry++) {
		entry->e_tag  = cpu_to_le16(acl->a_entries[n].e_tag);
		entry->e_perm = cpu_to_le16(acl->a_entries[n].e_perm);
		switch(acl->a_entries[n].e_tag) {
		case ACL_USER:
			entry->e_id = cpu_to_le32(
				from_kuid(&init_user_ns,
					  acl->a_entries[n].e_uid));
			break;
		case ACL_GROUP:
			entry->e_id = cpu_to_le32(
				from_kgid(&init_user_ns,
					  acl->a_entries[n].e_gid));
			break;
		default:
			entry->e_id = cpu_to_le32(ACL_UNDEFINED_ID);
			break;
		}
	}
	return ocfs2_acl;
}
Example 9
File: dir.c Project: Anjali05/linux
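/*
 * Translate a VFS iattr into a FUSE setattr request. Unlike the older
 * variant in the next example, UIDs and GIDs are mapped through the
 * connection's user namespace (fc->user_ns) rather than init_user_ns.
 */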
static void iattr_to_fattr(struct fuse_conn *fc, struct iattr *iattr,
			   struct fuse_setattr_in *arg, bool trust_local_cmtime)
{
	unsigned ivalid = iattr->ia_valid;

	if (ivalid & ATTR_MODE)
		arg->valid |= FATTR_MODE,   arg->mode = iattr->ia_mode;
	if (ivalid & ATTR_UID)
		arg->valid |= FATTR_UID,    arg->uid = from_kuid(fc->user_ns, iattr->ia_uid);
	if (ivalid & ATTR_GID)
		arg->valid |= FATTR_GID,    arg->gid = from_kgid(fc->user_ns, iattr->ia_gid);
	if (ivalid & ATTR_SIZE)
		arg->valid |= FATTR_SIZE,   arg->size = iattr->ia_size;
	if (ivalid & ATTR_ATIME) {
		arg->valid |= FATTR_ATIME;
		arg->atime = iattr->ia_atime.tv_sec;
		arg->atimensec = iattr->ia_atime.tv_nsec;
		if (!(ivalid & ATTR_ATIME_SET))
			arg->valid |= FATTR_ATIME_NOW;
	}
	if ((ivalid & ATTR_MTIME) && update_mtime(ivalid, trust_local_cmtime)) {
		arg->valid |= FATTR_MTIME;
		arg->mtime = iattr->ia_mtime.tv_sec;
		arg->mtimensec = iattr->ia_mtime.tv_nsec;
		if (!(ivalid & ATTR_MTIME_SET) && !trust_local_cmtime)
			arg->valid |= FATTR_MTIME_NOW;
	}
	if ((ivalid & ATTR_CTIME) && trust_local_cmtime) {
		arg->valid |= FATTR_CTIME;
		arg->ctime = iattr->ia_ctime.tv_sec;
		arg->ctimensec = iattr->ia_ctime.tv_nsec;
	}
}
Example 10
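/*
 * Earlier version of the FUSE conversion shown in the previous example:
 * IDs are always mapped through init_user_ns and there is no ctime or
 * trust_local_cmtime handling.
 */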
static void iattr_to_fattr(struct iattr *iattr, struct fuse_setattr_in *arg)
{
	unsigned ivalid = iattr->ia_valid;

	if (ivalid & ATTR_MODE)
		arg->valid |= FATTR_MODE,   arg->mode = iattr->ia_mode;
	if (ivalid & ATTR_UID)
		arg->valid |= FATTR_UID,    arg->uid = from_kuid(&init_user_ns, iattr->ia_uid);
	if (ivalid & ATTR_GID)
		arg->valid |= FATTR_GID,    arg->gid = from_kgid(&init_user_ns, iattr->ia_gid);
	if (ivalid & ATTR_SIZE)
		arg->valid |= FATTR_SIZE,   arg->size = iattr->ia_size;
	if (ivalid & ATTR_ATIME) {
		arg->valid |= FATTR_ATIME;
		arg->atime = iattr->ia_atime.tv_sec;
		arg->atimensec = iattr->ia_atime.tv_nsec;
		if (!(ivalid & ATTR_ATIME_SET))
			arg->valid |= FATTR_ATIME_NOW;
	}
	if ((ivalid & ATTR_MTIME) && update_mtime(ivalid)) {
		arg->valid |= FATTR_MTIME;
		arg->mtime = iattr->ia_mtime.tv_sec;
		arg->mtimensec = iattr->ia_mtime.tv_nsec;
		if (!(ivalid & ATTR_MTIME_SET))
			arg->valid |= FATTR_MTIME_NOW;
	}
}
Example 11
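/*
 * Copy the fields selected by ia_valid from a VFS iattr into an obdo,
 * setting the corresponding OBD_MD_* flags. S_ISGID is cleared when the
 * caller is neither in the file's group nor capable of CFS_CAP_FSETID.
 */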
void obdo_from_iattr(struct obdo *oa, struct iattr *attr, unsigned int ia_valid)
{
	if (ia_valid & ATTR_ATIME) {
		oa->o_atime = LTIME_S(attr->ia_atime);
		oa->o_valid |= OBD_MD_FLATIME;
	}
	if (ia_valid & ATTR_MTIME) {
		oa->o_mtime = LTIME_S(attr->ia_mtime);
		oa->o_valid |= OBD_MD_FLMTIME;
	}
	if (ia_valid & ATTR_CTIME) {
		oa->o_ctime = LTIME_S(attr->ia_ctime);
		oa->o_valid |= OBD_MD_FLCTIME;
	}
	if (ia_valid & ATTR_SIZE) {
		oa->o_size = attr->ia_size;
		oa->o_valid |= OBD_MD_FLSIZE;
	}
	if (ia_valid & ATTR_MODE) {
		oa->o_mode = attr->ia_mode;
		oa->o_valid |= OBD_MD_FLTYPE | OBD_MD_FLMODE;
		if (!in_group_p(make_kgid(&init_user_ns, oa->o_gid)) &&
		    !cfs_capable(CFS_CAP_FSETID))
			oa->o_mode &= ~S_ISGID;
	}
	if (ia_valid & ATTR_UID) {
		oa->o_uid = from_kuid(&init_user_ns, attr->ia_uid);
		oa->o_valid |= OBD_MD_FLUID;
	}
	if (ia_valid & ATTR_GID) {
		oa->o_gid = from_kgid(&init_user_ns, attr->ia_gid);
		oa->o_valid |= OBD_MD_FLGID;
	}
}
Example 12
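/*
 * Hash an auth_cred: the GID occupies the low 32 bits and the UID the
 * high 32 bits of the 64-bit key passed to hash_64().
 */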
static int
unx_hash_cred(struct auth_cred *acred, unsigned int hashbits)
{
	return hash_64(from_kgid(&init_user_ns, acred->gid) |
		((u64)from_kuid(&init_user_ns, acred->uid) <<
			(sizeof(gid_t) * 8)), hashbits);
}
Example 13
/* WARNING: the file systems must take care not to tinker with
 * attributes they don't manage (such as blocks).
 */
void obdo_from_inode(struct obdo *dst, struct inode *src, u32 valid)
{
	u32 newvalid = 0;

	if (valid & (OBD_MD_FLCTIME | OBD_MD_FLMTIME))
		CDEBUG(D_INODE, "valid %x, new time %lu/%lu\n",
		       valid, LTIME_S(src->i_mtime),
		       LTIME_S(src->i_ctime));

	if (valid & OBD_MD_FLATIME) {
		dst->o_atime = LTIME_S(src->i_atime);
		newvalid |= OBD_MD_FLATIME;
	}
	if (valid & OBD_MD_FLMTIME) {
		dst->o_mtime = LTIME_S(src->i_mtime);
		newvalid |= OBD_MD_FLMTIME;
	}
	if (valid & OBD_MD_FLCTIME) {
		dst->o_ctime = LTIME_S(src->i_ctime);
		newvalid |= OBD_MD_FLCTIME;
	}
	if (valid & OBD_MD_FLSIZE) {
		dst->o_size = i_size_read(src);
		newvalid |= OBD_MD_FLSIZE;
	}
	if (valid & OBD_MD_FLBLOCKS) {  /* allocation of space (x512 bytes) */
		dst->o_blocks = src->i_blocks;
		newvalid |= OBD_MD_FLBLOCKS;
	}
	if (valid & OBD_MD_FLBLKSZ) {   /* optimal block size */
		dst->o_blksize = 1 << src->i_blkbits;
		newvalid |= OBD_MD_FLBLKSZ;
	}
	if (valid & OBD_MD_FLTYPE) {
		dst->o_mode = (dst->o_mode & S_IALLUGO) |
			      (src->i_mode & S_IFMT);
		newvalid |= OBD_MD_FLTYPE;
	}
	if (valid & OBD_MD_FLMODE) {
		dst->o_mode = (dst->o_mode & S_IFMT) |
			      (src->i_mode & S_IALLUGO);
		newvalid |= OBD_MD_FLMODE;
	}
	if (valid & OBD_MD_FLUID) {
		dst->o_uid = from_kuid(&init_user_ns, src->i_uid);
		newvalid |= OBD_MD_FLUID;
	}
	if (valid & OBD_MD_FLGID) {
		dst->o_gid = from_kgid(&init_user_ns, src->i_gid);
		newvalid |= OBD_MD_FLGID;
	}
	if (valid & OBD_MD_FLFLAGS) {
		dst->o_flags = src->i_flags;
		newvalid |= OBD_MD_FLFLAGS;
	}
	dst->o_valid |= newvalid;
}
Example 14
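/*
 * Like from_kgid(), but never returns (gid_t)-1: GIDs that have no mapping
 * in the target namespace are reported as overflowgid instead.
 */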
gid_t from_kgid_munged(struct user_namespace *targ, kgid_t kgid)
{
	gid_t gid;
	gid = from_kgid(targ, kgid);

	if (gid == (gid_t) -1)
		gid = overflowgid;
	return gid;
}
Example 15
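/*
 * Encode an NFSv2 fattr from a kstat, including the export's fsid variant
 * and a lease-adjusted mtime.
 */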
static __be32 *
encode_fattr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp,
	     struct kstat *stat)
{
	struct dentry	*dentry = fhp->fh_dentry;
	int type;
	struct timespec64 time;
	u32 f;

	type = (stat->mode & S_IFMT);

	*p++ = htonl(nfs_ftypes[type >> 12]);
	*p++ = htonl((u32) stat->mode);
	*p++ = htonl((u32) stat->nlink);
	*p++ = htonl((u32) from_kuid(&init_user_ns, stat->uid));
	*p++ = htonl((u32) from_kgid(&init_user_ns, stat->gid));

	if (S_ISLNK(type) && stat->size > NFS_MAXPATHLEN) {
		*p++ = htonl(NFS_MAXPATHLEN);
	} else {
		*p++ = htonl((u32) stat->size);
	}
	*p++ = htonl((u32) stat->blksize);
	if (S_ISCHR(type) || S_ISBLK(type))
		*p++ = htonl(new_encode_dev(stat->rdev));
	else
		*p++ = htonl(0xffffffff);
	*p++ = htonl((u32) stat->blocks);
	switch (fsid_source(fhp)) {
	default:
	case FSIDSOURCE_DEV:
		*p++ = htonl(new_encode_dev(stat->dev));
		break;
	case FSIDSOURCE_FSID:
		*p++ = htonl((u32) fhp->fh_export->ex_fsid);
		break;
	case FSIDSOURCE_UUID:
		f = ((u32*)fhp->fh_export->ex_uuid)[0];
		f ^= ((u32*)fhp->fh_export->ex_uuid)[1];
		f ^= ((u32*)fhp->fh_export->ex_uuid)[2];
		f ^= ((u32*)fhp->fh_export->ex_uuid)[3];
		*p++ = htonl(f);
		break;
	}
	*p++ = htonl((u32) stat->ino);
	*p++ = htonl((u32) stat->atime.tv_sec);
	*p++ = htonl(stat->atime.tv_nsec ? stat->atime.tv_nsec / 1000 : 0);
	time = stat->mtime;
	lease_get_mtime(d_inode(dentry), &time); 
	*p++ = htonl((u32) time.tv_sec);
	*p++ = htonl(time.tv_nsec ? time.tv_nsec / 1000 : 0); 
	*p++ = htonl((u32) stat->ctime.tv_sec);
	*p++ = htonl(stat->ctime.tv_nsec ? stat->ctime.tv_nsec / 1000 : 0);

	return p;
}
Example 16
File: acl.c Project: mkrufky/linux
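/*
 * Convert an in-memory POSIX ACL to the f2fs on-disk format. Only ACL_USER
 * and ACL_GROUP entries carry an ID, mapped out of the initial user
 * namespace; the remaining tags use the short entry layout.
 */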
static void *f2fs_acl_to_disk(struct f2fs_sb_info *sbi,
				const struct posix_acl *acl, size_t *size)
{
	struct f2fs_acl_header *f2fs_acl;
	struct f2fs_acl_entry *entry;
	int i;

	f2fs_acl = f2fs_kmalloc(sbi, sizeof(struct f2fs_acl_header) +
			acl->a_count * sizeof(struct f2fs_acl_entry),
			GFP_NOFS);
	if (!f2fs_acl)
		return ERR_PTR(-ENOMEM);

	f2fs_acl->a_version = cpu_to_le32(F2FS_ACL_VERSION);
	entry = (struct f2fs_acl_entry *)(f2fs_acl + 1);

	for (i = 0; i < acl->a_count; i++) {

		entry->e_tag  = cpu_to_le16(acl->a_entries[i].e_tag);
		entry->e_perm = cpu_to_le16(acl->a_entries[i].e_perm);

		switch (acl->a_entries[i].e_tag) {
		case ACL_USER:
			entry->e_id = cpu_to_le32(
					from_kuid(&init_user_ns,
						acl->a_entries[i].e_uid));
			entry = (struct f2fs_acl_entry *)((char *)entry +
					sizeof(struct f2fs_acl_entry));
			break;
		case ACL_GROUP:
			entry->e_id = cpu_to_le32(
					from_kgid(&init_user_ns,
						acl->a_entries[i].e_gid));
			entry = (struct f2fs_acl_entry *)((char *)entry +
					sizeof(struct f2fs_acl_entry));
			break;
		case ACL_USER_OBJ:
		case ACL_GROUP_OBJ:
		case ACL_MASK:
		case ACL_OTHER:
			entry = (struct f2fs_acl_entry *)((char *)entry +
					sizeof(struct f2fs_acl_entry_short));
			break;
		default:
			goto fail;
		}
	}
	*size = f2fs_acl_size(acl->a_count);
	return (void *)f2fs_acl;

fail:
	kfree(f2fs_acl);
	return ERR_PTR(-EINVAL);
}
Example 17
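/* Return the real GID of the calling process, coping with the struct cred
 * (2.6.29) and kgid_t (3.5) kernel API changes. */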
DECLINLINE(RTGID) vboxdrvLinuxGid(void)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
# if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
    return from_kgid(current_user_ns(), current->cred->gid);
# else
    return current->cred->gid;
# endif
#else
    return current->gid;
#endif
}
Example 18
/* NB: setxid permission is not checked here, instead it's done on
 * MDT when client get remote permission. */
static int do_check_remote_perm(struct ll_inode_info *lli, int mask)
{
	struct hlist_head *head;
	struct ll_remote_perm *lrp;
	int found = 0, rc;

	if (!lli->lli_remote_perms)
		return -ENOENT;

	head = lli->lli_remote_perms +
		remote_perm_hashfunc(from_kuid(&init_user_ns, current_uid()));

	spin_lock(&lli->lli_lock);
	hlist_for_each_entry(lrp, head, lrp_list) {
		if (lrp->lrp_uid != from_kuid(&init_user_ns, current_uid()))
			continue;
		if (lrp->lrp_gid != from_kgid(&init_user_ns, current_gid()))
			continue;
		if (lrp->lrp_fsuid != from_kuid(&init_user_ns, current_fsuid()))
			continue;
		if (lrp->lrp_fsgid != from_kgid(&init_user_ns, current_fsgid()))
			continue;
		found = 1;
		break;
	}

	if (!found) {
		rc = -ENOENT;
		goto out;
	}

	CDEBUG(D_SEC, "found remote perm: %u/%u/%u/%u - %#x\n",
	       lrp->lrp_uid, lrp->lrp_gid, lrp->lrp_fsuid, lrp->lrp_fsgid,
	       lrp->lrp_access_perm);
	rc = ((lrp->lrp_access_perm & mask) == mask) ? 0 : -EACCES;

out:
	spin_unlock(&lli->lli_lock);
	return rc;
}
Example 19
/* packing of MDS records */
void mdc_open_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
		   umode_t mode, __u64 rdev, __u64 flags, const void *lmm,
		   size_t lmmlen)
{
	struct mdt_rec_create *rec;
	char *tmp;
	__u64 cr_flags;

	CLASSERT(sizeof(struct mdt_rec_reint) == sizeof(struct mdt_rec_create));
	rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT);

	/* XXX do something about time, uid, gid */
	rec->cr_opcode = REINT_OPEN;
	rec->cr_fsuid	= from_kuid(&init_user_ns, current_fsuid());
	rec->cr_fsgid	= from_kgid(&init_user_ns, current_fsgid());
	rec->cr_cap    = cfs_curproc_cap_pack();
	rec->cr_mode   = mode;
	cr_flags	= mds_pack_open_flags(flags);
	rec->cr_rdev   = rdev;
	rec->cr_umask  = current_umask();
	if (op_data != NULL) {
		rec->cr_fid1       = op_data->op_fid1;
		rec->cr_fid2       = op_data->op_fid2;
		rec->cr_time       = op_data->op_mod_time;
		rec->cr_suppgid1   = op_data->op_suppgids[0];
		rec->cr_suppgid2   = op_data->op_suppgids[1];
		rec->cr_bias       = op_data->op_bias;
		rec->cr_open_handle_old = op_data->op_open_handle;

		if (op_data->op_name) {
			mdc_pack_name(req, &RMF_NAME, op_data->op_name,
				      op_data->op_namelen);

			if (op_data->op_bias & MDS_CREATE_VOLATILE)
				cr_flags |= MDS_OPEN_VOLATILE;
		}

		mdc_file_secctx_pack(req, op_data->op_file_secctx_name,
				     op_data->op_file_secctx,
				     op_data->op_file_secctx_size);

		/* pack SELinux policy info if any */
		mdc_file_sepol_pack(req);
	}

	if (lmm) {
		cr_flags |= MDS_OPEN_HAS_EA;
		tmp = req_capsule_client_get(&req->rq_pill, &RMF_EADATA);
		memcpy(tmp, lmm, lmmlen);
	}
	set_mrc_cr_flags(rec, cr_flags);
}
Example 20
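/*
 * BPF helper: returns the current task's GID in the upper 32 bits and its
 * UID in the lower 32 bits, both in the initial user namespace.
 */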
static u64 bpf_get_current_uid_gid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
{
	struct task_struct *task = current;
	kuid_t uid;
	kgid_t gid;

	if (!task)
		return -EINVAL;

	current_uid_gid(&uid, &gid);
	return (u64) from_kgid(&init_user_ns, gid) << 32 |
		from_kuid(&init_user_ns, uid);
}
Example 21
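/*
 * Map a kernel GID to an NFSv4 group name via the idmapper; fall back to
 * the plain numeric string when the lookup fails or mapping is disabled.
 */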
int nfs_map_gid_to_group(const struct nfs_server *server, kgid_t gid, char *buf, size_t buflen)
{
	struct idmap *idmap = server->nfs_client->cl_idmap;
	int ret = -EINVAL;
	__u32 id;

	id = from_kgid(&init_user_ns, gid);
	if (!(server->caps & NFS_CAP_UIDGID_NOMAP))
		ret = nfs_idmap_lookup_name(id, "group", buf, buflen, idmap);
	if (ret < 0)
		ret = nfs_map_numeric_to_string(id, buf, buflen);
	trace_nfs4_map_gid_to_group(buf, ret, id, ret);
	return ret;
}
Example 22
/* packing of MDS records */
void mdc_open_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
		   umode_t mode, __u64 rdev, __u64 flags, const void *lmm,
		   size_t lmmlen)
{
	struct mdt_rec_create *rec;
	char *tmp;
	__u64 cr_flags;

	CLASSERT(sizeof(struct mdt_rec_reint) == sizeof(struct mdt_rec_create));
	rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT);

	/* XXX do something about time, uid, gid */
	rec->cr_opcode = REINT_OPEN;
	rec->cr_fsuid	= from_kuid(&init_user_ns, current_fsuid());
	rec->cr_fsgid	= from_kgid(&init_user_ns, current_fsgid());
	rec->cr_cap    = cfs_curproc_cap_pack();
	rec->cr_mode   = mode;
	cr_flags	= mds_pack_open_flags(flags);
	rec->cr_rdev   = rdev;
	rec->cr_umask  = current_umask();
	if (op_data != NULL) {
		rec->cr_fid1       = op_data->op_fid1;
		rec->cr_fid2       = op_data->op_fid2;
		rec->cr_time       = op_data->op_mod_time;
		rec->cr_suppgid1   = op_data->op_suppgids[0];
		rec->cr_suppgid2   = op_data->op_suppgids[1];
		rec->cr_bias       = op_data->op_bias;
		rec->cr_old_handle = op_data->op_handle;

		mdc_pack_capa(req, &RMF_CAPA1, op_data->op_capa1);
		/* the next buffer is child capa, which is used for replay,
		 * will be packed from the data in reply message. */

		if (op_data->op_name) {
			mdc_pack_name(req, &RMF_NAME, op_data->op_name,
				      op_data->op_namelen);

			if (op_data->op_bias & MDS_CREATE_VOLATILE)
				cr_flags |= MDS_OPEN_VOLATILE;
		}
	}

	if (lmm) {
		cr_flags |= MDS_OPEN_HAS_EA;
		tmp = req_capsule_client_get(&req->rq_pill, &RMF_EADATA);
		memcpy(tmp, lmm, lmmlen);
	}
	set_mrc_cr_flags(rec, cr_flags);
}
Example 23
/*
 * Convert from in-memory to filesystem representation.
 */
static void *posix_acl_to_disk(const struct posix_acl *acl, size_t * size)
{
	reiserfs_acl_header *ext_acl;
	char *e;
	int n;

	*size = reiserfs_acl_size(acl->a_count);
	ext_acl = kmalloc(sizeof(reiserfs_acl_header) +
						  acl->a_count *
						  sizeof(reiserfs_acl_entry),
						  GFP_NOFS);
	if (!ext_acl)
		return ERR_PTR(-ENOMEM);
	ext_acl->a_version = cpu_to_le32(REISERFS_ACL_VERSION);
	e = (char *)ext_acl + sizeof(reiserfs_acl_header);
	for (n = 0; n < acl->a_count; n++) {
		const struct posix_acl_entry *acl_e = &acl->a_entries[n];
		reiserfs_acl_entry *entry = (reiserfs_acl_entry *) e;
		entry->e_tag = cpu_to_le16(acl->a_entries[n].e_tag);
		entry->e_perm = cpu_to_le16(acl->a_entries[n].e_perm);
		switch (acl->a_entries[n].e_tag) {
		case ACL_USER:
			entry->e_id = cpu_to_le32(
				from_kuid(&init_user_ns, acl_e->e_uid));
			e += sizeof(reiserfs_acl_entry);
			break;
		case ACL_GROUP:
			entry->e_id = cpu_to_le32(
				from_kgid(&init_user_ns, acl_e->e_gid));
			e += sizeof(reiserfs_acl_entry);
			break;

		case ACL_USER_OBJ:
		case ACL_GROUP_OBJ:
		case ACL_MASK:
		case ACL_OTHER:
			e += sizeof(reiserfs_acl_entry_short);
			break;

		default:
			goto fail;
		}
	}
	return (char *)ext_acl;

fail:
	kfree(ext_acl);
	return ERR_PTR(-EINVAL);
}
Example 24
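/*
 * Convert an in-memory POSIX ACL to the JFFS2 medium format; only ACL_USER
 * and ACL_GROUP entries store an ID, taken from the initial user namespace.
 */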
static void *jffs2_acl_to_medium(const struct posix_acl *acl, size_t *size)
{
	struct jffs2_acl_header *header;
	struct jffs2_acl_entry *entry;
	void *e;
	size_t i;

	*size = jffs2_acl_size(acl->a_count);
	header = kmalloc(sizeof(*header) + acl->a_count * sizeof(*entry), GFP_KERNEL);
	if (!header)
		return ERR_PTR(-ENOMEM);
	header->a_version = cpu_to_je32(JFFS2_ACL_VERSION);
	e = header + 1;
	for (i=0; i < acl->a_count; i++) {
		const struct posix_acl_entry *acl_e = &acl->a_entries[i];
		entry = e;
		entry->e_tag = cpu_to_je16(acl_e->e_tag);
		entry->e_perm = cpu_to_je16(acl_e->e_perm);
		switch(acl_e->e_tag) {
			case ACL_USER:
				entry->e_id = cpu_to_je32(
					from_kuid(&init_user_ns, acl_e->e_uid));
				e += sizeof(struct jffs2_acl_entry);
				break;
			case ACL_GROUP:
				entry->e_id = cpu_to_je32(
					from_kgid(&init_user_ns, acl_e->e_gid));
				e += sizeof(struct jffs2_acl_entry);
				break;

			case ACL_USER_OBJ:
			case ACL_GROUP_OBJ:
			case ACL_MASK:
			case ACL_OTHER:
				e += sizeof(struct jffs2_acl_entry_short);
				break;

			default:
				goto fail;
		}
	}
	return header;
 fail:
	kfree(header);
	return ERR_PTR(-EINVAL);
}
Example 25
/* Protect against 'cutting & pasting' security.evm xattr, include inode
 * specific info.
 *
 * (Additional directory/file metadata needs to be added for more complete
 * protection.)
 */
static void hmac_add_misc(struct shash_desc *desc, struct inode *inode,
			  char *digest)
{
	struct h_misc {
		unsigned long ino;
		__u32 generation;
		uid_t uid;
		gid_t gid;
		umode_t mode;
	} hmac_misc;

	memset(&hmac_misc, 0, sizeof hmac_misc);
	hmac_misc.ino = inode->i_ino;
	hmac_misc.generation = inode->i_generation;
	hmac_misc.uid = from_kuid(&init_user_ns, inode->i_uid);
	hmac_misc.gid = from_kgid(&init_user_ns, inode->i_gid);
	hmac_misc.mode = inode->i_mode;
	crypto_shash_update(desc, (const u8 *)&hmac_misc, sizeof hmac_misc);
	crypto_shash_final(desc, digest);
}
Example 26
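/* Fill cl_attr (size, times, blocks, owner) from the VFS inode behind the
 * cl_object; uid/gid are exported as plain IDs in init_user_ns. */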
static int vvp_attr_get(const struct lu_env *env, struct cl_object *obj,
                        struct cl_attr *attr)
{
	struct inode *inode = vvp_object_inode(obj);

	/*
	 * lov overwrites most of these fields in
	 * lov_attr_get()->...lov_merge_lvb_kms(), except when inode
	 * attributes are newer.
	 */

	attr->cat_size = i_size_read(inode);
	attr->cat_mtime = inode->i_mtime.tv_sec;
	attr->cat_atime = inode->i_atime.tv_sec;
	attr->cat_ctime = inode->i_ctime.tv_sec;
	attr->cat_blocks = inode->i_blocks;
	attr->cat_uid = from_kuid(&init_user_ns, inode->i_uid);
	attr->cat_gid = from_kgid(&init_user_ns, inode->i_gid);
	/* KMS is not known by this layer */
	return 0; /* layers below have to fill in the rest */
}
Example 27
File: inode.c Project: 020gzh/linux
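/*
 * HPFS ->setattr: refuse changes to the root directory inode, UIDs/GIDs
 * that do not fit in 16 bits, and size increases; otherwise truncate if
 * needed and copy the remaining attributes into the inode.
 */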
int hpfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	int error = -EINVAL;

	hpfs_lock(inode->i_sb);
	if (inode->i_ino == hpfs_sb(inode->i_sb)->sb_root)
		goto out_unlock;
	if ((attr->ia_valid & ATTR_UID) &&
	    from_kuid(&init_user_ns, attr->ia_uid) >= 0x10000)
		goto out_unlock;
	if ((attr->ia_valid & ATTR_GID) &&
	    from_kgid(&init_user_ns, attr->ia_gid) >= 0x10000)
		goto out_unlock;
	if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size > inode->i_size)
		goto out_unlock;

	error = inode_change_ok(inode, attr);
	if (error)
		goto out_unlock;

	if ((attr->ia_valid & ATTR_SIZE) &&
	    attr->ia_size != i_size_read(inode)) {
		error = inode_newsize_ok(inode, attr->ia_size);
		if (error)
			goto out_unlock;

		truncate_setsize(inode, attr->ia_size);
		hpfs_truncate(inode);
	}

	setattr_copy(inode, attr);

	hpfs_write_inode(inode);

 out_unlock:
	hpfs_unlock(inode->i_sb);
	return error;
}
Example 28
/*
 * Convert from in-memory to extended attribute representation.
 */
int
posix_acl_to_xattr(struct user_namespace *user_ns, const struct posix_acl *acl,
		   void *buffer, size_t size)
{
	posix_acl_xattr_header *ext_acl = (posix_acl_xattr_header *)buffer;
	posix_acl_xattr_entry *ext_entry = ext_acl->a_entries;
	int real_size, n;

	real_size = posix_acl_xattr_size(acl->a_count);
	if (!buffer)
		return real_size;
	if (real_size > size)
		return -ERANGE;
	
	ext_acl->a_version = cpu_to_le32(POSIX_ACL_XATTR_VERSION);

	for (n=0; n < acl->a_count; n++, ext_entry++) {
		const struct posix_acl_entry *acl_e = &acl->a_entries[n];
		ext_entry->e_tag  = cpu_to_le16(acl_e->e_tag);
		ext_entry->e_perm = cpu_to_le16(acl_e->e_perm);
		switch(acl_e->e_tag) {
		case ACL_USER:
			ext_entry->e_id =
				cpu_to_le32(from_kuid(user_ns, acl_e->e_uid));
			break;
		case ACL_GROUP:
			ext_entry->e_id =
				cpu_to_le32(from_kgid(user_ns, acl_e->e_gid));
			break;
		default:
			ext_entry->e_id = cpu_to_le32(ACL_UNDEFINED_ID);
			break;
		}
	}
	return real_size;
}
Example 29
/*
 * groups_search() is copied from linux kernel!
 * A simple bsearch.
 */
static int lustre_groups_search(struct group_info *group_info,
				gid_t grp)
{
	int left, right;

	if (!group_info)
		return 0;

	left = 0;
	right = group_info->ngroups;
	while (left < right) {
		int mid = (left + right) / 2;
		int cmp = grp -
			from_kgid(&init_user_ns, CFS_GROUP_AT(group_info, mid));

		if (cmp > 0)
			left = mid + 1;
		else if (cmp < 0)
			right = mid;
		else
			return 1;
	}
	return 0;
}
Example 30
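/*
 * Allocate and initialize an ORANGEFS upcall operation of the given type,
 * stamping it with the caller's fsuid/fsgid mapped through init_user_ns.
 */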
struct orangefs_kernel_op_s *op_alloc(__s32 type)
{
	struct orangefs_kernel_op_s *new_op = NULL;

	new_op = kmem_cache_zalloc(op_cache, GFP_KERNEL);
	if (new_op) {
		INIT_LIST_HEAD(&new_op->list);
		spin_lock_init(&new_op->lock);
		init_completion(&new_op->waitq);

		new_op->upcall.type = ORANGEFS_VFS_OP_INVALID;
		new_op->downcall.type = ORANGEFS_VFS_OP_INVALID;
		new_op->downcall.status = -1;

		new_op->op_state = OP_VFS_STATE_UNKNOWN;

		/* initialize the op specific tag and upcall credentials */
		orangefs_new_tag(new_op);
		new_op->upcall.type = type;
		new_op->attempts = 0;
		gossip_debug(GOSSIP_CACHE_DEBUG,
			     "Alloced OP (%p: %llu %s)\n",
			     new_op,
			     llu(new_op->tag),
			     get_opname_string(new_op));

		new_op->upcall.uid = from_kuid(&init_user_ns,
					       current_fsuid());

		new_op->upcall.gid = from_kgid(&init_user_ns,
					       current_fsgid());
	} else {
		gossip_err("op_alloc: kmem_cache_zalloc failed!\n");
	}
	return new_op;
}