/*
 * fl_create - allocate and initialise a new IPv6 flow label object from a
 * userspace flow-label request.
 *
 * @net:    network namespace the label belongs to
 * @sk:     requesting socket (passed through to cmsg parsing)
 * @freq:   kernel copy of the userspace request
 * @optval: userspace option buffer; ancillary cmsg data may follow the
 *          request structure at offset CMSG_ALIGN(sizeof(*freq))
 * @optlen: total length of the userspace option buffer
 * @err_p:  out-parameter receiving the negative errno on failure
 *
 * Returns the new label with a single user reference on success, or NULL
 * with *err_p set on failure. All error paths funnel through fl_free(),
 * which releases both the label and any attached options.
 */
static struct ip6_flowlabel * fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq, char __user *optval, int optlen, int *err_p)
{
	struct ip6_flowlabel *fl = NULL;
	int olen;
	int addr_type;
	int err;

	/* Bytes of ancillary (cmsg) data trailing the request structure. */
	olen = optlen - CMSG_ALIGN(sizeof(*freq));
	err = -EINVAL;
	/* Cap attacker-controlled option length before allocating for it. */
	if (olen > 64 * 1024)
		goto done;

	err = -ENOMEM;
	fl = kzalloc(sizeof(*fl), GFP_KERNEL);
	if (!fl)
		goto done;

	if (olen > 0) {
		struct msghdr msg;
		struct flowi6 flowi6;
		struct sockcm_cookie sockc_junk;
		struct ipcm6_cookie ipc6;

		err = -ENOMEM;
		/* Header plus raw cmsg payload in one allocation. */
		fl->opt = kmalloc(sizeof(*fl->opt) + olen, GFP_KERNEL);
		if (!fl->opt)
			goto done;

		/* Zero only the header; the payload is overwritten below. */
		memset(fl->opt, 0, sizeof(*fl->opt));
		fl->opt->tot_len = sizeof(*fl->opt) + olen;
		err = -EFAULT;
		/* Copy the cmsg bytes that follow the request structure. */
		if (copy_from_user(fl->opt+1, optval+CMSG_ALIGN(sizeof(*freq)), olen))
			goto done;

		/*
		 * Build a minimal msghdr pointing at the copied cmsg data.
		 * Only msg_control/msg_controllen are set; presumably
		 * ip6_datagram_send_ctl() reads nothing else from msg —
		 * NOTE(review): confirm against that helper.
		 */
		msg.msg_controllen = olen;
		msg.msg_control = (void *)(fl->opt+1);
		memset(&flowi6, 0, sizeof(flowi6));

		ipc6.opt = fl->opt;
		/* Parse the cmsgs; this populates fl->opt's parsed fields. */
		err = ip6_datagram_send_ctl(net, sk, &msg, &flowi6, &ipc6, &sockc_junk);
		if (err)
			goto done;
		err = -EINVAL;
		/* Fragmentable (per-fragment) options are not permitted here. */
		if (fl->opt->opt_flen)
			goto done;
		/* No non-fragmentable options either: drop the empty block. */
		if (fl->opt->opt_nflen == 0) {
			kfree(fl->opt);
			fl->opt = NULL;
		}
	}

	fl->fl_net = net;
	fl->expires = jiffies;
	/* Apply linger/expiry from the request; validates the timeouts. */
	err = fl6_renew(fl, freq->flr_linger, freq->flr_expires);
	if (err)
		goto done;
	fl->share = freq->flr_share;
	addr_type = ipv6_addr_type(&freq->flr_dst);
	/* Reject IPv4-mapped and unspecified destinations. */
	if ((addr_type & IPV6_ADDR_MAPPED) || addr_type == IPV6_ADDR_ANY) {
		err = -EINVAL;
		goto done;
	}
	fl->dst = freq->flr_dst;
	atomic_set(&fl->users, 1);
	switch (fl->share) {
	case IPV6_FL_S_EXCL:
	case IPV6_FL_S_ANY:
		break;
	case IPV6_FL_S_PROCESS:
		/* Pid reference is owned by the label; released by fl_free(). */
		fl->owner.pid = get_task_pid(current, PIDTYPE_PID);
		break;
	case IPV6_FL_S_USER:
		fl->owner.uid = current_euid();
		break;
	default:
		err = -EINVAL;
		goto done;
	}
	return fl;
done:
	/* fl_free(NULL) is safe; also frees fl->opt when attached. */
	fl_free(fl);
	*err_p = err;
	return NULL;
}
/*
 * ugidctl_setgroups - setgroups(2) proxy that lets a pre-authorised,
 * otherwise-unprivileged process install a supplementary group list.
 *
 * @ctx: per-context state holding the shared key, the authorised pid,
 *       and the whitelist consulted via ugidctl_find_gid()
 * @arg: userspace pointer to a struct ugidctl_setgroups_rq immediately
 *       followed by req.count gid_t entries
 *
 * Returns the result of ugidctl_sys_setgroups() on success, or a
 * negative errno (-EFAULT, -EINVAL, -EPERM, -ENOMEM) on failure.
 *
 * Fixes vs. previous revision:
 *  - 'bulk' was leaked when copy_from_user() failed inside the loop;
 *    all loop error paths now funnel through a single kfree().
 *  - get_task_pid() takes a pid reference that was never dropped;
 *    it is now released with put_pid().
 */
static long ugidctl_setgroups(struct ugidctl_context *ctx, void __user *arg)
{
	struct ugidctl_setgroups_rq req;
	enum pid_type ptype;
	gid_t __user *list;
	struct cred *cred;
	struct pid *tpid;
	unsigned i, count;
	gid_t *bulk;
	pid_t pid;
	long rc;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	/* The gid list follows the request header in the same buffer. */
	arg += sizeof(req);
	list = arg;

	count = (unsigned) req.count;

	if (count > NGROUPS_MAX)
		return -EINVAL;

	/* Clearing the group list needs no whitelist check. */
	if (!count)
		return ugidctl_sys_setgroups(0, list);

	/* A caller that already holds CAP_SETGID needs no help from us. */
	if (capable(CAP_SETGID))
		return ugidctl_sys_setgroups((int) count, list);

	if (memcmp(ctx->key, req.key, sizeof(ctx->key)))
		return -EPERM;

	/* Snapshot the authorised pid under the lock. */
	mutex_lock(&ctx->lock);
	ptype = ctx->ptype;
	pid = ctx->pid;
	mutex_unlock(&ctx->lock);

	/* get_task_pid() grabs a reference; drop it once compared. */
	tpid = get_task_pid(current, ptype);
	rc = (pid != pid_nr(tpid)) ? -EPERM : 0;
	put_pid(tpid);
	if (rc)
		return rc;

	/* Validate the list in bounded chunks to cap the allocation. */
	bulk = kmalloc(count > UGIDCTL_BULKSIZE ?
		       sizeof(gid_t) * UGIDCTL_BULKSIZE :
		       sizeof(gid_t) * count, GFP_KERNEL);
	if (!bulk)
		return -ENOMEM;

	while (count) {
		unsigned size = count > UGIDCTL_BULKSIZE ?
				UGIDCTL_BULKSIZE : count;

		rc = -EFAULT;
		if (copy_from_user(bulk, arg, sizeof(gid_t) * size))
			goto out_free;

		mutex_lock(&ctx->lock);
		for (i = 0; i < size; i++) {
			/* Every requested gid must be on the whitelist. */
			if (ugidctl_find_gid(ctx, bulk[i])) {
				mutex_unlock(&ctx->lock);
				rc = -EPERM;
				goto out_free;
			}
		}
		mutex_unlock(&ctx->lock);

		arg += sizeof(gid_t) * size;
		count -= size;
	}

	kfree(bulk);

	/*
	 * FIXME(security): double-fetch / TOCTOU. The gids were validated
	 * from the kernel copy in 'bulk', but ugidctl_sys_setgroups()
	 * re-reads 'list' from userspace below, so another thread can
	 * swap in non-whitelisted gids between check and use. The
	 * validated kernel copy should be what gets installed.
	 */

	/* Temporarily raise CAP_SETGID so the real setgroups succeeds. */
	cred = prepare_creds();
	if (!cred)
		return -ENOMEM;

	cap_raise(cred->cap_effective, CAP_SETGID);
	commit_creds(cred);

	rc = ugidctl_sys_setgroups((int) req.count, list);

	cred = prepare_creds();
	if (!cred) {
		/* unable to restore process capabilities - kill process */
		do_exit(SIGKILL);
		return -ENOMEM;
	}

	cap_lower(cred->cap_effective, CAP_SETGID);
	commit_creds(cred);

	return rc;

out_free:
	kfree(bulk);
	return rc;
}