/*
 * Check for appropriate CAP_AUDIT_ capabilities on incoming audit
 * control messages.
 *
 * Returns 0 when @eff_cap carries the capability the message type
 * requires, -EPERM when it does not, -EINVAL for unknown types.
 */
static int audit_netlink_ok(kernel_cap_t eff_cap, u16 msg_type)
{
	switch (msg_type) {
	case AUDIT_GET:
	case AUDIT_LIST:
	case AUDIT_SET:
	case AUDIT_ADD:
	case AUDIT_DEL:
	case AUDIT_SIGNAL_INFO:
		/* Audit control operations need CAP_AUDIT_CONTROL. */
		return cap_raised(eff_cap, CAP_AUDIT_CONTROL) ? 0 : -EPERM;
	case AUDIT_USER:
	case AUDIT_FIRST_USER_MSG...AUDIT_LAST_USER_MSG:
		/* Userspace-originated records need CAP_AUDIT_WRITE. */
		return cap_raised(eff_cap, CAP_AUDIT_WRITE) ? 0 : -EPERM;
	default:
		/* bad msg */
		return -EINVAL;
	}
}
/*
 * Report whether @tsk holds @cap in its effective set.
 * Derived from include/linux/sched.h:capable.
 *
 * Returns 0 when the capability is raised, -EPERM otherwise.
 */
int cap_capable (struct task_struct *tsk, int cap)
{
	return cap_raised(tsk->cap_effective, cap) ? 0 : -EPERM;
}
/**
 * cap_capable - Determine whether a task has a particular effective capability
 * @cred: The credentials to use
 * @targ_ns: The user namespace in which we need the capability
 * @cap: The capability to check for
 * @audit: Whether to write an audit message or not (unused in this variant)
 *
 * Determine whether the nominated task has the specified capability amongst
 * its effective set, returning 0 if it does, -ve if it does not.
 *
 * NOTE WELL: cap_has_capability() cannot be used like the kernel's capable()
 * and has_capability() functions.  That is, it has the reverse semantics:
 * cap_has_capability() returns 0 when a task has a capability, but the
 * kernel's capable() and has_capability() returns 1 for this case.
 */
int cap_capable(const struct cred *cred, struct user_namespace *targ_ns, int cap, int audit)
{
	/* Walk from the target namespace up towards init_user_ns; the order
	 * of the three checks below is significant. */
	for (;;) {
		/* The creator of the user namespace has all caps. */
		if (targ_ns != &init_user_ns && targ_ns->creator == cred->user)
			return 0;

		/* Do we have the necessary capabilities? */
		if (targ_ns == cred->user->user_ns)
			return cap_raised(cred->cap_effective, cap) ? 0 : -EPERM;

		/* Have we tried all of the parent namespaces? */
		if (targ_ns == &init_user_ns)
			return -EPERM;

		/*
		 * If you have a capability in a parent user ns, then you have
		 * it over all children user namespaces as well.
		 */
		targ_ns = targ_ns->creator->user_ns;
	}
	/* We never get here */
}
/**
 * cap_capable - Determine whether a task has a particular effective capability
 * @cred: The credentials to use
 * @targ_ns: The user namespace in which we need the capability
 * @cap: The capability to check for
 * @audit: Whether to write an audit message or not (unused in this variant)
 *
 * Determine whether the nominated task has the specified capability amongst
 * its effective set, returning 0 if it does, -ve if it does not.
 *
 * NOTE WELL: cap_has_capability() cannot be used like the kernel's capable()
 * and has_capability() functions.  That is, it has the reverse semantics:
 * cap_has_capability() returns 0 when a task has a capability, but the
 * kernel's capable() and has_capability() returns 1 for this case.
 */
int cap_capable(const struct cred *cred, struct user_namespace *targ_ns, int cap, int audit)
{
	struct user_namespace *ns = targ_ns;

	/* See if cred has the capability in the target user namespace
	 * by examining the target user namespace and all of the target
	 * user namespace's parents.
	 */
	for (;;) {
		/* Do we have the necessary capabilities? */
		if (ns == cred->user_ns)
			return cap_raised(cred->cap_effective, cap) ? 0 : -EPERM;

		/* Have we tried all of the parent namespaces? */
		if (ns == &init_user_ns)
			return -EPERM;

		/*
		 * The owner of the user namespace in the parent of the
		 * user namespace has all caps.
		 */
		if ((ns->parent == cred->user_ns) && uid_eq(ns->owner, cred->euid))
			return 0;

		/*
		 * If you have a capability in a parent user ns, then you have
		 * it over all children user namespaces as well.
		 */
		ns = ns->parent;
	}
	/* We never get here */
}
/**
 * cap_capable - Determine whether a task has a particular effective capability
 * @cred: The credentials to use
 * @targ_ns: The user namespace in which we need the capability
 * @cap: The capability to check for
 * @audit: Whether to write an audit message or not (unused in this variant)
 *
 * Determine whether the nominated task has the specified capability amongst
 * its effective set, returning 0 if it does, -ve if it does not.
 *
 * NOTE WELL: cap_has_capability() cannot be used like the kernel's capable()
 * and has_capability() functions.  That is, it has the reverse semantics:
 * cap_has_capability() returns 0 when a task has a capability, but the
 * kernel's capable() and has_capability() returns 1 for this case.
 */
int cap_capable(const struct cred *cred, struct user_namespace *targ_ns, int cap, int audit)
{
#ifdef CONFIG_ANDROID_PARANOID_NETWORK
	/* Android grants the two networking caps by supplementary group
	 * membership instead of by capability set. */
	if (cap == CAP_NET_RAW && in_egroup_p(AID_NET_RAW))
		return 0;
	if (cap == CAP_NET_ADMIN && in_egroup_p(AID_NET_ADMIN))
		return 0;
#endif

	/* Walk from the target namespace up towards init_user_ns. */
	for (;;) {
		/* The creator of the user namespace has all caps. */
		if (targ_ns != &init_user_ns && targ_ns->creator == cred->user)
			return 0;

		/* Do we have the necessary capabilities? */
		if (targ_ns == cred->user->user_ns)
			return cap_raised(cred->cap_effective, cap) ? 0 : -EPERM;

		/* Have we tried all of the parent namespaces? */
		if (targ_ns == &init_user_ns)
			return -EPERM;

		/*
		 * If you have a capability in a parent user ns, then you have
		 * it over all children user namespaces as well.
		 */
		targ_ns = targ_ns->creator->user_ns;
	}
	/* We never get here */
}
/*
 * cap_capable - determine whether @cred holds @cap in @targ_ns.
 *
 * Returns 0 when the capability is available, -EPERM when it is not.
 * Note the reverse polarity with respect to the kernel's capable().
 * The @audit parameter is accepted for interface compatibility but
 * unused here.
 */
int cap_capable(const struct cred *cred, struct user_namespace *targ_ns, int cap, int audit)
{
#ifdef CONFIG_ANDROID_PARANOID_NETWORK
	/* Android: networking caps granted via supplementary groups. */
	if (cap == CAP_NET_RAW && in_egroup_p(AID_NET_RAW))
		return 0;
	if (cap == CAP_NET_ADMIN && in_egroup_p(AID_NET_ADMIN))
		return 0;
#endif

	for (;;) {
		/* The creator of a (non-init) user namespace has all caps
		 * in it. */
		if (targ_ns != &init_user_ns && targ_ns->creator == cred->user)
			return 0;

		/* In our own namespace the effective set decides. */
		if (targ_ns == cred->user->user_ns)
			return cap_raised(cred->cap_effective, cap) ? 0 : -EPERM;

		/* Reached the root namespace without a match. */
		if (targ_ns == &init_user_ns)
			return -EPERM;

		/* A capability held in a parent namespace applies to all
		 * of its children; keep walking up. */
		targ_ns = targ_ns->creator->user_ns;
	}
}
/*
 * gr_chroot_is_capable - decide whether a chrooted task may use @cap.
 *
 * Returns 1 when the capability is permitted, 0 when chroot capability
 * restriction denies it (logging the denial when the task actually had
 * the capability raised).
 */
int gr_chroot_is_capable(const int cap)
{
#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
	if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
		kernel_cap_t restricted = GR_CHROOT_CAPS;

		if (cap_raised(restricted, cap)) {
			const struct cred *cred = current_cred();

			/* Only log when the task would otherwise have had
			 * the capability and we know a name for it. */
			if (cap_raised(cred->cap_effective, cap) &&
			    cap < captab_log_entries)
				gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG,
					   current, captab_log[cap]);
			return 0;
		}
	}
#endif
	return 1;
}
/**
 * audit_caps - audit a capability
 * @profile: profile confining task (NOT NULL)
 * @task: task capability test was performed against (NOT NULL)
 * @cap: capability tested
 * @error: error code returned by test
 *
 * Do auditing of capability and handle, audit/complain/kill modes switching
 * and duplicate message elimination.
 *
 * Returns: 0 or sa->error on success, error code on failure
 */
static int audit_caps(struct aa_profile *profile, struct task_struct *task, int cap, int error)
{
	struct audit_cache *ent;
	int type = AUDIT_APPARMOR_AUTO;
	struct common_audit_data sa;
	struct apparmor_audit_data aad = {0,};

	/* Build the audit record describing this capability test. */
	COMMON_AUDIT_DATA_INIT(&sa, CAP);
	sa.aad = &aad;
	sa.tsk = task;
	sa.u.cap = cap;
	sa.aad->op = OP_CAPABLE;
	sa.aad->error = error;

	if (likely(!error)) {
		/* test if auditing is being forced */
		if (likely((AUDIT_MODE(profile) != AUDIT_ALL) && !cap_raised(profile->caps.audit, cap)))
			return 0;
		type = AUDIT_APPARMOR_AUDIT;
	} else if (KILL_MODE(profile) || cap_raised(profile->caps.kill, cap)) {
		type = AUDIT_APPARMOR_KILL;
	} else if (cap_raised(profile->caps.quiet, cap) && AUDIT_MODE(profile) != AUDIT_NOQUIET && AUDIT_MODE(profile) != AUDIT_ALL) {
		/* quiet auditing */
		return error;
	}

	/* Do simple duplicate message elimination */
	ent = &get_cpu_var(audit_cache);
	if (profile == ent->profile && cap_raised(ent->caps, cap)) {
		/* already logged this cap for this profile recently */
		put_cpu_var(audit_cache);
		if (COMPLAIN_MODE(profile))
			return complain_error(error);
		return error;
	} else {
		/* remember this (profile, cap) pair in the per-cpu cache;
		 * reference counts keep the cached profile alive */
		aa_put_profile(ent->profile);
		ent->profile = aa_get_profile(profile);
		cap_raise(ent->caps, cap);
	}
	put_cpu_var(audit_cache);

	/* GFP_ATOMIC: we may be called from non-sleepable context. */
	return aa_audit(type, profile, GFP_ATOMIC, &sa, audit_cb);
}
/** * profile_capable - test if profile allows use of capability @cap * @profile: profile being enforced (NOT NULL, NOT unconfined) * @cap: capability to test if allowed * @sa: audit data (MAY BE NULL indicating no auditing) * * Returns: 0 if allowed else -EPERM */ static int profile_capable(struct aa_profile *profile, int cap, struct common_audit_data *sa) { int error; if (cap_raised(profile->caps.allow, cap) && !cap_raised(profile->caps.denied, cap)) error = 0; else error = -EPERM; if (!sa) { if (COMPLAIN_MODE(profile)) return complain_error(error); return error; } return audit_caps(sa, profile, cap, error); }
/*
 * gr_log_resource - log an rlimit violation attempt by @task.
 * @task: task that exceeded (or tried to exceed) the limit
 * @res: RLIMIT_* index being checked
 * @wanted: the value the task asked for
 * @gt: nonzero when the check is "greater than" (wanted may equal rlim)
 *
 * Emits a grsecurity log entry unless the limit was not actually
 * exceeded or the task holds a capability that exempts it from the
 * particular limit.
 */
void gr_log_resource(const struct task_struct *task, const int res, const unsigned long wanted, const int gt)
{
	const struct cred *cred;
	unsigned long rlim;

	if (!gr_acl_is_enabled() && !grsec_resource_logging)
		return;

	// not yet supported resource
	if (unlikely(!restab_log[res]))
		return;

	/* CPU-time limits are enforced against the hard limit. */
	if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
		rlim = task->signal->rlim[res].rlim_max;
	else
		rlim = task->signal->rlim[res].rlim_cur;

	/* Nothing to log when the request stays within the limit. */
	if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
		return;

	/* cred is only valid inside the RCU read section. */
	rcu_read_lock();
	cred = __task_cred(task);
	/* Capability-based exemptions mirror the kernel's own rlimit
	 * override rules for these resources. */
	if (res == RLIMIT_NPROC && (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) || cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
		goto out_rcu_unlock;
	else if (res == RLIMIT_MEMLOCK && cap_raised(cred->cap_effective, CAP_IPC_LOCK))
		goto out_rcu_unlock;
	else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
		goto out_rcu_unlock;
	rcu_read_unlock();

	gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
	return;

out_rcu_unlock:
	rcu_read_unlock();
	return;
}
/** * cap_capable - Determine whether a task has a particular effective capability * @tsk: The task to query * @cred: The credentials to use * @cap: The capability to check for * @audit: Whether to write an audit message or not * * Determine whether the nominated task has the specified capability amongst * its effective set, returning 0 if it does, -ve if it does not. * * NOTE WELL: cap_has_capability() cannot be used like the kernel's capable() * and has_capability() functions. That is, it has the reverse semantics: * cap_has_capability() returns 0 when a task has a capability, but the * kernel's capable() and has_capability() returns 1 for this case. */ int cap_capable(struct task_struct *tsk, const struct cred *cred, int cap, int audit) { #ifdef CONFIG_ANDROID_PARANOID_NETWORK if (cap == CAP_NET_RAW && in_egroup_p(AID_NET_RAW)) return 0; if (cap == CAP_NET_ADMIN && in_egroup_p(AID_NET_ADMIN)) return 0; #endif return cap_raised(cred->cap_effective, cap) ? 0 : -EPERM; }
/**
 * audit_caps - audit a capability
 * @sa: audit data
 * @profile: profile being tested for confinement (NOT NULL)
 * @cap: capability tested
 * @error: error code returned by test
 *
 * Do auditing of capability and handle, audit/complain/kill modes switching
 * and duplicate message elimination.
 *
 * Returns: 0 or sa->error on success, error code on failure
 */
static int audit_caps(struct common_audit_data *sa, struct aa_profile *profile, int cap, int error)
{
	struct audit_cache *ent;
	int type = AUDIT_APPARMOR_AUTO;

	aad(sa)->error = error;

	if (likely(!error)) {
		/* test if auditing is being forced */
		if (likely((AUDIT_MODE(profile) != AUDIT_ALL) && !cap_raised(profile->caps.audit, cap)))
			return 0;
		type = AUDIT_APPARMOR_AUDIT;
	} else if (KILL_MODE(profile) || cap_raised(profile->caps.kill, cap)) {
		type = AUDIT_APPARMOR_KILL;
	} else if (cap_raised(profile->caps.quiet, cap) && AUDIT_MODE(profile) != AUDIT_NOQUIET && AUDIT_MODE(profile) != AUDIT_ALL) {
		/* quiet auditing */
		return error;
	}

	/* Do simple duplicate message elimination */
	ent = &get_cpu_var(audit_cache);
	if (profile == ent->profile && cap_raised(ent->caps, cap)) {
		/* same (profile, cap) was logged recently on this cpu */
		put_cpu_var(audit_cache);
		if (COMPLAIN_MODE(profile))
			return complain_error(error);
		return error;
	} else {
		/* record this pair; refcounts keep the profile alive */
		aa_put_profile(ent->profile);
		ent->profile = aa_get_profile(profile);
		cap_raise(ent->caps, cap);
	}
	put_cpu_var(audit_cache);

	return aa_audit(type, profile, sa, audit_cb);
}
/** * profile_capable - test if profile allows use of capability @cap * @profile: profile being enforced (NOT NULL, NOT unconfined) * @cap: capability to test if allowed * @opts: CAP_OPT_NOAUDIT bit determines whether audit record is generated * @sa: audit data (MAY BE NULL indicating no auditing) * * Returns: 0 if allowed else -EPERM */ static int profile_capable(struct aa_profile *profile, int cap, unsigned int opts, struct common_audit_data *sa) { int error; if (cap_raised(profile->caps.allow, cap) && !cap_raised(profile->caps.denied, cap)) error = 0; else error = -EPERM; if (opts & CAP_OPT_NOAUDIT) { if (!COMPLAIN_MODE(profile)) return error; /* audit the cap request in complain mode but note that it * should be optional. */ aad(sa)->info = "optional: no audit"; } return audit_caps(sa, profile, cap, error); }
/*
 * gr_chroot_is_capable_nolog - like gr_chroot_is_capable() but silent.
 *
 * Returns 1 when @cap is permitted, 0 when chroot capability
 * restriction denies it.  Never logs.
 */
int gr_chroot_is_capable_nolog(const int cap)
{
#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
	if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
		kernel_cap_t restricted = GR_CHROOT_CAPS;

		if (cap_raised(restricted, cap))
			return 0;
	}
#endif
	return 1;
}
/*
 * cap_netlink_recv - check @cap against the current task's capabilities
 * for an incoming netlink message.
 *
 * Returns 0 when permitted, -EPERM otherwise.  With CONFIG_GOD_MODE the
 * denial is additionally bypassed while god_mode_enabled is set.  The
 * ifdef'd braces below pair up in both configurations — do not
 * re-indent or "fix" them without checking both preprocessed forms.
 */
int cap_netlink_recv(struct sk_buff *skb, int cap)
{
	if (!cap_raised(current_cap(), cap))
#ifdef CONFIG_GOD_MODE
	{
		if (!god_mode_enabled)
#endif
			return -EPERM;
#ifdef CONFIG_GOD_MODE
	}
#endif
	return 0;
}
/*
 * dnrmg_receive_user_skb - handle a DECnet routing netlink message from
 * userspace.  Currently no message types are accepted: after validating
 * the header and CAP_NET_ADMIN, everything is rejected with -EINVAL.
 *
 * NOTE(review): RCV_SKB_FAIL is a macro that appears to ack the skb and
 * return — confirm against its definition before restructuring.
 */
static inline void dnrmg_receive_user_skb(struct sk_buff *skb)
{
	struct nlmsghdr *nlh = (struct nlmsghdr *)skb->data;

	/* Drop malformed messages silently. */
	if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len)
		return;

	if (!cap_raised(NETLINK_CB(skb).eff_cap, CAP_NET_ADMIN))
		RCV_SKB_FAIL(-EPERM);

	/* Eventually we might send routing messages too */
	RCV_SKB_FAIL(-EINVAL);
}
/*
 * cap_mmap - toggle CAP_SYS_RAWIO on the current task.
 * @oper: 1 raises the capability, 2 lowers it, anything else only queries.
 *
 * Returns nonzero when CAP_SYS_RAWIO is raised after the operation.
 *
 * NOTE(review): on _KSL > 28 kernels this casts away const and mutates
 * current->cred in place instead of using prepare_creds()/commit_creds();
 * the cred may be shared with other tasks — confirm this is intentional.
 */
static int cap_mmap(int oper)
{
#if _KSL > 28
	struct cred *cred = (struct cred *)(current->cred);
#else
	/* pre-cred kernels keep the capability sets on the task itself */
	struct task_struct *cred = current;
#endif
	switch (oper) {
	case 1:
		cap_raise(cred->cap_effective, CAP_SYS_RAWIO);
		break;
	case 2:
		cap_lower(cred->cap_effective, CAP_SYS_RAWIO);
		break;
	}
	return cap_raised(cred->cap_effective, CAP_SYS_RAWIO);
}
/*
 * netlink_receive_user_skb - validate and dispatch one ip_queue netlink
 * message from userspace.
 *
 * Silently drops malformed/too-short messages and control messages
 * (type <= IPQM_BASE); rejects unauthorized or invalid requests via
 * RCV_SKB_FAIL (a macro that appears to nack and return — confirm
 * against its definition); otherwise hands the payload to
 * ipq_receive_peer() and acks if NLM_F_ACK was requested.
 */
static __inline__ void netlink_receive_user_skb(struct sk_buff *skb)
{
	int status, type;
	struct nlmsghdr *nlh;

	/* Basic length sanity before touching the header. */
	if (skb->len < sizeof(struct nlmsghdr))
		return;

	nlh = (struct nlmsghdr *)skb->data;
	if (nlh->nlmsg_len < sizeof(struct nlmsghdr) || skb->len < nlh->nlmsg_len)
		return;

	/* Must be a single (non-multipart) request from a real pid. */
	if (nlh->nlmsg_pid <= 0 || !(nlh->nlmsg_flags & NLM_F_REQUEST) || nlh->nlmsg_flags & NLM_F_MULTI)
		RCV_SKB_FAIL(-EINVAL);

	if (nlh->nlmsg_flags & MSG_TRUNC)
		RCV_SKB_FAIL(-ECOMM);

	type = nlh->nlmsg_type;
	if (type < NLMSG_NOOP || type >= IPQM_MAX)
		RCV_SKB_FAIL(-EINVAL);

	/* Control messages are ignored. */
	if (type <= IPQM_BASE)
		return;

	if (!cap_raised(NETLINK_CB(skb).eff_cap, CAP_NET_ADMIN))
		RCV_SKB_FAIL(-EPERM);

	/* A new peer pid takes over the queue; flush what the old one
	 * had pending. */
	if (nlq->peer.pid && !nlq->peer.died && (nlq->peer.pid != nlh->nlmsg_pid)) {
		printk(KERN_WARNING "ip_queue: peer pid changed from %d to "
		       "%d, flushing queue\n", nlq->peer.pid, nlh->nlmsg_pid);
		ipq_flush(nlq);
	}
	nlq->peer.pid = nlh->nlmsg_pid;
	nlq->peer.died = 0;

	status = ipq_receive_peer(nlq, NLMSG_DATA(nlh), type, skb->len - NLMSG_LENGTH(0));
	if (status < 0)
		RCV_SKB_FAIL(status);

	if (nlh->nlmsg_flags & NLM_F_ACK)
		netlink_ack(skb, nlh, 0);
	return;
}
/**
 * cap_capable - Determine whether a task has a particular effective capability
 * @cred: The credentials to use
 * @targ_ns: The user namespace in which we need the capability
 * @cap: The capability to check for
 * @audit: Whether to write an audit message or not (unused in this variant)
 *
 * Determine whether the nominated task has the specified capability amongst
 * its effective set, returning 0 if it does, -ve if it does not.
 *
 * NOTE WELL: cap_has_capability() cannot be used like the kernel's capable()
 * and has_capability() functions.  That is, it has the reverse semantics:
 * cap_has_capability() returns 0 when a task has a capability, but the
 * kernel's capable() and has_capability() returns 1 for this case.
 */
int cap_capable(const struct cred *cred, struct user_namespace *targ_ns, int cap, int audit)
{
	struct user_namespace *ns = targ_ns;

#ifdef CONFIG_ANDROID_PARANOID_NETWORK
	/* Android grants the networking caps by (kgid-converted)
	 * supplementary group membership. */
	if (cap == CAP_NET_RAW && in_egroup_p(KGIDT_INIT(AID_NET_RAW)))
		return 0;
	if (cap == CAP_NET_ADMIN && in_egroup_p(KGIDT_INIT(AID_NET_ADMIN)))
		return 0;
#endif

	/* See if cred has the capability in the target user namespace
	 * by examining the target user namespace and all of the target
	 * user namespace's parents.
	 */
	for (;;) {
		/* Do we have the necessary capabilities? */
		if (ns == cred->user_ns)
			return cap_raised(cred->cap_effective, cap) ? 0 : -EPERM;

		/* Have we tried all of the parent namespaces? */
		if (ns == &init_user_ns)
			return -EPERM;

		/*
		 * The owner of the user namespace in the parent of the
		 * user namespace has all caps.
		 */
		if ((ns->parent == cred->user_ns) && uid_eq(ns->owner, cred->euid))
			return 0;

		/*
		 * If you have a capability in a parent user ns, then you have
		 * it over all children user namespaces as well.
		 */
		ns = ns->parent;
	}
	/* We never get here */
}
/**
 * profile_capable - test if profile allows use of capability @cap
 * @profile: profile being enforced (NOT NULL, NOT unconfined)
 * @cap: capability to test if allowed
 *
 * Returns: 0 if allowed else -EPERM
 */
static int profile_capable(struct aa_profile *profile, int cap)
{
	if (cap_raised(profile->caps.allow, cap))
		return 0;
	return -EPERM;
}
/*
 * can_use_krg_cap - check whether @task may currently use Kerrighed
 * capability @cap.
 *
 * Returns nonzero only when the capability is in the task's effective
 * set AND neither availability counter marks it unavailable.
 */
int can_use_krg_cap(struct task_struct *task, int cap)
{
	if (!cap_raised(task->krg_caps.effective, cap))
		return 0;
	if (atomic_read(&task->krg_cap_unavailable[cap]))
		return 0;
	if (atomic_read(&task->krg_cap_unavailable_private[cap]))
		return 0;
	return 1;
}
/*
 * cap_netlink_recv - check the sender's effective capabilities carried
 * on @skb for @cap.  Returns 0 when permitted, -EPERM otherwise.
 */
int cap_netlink_recv(struct sk_buff *skb, int cap)
{
	return cap_raised(NETLINK_CB(skb).eff_cap, cap) ? 0 : -EPERM;
}
/*
 * cap_netlink_recv - check the current task's capabilities for @cap.
 * Returns 0 when permitted, -EPERM otherwise.
 */
int cap_netlink_recv(struct sk_buff *skb, int cap)
{
	return cap_raised(current_cap(), cap) ? 0 : -EPERM;
}
/**
 * cap_capable - Determine whether a task has a particular effective capability
 * @tsk: The task to query
 * @cred: The credentials to use
 * @cap: The capability to check for
 * @audit: Whether to write an audit message or not (unused here)
 *
 * Determine whether the nominated task has the specified capability amongst
 * its effective set, returning 0 if it does, -ve if it does not.
 *
 * NOTE WELL: cap_has_capability() cannot be used like the kernel's capable()
 * and has_capability() functions.  That is, it has the reverse semantics:
 * cap_has_capability() returns 0 when a task has a capability, but the
 * kernel's capable() and has_capability() returns 1 for this case.
 */
int cap_capable(struct task_struct *tsk, const struct cred *cred, int cap, int audit)
{
	if (cap_raised(cred->cap_effective, cap))
		return 0;
	return -EPERM;
}
/*
 * dummy_netlink_recv - default security hook: netlink receive is
 * permitted only with CAP_NET_ADMIN in the sender's effective set.
 */
static int dummy_netlink_recv (struct sk_buff *skb)
{
	return cap_raised (NETLINK_CB (skb).eff_cap, CAP_NET_ADMIN) ? 0 : -EPERM;
}
/*
 * cfs_cap_raised - test whether the current task holds the kernel
 * capability corresponding to the packed CFS capability @cap.
 * cfs_cap_unpack() translates the CFS representation into the kernel's
 * capability numbering.
 */
int cfs_cap_raised(cfs_cap_t cap)
{
	return cap_raised(current_cap(), cfs_cap_unpack(cap));
}
/*
 * rtnetlink_rcv_msg - process one rtnetlink request message.
 * @skb: the message buffer
 * @nlh: the netlink header within @skb
 * @errp: out-parameter for the error to report to the sender
 *
 * Returns 0 to skip the message, a negative value via *errp on error,
 * or the handler's result.  Non-GET (kind != 2) operations require
 * CAP_NET_ADMIN and take the rtnl exclusive lock; dumps are started
 * asynchronously via netlink_dump_start().
 */
static __inline__ int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, int *errp)
{
	struct rtnetlink_link *link;
	struct rtnetlink_link *link_tab;
	struct rtattr *rta[RTATTR_MAX];

	int exclusive = 0;
	int sz_idx, kind;
	int min_len;
	int family;
	int type;
	int err;

	/* Only requests are handled by kernel now */
	if (!(nlh->nlmsg_flags&NLM_F_REQUEST))
		return 0;

	type = nlh->nlmsg_type;

	/* A control message: ignore them */
	if (type < RTM_BASE)
		return 0;

	/* Unknown message: reply with EINVAL */
	if (type > RTM_MAX)
		goto err_inval;

	type -= RTM_BASE;

	/* All the messages must have at least 1 byte length */
	if (nlh->nlmsg_len < NLMSG_LENGTH(sizeof(struct rtgenmsg)))
		return 0;

	family = ((struct rtgenmsg*)NLMSG_DATA(nlh))->rtgen_family;
	/* NOTE(review): if rtnetlink_links has NPROTO entries this check
	 * looks off by one — family == NPROTO would index past the end;
	 * confirm against the array's declaration (>= may be intended). */
	if (family > NPROTO) {
		*errp = -EAFNOSUPPORT;
		return -1;
	}

	/* Fall back to the PF_UNSPEC table when the family has none. */
	link_tab = rtnetlink_links[family];
	if (link_tab == NULL)
		link_tab = rtnetlink_links[PF_UNSPEC];
	link = &link_tab[type];

	/* type encodes object kind (high bits) and operation (low 2 bits);
	 * kind == 2 is a GET. */
	sz_idx = type>>2;
	kind = type&3;

	/* Modifying operations require CAP_NET_ADMIN. */
	if (kind != 2 && !cap_raised(NETLINK_CB(skb).eff_cap, CAP_NET_ADMIN)) {
		*errp = -EPERM;
		return -1;
	}

	/* GET with NLM_F_DUMP: hand off to the dump machinery. */
	if (kind == 2 && nlh->nlmsg_flags&NLM_F_DUMP) {
		u32 rlen;

		if (link->dumpit == NULL)
			link = &(rtnetlink_links[PF_UNSPEC][type]);

		if (link->dumpit == NULL)
			goto err_inval;

		if ((*errp = netlink_dump_start(rtnl, skb, nlh, link->dumpit, rtnetlink_done)) != 0) {
			return -1;
		}
		/* Consume the message from the skb ourselves. */
		rlen = NLMSG_ALIGN(nlh->nlmsg_len);
		if (rlen > skb->len)
			rlen = skb->len;
		skb_pull(skb, rlen);
		return -1;
	}

	/* Non-GET operations run under the rtnl exclusive lock. */
	if (kind != 2) {
		if (rtnl_exlock_nowait()) {
			*errp = 0;
			return -1;
		}
		exclusive = 1;
	}

	memset(&rta, 0, sizeof(rta));

	min_len = rtm_min[sz_idx];
	if (nlh->nlmsg_len < min_len)
		goto err_inval;

	/* Collect trailing attributes into rta[], indexed by type - 1. */
	if (nlh->nlmsg_len > min_len) {
		int attrlen = nlh->nlmsg_len - NLMSG_ALIGN(min_len);
		struct rtattr *attr = (void*)nlh + NLMSG_ALIGN(min_len);

		while (RTA_OK(attr, attrlen)) {
			unsigned flavor = attr->rta_type;
			if (flavor) {
				if (flavor > rta_max[sz_idx])
					goto err_inval;
				rta[flavor-1] = attr;
			}
			attr = RTA_NEXT(attr, attrlen);
		}
	}

	if (link->doit == NULL)
		link = &(rtnetlink_links[PF_UNSPEC][type]);
	if (link->doit == NULL)
		goto err_inval;
	err = link->doit(skb, nlh, (void *)&rta);

	if (exclusive)
		rtnl_exunlock();
	*errp = err;
	return err;

err_inval:
	if (exclusive)
		rtnl_exunlock();
	*errp = -EINVAL;
	return -1;
}
/*
 * dummy_capable - default security hook: report whether @tsk holds
 * @cap in its effective set (0 = yes, -EPERM = no).
 */
static int dummy_capable (struct task_struct *tsk, int cap)
{
	return cap_raised (tsk->cap_effective, cap) ? 0 : -EPERM;
}
/*
 * krg_set_cap - replace @tsk's Kerrighed capability sets with
 * @requested_cap, after validating consistency and permissions.
 *
 * Returns 0 on success; -EPERM when the task is outside a krg
 * namespace, lacks permission, or cannot change its caps; -EINVAL when
 * the requested sets are not properly nested; -ENOSYS when an
 * unsupported capability is requested; -EBUSY when a capability in use
 * would be dropped.  The new sets are always intersected with the
 * task's current permitted set, so capabilities can only shrink.
 */
static int krg_set_cap(struct task_struct *tsk, const kernel_krg_cap_t *requested_cap)
{
	kernel_krg_cap_t *caps = &tsk->krg_caps;
	kernel_cap_t tmp_cap;
	struct nsproxy *nsp;
	int res;
	int i;

	/* The task must live in a Kerrighed namespace. */
	res = 0;
	rcu_read_lock();
	nsp = rcu_dereference(tsk->nsproxy);
	if (!nsp || !nsp->krg_ns)
		res = -EPERM;
	rcu_read_unlock();
	if (res)
		goto out;

	/* Requested sets must nest: effective and inheritable_permitted
	 * within permitted, inheritable_effective within
	 * inheritable_permitted. */
	res = -EINVAL;
	if (!cap_issubset(requested_cap->effective, requested_cap->permitted) || !cap_issubset(requested_cap->inheritable_permitted, requested_cap->permitted) || !cap_issubset(requested_cap->inheritable_effective, requested_cap->inheritable_permitted))
		goto out;

	/* Refuse capabilities this build does not implement. */
	res = -ENOSYS;
	tmp_cap = KRG_CAP_SUPPORTED;
	if (!cap_issubset(requested_cap->permitted, tmp_cap))
		goto out;

	res = -EPERM;
	if (!permissions_ok(tsk))
		goto out;

	task_lock(tsk);

	/* res is still -EPERM here if the task may not change its caps. */
	if (!cap_raised(caps->effective, CAP_CHANGE_KERRIGHED_CAP))
		goto out_unlock;

	/* A capability currently in use cannot be dropped. */
	res = -EBUSY;
	for (i = 0; i < CAP_SIZE; i++)
		if (atomic_read(&tsk->krg_cap_used[i]) && !cap_raised(requested_cap->effective, i))
			goto out_unlock;

	/* Commit: each set is clamped by the (new) permitted set. */
	tmp_cap = cap_intersect(caps->permitted, requested_cap->permitted);
	caps->permitted = tmp_cap;
	tmp_cap = cap_intersect(caps->permitted, requested_cap->effective);
	caps->effective = tmp_cap;
	tmp_cap = cap_intersect(caps->permitted, requested_cap->inheritable_effective);
	caps->inheritable_effective = tmp_cap;
	tmp_cap = cap_intersect(caps->permitted, requested_cap->inheritable_permitted);
	caps->inheritable_permitted = tmp_cap;
	res = 0;

out_unlock:
	task_unlock(tsk);
out:
	return res;
}
static ssize_t lpa_if_write(struct file *file, const char __user *buf, size_t count, loff_t *pos) { struct lpa_if *lpa_if = file->private_data; struct audio_buffer *ab; const char __user *start = buf; int xfer, rc; struct sched_param s = { .sched_priority = 1 }; int old_prio = current->rt_priority; int old_policy = current->policy; int cap_nice = cap_raised(current_cap(), CAP_SYS_NICE); /* just for this write, set us real-time */ if (!task_has_rt_policy(current)) { struct cred *new = prepare_creds(); cap_raise(new->cap_effective, CAP_SYS_NICE); commit_creds(new); if ((sched_setscheduler(current, SCHED_RR, &s)) < 0) pr_err("sched_setscheduler failed\n"); } mutex_lock(&lpa_if->lock); if (dma_buf_index < 2) { ab = lpa_if->audio_buf + dma_buf_index; if (copy_from_user(ab->data, buf, count)) { pr_err("copy from user failed\n"); rc = 0; goto end; } mb(); pr_debug("prefill: count %u audio_buf[%u].size %u\n", count, dma_buf_index, ab->size); ab->used = 1; dma_buf_index++; rc = count; goto end; } if (lpa_if->config != 1) { pr_err("AUDIO_START did not happen\n"); rc = 0; goto end; } while (count > 0) { ab = lpa_if->audio_buf + lpa_if->cpu_buf; rc = wait_event_timeout(lpa_if->wait, (ab->used == 0), 10 * HZ); if (!rc) { pr_err("wait_event_timeout failed\n"); rc = buf - start; goto end; } xfer = count; if (xfer > lpa_if->dma_period_sz) xfer = lpa_if->dma_period_sz; if (copy_from_user(ab->data, buf, xfer)) { pr_err("copy from user failed\n"); rc = buf - start; goto end; } mb(); buf += xfer; count -= xfer; ab->used = 1; pr_debug("xfer %d, size %d, used %d cpu_buf %d\n", xfer, ab->size, ab->used, lpa_if->cpu_buf); lpa_if->cpu_buf++; lpa_if->cpu_buf = lpa_if->cpu_buf % lpa_if->cfg.buffer_count; } rc = buf - start; end: mutex_unlock(&lpa_if->lock); /* restore old scheduling policy */ if (!rt_policy(old_policy)) { struct sched_param v = { .sched_priority = old_prio }; if ((sched_setscheduler(current, old_policy, &v)) < 0) pr_err("sched_setscheduler failed\n"); if (likely(!cap_nice)) 
{ struct cred *new = prepare_creds(); cap_lower(new->cap_effective, CAP_SYS_NICE); commit_creds(new); } } return rc; } static int lpa_if_release(struct inode *inode, struct file *file) { struct lpa_if *lpa_if = file->private_data; hdmi_audio_packet_enable(0); wait_for_dma_cnt_stop(lpa_if->dma_ch); hdmi_audio_enable(0, HDMI_AUDIO_FIFO_WATER_MARK); if (lpa_if->config) { unregister_dma_irq_handler(lpa_if->dma_ch); dai_stop_hdmi(lpa_if->dma_ch); lpa_if->config = 0; } core_req_bus_bandwith(AUDIO_IF_BUS_ID, 0, 0); if (hdmi_msm_audio_get_sample_rate() != HDMI_SAMPLE_RATE_48KHZ) hdmi_msm_audio_sample_rate_reset(HDMI_SAMPLE_RATE_48KHZ); return 0; } static const struct file_operations lpa_if_fops = { .owner = THIS_MODULE, .open = lpa_if_open, .write = lpa_if_write, .release = lpa_if_release, .unlocked_ioctl = lpa_if_ioctl, }; struct miscdevice lpa_if_misc = { .minor = MISC_DYNAMIC_MINOR, .name = "msm_lpa_if_out", .fops = &lpa_if_fops, }; static int __init lpa_if_init(void) { int rc; lpa_if_ptr = kzalloc(sizeof(struct lpa_if), GFP_KERNEL); if (!lpa_if_ptr) { pr_info("No mem for lpa-if\n"); return -ENOMEM; } mutex_init(&lpa_if_ptr->lock); init_waitqueue_head(&lpa_if_ptr->wait); lpa_if_ptr->buffer = dma_alloc_coherent(NULL, DMA_ALLOC_BUF_SZ, &(lpa_if_ptr->buffer_phys), GFP_KERNEL); if (!lpa_if_ptr->buffer) { pr_err("dma_alloc_coherent failed\n"); kfree(lpa_if_ptr); return -ENOMEM; } pr_info("lpa_if_ptr 0x%08x buf_vir 0x%08x buf_phy 0x%08x " " buf_zise %u\n", (u32)lpa_if_ptr, (u32)(lpa_if_ptr->buffer), lpa_if_ptr->buffer_phys, DMA_ALLOC_BUF_SZ); rc = misc_register(&lpa_if_misc); if (rc < 0) { pr_err("misc_register failed\n"); dma_free_coherent(NULL, DMA_ALLOC_BUF_SZ, lpa_if_ptr->buffer, lpa_if_ptr->buffer_phys); kfree(lpa_if_ptr); } return rc; } device_initcall(lpa_if_init);