/*
 * cap_set_pg - set capabilities for all processes in a given process
 * group.  Called with task_capability_lock and tasklist_lock held.
 */
static inline void cap_set_pg(int pgrp, kernel_cap_t *effective,
			      kernel_cap_t *inheritable,
			      kernel_cap_t *permitted)
{
	task_t *g, *target;
	struct list_head *l;
	struct pid *pid;

	for_each_task_pid(pgrp, PIDTYPE_PGID, g, l, pid) {
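		/* Visit every thread of each process in the process group. */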
		target = g;
		while_each_thread(g, target)
			security_capset_set(target, effective, inheritable, permitted);
	}
}
Example #2
/*
 * cap_set_pg - set capabilities for all processes in a given process
 * group.  Called with task_capability_lock and tasklist_lock held.
 */
static inline int cap_set_pg(int pgrp, kernel_cap_t *effective,
			      kernel_cap_t *inheritable,
			      kernel_cap_t *permitted)
{
	struct task_struct *g, *target;
	int ret = -EPERM;
	int found = 0;

	do_each_task_pid(pgrp, PIDTYPE_PGID, g) {
		target = g;
		while_each_thread(g, target) {
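			/* Apply the new capability sets only to threads for
			 * which the security module permits the change. */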
			if (!security_capset_check(target, effective,
							inheritable,
							permitted)) {
				security_capset_set(target, effective,
							inheritable,
							permitted);
				ret = 0;
			}
			found = 1;
		}
	} while_each_task_pid(pgrp, PIDTYPE_PGID, g);

	if (!found)
		ret = 0;

	return ret;
}
Example #3
static int oo_bufpage_huge_alloc(struct oo_buffer_pages *p, int *flags)
{
  int shmid = -1;
  long uaddr;
  static unsigned volatile last_key_id = 0;
  unsigned start_key_id;
  unsigned id;
  int rc;
  int restore_creds = 0;
#ifdef current_cred
  struct cred *creds;
#endif

  ci_assert( current->mm );

  /* sys_shmget(SHM_HUGETLB) needs CAP_IPC_LOCK, so we grant this
   * capability here and restore the original set afterwards.  Since
   * capabilities are per-thread, the temporary change has no side
   * effects on other threads.  (A standalone sketch of this
   * raise/restore pattern follows this example.) */
#ifdef current_cred
  if (~current_cred()->cap_effective.cap[0] & (1 << CAP_IPC_LOCK)) {
    creds = prepare_creds();
    if( creds != NULL ) {
      creds->cap_effective.cap[0] |= 1 << CAP_IPC_LOCK;
      commit_creds(creds);
      restore_creds = 1;
    }
  }
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) || \
  !defined(CONFIG_SECURITY)
  /* we need security_capset_set to be inline here */

#ifdef STRICT_CAP_T_TYPECHECKS
#define cap2int(cap) ((cap).cap)
#else
#define cap2int(cap) (cap)
#endif

  if (~cap2int(current->cap_effective) & (1 << CAP_IPC_LOCK)) {
    /* This is bad: we ought to take the non-exported
     * task_capability_lock, or use sys_capset(), but we have no
     * user-space memory to pass to that syscall. */
    kernel_cap_t eff = current->cap_effective;
    cap2int(eff) |= 1 << CAP_IPC_LOCK;
    security_capset_set(current, &eff, &current->cap_inheritable,
                        &current->cap_permitted);
    restore_creds = 1;
  }
#elif LINUX_VERSION_CODE == KERNEL_VERSION(2,6,24)
  /* CONFIG_SECURITY, 2.6.24 */

#ifdef STRICT_CAP_T_TYPECHECKS
#define cap2int(cap) ((cap).cap)
#else
#define cap2int(cap) (cap)
#endif

  if (~cap2int(current->cap_effective) & (1 << CAP_IPC_LOCK)) {
    static int printed = 0;
    if (!printed) {
      ci_log("%s: can't allocate huge pages without CAP_IPC_LOCK", __func__);
      printed = 1;
    }
    return -EPERM;
  }
#else
  /* CONFIG_SECURITY, 2.6.25 <= linux <= 2.6.28
   * (2.6.29 is where current_cred is defined) */
  if (~current->cap_effective.cap[0] & (1 << CAP_IPC_LOCK)) {
    static int printed = 0;
    if (!printed) {
      ci_log("%s: can't allocate huge pages without CAP_IPC_LOCK", __func__);
      printed = 1;
    }
    return -EPERM;
  }
#endif

  /* Simultaneous access to last_key_id is possible, but harmless: it is
   * only a hint about where to start looking for free ids. */
  start_key_id = last_key_id;
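  /* Probe SysV SHM key ids starting from the hint, skipping ids that
   * already exist, until we either create a segment or wrap around. */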

  for (id = OO_SHM_NEXT_ID(start_key_id);
       id != start_key_id;
       id = OO_SHM_NEXT_ID(id)) {
    shmid = efab_linux_sys_shmget(OO_SHM_KEY(id), HPAGE_SIZE,
                                  SHM_HUGETLB | IPC_CREAT | IPC_EXCL |
                                  SHM_R | SHM_W);
    if (shmid == -EEXIST)
      continue; /* try another id */
    if (shmid < 0) {
      if (shmid == -ENOMEM && !(*flags & OO_IOBUFSET_FLAG_HUGE_PAGE_FAILED) )
        *flags |= OO_IOBUFSET_FLAG_HUGE_PAGE_FAILED;
      rc = shmid;
      goto out;
    }
    last_key_id = id;
    break;
  }
  if (shmid < 0) {
    ci_log("%s: Failed to allocate huge page: EEXIST", __func__);
    last_key_id = 0; /* reset last_key_id */
    rc = shmid;
    goto out;
  }

  /* We do not need the UL mapping itself, but the only way to obtain the
   * page is to create (and later destroy) a UL mapping.  get_user_pages()
   * below takes a reference on the huge page, so p->pages[0] stays valid
   * after the mapping is detached. */
  uaddr = efab_linux_sys_shmat(shmid, NULL, 0);
  if (uaddr < 0) {
    rc = (int)uaddr;
    goto fail3;
  }

  down_read(&current->mm->mmap_sem);
  rc = get_user_pages(current, current->mm, (unsigned long)uaddr, 1,
                      1/*write*/, 0/*force*/, &(p->pages[0]), NULL);
  up_read(&current->mm->mmap_sem);
  if (rc < 0)
    goto fail2;
  rc = efab_linux_sys_shmdt((char __user *)uaddr);
  if (rc < 0)
    goto fail1;

  p->shmid = shmid;
  rc = 0;
  goto out;

fail1:
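  /* Error unwind, in reverse order of setup: drop the page reference,
   * detach the UL mapping, then remove the SHM segment. */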
  put_page(p->pages[0]);
fail2:
  efab_linux_sys_shmdt((char __user *)uaddr);
fail3:
  efab_linux_sys_shmctl(shmid, IPC_RMID, NULL);
out:
  if (restore_creds) {
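    /* Clear the CAP_IPC_LOCK bit we raised earlier, restoring the
     * thread's original effective capability set. */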
#ifdef current_cred
    creds = prepare_creds();
    if( creds != NULL ) {
      creds->cap_effective.cap[0] &= ~(1 << CAP_IPC_LOCK);
      commit_creds(creds);
    }
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
    kernel_cap_t eff = current->cap_effective;
    cap2int(eff) &= ~(1 << CAP_IPC_LOCK);
    security_capset_set(current, &eff, &current->cap_inheritable,
                        &current->cap_permitted);
#else
    ci_assert(0);
#endif
  }
  return rc;
}
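The #ifdef current_cred branches in Example #3 raise and restore CAP_IPC_LOCK by editing cap_effective.cap[0] directly. The sketch below shows the same raise/restore pattern written with the stock credential and capability helpers, assuming a kernel with the credentials API (2.6.29+). It is an illustration of the idiom, not code from the driver: with_cap_ipc_lock and do_locked_alloc are hypothetical names, and do_locked_alloc stands in for the SHM_HUGETLB allocation.

/* Minimal sketch (not from the driver): temporarily raise CAP_IPC_LOCK
 * around a privileged call on a kernel with the credentials API.
 * do_locked_alloc is a hypothetical placeholder for the SHM_HUGETLB
 * allocation above. */
#include <linux/capability.h>
#include <linux/cred.h>
#include <linux/errno.h>

static int with_cap_ipc_lock(int (*do_locked_alloc)(void))
{
  const struct cred *old;
  struct cred *creds;
  int rc;

  /* Nothing to do if the calling thread already has CAP_IPC_LOCK. */
  if (cap_raised(current_cred()->cap_effective, CAP_IPC_LOCK))
    return do_locked_alloc();

  creds = prepare_creds();
  if (creds == NULL)
    return -ENOMEM;
  cap_raise(creds->cap_effective, CAP_IPC_LOCK);

  /* Install the raised credentials for this thread only... */
  old = override_creds(creds);
  rc = do_locked_alloc();

  /* ...and put everything back exactly as it was. */
  revert_creds(old);
  put_cred(creds);
  return rc;
}

Example #3 instead uses a prepare_creds()/commit_creds() pair and undoes the change in its out: path; override_creds()/revert_creds() keeps the raised set scoped to the single call and only touches the thread's subjective credentials.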