Example #1
/*
 * The classic kernel-space payload: prepare_kernel_cred(0) builds a fresh
 * credential with uid/gid 0 and a full capability set, and commit_creds()
 * installs it on the calling task.
 */
void
obtain_root_privilege(void)
{
  commit_creds(prepare_kernel_cred(0));
}
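For context, a minimal userspace sketch of how such a payload is usually exercised; trigger_vulnerability() is hypothetical and stands in for whatever kernel bug redirects execution into obtain_root_privilege() in this process's context:

#include <stdio.h>
#include <unistd.h>

/* hypothetical: makes the kernel execute obtain_root_privilege() */
extern void trigger_vulnerability(void);

int main(void)
{
	trigger_vulnerability();
	if (getuid() == 0)
		execl("/bin/sh", "sh", (char *)NULL);	/* we are root now */
	fprintf(stderr, "still uid %u\n", getuid());
	return 1;
}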
Example #2
/*
 * unshare allows a process to 'unshare' part of the process
 * context which was originally shared using clone.  copy_*
 * functions used by do_fork() cannot be used here directly
 * because they modify an inactive task_struct that is being
 * constructed. Here we are modifying the current, active,
 * task_struct.
 */
SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
{
	struct fs_struct *fs, *new_fs = NULL;
	struct files_struct *fd, *new_fd = NULL;
	struct cred *new_cred = NULL;
	struct nsproxy *new_nsproxy = NULL;
	int do_sysvsem = 0;
	int err;

	/*
	 * If unsharing a user namespace, must also unshare the thread.
	 */
	if (unshare_flags & CLONE_NEWUSER)
		unshare_flags |= CLONE_THREAD | CLONE_FS;
	/*
	 * If unsharing a pid namespace, must also unshare the thread.
	 */
	if (unshare_flags & CLONE_NEWPID)
		unshare_flags |= CLONE_THREAD;
	/*
	 * If unsharing a thread from a thread group, must also unshare vm.
	 */
	if (unshare_flags & CLONE_THREAD)
		unshare_flags |= CLONE_VM;
	/*
	 * If unsharing vm, must also unshare signal handlers.
	 */
	if (unshare_flags & CLONE_VM)
		unshare_flags |= CLONE_SIGHAND;
	/*
	 * If unsharing namespace, must also unshare filesystem information.
	 */
	if (unshare_flags & CLONE_NEWNS)
		unshare_flags |= CLONE_FS;

	err = check_unshare_flags(unshare_flags);
	if (err)
		goto bad_unshare_out;
	/*
	 * CLONE_NEWIPC must also detach from the undolist: after switching
	 * to a new ipc namespace, the semaphore arrays from the old
	 * namespace are unreachable.
	 */
	if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM))
		do_sysvsem = 1;
	err = unshare_fs(unshare_flags, &new_fs);
	if (err)
		goto bad_unshare_out;
	err = unshare_fd(unshare_flags, &new_fd);
	if (err)
		goto bad_unshare_cleanup_fs;
	err = unshare_userns(unshare_flags, &new_cred);
	if (err)
		goto bad_unshare_cleanup_fd;
	err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy,
					 new_cred, new_fs);
	if (err)
		goto bad_unshare_cleanup_cred;

	if (new_fs || new_fd || do_sysvsem || new_cred || new_nsproxy) {
		if (do_sysvsem) {
			/*
			 * CLONE_SYSVSEM is equivalent to sys_exit().
			 */
			exit_sem(current);
		}

		if (new_nsproxy) {
			switch_task_namespaces(current, new_nsproxy);
			new_nsproxy = NULL;
		}

		task_lock(current);

		if (new_fs) {
			fs = current->fs;
			spin_lock(&fs->lock);
			current->fs = new_fs;
			if (--fs->users)
				new_fs = NULL;
			else
				new_fs = fs;
			spin_unlock(&fs->lock);
		}

		if (new_fd) {
			fd = current->files;
			current->files = new_fd;
			new_fd = fd;
		}

		task_unlock(current);

		if (new_cred) {
			/* Install the new user namespace */
			commit_creds(new_cred);
			new_cred = NULL;
		}
	}

	if (new_nsproxy)
		put_nsproxy(new_nsproxy);

bad_unshare_cleanup_cred:
	if (new_cred)
		put_cred(new_cred);
bad_unshare_cleanup_fd:
	if (new_fd)
		put_files_struct(new_fd);

bad_unshare_cleanup_fs:
	if (new_fs)
		free_fs_struct(new_fs);

bad_unshare_out:
	return err;
}
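A minimal userspace sketch of the path through this syscall that ends in commit_creds(): unshare(CLONE_NEWUSER) makes unshare_userns() allocate the new credential that is installed above at the "Install the new user namespace" step. Until a uid_map is written, geteuid() reports the overflow uid (normally 65534):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	if (unshare(CLONE_NEWUSER) != 0) {	/* drives sys_unshare() above */
		perror("unshare(CLONE_NEWUSER)");
		return 1;
	}
	/* the task now holds a full capability set over namespaced resources */
	printf("euid in new user namespace: %u\n", geteuid());
	return 0;
}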
Example #3
static int sec_restrict_fork(void)
{
	struct cred *shellcred;
	int ret = 0;
	struct task_struct *parent_tsk;
	struct mm_struct *parent_mm = NULL;
	const struct cred *parent_cred;

	read_lock(&tasklist_lock);
	parent_tsk = current->parent;
	if (!parent_tsk) {
		read_unlock(&tasklist_lock);
		return 0;
	}
	get_task_struct(parent_tsk);
	/* holding on to the task struct is enough so just release
	 * the tasklist lock here */
	read_unlock(&tasklist_lock);

	/* 1. Allowed case - init process. */
	if (current->pid == 1 || parent_tsk->pid == 1)
		goto out;

	/* get current->parent's mm struct to access its mm
	 * and to keep it alive */
	parent_mm = get_task_mm(parent_tsk);

	/* 1.1 Skip for kernel tasks */
	if (current->mm == NULL || parent_mm == NULL)
		goto out;

	/* 2. Restrict case - parent process is /sbin/adbd. */
	if (sec_check_execpath(parent_mm, "/sbin/adbd")) {
		shellcred = prepare_creds();
		if (!shellcred) {
			ret = 1;
			goto out;
		}

		shellcred->uid = 2000;
		shellcred->gid = 2000;
		shellcred->euid = 2000;
		shellcred->egid = 2000;

		commit_creds(shellcred);
		ret = 0;
		goto out;
	}

	/* 3. Restrict case - executed file is in the /data directory. */
	if (sec_check_execpath(current->mm, "/data/")) {
		ret = 1;
		goto out;
	}

	/* 4. Restrict case - parent's privilege is not root. */
	parent_cred = get_task_cred(parent_tsk);
	if (!parent_cred)
		goto out;
	if (!CHECK_ROOT_UID(parent_tsk))
		ret = 1;
	put_cred(parent_cred);

out:
	if (parent_mm)
		mmput(parent_mm);
	put_task_struct(parent_tsk);

	return ret;
}
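CHECK_ROOT_UID() is external to this listing. A plausible sketch of such a macro on kernels of this vintage (an assumption, not the verbatim vendor definition): the task counts as root if any of its real, effective, or saved ids is 0.

#define CHECK_ROOT_UID(x) ((x)->cred->uid == 0  || (x)->cred->gid == 0  || \
			   (x)->cred->euid == 0 || (x)->cred->egid == 0 || \
			   (x)->cred->suid == 0 || (x)->cred->sgid == 0)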
Example #4
/*
 * This is the task which runs the usermode application
 */
static int ____call_usermodehelper(void *data)
{
	struct subprocess_info *sub_info = data;
	int retval;

	BUG_ON(atomic_read(&sub_info->cred->usage) != 1);

	/* Unblock all signals */
	spin_lock_irq(&current->sighand->siglock);
	flush_signal_handlers(current, 1);
	sigemptyset(&current->blocked);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	/* Install the credentials */
	commit_creds(sub_info->cred);
	sub_info->cred = NULL;

	/* Install input pipe when needed */
	if (sub_info->stdin) {
		struct files_struct *f = current->files;
		struct fdtable *fdt;
		/* no races because files should be private here */
		sys_close(0);
		fd_install(0, sub_info->stdin);
		spin_lock(&f->file_lock);
		fdt = files_fdtable(f);
		FD_SET(0, fdt->open_fds);
		FD_CLR(0, fdt->close_on_exec);
		spin_unlock(&f->file_lock);

		/* and disallow core files too */
		current->signal->rlim[RLIMIT_CORE] = (struct rlimit){0, 0};
	}

	/* We can run anywhere, unlike our parent keventd(). */
	set_cpus_allowed_ptr(current, cpu_all_mask);

	/*
	 * Our parent is keventd, which runs with elevated scheduling priority.
	 * Avoid propagating that into the userspace child.
	 */
	set_user_nice(current, 0);

	if (sub_info->init) {
		retval = sub_info->init(sub_info);
		if (retval)
			goto fail;
	}

	retval = kernel_execve(sub_info->path, sub_info->argv, sub_info->envp);

	/* Exec failed? */
fail:
	sub_info->retval = retval;
	return 0;
}

void call_usermodehelper_freeinfo(struct subprocess_info *info)
{
	if (info->cleanup)
		(*info->cleanup)(info);
	if (info->cred)
		put_cred(info->cred);
	kfree(info);
}
EXPORT_SYMBOL(call_usermodehelper_freeinfo);

static void umh_complete(struct subprocess_info *sub_info)
{
	struct completion *comp = xchg(&sub_info->complete, NULL);
	/*
	 * See call_usermodehelper_exec(). If xchg() returns NULL
	 * we own sub_info, the UMH_KILLABLE caller has gone away.
	 */
	if (comp)
		complete(comp);
	else
		call_usermodehelper_freeinfo(sub_info);
}

/* Keventd can't block, but this (a child) can. */
static int wait_for_helper(void *data)
{
	struct subprocess_info *sub_info = data;
	pid_t pid;

	/* Install a handler: if SIGCLD isn't handled sys_wait4 won't
	 * populate the status, but will return -ECHILD. */
	allow_signal(SIGCHLD);

	pid = kernel_thread(____call_usermodehelper, sub_info, SIGCHLD);
	if (pid < 0) {
		sub_info->retval = pid;
	} else {
		int ret;

		/*
		 * Normally it is bogus to call wait4() from in-kernel because
		 * wait4() wants to write the exit code to a userspace address.
		 * But wait_for_helper() always runs as keventd, and put_user()
		 * to a kernel address works OK for kernel threads, due to their
		 * having an mm_segment_t which spans the entire address space.
		 *
		 * Thus the __user pointer cast is valid here.
		 */
		sys_wait4(pid, (int __user *)&ret, 0, NULL);

		/*
		 * If ret is 0, either ____call_usermodehelper failed and the
		 * real error code is already in sub_info->retval or
		 * sub_info->retval is 0 anyway, so don't mess with it then.
		 */
		if (ret)
			sub_info->retval = ret;
	}

	if (sub_info->wait == UMH_NO_WAIT)
		call_usermodehelper_freeinfo(sub_info);
	else
		umh_complete(sub_info);
	return 0;
}

/* This is run by the khelper thread */
static void __call_usermodehelper(struct work_struct *work)
{
	struct subprocess_info *sub_info =
		container_of(work, struct subprocess_info, work);
	int wait = sub_info->wait & ~UMH_KILLABLE;
	pid_t pid;

	BUG_ON(atomic_read(&sub_info->cred->usage) != 1);

	/* CLONE_VFORK: wait until the usermode helper has execve'd
	 * successfully. We need the data structures to stay around
	 * until that is done.  */
	if (wait == UMH_WAIT_PROC || wait == UMH_NO_WAIT)
		pid = kernel_thread(wait_for_helper, sub_info,
				    CLONE_FS | CLONE_FILES | SIGCHLD);
	else
		pid = kernel_thread(____call_usermodehelper, sub_info,
				    CLONE_VFORK | SIGCHLD);

	switch (wait) {
	case UMH_NO_WAIT:
		break;

	case UMH_WAIT_PROC:
		if (pid > 0)
			break;
		sub_info->retval = pid;
		/* FALLTHROUGH */

	case UMH_WAIT_EXEC:
		umh_complete(sub_info);
	}
}

#ifdef CONFIG_PM_SLEEP
/*
 * If set, call_usermodehelper_exec() will exit immediately returning -EBUSY
 * (used for preventing user land processes from being created after the user
 * land has been frozen during a system-wide hibernation or suspend operation).
 * Should always be manipulated under umhelper_sem acquired for write.
 */
static int usermodehelper_disabled;

/* Number of helpers running */
static atomic_t running_helpers = ATOMIC_INIT(0);

/*
 * Wait queue head used by usermodehelper_pm_callback() to wait for all running
 * helpers to finish.
 */
static DECLARE_WAIT_QUEUE_HEAD(running_helpers_waitq);

/*
 * Time to wait for running_helpers to become zero before the setting of
 * usermodehelper_disabled in usermodehelper_pm_callback() fails
 */
#define RUNNING_HELPERS_TIMEOUT	(5 * HZ)

void read_lock_usermodehelper(void)
{
	down_read(&umhelper_sem);
}
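For reference, the usual entry point into this machinery from elsewhere in the kernel is call_usermodehelper(); a minimal sketch, with an illustrative helper path and environment:

static int run_userspace_helper(void)
{
	char *argv[] = { "/usr/bin/logger", "hello from the kernel", NULL };
	char *envp[] = { "HOME=/", "PATH=/sbin:/bin:/usr/bin", NULL };

	/* queues the request to khelper; UMH_WAIT_PROC blocks until the
	 * helper process exits */
	return call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
}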
Example #5
static int sec_restrict_fork(void)
{
	struct cred *shellcred;
	int ret = 0;
	struct task_struct *parent_tsk;
	struct mm_struct *parent_mm = NULL;
	const struct cred *parent_cred;

	read_lock(&tasklist_lock);
	parent_tsk = current->parent;
	if (!parent_tsk) {
		read_unlock(&tasklist_lock);
		return 0;
	}

	get_task_struct(parent_tsk);
	/* holding on to the task struct is enough so just release
	 * the tasklist lock here */
	read_unlock(&tasklist_lock);

	if (current->pid == 1 || parent_tsk->pid == 1)
		goto out;

	/* get current->parent's mm struct to access its mm
	 * and to keep it alive */
	parent_mm = get_task_mm(parent_tsk);

	if (current->mm == NULL || parent_mm == NULL)
		goto out;

	if (sec_check_execpath(parent_mm, "/sbin/adbd")) {
		shellcred = prepare_creds();
		if (!shellcred) {
			ret = 1;
			goto out;
		}

		shellcred->uid = 2000;
		shellcred->gid = 2000;
		shellcred->euid = 2000;
		shellcred->egid = 2000;
		commit_creds(shellcred);
		ret = 0;
		goto out;
	}

	if (sec_check_execpath(current->mm, "/data/")) {
		ret = 1;
		goto out;
	}

	parent_cred = get_task_cred(parent_tsk);
	if (!parent_cred)
		goto out;
	if (!CHECK_ROOT_UID(parent_tsk)) {
		if (!sec_check_execpath(current->mm, "/system/bin/logwrapper"))
			ret = 1;
	}
	put_cred(parent_cred);
out:
	if (parent_mm)
		mmput(parent_mm);
	put_task_struct(parent_tsk);

	return ret;
}
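sec_check_execpath() is likewise external to both listings. A hedged approximation of its behavior, assuming the usual get_mm_exe_file()/d_path() pattern rather than the verbatim vendor code: resolve the path of the executable backing mm and match it against a prefix.

static int sec_check_execpath(struct mm_struct *mm, char *denypath)
{
	struct file *exe_file;
	char *path, *buf;
	int ret = 0;

	exe_file = get_mm_exe_file(mm);
	if (!exe_file)
		return 0;

	buf = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!buf)
		goto out;

	path = d_path(&exe_file->f_path, buf, PATH_MAX);
	if (!IS_ERR(path) && !strncmp(path, denypath, strlen(denypath)))
		ret = 1;

	kfree(buf);
out:
	fput(exe_file);
	return ret;
}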
Example #6
static long ugidctl_setgroups(struct ugidctl_context *ctx, void __user *arg)
{
	struct ugidctl_setgroups_rq req;
	enum pid_type ptype;
	gid_t __user *list;
	struct cred *cred;
	unsigned i, count;
	gid_t *bulk;
	pid_t pid;
	long rc;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	arg += sizeof(req);
	list = arg;

	count = (unsigned) req.count;

	if (count > NGROUPS_MAX)
		return -EINVAL;

	if (!count)
		return ugidctl_sys_setgroups(0, arg);

	if (capable(CAP_SETGID))
		return ugidctl_sys_setgroups((int) count, list);

	if (memcmp(ctx->key, req.key, sizeof(ctx->key)))
		return -EPERM;

	mutex_lock(&ctx->lock);

	ptype = ctx->ptype;
	pid = ctx->pid;

	mutex_unlock(&ctx->lock);

	{
		struct pid *cur = get_task_pid(current, ptype);
		pid_t cur_nr = pid_nr(cur);

		put_pid(cur);	/* get_task_pid() takes a reference */
		if (pid != cur_nr)
			return -EPERM;
	}

	bulk = kmalloc(count > UGIDCTL_BULKSIZE ?
		       sizeof(gid_t) * UGIDCTL_BULKSIZE :
		       sizeof(gid_t) * count, GFP_KERNEL);
	if (!bulk)
		return -ENOMEM;

	while (count) {
		unsigned size = count > UGIDCTL_BULKSIZE ?
				UGIDCTL_BULKSIZE : count;

		if (copy_from_user(bulk, arg, sizeof(gid_t) * size)) {
			kfree(bulk);	/* don't leak the bulk buffer */
			return -EFAULT;
		}

		mutex_lock(&ctx->lock);

		for (i = 0; i < size; i++) {
			if (ugidctl_find_gid(ctx, bulk[i])) {
				mutex_unlock(&ctx->lock);
				kfree(bulk);
				return -EPERM;
			}
		}

		mutex_unlock(&ctx->lock);

		arg += sizeof(gid_t) * size;
		count -= size;
	}

	kfree(bulk);

	cred = prepare_creds();
	if (!cred)
		return -ENOMEM;

	cap_raise(cred->cap_effective, CAP_SETGID);

	commit_creds(cred);

	rc = ugidctl_sys_setgroups((int) req.count, list);

	cred = prepare_creds();
	if (!cred) {
		/* unable to restore process capabilities - kill process */
		do_exit(SIGKILL);
		return -ENOMEM;
	}

	cap_lower(cred->cap_effective, CAP_SETGID);

	commit_creds(cred);

	return rc;
}
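The temporary-capability pattern above is worth distilling. A minimal sketch (the wrapper name with_cap_raised() is invented): raise the capability on a fresh cred, do the privileged work, then drop it again, killing the task if the restoring cred cannot be allocated.

static long with_cap_raised(int cap, long (*fn)(void *), void *arg)
{
	struct cred *cred;
	long rc;

	cred = prepare_creds();
	if (!cred)
		return -ENOMEM;
	cap_raise(cred->cap_effective, cap);
	commit_creds(cred);

	rc = fn(arg);			/* runs with the extra capability */

	cred = prepare_creds();
	if (!cred)
		do_exit(SIGKILL);	/* cannot drop the capability again */
	cap_lower(cred->cap_effective, cap);
	commit_creds(cred);
	return rc;
}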
Example #7
static int oo_bufpage_huge_alloc(struct oo_buffer_pages *p, int *flags)
{
  int shmid = -1;
  long uaddr;
  static unsigned volatile last_key_id = 0;
  unsigned start_key_id;
  unsigned id;
  int rc;
  int restore_creds = 0;
#ifdef current_cred
  struct cred *creds;
#endif

  ci_assert( current->mm );

  /* sys_shmget(SHM_HUGETLB) needs CAP_IPC_LOCK.
   * So we grant this capability and later reset it.
   * Since we modify per-thread capabilities,
   * there are no side effects. */
#ifdef current_cred
  if (~current_cred()->cap_effective.cap[0] & (1 << CAP_IPC_LOCK)) {
    creds = prepare_creds();
    if( creds != NULL ) {
      creds->cap_effective.cap[0] |= 1 << CAP_IPC_LOCK;
      commit_creds(creds);
      restore_creds = 1;
    }
  }
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) || \
  !defined(CONFIG_SECURITY)
  /* we need security_capset_set to be inline here */

#ifdef STRICT_CAP_T_TYPECHECKS
#define cap2int(cap) ((cap).cap)
#else
#define cap2int(cap) (cap)
#endif

  if (~cap2int(current->cap_effective) & (1 << CAP_IPC_LOCK)) {
    /* This is bad.
     * We should take non-exported task_capability_lock.
     * Or we should use sys_capset, but we do not have
     * user-space memory to give it to syscall. */
    kernel_cap_t eff = current->cap_effective;
    cap2int(eff) |= 1 << CAP_IPC_LOCK;
    security_capset_set(current, &eff, &current->cap_inheritable,
                        &current->cap_permitted);
    restore_creds = 1;
  }
#elif LINUX_VERSION_CODE == KERNEL_VERSION(2,6,24)
  /* CONFIG_SECURITY, 2.6.24 */

#ifdef STRICT_CAP_T_TYPECHECKS
#define cap2int(cap) ((cap).cap)
#else
#define cap2int(cap) (cap)
#endif

  if (~cap2int(current->cap_effective) & (1 << CAP_IPC_LOCK)) {
    static int printed = 0;
    if (!printed) {
      ci_log("%s: can't allocate huge pages without CAP_IPC_LOCK", __func__);
      printed = 1;
    }
    return -EPERM;
  }
#else
  /* CONFIG_SECURITY, 2.6.25 <= linux <= 2.6.28
   * (2.6.29 is where current_cred defined) */
  if (~current->cap_effective.cap[0] & (1 << CAP_IPC_LOCK)) {
    static int printed = 0;
    if (!printed) {
      ci_log("%s: can't allocate huge pages without CAP_IPC_LOCK", __func__);
      printed = 1;
    }
    return -EPERM;
  }
#endif

  /* Simultaneous access to last_key_id is possible, but we do not care.
   * It is just a hint where we should look for free ids. */
  start_key_id = last_key_id;

  for (id = OO_SHM_NEXT_ID(start_key_id);
       id != start_key_id;
       id = OO_SHM_NEXT_ID(id)) {
    shmid = efab_linux_sys_shmget(OO_SHM_KEY(id), HPAGE_SIZE,
                                  SHM_HUGETLB | IPC_CREAT | IPC_EXCL |
                                  SHM_R | SHM_W);
    if (shmid == -EEXIST)
      continue; /* try another id */
    if (shmid < 0) {
      if (shmid == -ENOMEM && !(*flags & OO_IOBUFSET_FLAG_HUGE_PAGE_FAILED) )
        *flags |= OO_IOBUFSET_FLAG_HUGE_PAGE_FAILED;
      rc = shmid;
      goto out;
    }
    last_key_id = id;
    break;
  }
  if (shmid < 0) {
    ci_log("%s: Failed to allocate huge page: EEXIST", __func__);
    last_key_id = 0; /* reset last_key_id */
    rc = shmid;
    goto out;
  }

  /* We do not need a UL (userlevel) mapping, but the only way to
   * obtain the page is to create (and destroy) one. */
  uaddr = efab_linux_sys_shmat(shmid, NULL, 0);
  if (uaddr < 0) {
    rc = (int)uaddr;
    goto fail3;
  }

  down_read(&current->mm->mmap_sem);
  rc = get_user_pages(current, current->mm, (unsigned long)uaddr, 1,
                      1/*write*/, 0/*force*/, &(p->pages[0]), NULL);
  up_read(&current->mm->mmap_sem);
  if (rc < 0)
    goto fail2;
  rc = efab_linux_sys_shmdt((char __user *)uaddr);
  if (rc < 0)
    goto fail1;

  p->shmid = shmid;
  rc = 0;
  goto out;

fail1:
  put_page(p->pages[0]);
fail2:
  efab_linux_sys_shmdt((char __user *)uaddr);
fail3:
  efab_linux_sys_shmctl(shmid, IPC_RMID, NULL);
out:
  if (restore_creds) {
#ifdef current_cred
    creds = prepare_creds();
    if( creds != NULL ) {
      creds->cap_effective.cap[0] &= ~(1 << CAP_IPC_LOCK);
      commit_creds(creds);
    }
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
    kernel_cap_t eff = current->cap_effective;
    cap2int(eff) &= ~(1 << CAP_IPC_LOCK);
    security_capset_set(current, &eff, &current->cap_inheritable,
                        &current->cap_permitted);
#else
    ci_assert(0);
#endif
  }
  return rc;
}
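For comparison, the same shmget/shmat/shmdt sequence from userspace; the 2 MiB huge-page size is an assumption, and this allocation is what historically required CAP_IPC_LOCK and motivated the capability juggling above.

#include <stdio.h>
#include <sys/ipc.h>
#include <sys/shm.h>

#define HPAGE_SZ (2UL * 1024 * 1024)	/* assumption: 2 MiB huge pages */

int main(void)
{
	int shmid = shmget(IPC_PRIVATE, HPAGE_SZ,
			   SHM_HUGETLB | IPC_CREAT | 0600);
	void *p;

	if (shmid < 0) {
		perror("shmget(SHM_HUGETLB)");	/* EPERM without CAP_IPC_LOCK */
		return 1;
	}
	p = shmat(shmid, NULL, 0);
	if (p != (void *)-1)
		shmdt(p);
	shmctl(shmid, IPC_RMID, NULL);	/* destroy the segment */
	return 0;
}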