Example #1
0
int cap_capget (struct task_struct *target, kernel_cap_t *effective,
		kernel_cap_t *inheritable, kernel_cap_t *permitted)
{
	/* Derived from kernel/capability.c:sys_capget. */
	*effective = cap_t (target->cap_effective);
	*inheritable = cap_t (target->cap_inheritable);
	*permitted = cap_t (target->cap_permitted);
	return 0;
}
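
In kernels of this era, kernel_cap_t is effectively a single 32-bit bitmask and cap_t() just exposes that word, so the hook above is three plain copies. The stand-alone sketch below is not kernel code: CAP_TO_MASK is redefined locally as a one-bit-per-capability mask, and only the standard capability indices (CAP_SYS_RAWIO = 17, CAP_SYS_ADMIN = 21) are taken from the kernel headers.

#include <stdio.h>

/* Local stand-in mirroring the idea of the kernel's CAP_TO_MASK(). */
#define CAP_TO_MASK(x)	(1u << (x))
#define CAP_SYS_RAWIO	17
#define CAP_SYS_ADMIN	21

int main(void)
{
	unsigned int effective = CAP_TO_MASK(CAP_SYS_ADMIN) | CAP_TO_MASK(CAP_SYS_RAWIO);

	printf("effective set      = %#010x\n", effective);
	printf("has CAP_SYS_RAWIO? %s\n",
	       (effective & CAP_TO_MASK(CAP_SYS_RAWIO)) ? "yes" : "no");
	printf("has CAP_SYS_ADMIN? %s\n",
	       (effective & CAP_TO_MASK(CAP_SYS_ADMIN)) ? "yes" : "no");
	return 0;
}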
Example #2
0
static int badness(struct task_struct *p)
{
	int points, cpu_time, run_time;

	if (!p->mm)
		return 0;

	if (p->flags & PF_MEMDIE)
		return 0;

	/*
	 * The memory size of the process is the basis for the badness.
	 */
	points = p->mm->total_vm;

	/*
	 * CPU time is in seconds and run time is in minutes. There is no
	 * particular reason for this other than that it turned out to work
	 * very well in practice. This is not safe against jiffie wraps
	 * but we don't care _that_ much...
	 */
	cpu_time = (p->times.tms_utime + p->times.tms_stime) >> (SHIFT_HZ + 3);
	run_time = (jiffies - p->start_time) >> (SHIFT_HZ + 10);

	points /= int_sqrt(cpu_time);
	points /= int_sqrt(int_sqrt(run_time));

	/*
	 * Niced processes are most likely less important, so double
	 * their badness points.
	 */
	if (p->nice > 0)
		points *= 2;

	/*
	 * Superuser processes are usually more important, so we make it
	 * less likely that we kill those.
	 */
	if (cap_t(p->cap_effective) & CAP_TO_MASK(CAP_SYS_ADMIN) ||
				p->uid == 0 || p->euid == 0)
		points /= 4;

	/*
	 * We don't want to kill a process with direct hardware access.
	 * Not only could that mess up the hardware, but usually users
	 * tend to only have this flag set on applications they think
	 * of as important.
	 */
	if (cap_t(p->cap_effective) & CAP_TO_MASK(CAP_SYS_RAWIO))
		points /= 4;
#ifdef DEBUG
	printk(KERN_DEBUG "OOMkill: task %d (%s) got %d points\n",
	       p->pid, p->comm, points);
#endif
	return points;
}
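
To make the heuristic above concrete, here is a hypothetical worked example as a stand-alone user-space program. All of the numbers (total_vm, the tick counts, SHIFT_HZ = 7) are made up for illustration, isqrt() is a stand-in for the kernel's int_sqrt(), and the zero-divisor guards are added for this sketch only.

#include <stdio.h>

#define SHIFT_HZ 7	/* assumed: HZ = 100 gives SHIFT_HZ = 7 */

/* Stand-in for the kernel's int_sqrt(): naive integer square root. */
static unsigned long isqrt(unsigned long x)
{
	unsigned long r = 0;

	while ((r + 1) * (r + 1) <= x)
		r++;
	return r;
}

int main(void)
{
	/* Hypothetical process: values chosen only to walk the formula. */
	unsigned long total_vm = 200000;	/* pages, roughly 780 MiB with 4 KiB pages */
	unsigned long cpu_ticks = 90000;	/* tms_utime + tms_stime, in ticks */
	unsigned long run_ticks = 8000000;	/* jiffies - start_time */
	int nice = 0, is_root = 0, has_rawio = 0;
	unsigned long points, cpu_time, run_time, s;

	points = total_vm;
	cpu_time = cpu_ticks >> (SHIFT_HZ + 3);
	run_time = run_ticks >> (SHIFT_HZ + 10);

	s = isqrt(cpu_time);
	if (s)			/* guard added for this sketch; the kernel code has none */
		points /= s;
	s = isqrt(isqrt(run_time));
	if (s)
		points /= s;

	if (nice > 0)
		points *= 2;
	if (is_root)		/* CAP_SYS_ADMIN, uid 0 or euid 0 */
		points /= 4;
	if (has_rawio)		/* CAP_SYS_RAWIO */
		points /= 4;

	printf("badness = %lu points\n", points);	/* prints 11111 for these inputs */
	return 0;
}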
Example #3
0
asmlinkage long sys_capget(cap_user_header_t header, cap_user_data_t dataptr)
{
	int error, pid;
	__u32 version;
	struct task_struct *target;
	struct __user_cap_data_struct data;

	if (get_user(version, &header->version))
		return -EFAULT;

	error = -EINVAL;
	if (version != _LINUX_CAPABILITY_VERSION) {
		version = _LINUX_CAPABILITY_VERSION;
		if (put_user(version, &header->version))
			error = -EFAULT;
		return error;
	}

	if (get_user(pid, &header->pid))
		return -EFAULT;

	if (pid < 0)
		return -EINVAL;

	error = 0;

	spin_lock(&task_capability_lock);

	if (pid && pid != current->pid) {
		read_lock(&tasklist_lock);
		target = find_task_by_pid(pid);  /* identify target of query */
		if (!target)
			error = -ESRCH;
	} else {
		target = current;
	}

	if (!error) {
		data.permitted = cap_t(target->cap_permitted);
		data.inheritable = cap_t(target->cap_inheritable);
		data.effective = cap_t(target->cap_effective);
	}

	if (target != current)
		read_unlock(&tasklist_lock);
	spin_unlock(&task_capability_lock);

	if (!error) {
		if (copy_to_user(dataptr, &data, sizeof data))
			return -EFAULT;
	}

	return error;
}
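
For reference, the syscall above can be exercised directly from user space. This is a minimal sketch using the raw syscall and the v1 (32-bit) capability format that matches this example; in practice libcap's cap_get_proc() is the usual interface, and newer kernels prefer the 64-bit format.

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/capability.h>

int main(void)
{
	struct __user_cap_header_struct header;
	struct __user_cap_data_struct data;

	header.version = _LINUX_CAPABILITY_VERSION;	/* legacy 32-bit v1 format */
	header.pid = 0;					/* 0 queries the calling process */

	if (syscall(SYS_capget, &header, &data) != 0) {
		perror("capget");
		return 1;
	}

	printf("effective   = %#x\n", data.effective);
	printf("permitted   = %#x\n", data.permitted);
	printf("inheritable = %#x\n", data.inheritable);
	return 0;
}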
Example #4
0
void
nfsd_setuser(struct svc_rqst *rqstp, struct svc_export *exp)
{
	struct svc_cred	*cred = &rqstp->rq_cred;
	int		i;

	if (rqstp->rq_userset)
		return;

	if (exp->ex_flags & NFSEXP_ALLSQUASH) {
		cred->cr_uid = exp->ex_anon_uid;
		cred->cr_gid = exp->ex_anon_gid;
		cred->cr_groups[0] = NOGROUP;
	} else if (exp->ex_flags & NFSEXP_ROOTSQUASH) {
		if (!cred->cr_uid)
			cred->cr_uid = exp->ex_anon_uid;
		if (!cred->cr_gid)
			cred->cr_gid = exp->ex_anon_gid;
		for (i = 0; i < NGROUPS; i++)
			if (!cred->cr_groups[i])
				cred->cr_groups[i] = exp->ex_anon_gid;
	}

	if (cred->cr_uid != (uid_t) -1)
		current->fsuid = cred->cr_uid;
	else
		current->fsuid = exp->ex_anon_uid;
	if (cred->cr_gid != (gid_t) -1)
		current->fsgid = cred->cr_gid;
	else
		current->fsgid = exp->ex_anon_gid;
	for (i = 0; i < NGROUPS; i++) {
		gid_t group = cred->cr_groups[i];
		if (group == (gid_t) NOGROUP)
			break;
		current->groups[i] = group;
	}
	current->ngroups = i;

	if (cred->cr_uid) {
		cap_t(current->cap_effective) &= ~CAP_FS_MASK;
	} else {
		cap_t(current->cap_effective) |= (CAP_FS_MASK &
		                                  current->cap_permitted);
	}

	rqstp->rq_userset = 1;
}
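
The squashing rules above reduce to a small mapping on uids and gids. The sketch below replays only the NFSEXP_ROOTSQUASH case in user space; the anonymous id 65534 ("nobody") and the sample client ids are assumptions for illustration, not values taken from the kernel code.

#include <stdio.h>

#define ANON_UID 65534	/* assumed export anon id, conventionally "nobody" */

/* Mimics the NFSEXP_ROOTSQUASH branch above: only id 0 is remapped. */
static unsigned int root_squash(unsigned int id)
{
	return id == 0 ? ANON_UID : id;
}

int main(void)
{
	unsigned int uids[] = { 0, 1000, 33 };
	int i;

	for (i = 0; i < 3; i++)
		printf("client uid %u -> server fsuid %u\n",
		       uids[i], root_squash(uids[i]));
	return 0;
}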
Example #5
0
int cap_task_post_setuid (uid_t old_ruid, uid_t old_euid, uid_t old_suid,
			  int flags)
{
	switch (flags) {
	case LSM_SETID_RE:
	case LSM_SETID_ID:
	case LSM_SETID_RES:
		/* Copied from kernel/sys.c:setreuid/setuid/setresuid. */
		if (!issecure (SECURE_NO_SETUID_FIXUP)) {
			cap_emulate_setxuid (old_ruid, old_euid, old_suid);
		}
		break;
	case LSM_SETID_FS:
		{
			uid_t old_fsuid = old_ruid;

			/* Copied from kernel/sys.c:setfsuid. */

			/*
			 * FIXME - is fsuser used for all CAP_FS_MASK capabilities?
			 *          if not, we might be a bit too harsh here.
			 */

			if (!issecure (SECURE_NO_SETUID_FIXUP)) {
				if (old_fsuid == 0 && current->fsuid != 0) {
					cap_t (current->cap_effective) &=
					    ~CAP_FS_MASK;
				}
				if (old_fsuid != 0 && current->fsuid == 0) {
					cap_t (current->cap_effective) |=
					    (cap_t (current->cap_permitted) &
					     CAP_FS_MASK);
				}
			}
			break;
		}
	default:
		return -EINVAL;
	}

	return 0;
}
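
The LSM_SETID_FS branch is a two-way toggle on the file-system capabilities: dropping fsuid 0 clears them from the effective set, and regaining fsuid 0 restores whatever the permitted set still allows. A minimal user-space replay follows, with a placeholder value for CAP_FS_MASK (the real mask is version-dependent).

#include <stdio.h>

#define FS_MASK 0x1fU	/* placeholder for CAP_FS_MASK; the real value varies by kernel version */

static unsigned int fsuid_fixup(unsigned int old_fsuid, unsigned int new_fsuid,
				unsigned int effective, unsigned int permitted)
{
	if (old_fsuid == 0 && new_fsuid != 0)	/* dropping root fsuid: lose fs caps */
		effective &= ~FS_MASK;
	if (old_fsuid != 0 && new_fsuid == 0)	/* regaining root fsuid: restore from permitted */
		effective |= permitted & FS_MASK;
	return effective;
}

int main(void)
{
	unsigned int eff = 0xffU, perm = 0xffU;

	eff = fsuid_fixup(0, 1000, eff, perm);	/* root -> non-root */
	printf("after dropping fsuid 0:  effective = %#x\n", eff);

	eff = fsuid_fixup(1000, 0, eff, perm);	/* non-root -> root */
	printf("after regaining fsuid 0: effective = %#x\n", eff);
	return 0;
}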
Example #6
0
/*
 * We must be careful, though, never to send SIGKILL to a process with
 * CAP_SYS_RAWIO set; send SIGTERM instead (but it is unlikely that
 * we select a process with CAP_SYS_RAWIO set).
 */
void oom_kill_task(struct task_struct *p)
{
	printk(KERN_ERR "Out of Memory: Killed process %d (%s).\n", p->pid, p->comm);

	/*
	 * We give our sacrificial lamb high priority and access to
	 * all the memory it needs. That way it should be able to
	 * exit() and clear out its resources quickly...
	 */
	p->counter = 5 * HZ;

	/* This process has hardware access, be more careful. */
	if (cap_t(p->cap_effective) & CAP_TO_MASK(CAP_SYS_RAWIO)) {
		force_sig(SIGTERM, p);
	} else {
		force_sig(SIGKILL, p);
	}
}
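
The reason SIGTERM is the gentler choice here is that, unlike SIGKILL, it can be caught, so a process with raw hardware access gets a chance to quiesce its device before exiting. A small user-space illustration (the cleanup is just a message in this sketch):

#include <signal.h>
#include <unistd.h>

/* SIGTERM can be handled; SIGKILL cannot, which is why it is avoided above. */
static void on_term(int sig)
{
	static const char msg[] = "SIGTERM caught, cleaning up\n";

	(void)sig;
	/* A real raw-I/O process would put its hardware into a safe state here. */
	(void)write(STDOUT_FILENO, msg, sizeof(msg) - 1);
	_exit(0);
}

int main(void)
{
	signal(SIGTERM, on_term);
	raise(SIGTERM);		/* stands in for the OOM killer's force_sig(SIGTERM, p) */
	return 1;		/* not reached */
}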