Example #1
int
ktrace_set_owning_pid(int pid)
{
	lck_mtx_assert(ktrace_lock, LCK_MTX_ASSERT_OWNED);

	/* allow user space to successfully unset owning pid */
	if (pid == -1) {
		ktrace_set_invalid_owning_pid();
		return 0;
	}

	/* use ktrace_reset or ktrace_release_ownership, not this */
	if (pid == 0) {
		ktrace_set_invalid_owning_pid();
		return EINVAL;
	}

	proc_t p = proc_find(pid);
	if (!p) {
		ktrace_set_invalid_owning_pid();
		return ESRCH;
	}

	ktrace_keep_ownership_on_reset = TRUE;
	ktrace_set_owning_proc(p);

	proc_rele(p);
	return 0;
}
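Every successful proc_find() in these examples is balanced by a proc_rele(): the pid either exists and a referenced proc_t is returned, or PROC_NULL comes back. A minimal sketch of that pairing (not taken from any project above; the header choice and the pid_exists() helper are illustrative only):

#include <sys/types.h>
#include <sys/proc.h>

/* Illustrative helper: report whether a pid currently exists.
 * proc_find() returns a referenced proc_t or PROC_NULL; the reference
 * must always be dropped with proc_rele(). */
static int
pid_exists(pid_t pid)
{
	proc_t p = proc_find(pid);

	if (p == PROC_NULL) {
		return 0;	/* no such process */
	}
	proc_rele(p);		/* balance the reference taken by proc_find() */
	return 1;
}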
Example #2
/* Validate whether the current process has privileges to access
 * kperf (and by extension, trace). Returns 0 if access is granted.
 */
int
kperf_access_check(void)
{
	proc_t p = current_proc();
	proc_t blessed_p;
	int ret = 0;
	boolean_t pid_gone = FALSE;

	/* check if the pid that held the lock is gone */
	blessed_p = proc_find(blessed_pid);

	if ( blessed_p != NULL )
		proc_rele(blessed_p);
	else
		pid_gone = TRUE;

	if ( blessed_pid == -1 || pid_gone ) {
		/* check for root */
		ret = suser(kauth_cred_get(), &p->p_acflag);
		if( !ret )
			return ret;
	}

	/* check against blessed pid */
	if( p->p_pid != blessed_pid )
		return EACCES;

	/* access granted. */
	return 0;
}
Example #3
/* If an owning process has exited, reset the ownership. */
static void
ktrace_ownership_maintenance(void)
{
	lck_mtx_assert(ktrace_lock, LCK_MTX_ASSERT_OWNED);

	/* do nothing if ktrace is not owned */
	if (ktrace_owning_unique_id == 0) {
		return;
	}

	/* reset ownership if process cannot be found */

	proc_t owning_proc = proc_find(ktrace_owning_pid);

	if (owning_proc != NULL) {
		/* make sure the pid was not recycled */
		if (proc_uniqueid(owning_proc) != ktrace_owning_unique_id) {
			ktrace_release_ownership();
		}

		proc_rele(owning_proc);
	} else {
		ktrace_release_ownership();
	}
}
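Example #3 guards against pid recycling by comparing proc_uniqueid() with a stored unique id before trusting the pid. A hedged sketch of the same pattern; the owner_token structure and the owner_still_valid() helper are hypothetical, introduced only for illustration:

#include <sys/types.h>
#include <sys/proc.h>
#include <mach/boolean.h>

/* Hypothetical token pairing a pid with the unique id of the process
 * that held it when ownership was recorded. */
struct owner_token {
	pid_t		pid;
	uint64_t	unique_id;
};

/* TRUE only if the pid still refers to the same incarnation of the
 * process, mirroring the recycling check in the maintenance routine. */
static boolean_t
owner_still_valid(const struct owner_token *tok)
{
	boolean_t valid = FALSE;
	proc_t p = proc_find(tok->pid);

	if (p != PROC_NULL) {
		valid = (proc_uniqueid(p) == tok->unique_id);
		proc_rele(p);
	}
	return valid;
}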
Example #4
/* Not called from probe context */
void
sprunlock(proc_t *p)
{
	if (p != PROC_NULL) {
		lck_mtx_unlock(&p->p_dtrace_sprlock);

		proc_unlock(p);

		task_resume(p->task);

		proc_rele(p);
	}
}
Example #5
/* Not called from probe context */
void
sprunlock(proc_t *p)
{
	if (p != PROC_NULL) {
		proc_unlock(p);

		dtrace_sprunlock(p);

		task_resume_internal(p->task);

		proc_rele(p);
	}
}
Example #6
int
mac_task_check_expose_task(struct task *task)
{
	int error;

	struct proc *p = mac_task_get_proc(task);
	if (p == NULL)
		return ESRCH;

	struct ucred *cred = kauth_cred_get();
	MAC_CHECK(proc_check_expose_task, cred, p);
	proc_rele(p);
	return (error);
}
Example #7
int mac_exc_action_label_update(struct task *task, struct exception_action *action) {
	if (task == kernel_task) {
		// The kernel may set exception ports without any check.
		return 0;
	}

	struct proc *p = mac_task_get_proc(task);
	if (p == NULL)
		return ESRCH;

	MAC_PERFORM(exc_action_label_update, p, action->label);
	proc_rele(p);
	return 0;
}
Example #8
//used only if exit extension
bool
SocketCookie::IsValid()
{
	//check socket
	
	//check last pid used
	proc_t p;
	p = proc_find(this->application->pid);
	if (p)
	{
		proc_rele(p);
		return true;
	}
	return false;
}
Example #9
/* specify a pid as being able to access kperf/trace, despite not
 * being root
 */
int
kperf_bless_pid(pid_t newpid)
{
	proc_t p = NULL;
	pid_t current_pid;

	p = current_proc();
	current_pid = p->p_pid;

	/* are we allowed to preempt? */
	if ( (newpid != -1) && (blessed_pid != -1) &&
	     (blessed_pid != current_pid) && !blessed_preempt ) {
		/* check if the pid that held the lock is gone */
		p = proc_find(blessed_pid);

		if ( p != NULL ) {
			proc_rele(p);
			return EACCES;
		}
	}

	/* validate new pid */
	if ( newpid != -1 ) {
		p = proc_find(newpid);

		if ( p == NULL )
			return EINVAL;

		proc_rele(p);
	}

	blessed_pid = newpid;
	blessed_preempt = FALSE;

	return 0;
}
Example #10
int
mac_task_check_set_host_special_port(struct task *task, int id, struct ipc_port *port)
{
	int error;

	struct proc *p = mac_task_get_proc(task);
	if (p == NULL)
		return ESRCH;

	kauth_cred_t cred = kauth_cred_proc_ref(p);
	MAC_CHECK(proc_check_set_host_special_port, cred, id, port);
	kauth_cred_unref(&cred);
	proc_rele(p);
	return (error);
}
Example #11
int
mac_task_check_set_host_exception_port(struct task *task, unsigned int exception)
{
	int error;

	struct proc *p = mac_task_get_proc(task);
	if (p == NULL)
		return ESRCH;

	kauth_cred_t cred = kauth_cred_proc_ref(p);
	MAC_CHECK(proc_check_set_host_exception_port, cred, exception);
	kauth_cred_unref(&cred);
	proc_rele(p);
	return (error);
}
Example #12
static struct proc *
mac_task_get_proc(struct task *task)
{
	if (task == current_task())
		return proc_self();

	/*
	 * Tasks don't really hold a reference on a proc unless the
	 * calling thread belongs to the task in question.
	 */
	int pid = task_pid(task);
	struct proc *p = proc_find(pid);

	if (p != NULL) {
		if (proc_task(p) == task)
			return p;
		proc_rele(p);
	}
	return NULL;
}
Example #13
/* 
 * KPI to determine if a pid is currently backgrounded. 
 * Returns ESRCH if pid cannot be found or has started exiting.
 * Returns EINVAL if state is NULL.
 * Sets *state to 1 if pid is backgrounded, and 0 otherwise.
 */
int
proc_pidbackgrounded(pid_t pid, uint32_t* state)
{
	proc_t target_proc = PROC_NULL;

	if (state == NULL)
		return(EINVAL);	

	target_proc = proc_find(pid);

	if (target_proc == PROC_NULL)
		return(ESRCH);

	if ( proc_get_effective_task_policy(target_proc->task, TASK_POLICY_DARWIN_BG) ) {
		*state = 1;
	} else {
		*state = 0;
	}

	proc_rele(target_proc);
	return (0);
}
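A hedged caller sketch for the proc_pidbackgrounded() KPI documented above; the log_backgrounded() wrapper and the header choices are assumptions made for illustration, and the error handling follows the documented return codes:

#include <sys/types.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/systm.h>

/* Illustrative wrapper: log whether a pid is currently backgrounded. */
static void
log_backgrounded(pid_t pid)
{
	uint32_t state = 0;
	int err = proc_pidbackgrounded(pid, &state);

	if (err == ESRCH) {
		printf("pid %d not found or already exiting\n", pid);
	} else if (err == 0) {
		printf("pid %d is %sbackgrounded\n", pid, state ? "" : "not ");
	}
}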
Example #14
/*
 * get the proc_t structure corresponding to a given process name
 */
proc_t
find_proc_by_name(char *name)
{
    // get pointer to kernel process
    proc_t all_proc = proc_find(0);
    // don't forget to drop reference
    proc_rele(all_proc);
    if (all_proc == PROC_NULL)
    {
#if DEBUG
        printf("[ERROR] Couldn't find all_proc!\n");
#endif
        return PROC_NULL;
    }
    
    // we need to lock before searching - proc_list_lock() and proc_list_unlock() aren't exported
    if (_proc_list_lock == NULL) _proc_list_lock = (void*)solve_kernel_symbol(&g_kernel_info, "_proc_list_lock");
    if (_proc_list_unlock == NULL) _proc_list_unlock = (void*)solve_kernel_symbol(&g_kernel_info, "_proc_list_unlock");
    
    _proc_list_lock();
    for (proc_t tmp = all_proc ; tmp != PROC_NULL; tmp = (proc_t)(tmp->p_list.le_prev))
    {
        char processname[MAXCOMLEN+1] = { 0 };
        strlcpy(processname, tmp->p_comm, MAXCOMLEN+1);
        if (strncmp(tmp->p_comm, name, sizeof(tmp->p_comm)) == 0)
        {
            _proc_list_unlock();
#if DEBUG
//            printf("[INFO] Found proc_t of %s\n", name);
#endif
            return tmp;
        }
    }
    _proc_list_unlock();
#if DEBUG
    printf("[ERROR] Couldn't find target proc %s\n", name);
#endif
    return PROC_NULL;
}
Example #15
int
mac_task_check_set_host_exception_ports(struct task *task, unsigned int exception_mask)
{
	int error = 0;
	int exception;

	struct proc *p = mac_task_get_proc(task);
	if (p == NULL)
		return ESRCH;

	kauth_cred_t cred = kauth_cred_proc_ref(p);
	for (exception = FIRST_EXCEPTION; exception < EXC_TYPES_COUNT; exception++) {
		if (exception_mask & (1 << exception)) {
			MAC_CHECK(proc_check_set_host_exception_port, cred, exception);
			if (error)
				break;
		}
	}
	kauth_cred_unref(&cred);
	proc_rele(p);
	return (error);
}
Example #16
/*
 *	Routine:	task_for_pid
 *	Purpose:
 *		Get the task port for another "process", named by its
 *		process ID on the same host as "target_task".
 *
 *		Only permitted to privileged processes, or processes
 *		with the same user ID.
 *
 * XXX This should be a BSD system call, not a Mach trap!!!
 */
kern_return_t
task_for_pid(
	struct task_for_pid_args *args)
{
	mach_port_name_t	target_tport = args->target_tport;
	int			pid = args->pid;
	user_addr_t		task_addr = args->t;
	struct uthread		*uthread;
	proc_t 			p = PROC_NULL;
	task_t			t1 = TASK_NULL;
	mach_port_name_t	tret = MACH_PORT_NULL;
 	ipc_port_t 		tfpport;
	void * sright;
	int error = 0;

	AUDIT_MACH_SYSCALL_ENTER(AUE_TASKFORPID);
	AUDIT_ARG(pid, pid);
	AUDIT_ARG(mach_port1, target_tport);

#if defined(SECURE_KERNEL)
	if (0 == pid) {
		(void ) copyout((char *)&t1, task_addr, sizeof(mach_port_name_t));
		AUDIT_MACH_SYSCALL_EXIT(KERN_FAILURE);
		return(KERN_FAILURE);
	}
#endif

	t1 = port_name_to_task(target_tport);
	if (t1 == TASK_NULL) {
		(void) copyout((char *)&t1, task_addr, sizeof(mach_port_name_t));
		AUDIT_MACH_SYSCALL_EXIT(KERN_FAILURE);
		return(KERN_FAILURE);
	} 


	/*
	 * Delayed binding of thread credential to process credential, if we
	 * are not running with an explicitly set thread credential.
	 */
	uthread = get_bsdthread_info(current_thread());
	kauth_cred_uthread_update(uthread, current_proc());

	p = proc_find(pid);
	AUDIT_ARG(process, p);

	if (!(task_for_pid_posix_check(p))) {
		error = KERN_FAILURE;
		goto tfpout;
	}

	if (p->task != TASK_NULL) {
		/* If we aren't root and target's task access port is set... */
		if (!kauth_cred_issuser(kauth_cred_get()) &&
			p != current_proc() &&
			(task_get_task_access_port(p->task, &tfpport) == 0) &&
			(tfpport != IPC_PORT_NULL)) {

			if (tfpport == IPC_PORT_DEAD) {
				error = KERN_PROTECTION_FAILURE;
				goto tfpout;
			}

			/* Call up to the task access server */
			error = check_task_access(tfpport, proc_selfpid(), kauth_getgid(), pid);

			if (error != MACH_MSG_SUCCESS) {
				if (error == MACH_RCV_INTERRUPTED)
					error = KERN_ABORTED;
				else
					error = KERN_FAILURE;
				goto tfpout;
			}
		}
#if CONFIG_MACF
		error = mac_proc_check_get_task(kauth_cred_get(), p);
		if (error) {
			error = KERN_FAILURE;
			goto tfpout;
		}
#endif

		/* Grant task port access */
		task_reference(p->task);
		sright = (void *) convert_task_to_port(p->task);
		tret = ipc_port_copyout_send(
				sright, 
				get_task_ipcspace(current_task()));
	} 
	error = KERN_SUCCESS;

tfpout:
	task_deallocate(t1);
	AUDIT_ARG(mach_port2, tret);
	(void) copyout((char *) &tret, task_addr, sizeof(mach_port_name_t));
	if (p != PROC_NULL)
		proc_rele(p);
	AUDIT_MACH_SYSCALL_EXIT(error);
	return(error);
}
Example #17
/* system call implementation */
int
process_policy(__unused struct proc *p, struct process_policy_args * uap, __unused int32_t *retval)
{
	int error = 0;
	int scope = uap->scope;
	int policy = uap->policy;
	int action = uap->action;
	int policy_subtype = uap->policy_subtype;
	user_addr_t attrp = uap->attrp;
	pid_t target_pid = uap->target_pid;
	uint64_t target_threadid = uap->target_threadid;
	proc_t target_proc = PROC_NULL;
#if CONFIG_MACF || !CONFIG_EMBEDDED
	proc_t curp = current_proc();
#endif
	kauth_cred_t my_cred;
#if CONFIG_EMBEDDED
	kauth_cred_t target_cred;
#endif

	if ((scope != PROC_POLICY_SCOPE_PROCESS) && (scope != PROC_POLICY_SCOPE_THREAD)) {
		return(EINVAL);
	}

	if (target_pid == 0 || target_pid == proc_selfpid())
		target_proc = proc_self();
	else
		target_proc = proc_find(target_pid);

	if (target_proc == PROC_NULL)
		return(ESRCH);

	my_cred = kauth_cred_get();

#if CONFIG_EMBEDDED
	target_cred = kauth_cred_proc_ref(target_proc);

	if (!kauth_cred_issuser(my_cred) && kauth_cred_getruid(my_cred) &&
	    kauth_cred_getuid(my_cred) != kauth_cred_getuid(target_cred) &&
	    kauth_cred_getruid(my_cred) != kauth_cred_getuid(target_cred))
#else
	/*
	 * Resource starvation control can be used by an unprivileged resource owner,
	 * provided the owner was privileged at the time of the ownership claim. This is
	 * checked in the low-resource handling routine, so bypass the checks here.
	 */
	if ((policy != PROC_POLICY_RESOURCE_STARVATION) && 
		(policy != PROC_POLICY_APPTYPE) && 
		(!kauth_cred_issuser(my_cred) && curp != p))
#endif
	{
		error = EPERM;
		goto out;
	}

#if CONFIG_MACF
	switch (policy) {
		case PROC_POLICY_BOOST:
		case PROC_POLICY_RESOURCE_USAGE:
#if CONFIG_EMBEDDED
		case PROC_POLICY_APPTYPE:
		case PROC_POLICY_APP_LIFECYCLE:
#endif
			/* These policies do their own appropriate mac checks */
			break;
		default:
			error = mac_proc_check_sched(curp, target_proc);
			if (error) goto out;
			break;
	}
#endif /* CONFIG_MACF */

	switch(policy) {
		case PROC_POLICY_BACKGROUND:
			error = ENOTSUP;
			break;
		case PROC_POLICY_HARDWARE_ACCESS:
			error = ENOTSUP;
			break;
		case PROC_POLICY_RESOURCE_STARVATION:
			error = handle_lowresource(scope, action, policy, policy_subtype, attrp, target_proc, target_threadid);
			break;
		case PROC_POLICY_RESOURCE_USAGE:
			switch(policy_subtype) {
				case PROC_POLICY_RUSAGE_NONE:
				case PROC_POLICY_RUSAGE_WIREDMEM:
				case PROC_POLICY_RUSAGE_VIRTMEM:
				case PROC_POLICY_RUSAGE_DISK:
				case PROC_POLICY_RUSAGE_NETWORK:
				case PROC_POLICY_RUSAGE_POWER:
					error = ENOTSUP;
					goto out;
				default:
					error = EINVAL;
					goto out;
				case PROC_POLICY_RUSAGE_CPU:
					break;
			}

			error = handle_cpuuse(action, attrp, target_proc, target_threadid);
			break;
#if CONFIG_EMBEDDED
		case PROC_POLICY_APP_LIFECYCLE:
			error = handle_applifecycle(scope, action, policy, policy_subtype, attrp, target_proc, target_threadid);
			break;
#endif /* CONFIG_EMBEDDED */
		case PROC_POLICY_APPTYPE:
			error = handle_apptype(scope, action, policy, policy_subtype, attrp, target_proc, target_threadid);
			break;
		case PROC_POLICY_BOOST:
			error = handle_boost(scope, action, policy, policy_subtype, attrp, target_proc, target_threadid);
			break;
		default:
			error = EINVAL;
			break;
	}

out:
	proc_rele(target_proc);
#if CONFIG_EMBEDDED
        kauth_cred_unref(&target_cred);
#endif
	return(error);
}
Example #18
int
ptrace(struct proc *p, struct ptrace_args *uap, register_t *retval)
{
	struct proc *t = current_proc();	/* target process */
	task_t		task;
	thread_t	th_act;
	struct uthread 	*ut;
	int tr_sigexc = 0;
	int error = 0;
	int stopped = 0;

	AUDIT_ARG(cmd, uap->req);
	AUDIT_ARG(pid, uap->pid);
	AUDIT_ARG(addr, uap->addr);
	AUDIT_ARG(value, uap->data);

	if (uap->req == PT_DENY_ATTACH) {
		proc_lock(p);
		if (ISSET(p->p_lflag, P_LTRACED)) {
			proc_unlock(p);
			exit1(p, W_EXITCODE(ENOTSUP, 0), retval);
			/* drop funnel before we return */
			thread_exception_return();
			/* NOTREACHED */
		}
		SET(p->p_lflag, P_LNOATTACH);
		proc_unlock(p);

		return(0);
	}

	if (uap->req == PT_FORCEQUOTA) {
		if (is_suser()) {
			OSBitOrAtomic(P_FORCEQUOTA, (UInt32 *)&t->p_flag);
			return (0);
		} else
			return (EPERM);
	}

	/*
	 *	Intercept and deal with "please trace me" request.
	 */	 
	if (uap->req == PT_TRACE_ME) {
		proc_lock(p);
		SET(p->p_lflag, P_LTRACED);
		/* Non-attached case, our tracer is our parent. */
		p->p_oppid = p->p_ppid;
		proc_unlock(p);
		return(0);
	}
	if (uap->req == PT_SIGEXC) {
		proc_lock(p);
		if (ISSET(p->p_lflag, P_LTRACED)) {
			SET(p->p_lflag, P_LSIGEXC);
			proc_unlock(p);
			return(0);
		} else {
			proc_unlock(p);
			return(EINVAL);
		}
	}

	/* 
	 * We do not want ptrace to do anything with kernel or launchd 
	 */
	if (uap->pid < 2) {
		return(EPERM);
	}

	/*
	 *	Locate victim, and make sure it is traceable.
	 */
	if ((t = proc_find(uap->pid)) == NULL)
			return (ESRCH);

	AUDIT_ARG(process, t);

	task = t->task;
	if (uap->req == PT_ATTACHEXC) {
		uap->req = PT_ATTACH;
		tr_sigexc = 1;
	}
	if (uap->req == PT_ATTACH) {
		int		err;
		
		if ( kauth_authorize_process(proc_ucred(p), KAUTH_PROCESS_CANTRACE, 
									 t, (uintptr_t)&err, 0, 0) == 0 ) {
			/* it's OK to attach */
			proc_lock(t);
			SET(t->p_lflag, P_LTRACED);
			if (tr_sigexc) 
				SET(t->p_lflag, P_LSIGEXC);
	
			t->p_oppid = t->p_ppid;
			proc_unlock(t);
			if (t->p_pptr != p)
				proc_reparentlocked(t, p, 1, 0);
	
			proc_lock(t);
			if (get_task_userstop(task) > 0 ) {
				stopped = 1;
			}
			t->p_xstat = 0;
			proc_unlock(t);
			psignal(t, SIGSTOP);
			/*
			 * If the process was stopped, wake up and run through
			 * issignal() again to properly connect to the tracing
			 * process.
			 */
			if (stopped)
				task_resume(task);       
			error = 0;
			goto out;
		}
		else {
			/* not allowed to attach, proper error code returned by kauth_authorize_process */
			if (ISSET(t->p_lflag, P_LNOATTACH)) {
				psignal(p, SIGSEGV);
			}
			
			error = err;
			goto out;
		}
	}

	/*
	 * You can't do what you want to the process if:
	 *	(1) It's not being traced at all,
	 */
	proc_lock(t);
	if (!ISSET(t->p_lflag, P_LTRACED)) {
		proc_unlock(t);
		error = EPERM;
		goto out;
	}

	/*
	 *	(2) it's not being traced by _you_, or
	 */
	if (t->p_pptr != p) {
		proc_unlock(t);
		error = EBUSY;
		goto out;
	}

	/*
	 *	(3) it's not currently stopped.
	 */
	if (t->p_stat != SSTOP) {
		proc_unlock(t);
		error = EBUSY;
		goto out;
	}

	/*
	 *	Mach version of ptrace executes request directly here,
	 *	thus simplifying the interaction of ptrace and signals.
	 */
	/* proc lock is held here */
	switch (uap->req) {

	case PT_DETACH:
		if (t->p_oppid != t->p_ppid) {
			struct proc *pp;

			proc_unlock(t);
			pp = proc_find(t->p_oppid);
			proc_reparentlocked(t, pp ? pp : initproc, 1, 0);
			if (pp != PROC_NULL)
				proc_rele(pp);
			proc_lock(t);
			
		}

		t->p_oppid = 0;
		CLR(t->p_lflag, P_LTRACED);
		CLR(t->p_lflag, P_LSIGEXC);
		proc_unlock(t);
		goto resume;
		
	case PT_KILL:
		/*
		 *	Tell child process to kill itself after it
		 *	is resumed by adding NSIG to p_cursig. [see issig]
		 */
		proc_unlock(t);
		psignal(t, SIGKILL);
		goto resume;

	case PT_STEP:			/* single step the child */
	case PT_CONTINUE:		/* continue the child */
		proc_unlock(t);
		th_act = (thread_t)get_firstthread(task);
		if (th_act == THREAD_NULL) {
			error = EINVAL;
			goto out;
		}

		if (uap->addr != (user_addr_t)1) {
#if defined(ppc)
#define ALIGNED(addr,size)	(((unsigned)(addr)&((size)-1))==0)
			if (!ALIGNED((int)uap->addr, sizeof(int)))
				return (ERESTART);
#undef 	ALIGNED
#endif
			thread_setentrypoint(th_act, uap->addr);
		}

		if ((unsigned)uap->data >= NSIG) {
			error = EINVAL;
			goto out;
		}

		if (uap->data != 0) {
			psignal(t, uap->data);
                }

		if (uap->req == PT_STEP) {
		        /*
			 * set trace bit
			 */
			if (thread_setsinglestep(th_act, 1) != KERN_SUCCESS) {
				error = ENOTSUP;
				goto out;
			}
		} else {
		        /*
			 * clear trace bit if on
			 */
			if (thread_setsinglestep(th_act, 0) != KERN_SUCCESS) {
				error = ENOTSUP;
				goto out;
			}
		}	
	resume:
		proc_lock(t);
		t->p_xstat = uap->data;
		t->p_stat = SRUN;
		if (t->sigwait) {
			wakeup((caddr_t)&(t->sigwait));
			proc_unlock(t);
			if ((t->p_lflag & P_LSIGEXC) == 0) {
				task_resume(task);
			}
		} else
			proc_unlock(t);
			
		break;
		
	case PT_THUPDATE:  {
		proc_unlock(t);
		if ((unsigned)uap->data >= NSIG) {
			error = EINVAL;
			goto out;
		}
		th_act = port_name_to_thread(CAST_DOWN(mach_port_name_t, uap->addr));
		if (th_act == THREAD_NULL)
			return (ESRCH);
		ut = (uthread_t)get_bsdthread_info(th_act);
		if (uap->data)
			ut->uu_siglist |= sigmask(uap->data);
		proc_lock(t);
		t->p_xstat = uap->data;
		t->p_stat = SRUN;
		proc_unlock(t);
		thread_deallocate(th_act);
		error = 0;
		}
		break;
	default:
		proc_unlock(t);
		error = EINVAL;
		goto out;
	}

	error = 0;
out:
	proc_rele(t);
	return(error);
}
Example #19
static void
proc_shutdown(void)
{
	vfs_context_t ctx = vfs_context_current();
	struct proc *p, *self;
	int delayterm = 0;
	struct sd_filterargs sfargs;
	struct sd_iterargs sdargs;
	int error = 0;
	struct timespec ts;

	/*
	 *	Kill as many procs as we can.  (Except ourself...)
	 */
	self = (struct proc *)current_proc();
	
	/*
	 * Signal init with SIGTERM so that it does not launch
	 * new processes.
	 */
	p = proc_find(1);
	if (p && p != self) {
		psignal(p, SIGTERM);
	}
	proc_rele(p);

	printf("Killing all processes ");

sigterm_loop:
	/*
	 * send SIGTERM to those procs interested in catching one
	 */
	sfargs.delayterm = delayterm;
	sfargs.shutdownstate = 0;
	sdargs.signo = SIGTERM;
	sdargs.setsdstate = 1;
	sdargs.countproc = 1;
	sdargs.activecount = 0;

	error = 0;
	/* post a SIGTERM to all that catch SIGTERM and not marked for delay */
	proc_rebootscan(sd_callback1, (void *)&sdargs, sd_filt1, (void *)&sfargs);

	if (sdargs.activecount != 0 && proc_shutdown_exitcount!= 0) {
		proc_list_lock();
		if (proc_shutdown_exitcount != 0) {
			/*
			 * now wait for up to 30 seconds to allow those procs catching SIGTERM
			 * to digest it
			 * as soon as these procs have exited, we'll continue on to the next step
			 */
			ts.tv_sec = 30;
			ts.tv_nsec = 0;
			error = msleep(&proc_shutdown_exitcount, proc_list_mlock, PWAIT, "shutdownwait", &ts);
			if (error != 0) {
				for (p = allproc.lh_first; p; p = p->p_list.le_next) {
					if ((p->p_listflag & P_LIST_EXITCOUNT) == P_LIST_EXITCOUNT)
						p->p_listflag &= ~P_LIST_EXITCOUNT;
				}
				for (p = zombproc.lh_first; p; p = p->p_list.le_next) {
					if ((p->p_listflag & P_LIST_EXITCOUNT) == P_LIST_EXITCOUNT)
						p->p_listflag &= ~P_LIST_EXITCOUNT;
				}
			}
			
		}
		proc_list_unlock();
	}
	if (error == ETIMEDOUT) {
		/*
		 * log the names of the unresponsive tasks
		 */


		proc_list_lock();

		for (p = allproc.lh_first; p; p = p->p_list.le_next) {
			if (p->p_shutdownstate == 1) {
				printf("%s[%d]: didn't act on SIGTERM\n", p->p_comm, p->p_pid);
				sd_log(ctx, "%s[%d]: didn't act on SIGTERM\n", p->p_comm, p->p_pid);
			}
		}

		proc_list_unlock();

		delay_for_interval(1000 * 5, 1000 * 1000);
	}

	/*
	 * send a SIGKILL to all the procs still hanging around
	 */
	sfargs.delayterm = delayterm;
	sfargs.shutdownstate = 2;
	sdargs.signo = SIGKILL;
	sdargs.setsdstate = 2;
	sdargs.countproc = 1;
	sdargs.activecount = 0;

	/* post a SIGKILL to all that catch SIGTERM and not marked for delay */
	proc_rebootscan(sd_callback2, (void *)&sdargs, sd_filt2, (void *)&sfargs);

	if (sdargs.activecount != 0 && proc_shutdown_exitcount!= 0) {
		proc_list_lock();
		if (proc_shutdown_exitcount != 0) {
			/*
			 * wait for up to 60 seconds to allow these procs to exit normally
			 *
			 * History:	The delay interval was changed from 100 to 200
			 *		for NFS requests in particular.
			 */
			ts.tv_sec = 60;
			ts.tv_nsec = 0;
			error = msleep(&proc_shutdown_exitcount, proc_list_mlock, PWAIT, "shutdownwait", &ts);
			if (error != 0) {
				for (p = allproc.lh_first; p; p = p->p_list.le_next) {
					if ((p->p_listflag & P_LIST_EXITCOUNT) == P_LIST_EXITCOUNT)
						p->p_listflag &= ~P_LIST_EXITCOUNT;
				}
				for (p = zombproc.lh_first; p; p = p->p_list.le_next) {
					if ((p->p_listflag & P_LIST_EXITCOUNT) == P_LIST_EXITCOUNT)
						p->p_listflag &= ~P_LIST_EXITCOUNT;
				}
			}
		}
		proc_list_unlock();
	}

	/*
	 * if we still have procs that haven't exited, then brute force 'em
	 */
	sfargs.delayterm = delayterm;
	sfargs.shutdownstate = 3;
	sdargs.signo = 0;
	sdargs.setsdstate = 3;
	sdargs.countproc = 0;
	sdargs.activecount = 0;

	/* post a SIGTERM to all that catch SIGTERM and not marked for delay */
	proc_rebootscan(sd_callback3, (void *)&sdargs, sd_filt2, (void *)&sfargs);
	printf("\n");

	/* Now start the termination of processes that are marked for delayed termination */
	if (delayterm == 0) {
		delayterm = 1;
		goto  sigterm_loop;
	}

	sd_closelog(ctx);

	/* drop the ref on initproc */
	proc_rele(initproc);
	printf("continuing\n");
}
Example #20
kern_return_t
task_name_for_pid(
	struct task_name_for_pid_args *args)
{
	mach_port_name_t	target_tport = args->target_tport;
	int			pid = args->pid;
	user_addr_t		task_addr = args->t;
	proc_t		p = PROC_NULL;
	task_t		t1;
	mach_port_name_t	tret;
	void * sright;
	int error = 0, refheld = 0;
	kauth_cred_t target_cred;

	AUDIT_MACH_SYSCALL_ENTER(AUE_TASKNAMEFORPID);
	AUDIT_ARG(pid, pid);
	AUDIT_ARG(mach_port1, target_tport);

	t1 = port_name_to_task(target_tport);
	if (t1 == TASK_NULL) {
		(void) copyout((char *)&t1, task_addr, sizeof(mach_port_name_t));
		AUDIT_MACH_SYSCALL_EXIT(KERN_FAILURE);
		return(KERN_FAILURE);
	} 

	p = proc_find(pid);
	if (p != PROC_NULL) {
		AUDIT_ARG(process, p);
		target_cred = kauth_cred_proc_ref(p);
		refheld = 1;

		if ((p->p_stat != SZOMB)
		    && ((current_proc() == p)
			|| kauth_cred_issuser(kauth_cred_get()) 
			|| ((kauth_cred_getuid(target_cred) == kauth_cred_getuid(kauth_cred_get())) && 
			    ((kauth_cred_getruid(target_cred) == kauth_getruid()))))) {

			if (p->task != TASK_NULL) {
				task_reference(p->task);
#if CONFIG_MACF
				error = mac_proc_check_get_task_name(kauth_cred_get(),  p);
				if (error) {
					task_deallocate(p->task);
					goto noperm;
				}
#endif
				sright = (void *)convert_task_name_to_port(p->task);
				tret = ipc_port_copyout_send(sright, 
						get_task_ipcspace(current_task()));
			} else
				tret  = MACH_PORT_NULL;

			AUDIT_ARG(mach_port2, tret);
			(void) copyout((char *)&tret, task_addr, sizeof(mach_port_name_t));
			task_deallocate(t1);
			error = KERN_SUCCESS;
			goto tnfpout;
		}
	}

#if CONFIG_MACF
noperm:
#endif
	task_deallocate(t1);
	tret = MACH_PORT_NULL;
	(void) copyout((char *) &tret, task_addr, sizeof(mach_port_name_t));
	error = KERN_FAILURE;
tnfpout:
	if (refheld != 0)
		kauth_cred_unref(&target_cred);
	if (p != PROC_NULL)
		proc_rele(p);
	AUDIT_MACH_SYSCALL_EXIT(error);
	return(error);
}
Example #21
/*
 *	Routine:	task_for_pid
 *	Purpose:
 *		Get the task port for another "process", named by its
 *		process ID on the same host as "target_task".
 *
 *		Only permitted to privileged processes, or processes
 *		with the same user ID.
 *
 *		Note: if pid == 0, an error is returned no matter who is calling.
 *
 * XXX This should be a BSD system call, not a Mach trap!!!
 */
kern_return_t
task_for_pid(
	struct task_for_pid_args *args)
{
	mach_port_name_t	target_tport = args->target_tport;
	int			pid = args->pid;
	user_addr_t		task_addr = args->t;
	proc_t 			p = PROC_NULL;
	task_t			t1 = TASK_NULL;
	mach_port_name_t	tret = MACH_PORT_NULL;
 	ipc_port_t 		tfpport;
	void * sright;
	int error = 0;

	AUDIT_MACH_SYSCALL_ENTER(AUE_TASKFORPID);
	AUDIT_ARG(pid, pid);
	AUDIT_ARG(mach_port1, target_tport);

	/* Always check if pid == 0 */
	if (pid == 0) {
		(void ) copyout((char *)&t1, task_addr, sizeof(mach_port_name_t));
		AUDIT_MACH_SYSCALL_EXIT(KERN_FAILURE);
		return(KERN_FAILURE);
	}

	t1 = port_name_to_task(target_tport);
	if (t1 == TASK_NULL) {
		(void) copyout((char *)&t1, task_addr, sizeof(mach_port_name_t));
		AUDIT_MACH_SYSCALL_EXIT(KERN_FAILURE);
		return(KERN_FAILURE);
	} 


	p = proc_find(pid);
	if (p == PROC_NULL) {
		error = KERN_FAILURE;
		goto tfpout;
	}

#if CONFIG_AUDIT
	AUDIT_ARG(process, p);
#endif

	if (!(task_for_pid_posix_check(p))) {
		error = KERN_FAILURE;
		goto tfpout;
	}

	if (p->task != TASK_NULL) {
		/* If we aren't root and target's task access port is set... */
		if (!kauth_cred_issuser(kauth_cred_get()) &&
			p != current_proc() &&
			(task_get_task_access_port(p->task, &tfpport) == 0) &&
			(tfpport != IPC_PORT_NULL)) {

			if (tfpport == IPC_PORT_DEAD) {
				error = KERN_PROTECTION_FAILURE;
				goto tfpout;
			}

			/* Call up to the task access server */
			error = check_task_access(tfpport, proc_selfpid(), kauth_getgid(), pid);

			if (error != MACH_MSG_SUCCESS) {
				if (error == MACH_RCV_INTERRUPTED)
					error = KERN_ABORTED;
				else
					error = KERN_FAILURE;
				goto tfpout;
			}
		}
#if CONFIG_MACF
		error = mac_proc_check_get_task(kauth_cred_get(), p);
		if (error) {
			error = KERN_FAILURE;
			goto tfpout;
		}
#endif

		/* Grant task port access */
		task_reference(p->task);
		extmod_statistics_incr_task_for_pid(p->task);

		sright = (void *) convert_task_to_port(p->task);
		tret = ipc_port_copyout_send(
				sright, 
				get_task_ipcspace(current_task()));
	} 
	error = KERN_SUCCESS;

tfpout:
	task_deallocate(t1);
	AUDIT_ARG(mach_port2, tret);
	(void) copyout((char *) &tret, task_addr, sizeof(mach_port_name_t));
	if (p != PROC_NULL)
		proc_rele(p);
	AUDIT_MACH_SYSCALL_EXIT(error);
	return(error);
}
Example #22
int
ptrace(struct proc *p, struct ptrace_args *uap, int32_t *retval)
{
	struct proc *t = current_proc();	/* target process */
	task_t		task;
	thread_t	th_act;
	struct uthread 	*ut;
	int tr_sigexc = 0;
	int error = 0;
	int stopped = 0;

	AUDIT_ARG(cmd, uap->req);
	AUDIT_ARG(pid, uap->pid);
	AUDIT_ARG(addr, uap->addr);
	AUDIT_ARG(value32, uap->data);

	if (uap->req == PT_DENY_ATTACH) {
		proc_lock(p);
		if (ISSET(p->p_lflag, P_LTRACED)) {
			proc_unlock(p);
			KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_FRCEXIT) | DBG_FUNC_NONE,
					      p->p_pid, W_EXITCODE(ENOTSUP, 0), 4, 0, 0);
			exit1(p, W_EXITCODE(ENOTSUP, 0), retval);

			thread_exception_return();
			/* NOTREACHED */
		}
		SET(p->p_lflag, P_LNOATTACH);
		proc_unlock(p);

		return(0);
	}

	if (uap->req == PT_FORCEQUOTA) {
		if (kauth_cred_issuser(kauth_cred_get())) {
			OSBitOrAtomic(P_FORCEQUOTA, &t->p_flag);
			return (0);
		} else
			return (EPERM);
	}

	/*
	 *	Intercept and deal with "please trace me" request.
	 */	 
	if (uap->req == PT_TRACE_ME) {
retry_trace_me:;
		proc_t pproc = proc_parent(p);
		if (pproc == NULL)
			return (EINVAL);
#if CONFIG_MACF
		/*
		 * NB: Cannot call kauth_authorize_process(..., KAUTH_PROCESS_CANTRACE, ...)
		 *     since that assumes the process being checked is the current process
		 *     when, in this case, it is the current process's parent.
		 *     Most of the other checks in cantrace() don't apply either.
		 */
		if ((error = mac_proc_check_debug(pproc, p)) == 0) {
#endif
			proc_lock(p);
			/* Make sure the process wasn't re-parented. */
			if (p->p_ppid != pproc->p_pid) {
				proc_unlock(p);
				proc_rele(pproc);
				goto retry_trace_me;
			}
			SET(p->p_lflag, P_LTRACED);
			/* Non-attached case, our tracer is our parent. */
			p->p_oppid = p->p_ppid;
			proc_unlock(p);
			/* Child and parent will have to be able to run modified code. */
			cs_allow_invalid(p);
			cs_allow_invalid(pproc);
#if CONFIG_MACF
		}
#endif
		proc_rele(pproc);
		return (error);
	}
	if (uap->req == PT_SIGEXC) {
		proc_lock(p);
		if (ISSET(p->p_lflag, P_LTRACED)) {
			SET(p->p_lflag, P_LSIGEXC);
			proc_unlock(p);
			return(0);
		} else {
			proc_unlock(p);
			return(EINVAL);
		}
	}

	/* 
	 * We do not want ptrace to do anything with kernel or launchd 
	 */
	if (uap->pid < 2) {
		return(EPERM);
	}

	/*
	 *	Locate victim, and make sure it is traceable.
	 */
	if ((t = proc_find(uap->pid)) == NULL)
			return (ESRCH);

	AUDIT_ARG(process, t);

	task = t->task;
	if (uap->req == PT_ATTACHEXC) {
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
		uap->req = PT_ATTACH;
		tr_sigexc = 1;
	}
	if (uap->req == PT_ATTACH) {
#pragma clang diagnostic pop
		int		err;


		if ( kauth_authorize_process(proc_ucred(p), KAUTH_PROCESS_CANTRACE, 
									 t, (uintptr_t)&err, 0, 0) == 0 ) {
			/* it's OK to attach */
			proc_lock(t);
			SET(t->p_lflag, P_LTRACED);
			if (tr_sigexc) 
				SET(t->p_lflag, P_LSIGEXC);
	
			t->p_oppid = t->p_ppid;
			/* Check whether child and parent are allowed to run modified
			 * code (they'll have to) */
			proc_unlock(t);
			cs_allow_invalid(t);
			cs_allow_invalid(p);
			if (t->p_pptr != p)
				proc_reparentlocked(t, p, 1, 0);
	
			proc_lock(t);
			if (get_task_userstop(task) > 0 ) {
				stopped = 1;
			}
			t->p_xstat = 0;
			proc_unlock(t);
			psignal(t, SIGSTOP);
			/*
			 * If the process was stopped, wake up and run through
			 * issignal() again to properly connect to the tracing
			 * process.
			 */
			if (stopped)
				task_resume(task);       
			error = 0;
			goto out;
		}
		else {
			/* not allowed to attach, proper error code returned by kauth_authorize_process */
			if (ISSET(t->p_lflag, P_LNOATTACH)) {
				psignal(p, SIGSEGV);
			}
			
			error = err;
			goto out;
		}
	}

	/*
	 * You can't do what you want to the process if:
	 *	(1) It's not being traced at all,
	 */
	proc_lock(t);
	if (!ISSET(t->p_lflag, P_LTRACED)) {
		proc_unlock(t);
		error = EPERM;
		goto out;
	}

	/*
	 *	(2) it's not being traced by _you_, or
	 */
	if (t->p_pptr != p) {
		proc_unlock(t);
		error = EBUSY;
		goto out;
	}

	/*
	 *	(3) it's not currently stopped.
	 */
	if (t->p_stat != SSTOP) {
		proc_unlock(t);
		error = EBUSY;
		goto out;
	}

	/*
	 *	Mach version of ptrace executes request directly here,
	 *	thus simplifying the interaction of ptrace and signals.
	 */
	/* proc lock is held here */
	switch (uap->req) {

	case PT_DETACH:
		if (t->p_oppid != t->p_ppid) {
			struct proc *pp;

			proc_unlock(t);
			pp = proc_find(t->p_oppid);
			if (pp != PROC_NULL) {
				proc_reparentlocked(t, pp, 1, 0);
				proc_rele(pp);
			} else {
				/* original parent exited while traced */
				proc_list_lock();
				t->p_listflag |= P_LIST_DEADPARENT;
				proc_list_unlock();
				proc_reparentlocked(t, initproc, 1, 0);
			}
			proc_lock(t);
		}

		t->p_oppid = 0;
		CLR(t->p_lflag, P_LTRACED);
		CLR(t->p_lflag, P_LSIGEXC);
		proc_unlock(t);
		goto resume;
		
	case PT_KILL:
		/*
		 *	Tell child process to kill itself after it
		 *	is resumed by adding NSIG to p_cursig. [see issig]
		 */
		proc_unlock(t);
#if CONFIG_MACF
		error = mac_proc_check_signal(p, t, SIGKILL);
		if (0 != error)
			goto resume;
#endif
		psignal(t, SIGKILL);
		goto resume;

	case PT_STEP:			/* single step the child */
	case PT_CONTINUE:		/* continue the child */
		proc_unlock(t);
		th_act = (thread_t)get_firstthread(task);
		if (th_act == THREAD_NULL) {
			error = EINVAL;
			goto out;
		}

		/* force use of Mach SPIs (and task_for_pid security checks) to adjust PC */
		if (uap->addr != (user_addr_t)1) {
			error = ENOTSUP;
			goto out;
		}

		if ((unsigned)uap->data >= NSIG) {
			error = EINVAL;
			goto out;
		}

		if (uap->data != 0) {
#if CONFIG_MACF
			error = mac_proc_check_signal(p, t, uap->data);
			if (0 != error)
				goto out;
#endif
			psignal(t, uap->data);
		}

		if (uap->req == PT_STEP) {
		        /*
			 * set trace bit 
			 * we use sending SIGSTOP as a comparable security check.
			 */
#if CONFIG_MACF
			error = mac_proc_check_signal(p, t, SIGSTOP);
			if (0 != error) {
				goto out;
			}
#endif
			if (thread_setsinglestep(th_act, 1) != KERN_SUCCESS) {
				error = ENOTSUP;
				goto out;
			}
		} else {
		        /*
			 * clear trace bit if on
			 * we use sending SIGCONT as a comparable security check.
			 */
#if CONFIG_MACF
			error = mac_proc_check_signal(p, t, SIGCONT);
			if (0 != error) {
				goto out;
			}
#endif
			if (thread_setsinglestep(th_act, 0) != KERN_SUCCESS) {
				error = ENOTSUP;
				goto out;
			}
		}	
	resume:
		proc_lock(t);
		t->p_xstat = uap->data;
		t->p_stat = SRUN;
		if (t->sigwait) {
			wakeup((caddr_t)&(t->sigwait));
			proc_unlock(t);
			if ((t->p_lflag & P_LSIGEXC) == 0) {
				task_resume(task);
			}
		} else
			proc_unlock(t);
			
		break;
		
	case PT_THUPDATE:  {
		proc_unlock(t);
		if ((unsigned)uap->data >= NSIG) {
			error = EINVAL;
			goto out;
		}
		th_act = port_name_to_thread(CAST_MACH_PORT_TO_NAME(uap->addr));
		if (th_act == THREAD_NULL) {
			error = ESRCH;
			goto out;
		}
		ut = (uthread_t)get_bsdthread_info(th_act);
		if (uap->data)
			ut->uu_siglist |= sigmask(uap->data);
		proc_lock(t);
		t->p_xstat = uap->data;
		t->p_stat = SRUN;
		proc_unlock(t);
		thread_deallocate(th_act);
		error = 0;
		}
		break;
	default:
		proc_unlock(t);
		error = EINVAL;
		goto out;
	}

	error = 0;
out:
	proc_rele(t);
	return(error);
}
Example #23
kern_return_t
catch_mach_exception_raise(
        __unused mach_port_t exception_port,
        mach_port_t thread,
        mach_port_t task,
        exception_type_t exception,
        mach_exception_data_t code,
        __unused mach_msg_type_number_t codeCnt
)
{
	task_t			self = current_task();
	thread_t		th_act;
	ipc_port_t 		thread_port;
	struct proc		*p;
	kern_return_t		result = MACH_MSG_SUCCESS;
	int			ux_signal = 0;
	mach_exception_code_t 	ucode = 0;
	struct uthread 		*ut;
	mach_port_name_t thread_name = CAST_MACH_PORT_TO_NAME(thread);
	mach_port_name_t task_name = CAST_MACH_PORT_TO_NAME(task);

	/*
	 *	Convert local thread name to global port.
	 */
   if (MACH_PORT_VALID(thread_name) &&
       (ipc_object_copyin(get_task_ipcspace(self), thread_name,
		       MACH_MSG_TYPE_PORT_SEND,
		       (void *) &thread_port) == MACH_MSG_SUCCESS)) {
        if (IPC_PORT_VALID(thread_port)) {
	   th_act = convert_port_to_thread(thread_port);
	   ipc_port_release(thread_port);
	} else {
	   th_act = THREAD_NULL;
	}

	/*
	 *	Catch bogus ports
	 */
	if (th_act != THREAD_NULL) {

	    /*
	     *	Convert exception to unix signal and code.
	     */
	    ux_exception(exception, code[0], code[1], &ux_signal, &ucode);

	    ut = get_bsdthread_info(th_act);
	    p = proc_findthread(th_act);

	    /* Can't deliver a signal without a bsd process reference */
	    if (p == NULL) {
		    ux_signal = 0;
		    result = KERN_FAILURE;
	    }

	    /*
	     * Stack overflow should result in a SIGSEGV signal
	     * on the alternate stack.
	     * but we have one or more guard pages after the
	     * stack top, so we would get a KERN_PROTECTION_FAILURE
	     * exception instead of KERN_INVALID_ADDRESS, resulting in
	     * a SIGBUS signal.
	     * Detect that situation and select the correct signal.
	     */
	    if (code[0] == KERN_PROTECTION_FAILURE &&
		ux_signal == SIGBUS) {
		    user_addr_t		sp, stack_min, stack_max;
		    int			mask;
		    struct sigacts	*ps;

		    sp = code[1];

		    stack_max = p->user_stack;
		    stack_min = p->user_stack - MAXSSIZ;
		    if (sp >= stack_min &&
			sp < stack_max) {
			    /*
			     * This is indeed a stack overflow.  Deliver a
			     * SIGSEGV signal.
			     */
			    ux_signal = SIGSEGV;

			    /*
			     * If the thread/process is not ready to handle
			     * SIGSEGV on an alternate stack, force-deliver
			     * SIGSEGV with a SIG_DFL handler.
			     */
			    mask = sigmask(ux_signal);
			    ps = p->p_sigacts;
			    if ((p->p_sigignore & mask) ||
				(ut->uu_sigwait & mask) ||
				(ut->uu_sigmask & mask) ||
				(ps->ps_sigact[SIGSEGV] == SIG_IGN) ||
				(! (ps->ps_sigonstack & mask))) {
				    p->p_sigignore &= ~mask;
				    p->p_sigcatch &= ~mask;
				    ps->ps_sigact[SIGSEGV] = SIG_DFL;
				    ut->uu_sigwait &= ~mask;
				    ut->uu_sigmask &= ~mask;
			    }
		    }
	    }
	    /*
	     *	Send signal.
	     */
	    if (ux_signal != 0) {
			ut->uu_exception = exception;
			//ut->uu_code = code[0]; // filled in by threadsignal
			ut->uu_subcode = code[1];			
			threadsignal(th_act, ux_signal, code[0]);
	    }
	    if (p != NULL) 
		    proc_rele(p);
	    thread_deallocate(th_act);
	}
	else
	    result = KERN_INVALID_ARGUMENT;
    }
    else
    	result = KERN_INVALID_ARGUMENT;

    /*
     *	Delete our send rights to the task port.
     */
    (void)mach_port_deallocate(get_task_ipcspace(ux_handler_self), task_name);

    return (result);
}
Example #24
/* ARGSUSED */
int
auditon(proc_t p, struct auditon_args *uap, __unused int32_t *retval)
{
	kauth_cred_t scred;
	int error = 0;
	union auditon_udata udata;
	proc_t tp = PROC_NULL;
	struct auditinfo_addr aia;

	AUDIT_ARG(cmd, uap->cmd);

#if CONFIG_MACF
	error = mac_system_check_auditon(kauth_cred_get(), uap->cmd);
	if (error)
		return (error);
#endif

	if ((uap->length <= 0) || (uap->length >
	    (int)sizeof(union auditon_udata)))
		return (EINVAL);

	memset((void *)&udata, 0, sizeof(udata));

	/*
	 * Some of the GET commands use the arguments too.
	 */
	switch (uap->cmd) {
	case A_SETPOLICY:
	case A_OLDSETPOLICY:
	case A_SETKMASK:
	case A_SETQCTRL:
	case A_OLDSETQCTRL:
	case A_SETSTAT:
	case A_SETUMASK:
	case A_SETSMASK:
	case A_SETCOND:
	case A_OLDSETCOND:
	case A_SETCLASS:
	case A_SETPMASK:
	case A_SETFSIZE:
	case A_SETKAUDIT:
	case A_GETCLASS:
	case A_GETPINFO:
	case A_GETPINFO_ADDR:
	case A_SENDTRIGGER:
	case A_GETSINFO_ADDR:
	case A_GETSFLAGS:
	case A_SETSFLAGS:
		error = copyin(uap->data, (void *)&udata, uap->length);
		if (error)
			return (error);
		AUDIT_ARG(auditon, &udata);
		AUDIT_ARG(len, uap->length);
		break;
	}

	/* Check appropriate privilege. */
	switch (uap->cmd) {
	/*
	 * A_GETSINFO doesn't require privilege, but only the superuser
	 * gets to see the audit masks. 
	 */
	case A_GETSINFO_ADDR:
		if ((sizeof(udata.au_kau_info) != uap->length) ||
	   		(audit_session_lookup(udata.au_kau_info.ai_asid,
					      &udata.au_kau_info) != 0))
			error = EINVAL;
		else if (!kauth_cred_issuser(kauth_cred_get())) {
			udata.au_kau_info.ai_mask.am_success = ~0;
			udata.au_kau_info.ai_mask.am_failure = ~0;
		}
		break;
	case A_GETSFLAGS:
	case A_SETSFLAGS:
		/* Getting one's own audit session flags requires no
		 * privilege.  Setting the flags is subject to access
		 * control implemented in audit_session_setaia().
		 */
		break;
	default:
		error = suser(kauth_cred_get(), &p->p_acflag);
		break;
	}
	if (error)
		return (error);

	/*
	 * XXX Need to implement these commands by accessing the global
	 * values associated with the commands.
	 */
	switch (uap->cmd) {
	case A_OLDGETPOLICY:
	case A_GETPOLICY:
		if (sizeof(udata.au_policy64) == uap->length) {
			mtx_lock(&audit_mtx);
			if (!audit_fail_stop)
				udata.au_policy64 |= AUDIT_CNT;
			if (audit_panic_on_write_fail)
				udata.au_policy64 |= AUDIT_AHLT;
			if (audit_argv)
				udata.au_policy64 |= AUDIT_ARGV;
			if (audit_arge)
				udata.au_policy64 |= AUDIT_ARGE;
			mtx_unlock(&audit_mtx);
			break;
		}
		if (sizeof(udata.au_policy) != uap->length)
			return (EINVAL);
		mtx_lock(&audit_mtx);
		if (!audit_fail_stop)
			udata.au_policy |= AUDIT_CNT;
		if (audit_panic_on_write_fail)
			udata.au_policy |= AUDIT_AHLT;
		if (audit_argv)
			udata.au_policy |= AUDIT_ARGV;
		if (audit_arge)
			udata.au_policy |= AUDIT_ARGE;
		mtx_unlock(&audit_mtx);
		break;

	case A_OLDSETPOLICY:
	case A_SETPOLICY:
		if (sizeof(udata.au_policy64) == uap->length) {
			if (udata.au_policy64 & ~(AUDIT_CNT|AUDIT_AHLT|
				AUDIT_ARGV|AUDIT_ARGE))
				return (EINVAL);
			mtx_lock(&audit_mtx);
			audit_fail_stop = ((udata.au_policy64 & AUDIT_CNT) ==
			    0);
			audit_panic_on_write_fail = (udata.au_policy64 &
			    AUDIT_AHLT);
			audit_argv = (udata.au_policy64 & AUDIT_ARGV);
			audit_arge = (udata.au_policy64 & AUDIT_ARGE);
			mtx_unlock(&audit_mtx);
			break;
		}	
		if ((sizeof(udata.au_policy) != uap->length) ||
		    (udata.au_policy & ~(AUDIT_CNT|AUDIT_AHLT|AUDIT_ARGV|
					 AUDIT_ARGE)))
			return (EINVAL);
		/*
		 * XXX - Need to wake up waiters if the policy relaxes?
		 */
		mtx_lock(&audit_mtx);
		audit_fail_stop = ((udata.au_policy & AUDIT_CNT) == 0);
		audit_panic_on_write_fail = (udata.au_policy & AUDIT_AHLT);
		audit_argv = (udata.au_policy & AUDIT_ARGV);
		audit_arge = (udata.au_policy & AUDIT_ARGE);
		mtx_unlock(&audit_mtx);
		break;

	case A_GETKMASK:
		if (sizeof(udata.au_mask) != uap->length)
			return (EINVAL);
		mtx_lock(&audit_mtx);
		udata.au_mask = audit_nae_mask;
		mtx_unlock(&audit_mtx);
		break;

	case A_SETKMASK:
		if (sizeof(udata.au_mask) != uap->length)
			return (EINVAL);
		mtx_lock(&audit_mtx);
		audit_nae_mask = udata.au_mask;
		AUDIT_CHECK_IF_KEVENTS_MASK(audit_nae_mask);
		mtx_unlock(&audit_mtx);
		break;

	case A_OLDGETQCTRL:
	case A_GETQCTRL:
		if (sizeof(udata.au_qctrl64) == uap->length) {
			mtx_lock(&audit_mtx);
			udata.au_qctrl64.aq64_hiwater =
			    (u_int64_t)audit_qctrl.aq_hiwater;
			udata.au_qctrl64.aq64_lowater =
			    (u_int64_t)audit_qctrl.aq_lowater;
			udata.au_qctrl64.aq64_bufsz =
			    (u_int64_t)audit_qctrl.aq_bufsz;
			udata.au_qctrl64.aq64_delay =
			    (u_int64_t)audit_qctrl.aq_delay;
			udata.au_qctrl64.aq64_minfree = 
			    (int64_t)audit_qctrl.aq_minfree;
			mtx_unlock(&audit_mtx);
			break;
		} 
		if (sizeof(udata.au_qctrl) != uap->length)
			return (EINVAL);
		mtx_lock(&audit_mtx);
		udata.au_qctrl = audit_qctrl;
		mtx_unlock(&audit_mtx);
		break;

	case A_OLDSETQCTRL:
	case A_SETQCTRL:
		if (sizeof(udata.au_qctrl64) == uap->length) {
			 if ((udata.au_qctrl64.aq64_hiwater > AQ_MAXHIGH) ||
			     (udata.au_qctrl64.aq64_lowater >= 
			      udata.au_qctrl64.aq64_hiwater) ||
			     (udata.au_qctrl64.aq64_bufsz > AQ_MAXBUFSZ) ||
			     (udata.au_qctrl64.aq64_minfree < 0) ||
			     (udata.au_qctrl64.aq64_minfree > 100))
				return (EINVAL);
			mtx_lock(&audit_mtx);
			audit_qctrl.aq_hiwater =
			     (int)udata.au_qctrl64.aq64_hiwater;
			audit_qctrl.aq_lowater =
			     (int)udata.au_qctrl64.aq64_lowater;
			audit_qctrl.aq_bufsz =
			     (int)udata.au_qctrl64.aq64_bufsz;
			audit_qctrl.aq_minfree = 
			    (int)udata.au_qctrl64.aq64_minfree;
			audit_qctrl.aq_delay = -1;  /* Not used. */
			mtx_unlock(&audit_mtx);
			break;
		}
		if ((sizeof(udata.au_qctrl) != uap->length) ||
		    (udata.au_qctrl.aq_hiwater > AQ_MAXHIGH) ||
		    (udata.au_qctrl.aq_lowater >= udata.au_qctrl.aq_hiwater) ||
		    (udata.au_qctrl.aq_bufsz > AQ_MAXBUFSZ) ||
		    (udata.au_qctrl.aq_minfree < 0) ||
		    (udata.au_qctrl.aq_minfree > 100))
			return (EINVAL);

		mtx_lock(&audit_mtx);
		audit_qctrl = udata.au_qctrl;
		/* XXX The queue delay value isn't used with the kernel. */
		audit_qctrl.aq_delay = -1;
		mtx_unlock(&audit_mtx);
		break;

	case A_GETCWD:
		return (ENOSYS);

	case A_GETCAR:
		return (ENOSYS);

	case A_GETSTAT:
		return (ENOSYS);

	case A_SETSTAT:
		return (ENOSYS);

	case A_SETUMASK:
		return (ENOSYS);

	case A_SETSMASK:
		return (ENOSYS);

	case A_OLDGETCOND:
	case A_GETCOND:
		if (sizeof(udata.au_cond64) == uap->length) {
			mtx_lock(&audit_mtx);
			if (audit_enabled && !audit_suspended)
				udata.au_cond64 = AUC_AUDITING;
			else
				udata.au_cond64 = AUC_NOAUDIT;
			mtx_unlock(&audit_mtx);
			break;
		}
		if (sizeof(udata.au_cond) != uap->length)
			return (EINVAL);
		mtx_lock(&audit_mtx);
		if (audit_enabled && !audit_suspended)
			udata.au_cond = AUC_AUDITING;
		else
			udata.au_cond = AUC_NOAUDIT;
		mtx_unlock(&audit_mtx);
		break;

	case A_OLDSETCOND:
	case A_SETCOND:
		if (sizeof(udata.au_cond64) == uap->length) {
			mtx_lock(&audit_mtx);
			if (udata.au_cond64 == AUC_NOAUDIT)
				audit_suspended = 1;
			if (udata.au_cond64 == AUC_AUDITING)
				audit_suspended = 0;
			if (udata.au_cond64 == AUC_DISABLED) {
				audit_suspended = 1;
				mtx_unlock(&audit_mtx);
				audit_shutdown();
				break;
			}
			mtx_unlock(&audit_mtx);
			break;
		}
		if (sizeof(udata.au_cond) != uap->length) {
			return (EINVAL);
		}
		mtx_lock(&audit_mtx);
		if (udata.au_cond == AUC_NOAUDIT)
			audit_suspended = 1;
		if (udata.au_cond == AUC_AUDITING)
			audit_suspended = 0;
		if (udata.au_cond == AUC_DISABLED) {
			audit_suspended = 1;
			mtx_unlock(&audit_mtx);
			audit_shutdown();
			break;
		}
		mtx_unlock(&audit_mtx);
		break;

	case A_GETCLASS:
		if (sizeof(udata.au_evclass) != uap->length)
			return (EINVAL);
		udata.au_evclass.ec_class = au_event_class(
		    udata.au_evclass.ec_number);
		break;

	case A_SETCLASS:
		if (sizeof(udata.au_evclass) != uap->length)
			return (EINVAL);
		au_evclassmap_insert(udata.au_evclass.ec_number,
		    udata.au_evclass.ec_class);
		break;

	case A_GETPINFO:
		if ((sizeof(udata.au_aupinfo) != uap->length) ||
		    IS_NOT_VALID_PID(udata.au_aupinfo.ap_pid))
			return (EINVAL);
		if ((tp = proc_find(udata.au_aupinfo.ap_pid)) == NULL)
			return (ESRCH);

		scred = kauth_cred_proc_ref(tp);
		if (scred->cr_audit.as_aia_p->ai_termid.at_type == AU_IPv6) {
			kauth_cred_unref(&scred);
			proc_rele(tp);
			return (EINVAL);
		}
		
		udata.au_aupinfo.ap_auid =
		    scred->cr_audit.as_aia_p->ai_auid;
		udata.au_aupinfo.ap_mask.am_success =
		    scred->cr_audit.as_mask.am_success;
		udata.au_aupinfo.ap_mask.am_failure =
		    scred->cr_audit.as_mask.am_failure;
		udata.au_aupinfo.ap_termid.machine =
		    scred->cr_audit.as_aia_p->ai_termid.at_addr[0];
		udata.au_aupinfo.ap_termid.port =
		    scred->cr_audit.as_aia_p->ai_termid.at_port;
		udata.au_aupinfo.ap_asid =
		    scred->cr_audit.as_aia_p->ai_asid;
		kauth_cred_unref(&scred);
		proc_rele(tp);
		tp = PROC_NULL;
		break;

	case A_SETPMASK:
		if ((sizeof(udata.au_aupinfo) != uap->length) ||
		    IS_NOT_VALID_PID(udata.au_aupinfo.ap_pid))
			return (EINVAL);
		if ((tp = proc_find(udata.au_aupinfo.ap_pid)) == NULL)
			return (ESRCH);
		scred = kauth_cred_proc_ref(tp);
		bcopy(scred->cr_audit.as_aia_p, &aia, sizeof(aia));
		kauth_cred_unref(&scred);
		aia.ai_mask.am_success =
		    udata.au_aupinfo.ap_mask.am_success;
		aia.ai_mask.am_failure =
		    udata.au_aupinfo.ap_mask.am_failure;
		AUDIT_CHECK_IF_KEVENTS_MASK(aia.ai_mask);
		error = audit_session_setaia(tp, &aia);
		proc_rele(tp);
		tp = PROC_NULL;
		if (error)
			return (error);
		break;

	case A_SETFSIZE:
		if ((sizeof(udata.au_fstat) != uap->length) ||
		    ((udata.au_fstat.af_filesz != 0) &&
		     (udata.au_fstat.af_filesz < MIN_AUDIT_FILE_SIZE)))
			return (EINVAL);
		mtx_lock(&audit_mtx);
		audit_fstat.af_filesz = udata.au_fstat.af_filesz;
		mtx_unlock(&audit_mtx);
		break;

	case A_GETFSIZE:
		if (sizeof(udata.au_fstat) != uap->length)
			return (EINVAL);
		mtx_lock(&audit_mtx);
		udata.au_fstat.af_filesz = audit_fstat.af_filesz;
		udata.au_fstat.af_currsz = audit_fstat.af_currsz;
		mtx_unlock(&audit_mtx);
		break;

	case A_GETPINFO_ADDR:
		if ((sizeof(udata.au_aupinfo_addr) != uap->length) ||
		    IS_NOT_VALID_PID(udata.au_aupinfo_addr.ap_pid))
			return (EINVAL);
		if ((tp = proc_find(udata.au_aupinfo.ap_pid)) == NULL)
			return (ESRCH);
		WARN_IF_AINFO_ADDR_CHANGED(uap->length,
		    sizeof(auditpinfo_addr_t), "auditon(A_GETPINFO_ADDR,...)",
		    "auditpinfo_addr_t");
		scred = kauth_cred_proc_ref(tp);
		udata.au_aupinfo_addr.ap_auid =
		    scred->cr_audit.as_aia_p->ai_auid;
		udata.au_aupinfo_addr.ap_asid =
		    scred->cr_audit.as_aia_p->ai_asid;
		udata.au_aupinfo_addr.ap_mask.am_success =
		    scred->cr_audit.as_mask.am_success;
		udata.au_aupinfo_addr.ap_mask.am_failure =
		    scred->cr_audit.as_mask.am_failure;
		bcopy(&scred->cr_audit.as_aia_p->ai_termid, 
		    &udata.au_aupinfo_addr.ap_termid,
		    sizeof(au_tid_addr_t));
		udata.au_aupinfo_addr.ap_flags =
		    scred->cr_audit.as_aia_p->ai_flags;
		kauth_cred_unref(&scred);
		proc_rele(tp);
		tp = PROC_NULL;
		break;

	case A_GETKAUDIT:
		if (sizeof(udata.au_kau_info) != uap->length) 
			return (EINVAL);
		audit_get_kinfo(&udata.au_kau_info);
		break;

	case A_SETKAUDIT:
		if ((sizeof(udata.au_kau_info) != uap->length) ||
		    (udata.au_kau_info.ai_termid.at_type != AU_IPv4 &&
		    udata.au_kau_info.ai_termid.at_type != AU_IPv6))
			return (EINVAL);
		audit_set_kinfo(&udata.au_kau_info);
		break;

	case A_SENDTRIGGER:
		if ((sizeof(udata.au_trigger) != uap->length) || 
		    (udata.au_trigger < AUDIT_TRIGGER_MIN) ||
		    (udata.au_trigger > AUDIT_TRIGGER_MAX))
			return (EINVAL);
		return (audit_send_trigger(udata.au_trigger));

	case A_GETSINFO_ADDR:
		/* Handled above before switch(). */
		break;

	case A_GETSFLAGS:
		if (sizeof(udata.au_flags) != uap->length)
			return (EINVAL);
		bcopy(&(kauth_cred_get()->cr_audit.as_aia_p->ai_flags),
		    &udata.au_flags, sizeof(udata.au_flags));
		break;

	case A_SETSFLAGS:
		if (sizeof(udata.au_flags) != uap->length)
			return (EINVAL);
		bcopy(kauth_cred_get()->cr_audit.as_aia_p, &aia, sizeof(aia));
		aia.ai_flags = udata.au_flags;
		error = audit_session_setaia(p, &aia);
		if (error)
			return (error);
		break;

	default:
		return (EINVAL);
	}

	/*
	 * Copy data back to userspace for the GET commands.
	 */
	switch (uap->cmd) {
	case A_GETPOLICY:
	case A_OLDGETPOLICY:
	case A_GETKMASK:
	case A_GETQCTRL:
	case A_OLDGETQCTRL:
	case A_GETCWD:
	case A_GETCAR:
	case A_GETSTAT:
	case A_GETCOND:
	case A_OLDGETCOND:
	case A_GETCLASS:
	case A_GETPINFO:
	case A_GETFSIZE:
	case A_GETPINFO_ADDR:
	case A_GETKAUDIT:
	case A_GETSINFO_ADDR:
	case A_GETSFLAGS:
		error = copyout((void *)&udata, uap->data, uap->length);
		if (error)
			return (ENOSYS);
		break;
	}

	return (0);
}