Example no. 1
/*
 * pid_for_task
 *
 * Find the BSD process ID for the Mach task associated with the given Mach port 
 * name
 *
 * Parameters:	args		User argument descriptor (see below)
 *
 * Indirect parameters:	args->t		Mach port name
 * 			args->pid	Process ID (returned value; see below)
 *
 * Returns:	KERN_SUCCESS	Success
 * 		KERN_FAILURE	Not success
 *
 * Implicit returns: args->pid		Process ID
 *
 */
kern_return_t
pid_for_task(
	struct pid_for_task_args *args)
{
	mach_port_name_t	t = args->t;
	user_addr_t		pid_addr  = args->pid;  
	proc_t p;
	task_t		t1;
	int	pid = -1;
	kern_return_t	err = KERN_SUCCESS;

	AUDIT_MACH_SYSCALL_ENTER(AUE_PIDFORTASK);
	AUDIT_ARG(mach_port1, t);

	t1 = port_name_to_task(t);

	if (t1 == TASK_NULL) {
		err = KERN_FAILURE;
		goto pftout;
	} else {
		p = get_bsdtask_info(t1);
		if (p) {
			pid  = proc_pid(p);
			err = KERN_SUCCESS;
		} else {
			err = KERN_FAILURE;
		}
	}
	task_deallocate(t1);
pftout:
	AUDIT_ARG(pid, pid);
	(void) copyout((char *) &pid, pid_addr, sizeof(int));
	AUDIT_MACH_SYSCALL_EXIT(err);
	return(err);
}
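The trap above is reachable from ordinary user code through the pid_for_task() prototype in <mach/mach_traps.h>. A minimal, self-checking sketch (not part of the kernel sources above; error handling reduced to a single check):

/* illustrative user-space sketch, assuming the pid_for_task() prototype from <mach/mach_traps.h> */
#include <stdio.h>
#include <unistd.h>
#include <mach/mach.h>
#include <mach/mach_error.h>
#include <mach/mach_traps.h>

int main(void)
{
	int pid = -1;
	kern_return_t kr;

	/* mach_task_self() names our own task, so the result should equal getpid() */
	kr = pid_for_task(mach_task_self(), &pid);
	if (kr != KERN_SUCCESS) {
		fprintf(stderr, "pid_for_task: %s\n", mach_error_string(kr));
		return 1;
	}
	printf("pid_for_task: %d, getpid: %d\n", pid, (int)getpid());
	return 0;
}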
Example no. 2
int
_kernelrpc_mach_port_construct_trap(struct _kernelrpc_mach_port_construct_args *args)
{
	task_t task = port_name_to_task(args->target);
	mach_port_name_t name;
	int rv = MACH_SEND_INVALID_DEST;
	mach_port_options_t options;

	if (copyin(args->options, (char *)&options, sizeof (options))) {
		rv = MACH_SEND_INVALID_DATA;
		goto done;
	}

	if (task != current_task())
		goto done;

	rv = mach_port_construct(task->itk_space, &options, args->context, &name);
	if (rv == KERN_SUCCESS)
		rv = copyout(&name, args->name, sizeof (name));

done:
	if (task)
		task_deallocate(task);
	return (rv);
}
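From user space the same trap is normally reached through the mach_port_construct() wrapper. A hedged sketch, assuming the mach_port_options_t layout and MPO_INSERT_SEND_RIGHT flag from <mach/port.h>; construct_demo is a name chosen here for illustration:

/* illustrative user-space sketch, not from the XNU sources */
#include <mach/mach.h>

int construct_demo(void)
{
	mach_port_options_t opts = {
		.flags = MPO_INSERT_SEND_RIGHT	/* receive right plus one send right */
	};
	mach_port_name_t name = MACH_PORT_NULL;
	kern_return_t kr;

	kr = mach_port_construct(mach_task_self(), &opts, 0 /* context */, &name);
	if (kr != KERN_SUCCESS)
		return -1;

	/* ... use the port ... */

	/* tear it down: drop the one send right; no guard was installed */
	return (mach_port_destruct(mach_task_self(), name, -1, 0) == KERN_SUCCESS) ? 0 : -1;
}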
Example no. 3
void
thread_deallocate(
	thread_t			thread)
{
	task_t				task;

	if (thread == THREAD_NULL)
		return;

	if (thread_deallocate_internal(thread) > 0)
		return;

	ipc_thread_terminate(thread);

	task = thread->task;

#ifdef MACH_BSD 
	{
		void *ut = thread->uthread;

		thread->uthread = NULL;
		uthread_zone_free(ut);
	}
#endif  /* MACH_BSD */   

	task_deallocate(task);

	if (thread->kernel_stack != 0)
		stack_free(thread);

	machine_thread_destroy(thread);

	zfree(thread_zone, thread);
}
Example no. 4
int
_kernelrpc_mach_vm_purgable_control_trap(
	struct _kernelrpc_mach_vm_purgable_control_trap_args *args)
{
	int state;
	task_t task = port_name_to_task(args->target);
	int rv = MACH_SEND_INVALID_DEST;

	if (task != current_task())
		goto done;

	if (copyin(args->state, (char *)&state, sizeof (state)))
		goto done;

	rv = mach_vm_purgable_control(task->map,
				      args->address,
				      args->control,
				      &state);
	if (rv == KERN_SUCCESS)
		rv = copyout(&state, args->state, sizeof (state));
	
done:
	if (task)
		task_deallocate(task);
	return (rv);
}
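From user space this purgeable-control trap is usually exercised on a region allocated with VM_FLAGS_PURGABLE. A sketch assuming the constants from <mach/vm_statistics.h> and <mach/vm_purgable.h>; purgeable_demo is an illustrative name:

/* illustrative user-space sketch, not from the XNU sources */
#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <mach/vm_statistics.h>
#include <mach/vm_purgable.h>

int purgeable_demo(void)
{
	mach_vm_address_t addr = 0;
	mach_vm_size_t size = 4 * 4096;
	int state;
	kern_return_t kr;

	kr = mach_vm_allocate(mach_task_self(), &addr, size,
	    VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE);
	if (kr != KERN_SUCCESS)
		return -1;

	/* mark it volatile: the kernel may reclaim it under memory pressure */
	state = VM_PURGABLE_VOLATILE;
	kr = mach_vm_purgable_control(mach_task_self(), addr,
	    VM_PURGABLE_SET_STATE, &state);

	/* read the current state back */
	state = 0;
	if (kr == KERN_SUCCESS)
		kr = mach_vm_purgable_control(mach_task_self(), addr,
		    VM_PURGABLE_GET_STATE, &state);

	(void) mach_vm_deallocate(mach_task_self(), addr, size);
	return (kr == KERN_SUCCESS) ? state : -1;
}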
Example no. 5
void IOHIDLibUserClient::cleanupGated(void)
{
    if (fClient) {
        task_deallocate(fClient);
        fClient = 0;
    }

   if (fNub) {

        // First clear any remaining queues
        setStateForQueues(kHIDQueueStateClear);
        
        // Have been started so we better detach
        
        // make sure device is closed (especially on crash)
        // note radar #2729708 for a more comprehensive fix
        // probably should also subclass clientDied for crash specific code
        fNub->close(this, fCachedOptionBits);
    }

    if ( fResourceNotification ) {
        fResourceNotification->remove();
        fResourceNotification = 0;
    }

    if (fResourceES) {
        if ( fWL )
            fWL->removeEventSource(fResourceES);
        fResourceES->release();
        fResourceES = 0;
    }
}
Example no. 6
/*
 * fork_create_child
 *
 * Description:	Common operations associated with the creation of a child
 *		process
 *
 * Parameters:	parent_task		parent task
 *		child_proc		child process
 *		inherit_memory		TRUE, if the parent's address space is
 *					to be inherited by the child
 *		is64bit			TRUE, if the child being created will
 *					be associated with a 64 bit process
 *					rather than a 32 bit process
 *
 * Note:	This code is called in the fork() case, from the execve() call
 *		graph, if implementing an execve() following a vfork(), from
 *		the posix_spawn() call graph (which implicitly includes a
 *		vfork() equivalent call), and in the system bootstrap case.
 *
 *		It creates a new task and thread (and as a side effect of the
 *		thread creation, a uthread), which is then associated with the
 *		process 'child'.  If the parent process address space is to
 *		be inherited, then a flag indicates that the newly created
 *		task should inherit this from the parent task.
 *
 *		As a special concession to bootstrapping the initial process
 *		in the system, it's possible for 'parent_task' to be TASK_NULL;
 *		in this case, 'inherit_memory' MUST be FALSE.
 */
thread_t
fork_create_child(task_t parent_task, proc_t child_proc, int inherit_memory, int is64bit)
{
	thread_t	child_thread = NULL;
	task_t		child_task;
	kern_return_t	result;

	/* Create a new task for the child process */
	result = task_create_internal(parent_task,
					inherit_memory,
					is64bit,
					&child_task);
	if (result != KERN_SUCCESS) {
		printf("execve: task_create_internal failed.  Code: %d\n", result);
		goto bad;
	}

	/* Set the child process task to the new task */
	child_proc->task = child_task;

	/* Set child task process to child proc */
	set_bsdtask_info(child_task, child_proc);

	/* Propagate CPU limit timer from parent */
	if (timerisset(&child_proc->p_rlim_cpu))
		task_vtimer_set(child_task, TASK_VTIMER_RLIM);

	/* Set/clear 64 bit vm_map flag */
	if (is64bit)
		vm_map_set_64bit(get_task_map(child_task));
	else
		vm_map_set_32bit(get_task_map(child_task));

#if CONFIG_MACF
	/* Update task for MAC framework */
	/* valid to use p_ucred as child is still not running ... */
	mac_task_label_update_cred(child_proc->p_ucred, child_task);
#endif

	/*
	 * Set child process BSD visible scheduler priority if nice value
	 * inherited from parent
	 */
	if (child_proc->p_nice != 0)
		resetpriority(child_proc);

	/* Create a new thread for the child process */
	result = thread_create(child_task, &child_thread);
	if (result != KERN_SUCCESS) {
		printf("execve: thread_create failed. Code: %d\n", result);
		task_deallocate(child_task);
		child_task = NULL;
	}
bad:
	thread_yield_internal(1);

	return(child_thread);
}
Example no. 7
IOReturn IOHIDEventSystemUserClient::clientClose( void )
{
    if (client) {
        task_deallocate(client);
        client = 0;
    }

    return( kIOReturnSuccess);
}
Example no. 8
IOReturn IOHIDStackShotUserClient::clientClose( void )
{
    if (client) {
        task_deallocate(client);
        client = 0;
    }

    return( kIOReturnSuccess);
}
Example no. 9
IOReturn RootDomainUserClient::clientClose( void )
{
    detach(fOwner);

    if(fOwningTask) {
        task_deallocate(fOwningTask);
        fOwningTask = 0;
    }

    return kIOReturnSuccess;
}
Example no. 10
//==============================================================================
// IOHIDEventServiceUserClient::clientClose
//==============================================================================
IOReturn IOHIDEventServiceUserClient::clientClose( void )
{
   if (_client) {
        task_deallocate(_client);
        _client = 0;
    }
   
   if (_owner) {	
        _owner->close(this, _options);
        detach(_owner);
    }

    return kIOReturnSuccess;
}
Example no. 11
ipc_port_t
convert_task_to_port(
	task_t		task)
{
	ipc_port_t port;

	itk_lock(task);
	if (task->itk_self != IP_NULL)
		port = ipc_port_make_send(task->itk_self);
	else
		port = IP_NULL;
	itk_unlock(task);

	task_deallocate(task);
	return port;
}
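Note that convert_task_to_port() consumes the task reference it is handed and returns a naked send right. A caller that wants to keep its own reference therefore takes an extra one first, exactly as task_for_pid() does further below; a minimal sketch of that contract:

	/* extra reference for the converter to consume */
	task_reference(task);
	port = convert_task_to_port(task);
	/* ... our original reference to 'task' is still valid here ... */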
Example no. 12
int
_kernelrpc_mach_vm_deallocate_trap(struct _kernelrpc_mach_vm_deallocate_args *args)
{
	task_t task = port_name_to_task(args->target);
	int rv = MACH_SEND_INVALID_DEST;

	if (task != current_task())
		goto done;

	rv = mach_vm_deallocate(task->map, args->address, args->size);
	
done:
	if (task)
		task_deallocate(task);
	return (rv);
}
Example no. 13
int
_kernelrpc_mach_port_move_member_trap(struct _kernelrpc_mach_port_move_member_args *args)
{
	task_t task = port_name_to_task(args->target);
	int rv = MACH_SEND_INVALID_DEST;

	if (task != current_task())
		goto done;

	rv = mach_port_move_member(task->itk_space, args->member, args->after);
	
done:
	if (task)
		task_deallocate(task);
	return (rv);
}
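User space reaches this trap through mach_port_move_member(), typically to park a receive right in a port set. A small sketch; portset_demo is an illustrative name:

/* illustrative user-space sketch, not from the XNU sources */
#include <mach/mach.h>

int portset_demo(void)
{
	mach_port_t port = MACH_PORT_NULL, pset = MACH_PORT_NULL;
	kern_return_t kr;

	kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &port);
	if (kr != KERN_SUCCESS)
		return -1;
	kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_PORT_SET, &pset);
	if (kr != KERN_SUCCESS)
		return -1;

	/* this call lands in _kernelrpc_mach_port_move_member_trap above */
	kr = mach_port_move_member(mach_task_self(), port, pset);
	return (kr == KERN_SUCCESS) ? 0 : -1;
}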
Example no. 14
int
_kernelrpc_mach_port_unguard_trap(struct _kernelrpc_mach_port_unguard_args *args)
{
	task_t task = port_name_to_task(args->target);
	int rv = MACH_SEND_INVALID_DEST;

	if (task != current_task())
		goto done;

	rv = mach_port_unguard(task->itk_space, args->name, args->guard);
	
done:
	if (task)
		task_deallocate(task);
	return (rv);
}
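A guard is installed either at construction time (MPO_CONTEXT_AS_GUARD) or later with mach_port_guard(); the trap above removes it again. A sketch assuming the mach_port_guard()/mach_port_unguard() prototypes from <mach/mach_port.h>; guard_demo is an illustrative name:

/* illustrative user-space sketch, not from the XNU sources */
#include <mach/mach.h>

int guard_demo(void)
{
	mach_port_t port = MACH_PORT_NULL;
	mach_port_context_t guard = (mach_port_context_t)0x1234;
	kern_return_t kr;

	kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &port);
	if (kr != KERN_SUCCESS)
		return -1;

	/* a guarded port cannot be destroyed or moved without presenting the guard */
	kr = mach_port_guard(mach_task_self(), port, guard, FALSE /* not strict */);
	if (kr == KERN_SUCCESS)
		kr = mach_port_unguard(mach_task_self(), port, guard);

	/* drop the receive right */
	(void) mach_port_mod_refs(mach_task_self(), port, MACH_PORT_RIGHT_RECEIVE, -1);
	return (kr == KERN_SUCCESS) ? 0 : -1;
}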
Example no. 15
/*
 * fork
 *
 * Description:	fork system call.
 *
 * Parameters:	parent			Parent process to fork
 *		uap (void)		[unused]
 *		retval			Return value
 *
 * Returns:	0			Success
 *		EAGAIN			Resource unavailable, try again
 *
 * Notes:	Attempts to create a new child process which inherits state
 *		from the parent process.  If successful, the call returns
 *		having created an initially suspended child process with an
 *		extra Mach task and thread reference, for which the thread
 *		is initially suspended.  Until we resume the child process,
 *		it is not yet running.
 *
 *		The return information to the child is contained in the
 *		thread state structure of the new child, and does not
 *		become visible to the child through a normal return process,
 *		since it never made the call into the kernel itself in the
 *		first place.
 *
 *		After resuming the thread, this function returns directly to
 *		the parent process which invoked the fork() system call.
 *
 * Important:	The child thread_resume occurs before the parent returns;
 *		depending on scheduling latency, this means that it is not
 *		deterministic as to whether the parent or child is scheduled
 *		to run first.  It is entirely possible that the child could
 *		run to completion prior to the parent running.
 */
int
fork(proc_t parent_proc, __unused struct fork_args *uap, int32_t *retval)
{
	thread_t child_thread;
	int err;

	retval[1] = 0;		/* flag parent return for user space */

	if ((err = fork1(parent_proc, &child_thread, PROC_CREATE_FORK, NULL)) == 0) {
		task_t child_task;
		proc_t child_proc;

		/* Return to the parent */
		child_proc = (proc_t)get_bsdthreadtask_info(child_thread);
		retval[0] = child_proc->p_pid;

		/*
		 * Drop the signal lock on the child which was taken on our
		 * behalf by forkproc()/cloneproc() to prevent signals being
		 * received by the child in a partially constructed state.
		 */
		proc_signalend(child_proc, 0);
		proc_transend(child_proc, 0);

		/* flag the fork has occurred */
		proc_knote(parent_proc, NOTE_FORK | child_proc->p_pid);
		DTRACE_PROC1(create, proc_t, child_proc);

#if CONFIG_DTRACE
		if ((dtrace_proc_waitfor_hook = dtrace_proc_waitfor_exec_ptr) != NULL)
			(*dtrace_proc_waitfor_hook)(child_proc);
#endif

		/* "Return" to the child */
		proc_clear_return_wait(child_proc, child_thread);

		/* drop the extra references we got during the creation */
		if ((child_task = (task_t)get_threadtask(child_thread)) != NULL) {
			task_deallocate(child_task);
		}
		thread_deallocate(child_thread);
	}

	return(err);
}
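The "Important" note above is visible from plain user-space code: once fork() returns, parent and child race to their first printf, and either may run first. A minimal example:

/* illustrative user-space sketch, not from the XNU sources */
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t pid = fork();

	if (pid < 0) {			/* EAGAIN, etc. */
		perror("fork");
		return 1;
	}
	if (pid == 0) {			/* child: "returns" via its new thread state */
		printf("child %d running\n", (int)getpid());
		_exit(0);
	}
	/* parent: may be scheduled before or after the child, as the note says */
	printf("parent %d created child %d\n", (int)getpid(), (int)pid);
	waitpid(pid, NULL, 0);
	return 0;
}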
Example no. 16
int
_kernelrpc_mach_port_allocate_trap(struct _kernelrpc_mach_port_allocate_args *args)
{
	task_t task = port_name_to_task(args->target);
	mach_port_name_t name;
	int rv = MACH_SEND_INVALID_DEST;

	if (task != current_task())
		goto done;

	rv = mach_port_allocate(task->itk_space, args->right, &name);
	if (rv == KERN_SUCCESS)
		rv = copyout(&name, args->name, sizeof (name));

	
done:
	if (task)
		task_deallocate(task);
	return (rv);
}
Example no. 17
IOReturn AppleSmartBatteryManagerUserClient::clientClose( void )
{
    /* remove our request for exclusive SMBus access */
    if (kSBExclusiveSMBusAccessType == fUserClientType) {
        fOwner->requestExclusiveSMBusAccess(false);
    }

    detach(fOwner);

    if(fOwningTask) {
        task_deallocate(fOwningTask);
        fOwningTask = 0;
    }

    // We only have one application client. If the app is closed,
    // we can terminate the user client.
    terminate();

    return kIOReturnSuccess;
}
Example no. 18
void
thread_deallocate(
	thread_t			thread)
{
	task_t				task;

	if (thread == THREAD_NULL)
		return;

	if (thread_deallocate_internal(thread) > 0)
		return;


	ipc_thread_terminate(thread);

	task = thread->task;

#ifdef MACH_BSD 
	{
		void *ut = thread->uthread;

		thread->uthread = NULL;
		uthread_zone_free(ut);
	}
#endif  /* MACH_BSD */   

	if (thread->t_ledger)
		ledger_dereference(thread->t_ledger);
	if (thread->t_threadledger)
		ledger_dereference(thread->t_threadledger);

	if (thread->kernel_stack != 0)
		stack_free(thread);

	lck_mtx_destroy(&thread->mutex, &thread_lck_grp);
	machine_thread_destroy(thread);

	task_deallocate(task);

	zfree(thread_zone, thread);
}
Example no. 19
__private_extern__ kern_return_t
chudxnu_free_task_list(
	task_array_t		*task_list,
	mach_msg_type_number_t	*count)
{
	vm_size_t size = (*count)*sizeof(mach_port_t);
	void *addr = *task_list;

	if(addr) {
		int i, maxCount = *count;
		for(i=0; i<maxCount; i++) {
			task_deallocate((*task_list)[i]);
		}		
		kfree(addr, size);
		*task_list = NULL;
		*count = 0;
		return KERN_SUCCESS;
	} else {
		return KERN_FAILURE;
	}
}
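The user-space mirror of this cleanup contract is the task list returned by processor_set_tasks(): every element is a task send right and the array itself is out-of-line memory, so both must be released by the caller. A heavily hedged sketch, assuming the host/processor-set routines pulled in via <mach/mach.h> and a caller privileged enough to obtain the host-priv port; list_and_release_tasks is an illustrative name:

/* illustrative user-space sketch, not from the XNU sources; requires privileges */
#include <mach/mach.h>

int list_and_release_tasks(void)
{
	processor_set_name_array_t psets;
	processor_set_t pset;
	task_array_t tasks;
	mach_msg_type_number_t pset_count, task_count, i;
	kern_return_t kr;

	kr = host_processor_sets(mach_host_self(), &psets, &pset_count);
	if (kr != KERN_SUCCESS)
		return -1;
	kr = host_processor_set_priv(mach_host_self(), psets[0], &pset);
	if (kr != KERN_SUCCESS)
		return -1;

	kr = processor_set_tasks(pset, &tasks, &task_count);
	if (kr != KERN_SUCCESS)
		return -1;

	/* drop each task right, then unmap the array itself
	 * (the processor-set arrays are leaked here for brevity) */
	for (i = 0; i < task_count; i++)
		(void) mach_port_deallocate(mach_task_self(), tasks[i]);
	(void) vm_deallocate(mach_task_self(), (vm_address_t)tasks,
	    task_count * sizeof(task_t));
	return 0;
}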
Example no. 20
int
_kernelrpc_mach_vm_allocate_trap(struct _kernelrpc_mach_vm_allocate_trap_args *args)
{
	mach_vm_offset_t addr;
	task_t task = port_name_to_task(args->target);
	int rv = MACH_SEND_INVALID_DEST;

	if (task != current_task())
		goto done;

	if (copyin(args->addr, (char *)&addr, sizeof (addr)))
		goto done;

	rv = mach_vm_allocate(task->map, &addr, args->size, args->flags);
	if (rv == KERN_SUCCESS)
		rv = copyout(&addr, args->addr, sizeof (addr));
	
done:
	if (task)
		task_deallocate(task);
	return (rv);
}
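The corresponding user-space calls are mach_vm_allocate() and mach_vm_deallocate() on the caller's own task port. A short sketch; vm_demo is an illustrative name:

/* illustrative user-space sketch, not from the XNU sources */
#include <mach/mach.h>
#include <mach/mach_vm.h>

int vm_demo(void)
{
	mach_vm_address_t addr = 0;
	mach_vm_size_t size = 1 << 20;		/* 1 MiB */
	kern_return_t kr;

	kr = mach_vm_allocate(mach_task_self(), &addr, size, VM_FLAGS_ANYWHERE);
	if (kr != KERN_SUCCESS)
		return -1;

	/* ... use [addr, addr + size) ... */

	return (mach_vm_deallocate(mach_task_self(), addr, size) == KERN_SUCCESS) ? 0 : -1;
}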
Example no. 21
int
_kernelrpc_mach_port_insert_right_trap(struct _kernelrpc_mach_port_insert_right_args *args)
{
	task_t task = port_name_to_task(args->target);
	ipc_port_t port;
	mach_msg_type_name_t disp;
	int rv = MACH_SEND_INVALID_DEST;

	if (task != current_task())
		goto done;

	rv = ipc_object_copyin(task->itk_space, args->poly, args->polyPoly,
	    (ipc_object_t *)&port);
	if (rv != KERN_SUCCESS)
		goto done;
	disp = ipc_object_copyin_type(args->polyPoly);

	rv = mach_port_insert_right(task->itk_space, args->name, port, disp);
	
done:
	if (task)
		task_deallocate(task);
	return (rv);
}
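From user space, the usual pattern is to mint a send right for a receive right one already holds, using the MACH_MSG_TYPE_MAKE_SEND disposition. A sketch; insert_right_demo is an illustrative name:

/* illustrative user-space sketch, not from the XNU sources */
#include <mach/mach.h>

int insert_right_demo(void)
{
	mach_port_t port = MACH_PORT_NULL;
	kern_return_t kr;

	kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &port);
	if (kr != KERN_SUCCESS)
		return -1;

	/* MACH_MSG_TYPE_MAKE_SEND mints a new send right from the receive right we hold */
	kr = mach_port_insert_right(mach_task_self(), port, port,
	    MACH_MSG_TYPE_MAKE_SEND);
	return (kr == KERN_SUCCESS) ? 0 : -1;
}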
Example no. 22
int
_kernelrpc_mach_vm_map_trap(struct _kernelrpc_mach_vm_map_trap_args *args)
{
	mach_vm_offset_t addr;
	task_t task = port_name_to_task(args->target);
	int rv = MACH_SEND_INVALID_DEST;

	if (task != current_task())
		goto done;

	if (copyin(args->addr, (char *)&addr, sizeof (addr)))
		goto done;

	rv = mach_vm_map(task->map, &addr, args->size, args->mask, args->flags,
			IPC_PORT_NULL, 0, FALSE, args->cur_protection, VM_PROT_ALL,
			VM_INHERIT_DEFAULT);
	if (rv == KERN_SUCCESS)
		rv = copyout(&addr, args->addr, sizeof (addr));

done:
	if (task)
		task_deallocate(task);
	return (rv);
}
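Note the trap above pins max_protection to VM_PROT_ALL; the MIG-level mach_vm_map() routine exposes the full argument list. A sketch of an anonymous mapping with a null memory object; vm_map_demo is an illustrative name:

/* illustrative user-space sketch, not from the XNU sources */
#include <mach/mach.h>
#include <mach/mach_vm.h>

int vm_map_demo(void)
{
	mach_vm_address_t addr = 0;
	mach_vm_size_t size = 64 * 1024;
	kern_return_t kr;

	kr = mach_vm_map(mach_task_self(), &addr, size,
	    0,				/* alignment mask */
	    VM_FLAGS_ANYWHERE,
	    MACH_PORT_NULL, 0,		/* no named entry, offset 0 */
	    FALSE,			/* no copy */
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_INHERIT_DEFAULT);
	if (kr != KERN_SUCCESS)
		return -1;

	return (mach_vm_deallocate(mach_task_self(), addr, size) == KERN_SUCCESS) ? 0 : -1;
}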
Example no. 23
void
ipc_port_destroy(
	ipc_port_t	port)
{
	ipc_port_t pdrequest, nsrequest;
	ipc_mqueue_t mqueue;
	ipc_kmsg_t kmsg;

#if IMPORTANCE_INHERITANCE
	task_t release_imp_task = TASK_NULL;
	thread_t self = current_thread();
	boolean_t top = (self->ith_assertions == 0);
	natural_t assertcnt = 0;
#endif /* IMPORTANCE_INHERITANCE */

	assert(ip_active(port));
	/* port->ip_receiver_name is garbage */
	/* port->ip_receiver/port->ip_destination is garbage */
	assert(port->ip_pset_count == 0);
	assert(port->ip_mscount == 0);

	/* check for a backup port */
	pdrequest = port->ip_pdrequest;

#if IMPORTANCE_INHERITANCE
	/* determine how many assertions to drop and from whom */
	if (port->ip_tempowner != 0) {
		assert(top);
		if (port->ip_taskptr != 0) {
			release_imp_task = port->ip_imp_task;
			port->ip_imp_task = TASK_NULL;
			port->ip_taskptr = 0;
			assertcnt = port->ip_impcount;
		}
		/* Otherwise, nothing to drop */
	} else {
		assert(port->ip_taskptr == 0);
		assertcnt = port->ip_impcount;
		if (pdrequest != IP_NULL)
			/* mark in limbo for the journey */
			port->ip_tempowner = 1;
	}

	if (top)
		self->ith_assertions = assertcnt;
#endif /* IMPORTANCE_INHERITANCE */

	if (pdrequest != IP_NULL) {
		/* we assume the ref for pdrequest */
		port->ip_pdrequest = IP_NULL;

		/* make port be in limbo */
		port->ip_receiver_name = MACH_PORT_NULL;
		port->ip_destination = IP_NULL;
		ip_unlock(port);

		/* consumes our refs for port and pdrequest */
		ipc_notify_port_destroyed(pdrequest, port);

		goto drop_assertions;
	}

	/* once port is dead, we don't need to keep it locked */

	port->ip_object.io_bits &= ~IO_BITS_ACTIVE;
	port->ip_timestamp = ipc_port_timestamp();

	/*
	 * If the port has a preallocated message buffer and that buffer
 * is not in use, free it.  If it has an in-use one, then the kmsg
	 * free will detect that we freed the association and it can free it
	 * like a normal buffer.
	 */
	if (IP_PREALLOC(port)) {
		ipc_port_t inuse_port;

		kmsg = port->ip_premsg;
		assert(kmsg != IKM_NULL);
		inuse_port = ikm_prealloc_inuse_port(kmsg);
		IP_CLEAR_PREALLOC(port, kmsg);
		ip_unlock(port);
		if (inuse_port != IP_NULL) {
			assert(inuse_port == port);
		} else {
			ipc_kmsg_free(kmsg);
		}
	} else {
		ip_unlock(port);
	}

	/* throw away no-senders request */
	nsrequest = port->ip_nsrequest;
	if (nsrequest != IP_NULL)
		ipc_notify_send_once(nsrequest); /* consumes ref */

	/* destroy any queued messages */
	mqueue = &port->ip_messages;
	ipc_mqueue_destroy(mqueue);

	/* generate dead-name notifications */
	ipc_port_dnnotify(port);

	ipc_kobject_destroy(port);

	ip_release(port); /* consume caller's ref */

 drop_assertions:
#if IMPORTANCE_INHERITANCE
	if (release_imp_task != TASK_NULL) {
		if (assertcnt > 0) {
			assert(top);
			self->ith_assertions = 0;
			assert(release_imp_task->imp_receiver != 0);
			task_importance_drop_internal_assertion(release_imp_task, assertcnt);
		}
		task_deallocate(release_imp_task);

	} else if (assertcnt > 0) {
		if (top) {
			self->ith_assertions = 0;
			release_imp_task = current_task();
			if (release_imp_task->imp_receiver != 0) {
				task_importance_drop_internal_assertion(release_imp_task, assertcnt);
			}
		} else {
			/* the port chain we are enqueued on should cover our assertions */
			assert(assertcnt <= self->ith_assertions);
		}
	}
#endif /* IMPORTANCE_INHERITANCE */
}
Example no. 24
boolean_t
ipc_port_check_circularity(
	ipc_port_t	port,
	ipc_port_t	dest)
{
	ipc_port_t base;

#if IMPORTANCE_INHERITANCE
	task_t task = TASK_NULL;
	task_t release_task = TASK_NULL;
	int assertcnt = 0;
#endif /* IMPORTANCE_INHERITANCE */

	assert(port != IP_NULL);
	assert(dest != IP_NULL);

	if (port == dest)
		return TRUE;
	base = dest;

	/*
	 *	First try a quick check that can run in parallel.
	 *	No circularity if dest is not in transit.
	 */

	ip_lock(port);
	if (ip_lock_try(dest)) {
		if (!ip_active(dest) ||
		    (dest->ip_receiver_name != MACH_PORT_NULL) ||
		    (dest->ip_destination == IP_NULL))
			goto not_circular;

		/* dest is in transit; further checking necessary */

		ip_unlock(dest);
	}
	ip_unlock(port);

	ipc_port_multiple_lock(); /* massive serialization */

	/*
	 *	Search for the end of the chain (a port not in transit),
	 *	acquiring locks along the way.
	 */

	for (;;) {
		ip_lock(base);

		if (!ip_active(base) ||
		    (base->ip_receiver_name != MACH_PORT_NULL) ||
		    (base->ip_destination == IP_NULL))
			break;

		base = base->ip_destination;
	}

	/* all ports in chain from dest to base, inclusive, are locked */

	if (port == base) {
		/* circularity detected! */

		ipc_port_multiple_unlock();

		/* port (== base) is in limbo */

		assert(ip_active(port));
		assert(port->ip_receiver_name == MACH_PORT_NULL);
		assert(port->ip_destination == IP_NULL);

		while (dest != IP_NULL) {
			ipc_port_t next;

			/* dest is in transit or in limbo */

			assert(ip_active(dest));
			assert(dest->ip_receiver_name == MACH_PORT_NULL);

			next = dest->ip_destination;
			ip_unlock(dest);
			dest = next;
		}

		return TRUE;
	}

	/*
	 *	The guarantee:  lock port while the entire chain is locked.
	 *	Once port is locked, we can take a reference to dest,
	 *	add port to the chain, and unlock everything.
	 */

	ip_lock(port);
	ipc_port_multiple_unlock();

    not_circular:

	/* port is in limbo */

	assert(ip_active(port));
	assert(port->ip_receiver_name == MACH_PORT_NULL);
	assert(port->ip_destination == IP_NULL);

	ip_reference(dest);
	port->ip_destination = dest;

#if IMPORTANCE_INHERITANCE
	/* must have been in limbo or still bound to a task */
	assert(port->ip_tempowner != 0);

	if (port->ip_taskptr != 0) {
		/*
		 * We delayed dropping assertions from a specific task.
		 * Cache that info now (we'll drop assertions and the
		 * task reference below).
		 */
		release_task = port->ip_imp_task;
		port->ip_imp_task = TASK_NULL;
		port->ip_taskptr = 0;
	}
	assertcnt = port->ip_impcount;

	/* take the port out of limbo w.r.t. assertions */
	port->ip_tempowner = 0;

#endif /* IMPORTANCE_INHERITANCE */

	/* now unlock chain */

	ip_unlock(port);

	for (;;) {

#if IMPORTANCE_INHERITANCE
		/* every port along the chain tracks assertions behind it */
		dest->ip_impcount += assertcnt;
#endif /* IMPORTANCE_INHERITANCE */

		if (dest == base)
			break;

		/* port is in transit */

		assert(ip_active(dest));
		assert(dest->ip_receiver_name == MACH_PORT_NULL);
		assert(dest->ip_destination != IP_NULL);

#if IMPORTANCE_INHERITANCE
		assert(dest->ip_tempowner == 0);
#endif /* IMPORTANCE_INHERITANCE */

		port = dest->ip_destination;
		ip_unlock(dest);
		dest = port;
	}

	/* base is not in transit */
	assert(!ip_active(base) ||
	       (base->ip_receiver_name != MACH_PORT_NULL) ||
	       (base->ip_destination == IP_NULL));

#if IMPORTANCE_INHERITANCE
	/*
	 * Find the task to boost (if any).
	 * We will boost "through" ports that don't know
	 * about inheritance to deliver receive rights that
	 * do.
	 */
	if (ip_active(base) && (assertcnt > 0)) {
		if (base->ip_tempowner != 0) {
			if (base->ip_taskptr != 0)
				/* specified tempowner task */
				task = base->ip_imp_task;
			/* otherwise don't boost current task */

		} else if (base->ip_receiver_name != MACH_PORT_NULL) {
			ipc_space_t space = base->ip_receiver;

			/* only spaces with boost-accepting tasks */
			if (space->is_task != TASK_NULL &&
			    space->is_task->imp_receiver != 0)
				task = space->is_task;
		}

		/* take reference before unlocking base */
		if (task != TASK_NULL) {
			assert(task->imp_receiver != 0);
			task_reference(task);
		}
	}
#endif /* IMPORTANCE_INHERITANCE */

	ip_unlock(base);

#if IMPORTANCE_INHERITANCE
	/*
	 * Transfer assertions now that the ports are unlocked.
	 * Avoid extra overhead if transferring to/from the same task.
	 */
	boolean_t transfer_assertions = (task != release_task) ? TRUE : FALSE;

	if (task != TASK_NULL) {
		if (transfer_assertions)
			task_importance_hold_internal_assertion(task, assertcnt);
		task_deallocate(task);
		task = TASK_NULL;
	}

	if (release_task != TASK_NULL) {
		if (transfer_assertions)
			task_importance_drop_internal_assertion(release_task, assertcnt);
		task_deallocate(release_task);
		release_task = TASK_NULL;
	}
#endif /* IMPORTANCE_INHERITANCE */

	return FALSE;
}
Example no. 25
boolean_t
ipc_port_importance_delta(
	ipc_port_t 		port,
	mach_port_delta_t	delta)
{
	ipc_port_t next, base;
	task_t task = TASK_NULL;
	boolean_t dropped = FALSE;

	if (delta == 0)
		return FALSE;

	base = port;

	/* if port is in transit, have to search for end of chain */
	if (ip_active(port) &&
	    port->ip_destination != IP_NULL &&
	    port->ip_receiver_name == MACH_PORT_NULL) {

		dropped = TRUE;

		ip_unlock(port);
		ipc_port_multiple_lock(); /* massive serialization */
		ip_lock(base);

		while(ip_active(base) &&
		      base->ip_destination != IP_NULL &&
		      base->ip_receiver_name == MACH_PORT_NULL) {

			base = base->ip_destination;
			ip_lock(base);
		}
		ipc_port_multiple_unlock();
	}

	/* unlock down to the base, adding a boost at each level */
	for (;;) {
		port->ip_impcount += delta;

		if (port == base)
			break;

		/* port is in transit */
		assert(port->ip_tempowner == 0);
		next = port->ip_destination;
		ip_unlock(port);
		port = next;
	}

	/* find the task (if any) to boost according to the base */
	if (ip_active(base)) {
		if (base->ip_tempowner != 0) {
			if (base->ip_taskptr != 0)
				task = base->ip_imp_task;
			/* otherwise don't boost */

		} else if (base->ip_receiver_name != MACH_PORT_NULL) {
			ipc_space_t space = base->ip_receiver;

			/* only spaces with boost-accepting tasks */
			if (space->is_task != TASK_NULL &&
			    space->is_task->imp_receiver != 0)
				task = space->is_task;
		}
	}

	/*
	 * Only the base is locked.  If we have to hold or drop task
	 * importance assertions, we'll have to drop that lock as well.
	 */
	if (task != TASK_NULL) {
		/* take a reference before unlocking base */
		assert(task->imp_receiver != 0);
		task_reference(task);

		ip_unlock(base);
		dropped = TRUE;

		if (delta > 0)
			task_importance_hold_internal_assertion(task, delta);
		else
			task_importance_drop_internal_assertion(task, -delta);

		task_deallocate(task);
	} else if (dropped == TRUE) {
		ip_unlock(base);
	}

	return dropped;
}
Example no. 26
ipc_kmsg_t
ipc_kobject_server(
	ipc_kmsg_t	request,
	mach_msg_option_t __unused option)
{
	mach_msg_size_t reply_size;
	ipc_kmsg_t reply;
	kern_return_t kr;
	ipc_port_t *destp;
	ipc_port_t  replyp = IPC_PORT_NULL;
	mach_msg_format_0_trailer_t *trailer;
	mig_hash_t *ptr;
	task_t task = TASK_NULL;
	uint32_t exec_token;
	boolean_t exec_token_changed = FALSE;

	/*
	 * Find out corresponding mig_hash entry if any
	 */
	{
	    int key = request->ikm_header->msgh_id;
	    unsigned int i = (unsigned int)MIG_HASH(key);
	    int max_iter = mig_table_max_displ;

	    do {
		ptr = &mig_buckets[i++ % MAX_MIG_ENTRIES];
	    } while (key != ptr->num && ptr->num && --max_iter);

	    if (!ptr->routine || key != ptr->num) {
	        ptr = (mig_hash_t *)0;
		reply_size = mig_reply_size;
	    } else {
		reply_size = ptr->size;
#if	MACH_COUNTER
		ptr->callcount++;
#endif
	    }
	}

	/* round up for trailer size */
        reply_size += MAX_TRAILER_SIZE;
	reply = ipc_kmsg_alloc(reply_size);

	if (reply == IKM_NULL) {
		printf("ipc_kobject_server: dropping request\n");
		ipc_kmsg_trace_send(request, option);
		ipc_kmsg_destroy(request);
		return IKM_NULL;
	}

	/*
	 * Initialize reply message.
	 */
	{
#define	InP	((mach_msg_header_t *) request->ikm_header)
#define	OutP	((mig_reply_error_t *) reply->ikm_header)

	    /* 
	     * MIG should really assure no data leakage -
	     * but until it does, pessimistically zero the
	     * whole reply buffer.
	     */
	    bzero((void *)OutP, reply_size);

	    OutP->NDR = NDR_record;
	    OutP->Head.msgh_size = sizeof(mig_reply_error_t);

	    OutP->Head.msgh_bits =
		MACH_MSGH_BITS_SET(MACH_MSGH_BITS_LOCAL(InP->msgh_bits), 0, 0, 0);
	    OutP->Head.msgh_remote_port = InP->msgh_local_port;
	    OutP->Head.msgh_local_port = MACH_PORT_NULL;
	    OutP->Head.msgh_voucher_port = MACH_PORT_NULL;
	    OutP->Head.msgh_id = InP->msgh_id + 100;

#undef	InP
#undef	OutP
	}

	/*
	 * Find the routine to call, and call it
	 * to perform the kernel function
	 */
	ipc_kmsg_trace_send(request, option);
	{
	    if (ptr) {
		/*
		 * Check if the port is a task port, if its a task port then
		 * snapshot the task exec token before the mig routine call.
		 */
		ipc_port_t port = request->ikm_header->msgh_remote_port;
		if (IP_VALID(port) && ip_kotype(port) == IKOT_TASK) {
			task = convert_port_to_task_with_exec_token(port, &exec_token);
		}

		(*ptr->routine)(request->ikm_header, reply->ikm_header);

		/* Check if the exec token changed during the mig routine */
		if (task != TASK_NULL) {
			if (exec_token != task->exec_token) {
				exec_token_changed = TRUE;
			}
			task_deallocate(task);
		}

		kernel_task->messages_received++;
	    }
	    else {
		if (!ipc_kobject_notify(request->ikm_header, reply->ikm_header)){
#if DEVELOPMENT || DEBUG
		    printf("ipc_kobject_server: bogus kernel message, id=%d\n",
			request->ikm_header->msgh_id);
#endif	/* DEVELOPMENT || DEBUG */
		    _MIG_MSGID_INVALID(request->ikm_header->msgh_id);

		    ((mig_reply_error_t *) reply->ikm_header)->RetCode
			= MIG_BAD_ID;
		}
		else
		  kernel_task->messages_received++;
	    }
	    kernel_task->messages_sent++;
	}

	/*
	 *	Destroy destination. The following code differs from
	 *	ipc_object_destroy in that we release the send-once
	 *	right instead of generating a send-once notification
	 * 	(which would bring us here again, creating a loop).
	 *	It also differs in that we only expect send or
	 *	send-once rights, never receive rights.
	 *
	 *	We set msgh_remote_port to IP_NULL so that the kmsg
	 *	destroy routines don't try to destroy the port twice.
	 */
	destp = (ipc_port_t *) &request->ikm_header->msgh_remote_port;
	switch (MACH_MSGH_BITS_REMOTE(request->ikm_header->msgh_bits)) {
		case MACH_MSG_TYPE_PORT_SEND:
		    ipc_port_release_send(*destp);
		    break;
		
		case MACH_MSG_TYPE_PORT_SEND_ONCE:
		    ipc_port_release_sonce(*destp);
		    break;
		
		default:
		    panic("ipc_kobject_server: strange destination rights");
	}
	*destp = IP_NULL;

	/*
	 *	Destroy voucher.  The kernel MIG servers never take ownership
	 *	of vouchers sent in messages.  Swallow any such rights here.
	 */
	if (IP_VALID(request->ikm_voucher)) {
		assert(MACH_MSG_TYPE_PORT_SEND ==
		       MACH_MSGH_BITS_VOUCHER(request->ikm_header->msgh_bits));
		ipc_port_release_send(request->ikm_voucher);
		request->ikm_voucher = IP_NULL;
	}

        if (!(reply->ikm_header->msgh_bits & MACH_MSGH_BITS_COMPLEX) &&
           ((mig_reply_error_t *) reply->ikm_header)->RetCode != KERN_SUCCESS)
	 	kr = ((mig_reply_error_t *) reply->ikm_header)->RetCode;
	else
		kr = KERN_SUCCESS;

	if ((kr == KERN_SUCCESS) || (kr == MIG_NO_REPLY)) {
		/*
		 *	The server function is responsible for the contents
		 *	of the message.  The reply port right is moved
		 *	to the reply message, and we have deallocated
		 *	the destination port right, so we just need
		 *	to free the kmsg.
		 */
		ipc_kmsg_free(request);

	} else {
		/*
		 *	The message contents of the request are intact.
		 *	Destroy everything except the reply port right,
		 *	which is needed in the reply message.
		 */
		request->ikm_header->msgh_local_port = MACH_PORT_NULL;
		ipc_kmsg_destroy(request);
	}

	replyp = (ipc_port_t)reply->ikm_header->msgh_remote_port;

	if (kr == MIG_NO_REPLY) {
		/*
		 *	The server function will send a reply message
		 *	using the reply port right, which it has saved.
		 */

		ipc_kmsg_free(reply);

		return IKM_NULL;
	} else if (!IP_VALID(replyp)) {
		/*
		 *	Can't queue the reply message if the destination
		 *	(the reply port) isn't valid.
		 */

		ipc_kmsg_destroy(reply);

		return IKM_NULL;
	} else if (replyp->ip_receiver == ipc_space_kernel) {
		/*
		 * Don't send replies to kobject kernel ports
		 */
#if DEVELOPMENT || DEBUG
		printf("%s: refusing to send reply to kobject %d port (id:%d)\n",
		       __func__, ip_kotype(replyp),
		       request->ikm_header->msgh_id);
#endif	/* DEVELOPMENT || DEBUG */
		ipc_kmsg_destroy(reply);
		return IKM_NULL;
	}

	/* Fail the MIG call if the task exec token changed during the call */
	if (kr == KERN_SUCCESS && exec_token_changed) {
		/*
		 *	Create a new reply msg with error and destroy the old reply msg.
		 */
		ipc_kmsg_t new_reply = ipc_kmsg_alloc(reply_size);

		if (new_reply == IKM_NULL) {
			printf("ipc_kobject_server: dropping request\n");
			ipc_kmsg_destroy(reply);
			return IKM_NULL;
		}
		/*
		 *	Initialize the new reply message.
		 */
		{
#define	OutP_new	((mig_reply_error_t *) new_reply->ikm_header)
#define	OutP_old	((mig_reply_error_t *) reply->ikm_header)

		    bzero((void *)OutP_new, reply_size);

		    OutP_new->NDR = OutP_old->NDR;
		    OutP_new->Head.msgh_size = sizeof(mig_reply_error_t);
		    OutP_new->Head.msgh_bits = OutP_old->Head.msgh_bits & ~MACH_MSGH_BITS_COMPLEX;
		    OutP_new->Head.msgh_remote_port = OutP_old->Head.msgh_remote_port;
		    OutP_new->Head.msgh_local_port = MACH_PORT_NULL;
		    OutP_new->Head.msgh_voucher_port = MACH_PORT_NULL;
		    OutP_new->Head.msgh_id = OutP_old->Head.msgh_id;

		    /* Set the error as KERN_INVALID_TASK */
		    OutP_new->RetCode = KERN_INVALID_TASK;

#undef	OutP_new
#undef  OutP_old
		}

		/*
		 *	Destroy everything in reply except the reply port right,
		 *	which is needed in the new reply message.
		 */
		reply->ikm_header->msgh_remote_port = MACH_PORT_NULL;
		ipc_kmsg_destroy(reply);

		reply = new_reply;
	}

 	trailer = (mach_msg_format_0_trailer_t *)
		((vm_offset_t)reply->ikm_header + (int)reply->ikm_header->msgh_size);

 	trailer->msgh_sender = KERNEL_SECURITY_TOKEN;
 	trailer->msgh_trailer_type = MACH_MSG_TRAILER_FORMAT_0;
 	trailer->msgh_trailer_size = MACH_MSG_TRAILER_MINIMUM_SIZE;

	return reply;
}
Example no. 27
void
act_deallocate(
	thread_act_t	act)
{
	task_t		task;
	thread_t	thread;
	void		*task_proc;

	if (act == NULL)
		return;

	act_lock(act);

	if (--act->act_ref_count > 0) {
		act_unlock(act);
		return;
	}

	assert(!act->active);

	thread = act->thread;
	assert(thread != NULL);

	thread->top_act = NULL;

	act_unlock(act);

	task = act->task;
	task_lock(task);

	task_proc = task->bsd_info;

	{
		time_value_t	user_time, system_time;

		thread_read_times(thread, &user_time, &system_time);
		time_value_add(&task->total_user_time, &user_time);
		time_value_add(&task->total_system_time, &system_time);
	
		queue_remove(&task->threads, act, thread_act_t, task_threads);
		act->task_threads.next = NULL;
		task->thread_count--;
		task->res_thread_count--;
	}

	task_unlock(task);

	act_prof_deallocate(act);
	ipc_thr_act_terminate(act);

#ifdef MACH_BSD 
	{
		extern void uthread_free(task_t, void *, void *, void *);
		void *ut = act->uthread;

		uthread_free(task, act, ut, task_proc);
		act->uthread = NULL;
	}
#endif  /* MACH_BSD */   

	task_deallocate(task);

	thread_deallocate(thread);
}
Example no. 28
/*
 *	Routine:	task_for_pid
 *	Purpose:
 *		Get the task port for another "process", named by its
 *		process ID on the same host as "target_task".
 *
 *		Only permitted to privileged processes, or processes
 *		with the same user ID.
 *
 * XXX This should be a BSD system call, not a Mach trap!!!
 */
kern_return_t
task_for_pid(
	struct task_for_pid_args *args)
{
	mach_port_name_t	target_tport = args->target_tport;
	int			pid = args->pid;
	user_addr_t		task_addr = args->t;
	struct uthread		*uthread;
	proc_t 			p = PROC_NULL;
	task_t			t1 = TASK_NULL;
	mach_port_name_t	tret = MACH_PORT_NULL;
 	ipc_port_t 		tfpport;
	void * sright;
	int error = 0;

	AUDIT_MACH_SYSCALL_ENTER(AUE_TASKFORPID);
	AUDIT_ARG(pid, pid);
	AUDIT_ARG(mach_port1, target_tport);

#if defined(SECURE_KERNEL)
	if (0 == pid) {
		(void ) copyout((char *)&t1, task_addr, sizeof(mach_port_name_t));
		AUDIT_MACH_SYSCALL_EXIT(KERN_FAILURE);
		return(KERN_FAILURE);
	}
#endif

	t1 = port_name_to_task(target_tport);
	if (t1 == TASK_NULL) {
		(void) copyout((char *)&t1, task_addr, sizeof(mach_port_name_t));
		AUDIT_MACH_SYSCALL_EXIT(KERN_FAILURE);
		return(KERN_FAILURE);
	} 


	/*
	 * Delayed binding of thread credential to process credential, if we
	 * are not running with an explicitly set thread credential.
	 */
	uthread = get_bsdthread_info(current_thread());
	kauth_cred_uthread_update(uthread, current_proc());

	p = proc_find(pid);
	AUDIT_ARG(process, p);

	if (!(task_for_pid_posix_check(p))) {
		error = KERN_FAILURE;
		goto tfpout;
	}

	if (p->task != TASK_NULL) {
		/* If we aren't root and target's task access port is set... */
		if (!kauth_cred_issuser(kauth_cred_get()) &&
			p != current_proc() &&
			(task_get_task_access_port(p->task, &tfpport) == 0) &&
			(tfpport != IPC_PORT_NULL)) {

			if (tfpport == IPC_PORT_DEAD) {
				error = KERN_PROTECTION_FAILURE;
				goto tfpout;
			}

			/* Call up to the task access server */
			error = check_task_access(tfpport, proc_selfpid(), kauth_getgid(), pid);

			if (error != MACH_MSG_SUCCESS) {
				if (error == MACH_RCV_INTERRUPTED)
					error = KERN_ABORTED;
				else
					error = KERN_FAILURE;
				goto tfpout;
			}
		}
#if CONFIG_MACF
		error = mac_proc_check_get_task(kauth_cred_get(), p);
		if (error) {
			error = KERN_FAILURE;
			goto tfpout;
		}
#endif

		/* Grant task port access */
		task_reference(p->task);
		sright = (void *) convert_task_to_port(p->task);
		tret = ipc_port_copyout_send(
				sright, 
				get_task_ipcspace(current_task()));
	} 
	error = KERN_SUCCESS;

tfpout:
	task_deallocate(t1);
	AUDIT_ARG(mach_port2, tret);
	(void) copyout((char *) &tret, task_addr, sizeof(mach_port_name_t));
	if (p != PROC_NULL)
		proc_rele(p);
	AUDIT_MACH_SYSCALL_EXIT(error);
	return(error);
}
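For completeness, the user-space side: task_for_pid() as declared in <mach/mach_traps.h>, which only succeeds for privileged or suitably entitled same-uid callers, as the checks above enforce. A sketch:

/* illustrative user-space sketch, not from the XNU sources */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <mach/mach.h>
#include <mach/mach_error.h>
#include <mach/mach_traps.h>

int main(int argc, char *argv[])
{
	mach_port_t task = MACH_PORT_NULL;
	int pid, out_pid = -1;
	kern_return_t kr;

	pid = (argc > 1) ? atoi(argv[1]) : (int)getpid();

	kr = task_for_pid(mach_task_self(), pid, &task);
	if (kr != KERN_SUCCESS) {
		fprintf(stderr, "task_for_pid(%d): %s\n", pid, mach_error_string(kr));
		return 1;
	}

	/* round-trip through pid_for_task (Example no. 1) as a sanity check */
	(void) pid_for_task(task, &out_pid);
	printf("task port 0x%x -> pid %d\n", task, out_pid);

	mach_port_deallocate(mach_task_self(), task);
	return 0;
}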
Example no. 29
kern_return_t
task_name_for_pid(
	struct task_name_for_pid_args *args)
{
	mach_port_name_t	target_tport = args->target_tport;
	int			pid = args->pid;
	user_addr_t		task_addr = args->t;
	proc_t		p = PROC_NULL;
	task_t		t1;
	mach_port_name_t	tret;
	void * sright;
	int error = 0, refheld = 0;
	kauth_cred_t target_cred;

	AUDIT_MACH_SYSCALL_ENTER(AUE_TASKNAMEFORPID);
	AUDIT_ARG(pid, pid);
	AUDIT_ARG(mach_port1, target_tport);

	t1 = port_name_to_task(target_tport);
	if (t1 == TASK_NULL) {
		(void) copyout((char *)&t1, task_addr, sizeof(mach_port_name_t));
		AUDIT_MACH_SYSCALL_EXIT(KERN_FAILURE);
		return(KERN_FAILURE);
	} 

	p = proc_find(pid);
	if (p != PROC_NULL) {
		AUDIT_ARG(process, p);
		target_cred = kauth_cred_proc_ref(p);
		refheld = 1;

		if ((p->p_stat != SZOMB)
		    && ((current_proc() == p)
			|| kauth_cred_issuser(kauth_cred_get()) 
			|| ((kauth_cred_getuid(target_cred) == kauth_cred_getuid(kauth_cred_get())) && 
			    ((kauth_cred_getruid(target_cred) == kauth_getruid()))))) {

			if (p->task != TASK_NULL) {
				task_reference(p->task);
#if CONFIG_MACF
				error = mac_proc_check_get_task_name(kauth_cred_get(),  p);
				if (error) {
					task_deallocate(p->task);
					goto noperm;
				}
#endif
				sright = (void *)convert_task_name_to_port(p->task);
				tret = ipc_port_copyout_send(sright, 
						get_task_ipcspace(current_task()));
			} else
				tret  = MACH_PORT_NULL;

			AUDIT_ARG(mach_port2, tret);
			(void) copyout((char *)&tret, task_addr, sizeof(mach_port_name_t));
			task_deallocate(t1);
			error = KERN_SUCCESS;
			goto tnfpout;
		}
	}

#if CONFIG_MACF
noperm:
#endif
	task_deallocate(t1);
	tret = MACH_PORT_NULL;
	(void) copyout((char *) &tret, task_addr, sizeof(mach_port_name_t));
	error = KERN_FAILURE;
tnfpout:
	if (refheld != 0)
		kauth_cred_unref(&target_cred);
	if (p != PROC_NULL)
		proc_rele(p);
	AUDIT_MACH_SYSCALL_EXIT(error);
	return(error);
}
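The task name port returned here is a weaker handle than the one from task_for_pid(): it supports inspection calls such as task_info() but no control operations. A user-space sketch assuming the task_name_for_pid() prototype from <mach/mach_traps.h>:

/* illustrative user-space sketch, not from the XNU sources */
#include <stdio.h>
#include <unistd.h>
#include <mach/mach.h>
#include <mach/mach_traps.h>

int main(void)
{
	mach_port_t name_port = MACH_PORT_NULL;
	struct task_basic_info_64 info;
	mach_msg_type_number_t count = TASK_BASIC_INFO_64_COUNT;
	kern_return_t kr;

	kr = task_name_for_pid(mach_task_self(), (int)getpid(), &name_port);
	if (kr != KERN_SUCCESS)
		return 1;

	/* inspection-only call permitted on a task name port */
	kr = task_info(name_port, TASK_BASIC_INFO_64, (task_info_t)&info, &count);
	if (kr == KERN_SUCCESS)
		printf("resident size: %llu bytes\n", (unsigned long long)info.resident_size);

	mach_port_deallocate(mach_task_self(), name_port);
	return 0;
}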
Example no. 30
/*
 *	Routine:	task_for_pid
 *	Purpose:
 *		Get the task port for another "process", named by its
 *		process ID on the same host as "target_task".
 *
 *		Only permitted to privileged processes, or processes
 *		with the same user ID.
 *
 *		Note: if pid == 0, an error is returned no matter who is calling.
 *
 * XXX This should be a BSD system call, not a Mach trap!!!
 */
kern_return_t
task_for_pid(
	struct task_for_pid_args *args)
{
	mach_port_name_t	target_tport = args->target_tport;
	int			pid = args->pid;
	user_addr_t		task_addr = args->t;
	proc_t 			p = PROC_NULL;
	task_t			t1 = TASK_NULL;
	mach_port_name_t	tret = MACH_PORT_NULL;
 	ipc_port_t 		tfpport;
	void * sright;
	int error = 0;

	AUDIT_MACH_SYSCALL_ENTER(AUE_TASKFORPID);
	AUDIT_ARG(pid, pid);
	AUDIT_ARG(mach_port1, target_tport);

	/* Always check if pid == 0 */
	if (pid == 0) {
		(void ) copyout((char *)&t1, task_addr, sizeof(mach_port_name_t));
		AUDIT_MACH_SYSCALL_EXIT(KERN_FAILURE);
		return(KERN_FAILURE);
	}

	t1 = port_name_to_task(target_tport);
	if (t1 == TASK_NULL) {
		(void) copyout((char *)&t1, task_addr, sizeof(mach_port_name_t));
		AUDIT_MACH_SYSCALL_EXIT(KERN_FAILURE);
		return(KERN_FAILURE);
	} 


	p = proc_find(pid);
	if (p == PROC_NULL) {
		error = KERN_FAILURE;
		goto tfpout;
	}

#if CONFIG_AUDIT
	AUDIT_ARG(process, p);
#endif

	if (!(task_for_pid_posix_check(p))) {
		error = KERN_FAILURE;
		goto tfpout;
	}

	if (p->task != TASK_NULL) {
		/* If we aren't root and target's task access port is set... */
		if (!kauth_cred_issuser(kauth_cred_get()) &&
			p != current_proc() &&
			(task_get_task_access_port(p->task, &tfpport) == 0) &&
			(tfpport != IPC_PORT_NULL)) {

			if (tfpport == IPC_PORT_DEAD) {
				error = KERN_PROTECTION_FAILURE;
				goto tfpout;
			}

			/* Call up to the task access server */
			error = check_task_access(tfpport, proc_selfpid(), kauth_getgid(), pid);

			if (error != MACH_MSG_SUCCESS) {
				if (error == MACH_RCV_INTERRUPTED)
					error = KERN_ABORTED;
				else
					error = KERN_FAILURE;
				goto tfpout;
			}
		}
#if CONFIG_MACF
		error = mac_proc_check_get_task(kauth_cred_get(), p);
		if (error) {
			error = KERN_FAILURE;
			goto tfpout;
		}
#endif

		/* Grant task port access */
		task_reference(p->task);
		extmod_statistics_incr_task_for_pid(p->task);

		sright = (void *) convert_task_to_port(p->task);
		tret = ipc_port_copyout_send(
				sright, 
				get_task_ipcspace(current_task()));
	} 
	error = KERN_SUCCESS;

tfpout:
	task_deallocate(t1);
	AUDIT_ARG(mach_port2, tret);
	(void) copyout((char *) &tret, task_addr, sizeof(mach_port_name_t));
	if (p != PROC_NULL)
		proc_rele(p);
	AUDIT_MACH_SYSCALL_EXIT(error);
	return(error);
}