Example No. 1
/*
 *	ROUTINE:	lock_set_destroy	[exported]
 *	
 *	Destroys a lock set.  This call succeeds only if the
 *	specified task is the same task that was specified at the
 *	lock set's creation.
 *
 *	NOTES:
 *	- All threads currently blocked on the lock set's ulocks are awoken.
 *	- These threads will return with the KERN_LOCK_SET_DESTROYED error.
 */
kern_return_t
lock_set_destroy (task_t task, lock_set_t lock_set)
{
	ulock_t		ulock;
	int		i;

	if (task == TASK_NULL || lock_set == LOCK_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	if (lock_set->owner != task)
		return KERN_INVALID_RIGHT;

	lock_set_lock(lock_set);
	if (!lock_set->active) {
		lock_set_unlock(lock_set);
		return KERN_LOCK_SET_DESTROYED;
	}

	/*
	 *  Deactivate lock set
	 */
	lock_set->active = FALSE;

	/*
	 *  If a ulock is currently held in the target lock set:
	 *
	 *  1) Wake up all threads blocked on the ulock (if any).  Threads
	 *     may be blocked waiting normally, or waiting for a handoff.
	 *     Blocked threads will return with KERN_LOCK_SET_DESTROYED.
	 *
	 *  2) Clear the ulock's ownership.  The thread currently holding
	 *     the ulock has its ownership revoked.
	 */
	for (i = 0; i < lock_set->n_ulocks; i++) {
		ulock = &lock_set->ulock_list[i];

		ulock_lock(ulock);

		if (ulock->accept_wait) {
			ulock->accept_wait = FALSE;
			wait_queue_wakeup64_one(&ulock->wait_queue,
					      LOCK_SET_HANDOFF,
					      THREAD_RESTART);
		}
					  
		if (ulock->holder) {
			if (ulock->blocked) {
				ulock->blocked = FALSE;
				wait_queue_wakeup64_all(&ulock->wait_queue,
						      LOCK_SET_EVENT,
						      THREAD_RESTART);
			}
			if (ulock->ho_wait) {
				ulock->ho_wait = FALSE;
				wait_queue_wakeup64_one(&ulock->wait_queue,
						      LOCK_SET_HANDOFF,
						      THREAD_RESTART);
			}
			ulock_ownership_clear(ulock);
		}
		
		ulock_unlock(ulock);
	}

	lock_set_unlock(lock_set);
	lock_set_ownership_clear(lock_set, task);

	/*
	 *  Drop the lock set reference given to the containing task,
	 *  which in turn destroys the lock set structure if the reference
	 *  count goes to zero.
	 */
	lock_set_dereference(lock_set);

	return KERN_SUCCESS;
}
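
A minimal caller sketch from user space, assuming the Mach lock_set MIG interface (lock_set_destroy, lock_acquire, mach_error_string) is available to the process; the helper name and error handling are illustrative and not part of the kernel source above:

#include <mach/mach.h>
#include <mach/mach_error.h>
#include <stdio.h>

/* Hypothetical helper: tears down a lock set owned by the calling task. */
static void
destroy_my_lock_set(lock_set_t lock_set)
{
	kern_return_t kr;

	/*
	 * Only the task that created the set may destroy it; any other
	 * task gets KERN_INVALID_RIGHT back.
	 */
	kr = lock_set_destroy(mach_task_self(), lock_set);
	if (kr != KERN_SUCCESS)
		printf("lock_set_destroy: %s\n", mach_error_string(kr));

	/*
	 * Threads still blocked in lock_acquire() on this set are woken
	 * by the kernel loop above and return KERN_LOCK_SET_DESTROYED.
	 */
}
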
Example No. 2
/*
 *	ROUTINE:	lock_set_create		[exported]
 *
 *	Creates a lock set.
 *	The port representing the lock set is returned as a parameter.
 */      
kern_return_t
lock_set_create (
	task_t		task,
	lock_set_t	*new_lock_set,
	int		n_ulocks,
	int		policy)
{
	lock_set_t 	lock_set = LOCK_SET_NULL;
	ulock_t		ulock;
	vm_size_t 	size;
	int 		x;

	*new_lock_set = LOCK_SET_NULL;

	if (task == TASK_NULL || n_ulocks <= 0 || policy > SYNC_POLICY_MAX)
		return KERN_INVALID_ARGUMENT;

	/*
	 *  The lock set structure embeds the first ulock in ulock_list[]
	 *  (hence the n_ulocks - 1 below), so reject any count whose
	 *  ulock storage could not fit within VM_MAX_ADDRESS and would
	 *  overflow the size calculation.
	 */
	if ((VM_MAX_ADDRESS - sizeof(struct lock_set))/sizeof(struct ulock) < (unsigned)n_ulocks)
		return KERN_RESOURCE_SHORTAGE;

	size = sizeof(struct lock_set) + (sizeof(struct ulock) * (n_ulocks-1));
	lock_set = (lock_set_t) kalloc (size);

	if (lock_set == LOCK_SET_NULL)
		return KERN_RESOURCE_SHORTAGE; 


	lock_set_lock_init(lock_set);
	lock_set->n_ulocks = n_ulocks;
	lock_set->ref_count = 1;

	/*
	 *  Create and initialize the lock set port
	 */
	lock_set->port = ipc_port_alloc_kernel();
	if (lock_set->port == IP_NULL) {	
		/* This will deallocate the lock set */
		lock_set_dereference(lock_set);
		return KERN_RESOURCE_SHORTAGE; 
	}

	ipc_kobject_set (lock_set->port,
			(ipc_kobject_t) lock_set,
			IKOT_LOCK_SET);

	/*
	 *  Initialize each ulock in the lock set
	 */

	for (x=0; x < n_ulocks; x++) {
		ulock = (ulock_t) &lock_set->ulock_list[x];
		ulock_lock_init(ulock);
		ulock->lock_set  = lock_set;
		ulock->holder	 = THREAD_NULL;
		ulock->blocked   = FALSE;
		ulock->unstable	 = FALSE;
		ulock->ho_wait	 = FALSE;
		wait_queue_init(&ulock->wait_queue, policy);
	}

	lock_set_ownership_set(lock_set, task);

	lock_set->active = TRUE;
	*new_lock_set = lock_set;

	return KERN_SUCCESS;
}
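
A minimal creation/use sketch from user space, assuming the Mach lock_set interface (lock_set_create, lock_acquire, lock_release, lock_set_destroy) and SYNC_POLICY_FIFO are available; the ulock count and error handling are illustrative only:

#include <mach/mach.h>
#include <mach/mach_error.h>
#include <mach/sync_policy.h>
#include <stdio.h>

int
main(void)
{
	lock_set_t	lock_set;
	kern_return_t	kr;

	/* Ask the kernel for a set of 4 ulocks with FIFO wakeup policy. */
	kr = lock_set_create(mach_task_self(), &lock_set, 4, SYNC_POLICY_FIFO);
	if (kr != KERN_SUCCESS) {
		printf("lock_set_create: %s\n", mach_error_string(kr));
		return 1;
	}

	/* Individual ulocks are addressed by index within the set. */
	kr = lock_acquire(lock_set, 0);
	if (kr == KERN_SUCCESS)
		(void) lock_release(lock_set, 0);

	/* Only the creating task may destroy the set. */
	(void) lock_set_destroy(mach_task_self(), lock_set);
	return 0;
}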