Example #1
/*
 *	Routine:	semaphore_dereference
 *
 *	Release a reference on a semaphore.  If this is the last reference,
 *	the semaphore data structure is deallocated.
 */
void
semaphore_dereference(
	semaphore_t		semaphore)
{
	uint32_t collisions;
	spl_t spl_level;

	if (semaphore == NULL)
		return;

	if (hw_atomic_sub(&semaphore->ref_count, 1) != 0)
		return;

	/*
	 * Last ref, clean up the port [if any]
	 * associated with the semaphore, destroy
	 * it (if still active) and then free
	 * the semaphore.
	 */
	ipc_port_t port = semaphore->port;

	if (IP_VALID(port)) {
		assert(!port->ip_srights);
		ipc_port_dealloc_kernel(port);
	}

	/*
	 * Lock the semaphore to lock in the owner task reference.
	 * Then continue to try to lock the task (inverse order).
	 */
	spl_level = splsched();
	semaphore_lock(semaphore);
	for (collisions = 0; semaphore->active; collisions++) {
		task_t task = semaphore->owner;

		assert(task != TASK_NULL);
		
		if (task_lock_try(task)) {
			semaphore_destroy_internal(task, semaphore);
			/* semaphore unlocked */
			splx(spl_level);
			task_unlock(task);
			goto out;
		}
		
		/* failed to get out-of-order locks */
		semaphore_unlock(semaphore);
		splx(spl_level);
		mutex_pause(collisions);
		spl_level = splsched();
		semaphore_lock(semaphore);
	}
	semaphore_unlock(semaphore);
	splx(spl_level);

 out:
	zfree(semaphore_zone, semaphore);
}
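Example #1 acquires its locks in inverse order (semaphore first, then task), so it can only try the task lock and must drop everything, pause, and retry on failure, pacing retries with mutex_pause(collisions). Below is a minimal user-space sketch of the same trylock-and-back-off pattern using POSIX threads; the names outer/inner are illustrative, and sched_yield() stands in for mutex_pause():

#include <pthread.h>
#include <sched.h>

static pthread_mutex_t outer = PTHREAD_MUTEX_INITIALIZER;  /* plays the task lock's role */
static pthread_mutex_t inner = PTHREAD_MUTEX_INITIALIZER;  /* plays the semaphore lock's role */

/* Normal path: everyone else takes outer before inner. */
static void *in_order(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&outer);
	pthread_mutex_lock(&inner);
	pthread_mutex_unlock(&inner);
	pthread_mutex_unlock(&outer);
	return NULL;
}

/* Out-of-order path: inner is already held, so outer may only be tried. */
static void *out_of_order(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&inner);
	while (pthread_mutex_trylock(&outer) != 0) {
		/* failed to get the out-of-order lock: drop, pause, retry */
		pthread_mutex_unlock(&inner);
		sched_yield();          /* stands in for mutex_pause() */
		pthread_mutex_lock(&inner);
	}
	/* both locks held, despite the inverse acquisition order */
	pthread_mutex_unlock(&outer);
	pthread_mutex_unlock(&inner);
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, in_order, NULL);
	pthread_create(&t2, NULL, out_of_order, NULL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	return 0;
}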
Example #2
/*
 *	Routine:	semaphore_destroy
 *
 *	Destroys a semaphore.  This call will only succeed if the
 *	specified task is the SAME task name specified at the semaphore's
 *	creation.
 *
 *	All threads currently blocked on the semaphore are awoken.  These
 *	threads will return with the KERN_TERMINATED error.
 */
kern_return_t
semaphore_destroy(
	task_t			task,
	semaphore_t		semaphore)
{
	int				old_count;
	spl_t			spl_level;

	if (task == TASK_NULL || semaphore == SEMAPHORE_NULL)
		return KERN_INVALID_ARGUMENT;

	/*
	 *  Disown semaphore
	 */
	task_lock(task);
	if (semaphore->owner != task) {
		task_unlock(task);
		return KERN_INVALID_ARGUMENT;
	}
	remqueue(&task->semaphore_list, (queue_entry_t) semaphore);
	semaphore->owner = TASK_NULL;
	task->semaphores_owned--;
	task_unlock(task);

	spl_level = splsched();
	semaphore_lock(semaphore);

	/*
	 *  Deactivate semaphore
	 */
	assert(semaphore->active);
	semaphore->active = FALSE;

	/*
	 *  Wakeup blocked threads  
	 */
	old_count = semaphore->count;
	semaphore->count = 0;

	if (old_count < 0) {
		wait_queue_wakeup64_all_locked(&semaphore->wait_queue,
					     SEMAPHORE_EVENT,
					     THREAD_RESTART,
					     TRUE);		/* unlock? */
	} else {
		semaphore_unlock(semaphore);
	}
	splx(spl_level);

	/*
	 *  Deallocate
	 *
	 *  Drop the semaphore reference, which in turn deallocates the
	 *  semaphore structure if the reference count goes to zero.
	 */
	ipc_port_dealloc_kernel(semaphore->port);
	semaphore_dereference(semaphore);
	return KERN_SUCCESS;
}
Example #3
/*
 *	Routine:	semaphore_destroy
 *
 *	Destroys a semaphore and consumes the caller's reference on the
 *	semaphore.
 */
kern_return_t
semaphore_destroy(
	task_t			task,
	semaphore_t		semaphore)
{
	spl_t spl_level;

	if (semaphore == SEMAPHORE_NULL)
		return KERN_INVALID_ARGUMENT;

	if (task == TASK_NULL) {
		semaphore_dereference(semaphore);
		return KERN_INVALID_ARGUMENT;
	}

	task_lock(task);
	spl_level = splsched();
	semaphore_lock(semaphore);

	if (semaphore->owner != task) {
		semaphore_unlock(semaphore);
		splx(spl_level);
		task_unlock(task);
		return KERN_INVALID_ARGUMENT;
	}
			
	semaphore_destroy_internal(task, semaphore);
	/* semaphore unlocked */

	splx(spl_level);
	task_unlock(task);

	semaphore_dereference(semaphore);
	return KERN_SUCCESS;
}
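Both versions of semaphore_destroy above wake any blocked threads with THREAD_RESTART, which the waiters see as KERN_TERMINATED. A minimal sketch of that behavior as observed from user space, assuming the macOS Mach API in <mach/semaphore.h> (semaphore_create, semaphore_wait, semaphore_destroy):

#include <mach/mach.h>
#include <mach/mach_error.h>
#include <mach/semaphore.h>
#include <mach/task.h>
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static semaphore_t sem;

static void *waiter(void *arg)
{
	(void)arg;
	kern_return_t kr = semaphore_wait(sem);     /* blocks: count is 0 */
	printf("semaphore_wait -> %s\n", mach_error_string(kr));
	return NULL;
}

int main(void)
{
	pthread_t t;

	semaphore_create(mach_task_self(), &sem, SYNC_POLICY_FIFO, 0);
	pthread_create(&t, NULL, waiter, NULL);
	sleep(1);                                   /* let the waiter block */
	semaphore_destroy(mach_task_self(), sem);   /* waiter wakes with KERN_TERMINATED */
	pthread_join(t, NULL);
	return 0;
}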
Example #4
File: devfs.c  Project: csko/yaosp
static int devfs_add_select_request( void* fs_cookie, void* _node, void* file_cookie, select_request_t* request ) {
    int error;
    devfs_node_t* node;

    node = ( devfs_node_t* )_node;

    if ( node->is_directory ) {
        return -EISDIR;
    }

    if ( node->calls->add_select_request == NULL ) {
        switch ( ( int )request->type ) {
            case SELECT_READ :
            case SELECT_WRITE :
                request->ready = true;
                semaphore_unlock( request->sync, 1 );
                break;
        }

        error = 0;
    } else {
        error = node->calls->add_select_request( node->cookie, file_cookie, request );
    }

    return error;
}
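Example #4 is the fallback path: a device that provides no add_select_request hook is reported immediately ready for read and write by posting the request's sync semaphore. For contrast, a hypothetical driver-side hook might post the semaphore only when data is actually available. The sketch below uses stand-in types and a stubbed semaphore_unlock(); they approximate yaosp's interfaces for illustration and are not the real headers:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Stand-ins approximating yaosp's select interfaces (illustrative only). */
typedef int semaphore_id;
typedef enum { SELECT_READ, SELECT_WRITE } select_type_t;

typedef struct select_request {
    select_type_t type;
    bool ready;
    semaphore_id sync;
} select_request_t;

/* Stub for the kernel call that wakes the select()ing thread. */
static int semaphore_unlock( semaphore_id sem, int count ) {
    printf( "posting select semaphore %d (+%d)\n", sem, count );
    return 0;
}

/* Hypothetical keyboard-style driver: readable only while its input buffer
 * is non-empty; otherwise the request stays pending until an interrupt
 * handler posts request->sync later. */
static size_t input_buffer_fill = 3;

static int kbd_add_select_request( void* cookie, void* file_cookie, select_request_t* request ) {
    ( void )cookie;
    ( void )file_cookie;

    if ( ( request->type == SELECT_READ ) && ( input_buffer_fill > 0 ) ) {
        request->ready = true;
        semaphore_unlock( request->sync, 1 );
    }

    return 0;
}

int main( void ) {
    select_request_t req = { SELECT_READ, false, 42 };

    kbd_add_select_request( NULL, NULL, &req );
    printf( "ready: %d\n", req.ready );
    return 0;
}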
Example #5
/*
 *	Routine:	semaphore_reference
 *
 *	Take out a reference on a semaphore.  This keeps the data structure
 *	in existence (but the semaphore may be deactivated).
 */
void
semaphore_reference(
	semaphore_t		semaphore)
{
	spl_t			spl_level;

	spl_level = splsched();
	semaphore_lock(semaphore);

	semaphore->ref_count++;

	semaphore_unlock(semaphore);
	splx(spl_level);
}
Example #6
/*
 *	Routine:	semaphore_dereference
 *
 *	Release a reference on a semaphore.  If this is the last reference,
 *	the semaphore data structure is deallocated.
 */
void
semaphore_dereference(
	semaphore_t		semaphore)
{
	int			ref_count;
	spl_t			spl_level;

	if (semaphore != NULL) {
	    spl_level = splsched();
	    semaphore_lock(semaphore);

	    ref_count = --(semaphore->ref_count);

	    semaphore_unlock(semaphore);
	    splx(spl_level);

	    if (ref_count == 0) {
			assert(wait_queue_empty(&semaphore->wait_queue));
			zfree(semaphore_zone, semaphore);
	    }
	}
}
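This older dereference takes the spinlock merely to decrement the count; the version in Example #1 instead decrements with hw_atomic_sub and takes locks only on the final reference. A self-contained sketch of that lock-free pattern using C11 atomics follows. Note that hw_atomic_sub returns the new value while C11 atomic_fetch_sub returns the previous one, hence the comparison against 1; struct obj is illustrative, not an XNU type:

#include <stdatomic.h>
#include <stdlib.h>

struct obj {
	atomic_uint	ref_count;
	/* ... payload ... */
};

static void obj_reference(struct obj *o)
{
	atomic_fetch_add_explicit(&o->ref_count, 1, memory_order_relaxed);
}

static void obj_dereference(struct obj *o)
{
	if (o == NULL)
		return;

	/* fetch_sub returns the previous value: 1 means this was the last ref */
	if (atomic_fetch_sub_explicit(&o->ref_count, 1, memory_order_acq_rel) != 1)
		return;

	/* last reference: safe to free (stands in for zfree(semaphore_zone, ...)) */
	free(o);
}

int main(void)
{
	struct obj *o = calloc(1, sizeof *o);

	atomic_init(&o->ref_count, 1);
	obj_reference(o);
	obj_dereference(o);
	obj_dereference(o);	/* drops the last reference and frees */
	return 0;
}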
Example #7
/*
 *	Routine:	semaphore_destroy_internal
 *
 *	Disassociate a semaphore from its owning task, mark it inactive,
 *	and set any waiting threads running with THREAD_RESTART.
 *
 *	Conditions:
 *			task is locked
 *			semaphore is locked
 *			semaphore is owned by the specified task
 *	Returns:
 *			with semaphore unlocked
 */
static void
semaphore_destroy_internal(
	task_t			task,
	semaphore_t		semaphore)
{
	int			old_count;

	/* unlink semaphore from owning task */
	assert(semaphore->owner == task);
	remqueue((queue_entry_t) semaphore);
	semaphore->owner = TASK_NULL;
	task->semaphores_owned--;

	/*
	 *  Deactivate semaphore
	 */
	assert(semaphore->active);
	semaphore->active = FALSE;

	/*
	 *  Wakeup blocked threads  
	 */
	old_count = semaphore->count;
	semaphore->count = 0;

	if (old_count < 0) {
		waitq_wakeup64_all_locked(&semaphore->waitq,
					  SEMAPHORE_EVENT,
					  THREAD_RESTART, NULL,
					  WAITQ_ALL_PRIORITIES,
					  WAITQ_UNLOCK);
		/* waitq/semaphore is unlocked */
	} else {
		semaphore_unlock(semaphore);
	}
}
Example #8
/*
 *	Routine:	semaphore_wait_internal
 *
 *		Decrements the semaphore count by one.  If the count is
 *		negative after the decrement, the calling thread blocks
 *		(possibly at a continuation and/or with a timeout).
 *
 *	Assumptions:
 *		A reference is held on the wait semaphore.
 *		A reference is held on the signal semaphore.
 */
kern_return_t
semaphore_wait_internal(
	semaphore_t		wait_semaphore,
	semaphore_t		signal_semaphore,
	mach_timespec_t		*wait_timep,
	void 			(*caller_cont)(kern_return_t))
{
	boolean_t			nonblocking;
	int					wait_result;
	spl_t				spl_level;
	kern_return_t		kr = KERN_ALREADY_WAITING;

	spl_level = splsched();
	semaphore_lock(wait_semaphore);

	/*
	 * Decide if we really have to wait.
	 */
	nonblocking = (wait_timep != (mach_timespec_t *)0) ?
		      (wait_timep->tv_sec == 0 && wait_timep->tv_nsec == 0) :
		      FALSE;

	if (!wait_semaphore->active) {
		kr = KERN_TERMINATED;
	} else if (wait_semaphore->count > 0) {
		wait_semaphore->count--;
		kr = KERN_SUCCESS;
	} else if (nonblocking) {
		kr = KERN_OPERATION_TIMED_OUT;
	} else {
		uint64_t	abstime;
		thread_t	self = current_thread();

		wait_semaphore->count = -1;  /* we don't keep an actual count */
		thread_lock(self);
		
		/*
		 * If it is a timed wait, calculate the wake up deadline.
		 */
		if (wait_timep != (mach_timespec_t *)0) {
			nanoseconds_to_absolutetime((uint64_t)wait_timep->tv_sec *
											NSEC_PER_SEC + wait_timep->tv_nsec, &abstime);
			clock_absolutetime_interval_to_deadline(abstime, &abstime);
		}
		else
			abstime = 0;

		(void)wait_queue_assert_wait64_locked(
					&wait_semaphore->wait_queue,
					SEMAPHORE_EVENT,
					THREAD_ABORTSAFE, abstime,
					self);
		thread_unlock(self);
	}
	semaphore_unlock(wait_semaphore);
	splx(spl_level);

	/*
	 * wait_semaphore is unlocked so we are free to go ahead and
	 * signal the signal_semaphore (if one was provided).
	 */
	if (signal_semaphore != SEMAPHORE_NULL) {
		kern_return_t signal_kr;

		/*
		 * lock the signal semaphore reference we got and signal it.
		 * This will NOT block (we cannot block after having asserted
		 * our intention to wait above).
		 */
		signal_kr = semaphore_signal_internal(signal_semaphore,
						      THREAD_NULL,
						      SEMAPHORE_SIGNAL_PREPOST);

		if (signal_kr == KERN_NOT_WAITING)
			signal_kr = KERN_SUCCESS;
		else if (signal_kr == KERN_TERMINATED) {
			/* 
			 * Uh!Oh!  The semaphore we were to signal died.
			 * We have to get ourselves out of the wait in
			 * case we get stuck here forever (it is assumed
			 * that the semaphore we were posting is gating
			 * the decision by someone else to post the
			 * semaphore we are waiting on).  People will
			 * discover the other dead semaphore soon enough.
			 * If we got out of the wait cleanly (someone
			 * already posted a wakeup to us) then return that
			 * (most important) result.  Otherwise,
			 * return the KERN_TERMINATED status.
			 */
			thread_t self = current_thread();

			clear_wait(self, THREAD_INTERRUPTED);
			kr = semaphore_convert_wait_result(self->wait_result);
			if (kr == KERN_ABORTED)
				kr = KERN_TERMINATED;
		}
	}
	
	/*
	 * If we had an error, or we didn't really need to wait we can
	 * return now that we have signalled the signal semaphore.
	 */
	if (kr != KERN_ALREADY_WAITING)
		return kr;

	/*
	 * Now, we can block.  If the caller supplied a continuation
	 * pointer of their own for after the block, block with the
	 * appropriate semaphore continuation.  This will gather the
	 * semaphore results, release references on the semaphore(s),
	 * and then call the caller's continuation.
	 */
	if (caller_cont) {
		thread_t self = current_thread();

		self->sth_continuation = caller_cont;
		self->sth_waitsemaphore = wait_semaphore;
		self->sth_signalsemaphore = signal_semaphore;
		wait_result = thread_block((thread_continue_t)semaphore_wait_continue);
	}
	else {
		wait_result = thread_block(THREAD_CONTINUE_NULL);
	}

	return (semaphore_convert_wait_result(wait_result));
}
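The nonblocking branch above (a zero timeout yields KERN_OPERATION_TIMED_OUT without ever blocking) and the timed branch (a deadline computed from the mach_timespec_t) are both observable from user space through semaphore_timedwait. A minimal sketch, assuming the macOS Mach API:

#include <mach/mach.h>
#include <mach/mach_error.h>
#include <mach/semaphore.h>
#include <mach/task.h>
#include <stdio.h>

int main(void)
{
	semaphore_t		sem;
	kern_return_t		kr;
	mach_timespec_t		zero = { 0, 0 };         /* maps to the nonblocking path */
	mach_timespec_t		half = { 0, 500000000 }; /* 0.5 s deadline */

	semaphore_create(mach_task_self(), &sem, SYNC_POLICY_FIFO, 0);

	/* count == 0 and a zero timeout: returns immediately */
	kr = semaphore_timedwait(sem, zero);
	printf("zero timeout:  %s\n", mach_error_string(kr)); /* KERN_OPERATION_TIMED_OUT */

	/* count == 0 and a real timeout: blocks ~0.5 s, then times out */
	kr = semaphore_timedwait(sem, half);
	printf("0.5 s timeout: %s\n", mach_error_string(kr));

	semaphore_destroy(mach_task_self(), sem);
	return 0;
}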
Example #9
/*
 *	Routine:	semaphore_signal_internal
 *
 *		Signals the semaphore directly.
 *	Assumptions:
 *		Semaphore is unlocked on entry; this routine takes and
 *		releases the semaphore lock itself.
 */
kern_return_t
semaphore_signal_internal(
	semaphore_t		semaphore,
	thread_t		thread,
	int				options)
{
	kern_return_t kr;
	spl_t  spl_level;

	spl_level = splsched();
	semaphore_lock(semaphore);

	if (!semaphore->active) {
		semaphore_unlock(semaphore);
		splx(spl_level);
		return KERN_TERMINATED;
	}

	if (thread != THREAD_NULL) {
		if (semaphore->count < 0) {
			kr = wait_queue_wakeup64_thread_locked(
			        	&semaphore->wait_queue,
					SEMAPHORE_EVENT,
					thread,
					THREAD_AWAKENED,
					TRUE);  /* unlock? */
		} else {
			semaphore_unlock(semaphore);
			kr = KERN_NOT_WAITING;
		}
		splx(spl_level);
		return kr;
	} 

	if (options & SEMAPHORE_SIGNAL_ALL) {
		int old_count = semaphore->count;

		if (old_count < 0) {
			semaphore->count = 0;  /* always reset */
			kr = wait_queue_wakeup64_all_locked(
					&semaphore->wait_queue,
					SEMAPHORE_EVENT,
					THREAD_AWAKENED,
					TRUE);		/* unlock? */
		} else {
			if (options & SEMAPHORE_SIGNAL_PREPOST)
				semaphore->count++;
			semaphore_unlock(semaphore);
			kr = KERN_SUCCESS;
		}
		splx(spl_level);
		return kr;
	}
	
	if (semaphore->count < 0) {
		if (wait_queue_wakeup64_one_locked(
					&semaphore->wait_queue,
					SEMAPHORE_EVENT,
					THREAD_AWAKENED,
					FALSE) == KERN_SUCCESS) {
			semaphore_unlock(semaphore);
			splx(spl_level);
			return KERN_SUCCESS;
		} else
			semaphore->count = 0;  /* all waiters gone */
	}

	if (options & SEMAPHORE_SIGNAL_PREPOST) {
		semaphore->count++;
	}

	semaphore_unlock(semaphore);
	splx(spl_level);
	return KERN_NOT_WAITING;
}
Example #10
/*
 *	Routine:	semaphore_signal_internal
 *
 *		Signals the semaphore directly.
 *	Assumptions:
 *		Semaphore is unlocked on entry; this routine takes and
 *		releases the semaphore lock itself.
 */
kern_return_t
semaphore_signal_internal(
	semaphore_t		semaphore,
	thread_t		thread,
	int				options)
{
	kern_return_t kr;
	spl_t  spl_level;

	spl_level = splsched();
	semaphore_lock(semaphore);

	if (!semaphore->active) {
		semaphore_unlock(semaphore);
		splx(spl_level);
		return KERN_TERMINATED;
	}

	if (thread != THREAD_NULL) {
		if (semaphore->count < 0) {
			kr = waitq_wakeup64_thread_locked(
					&semaphore->waitq,
					SEMAPHORE_EVENT,
					thread,
					THREAD_AWAKENED,
					WAITQ_UNLOCK);
			/* waitq/semaphore is unlocked */
		} else {
			kr = KERN_NOT_WAITING;
			semaphore_unlock(semaphore);
		}
		splx(spl_level);
		return kr;
	} 

	if (options & SEMAPHORE_SIGNAL_ALL) {
		int old_count = semaphore->count;

		kr = KERN_NOT_WAITING;
		if (old_count < 0) {
			semaphore->count = 0;  /* always reset */
			kr = waitq_wakeup64_all_locked(
					&semaphore->waitq,
					SEMAPHORE_EVENT,
					THREAD_AWAKENED, NULL,
					WAITQ_ALL_PRIORITIES,
					WAITQ_UNLOCK);
			/* waitq / semaphore is unlocked */
		} else {
			if (options & SEMAPHORE_SIGNAL_PREPOST)
				semaphore->count++;
			kr = KERN_SUCCESS;
			semaphore_unlock(semaphore);
		}
		splx(spl_level);
		return kr;
	}
	
	if (semaphore->count < 0) {
		kr = waitq_wakeup64_one_locked(
					&semaphore->waitq,
					SEMAPHORE_EVENT,
					THREAD_AWAKENED, NULL,
					WAITQ_ALL_PRIORITIES,
					WAITQ_KEEP_LOCKED);
		if (kr == KERN_SUCCESS) {
			semaphore_unlock(semaphore);
			splx(spl_level);
			return KERN_SUCCESS;
		} else {
			semaphore->count = 0;  /* all waiters gone */
		}
	}

	if (options & SEMAPHORE_SIGNAL_PREPOST) {
		semaphore->count++;
	}

	semaphore_unlock(semaphore);
	splx(spl_level);
	return KERN_NOT_WAITING;
}
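The SEMAPHORE_SIGNAL_PREPOST and SEMAPHORE_SIGNAL_ALL branches above differ in what happens when nobody is waiting: a preposted signal is remembered in the count, while a signal-all on an uncontended semaphore is a no-op that resets nothing. Assuming, as in xnu's sources, that semaphore_signal() passes SEMAPHORE_SIGNAL_PREPOST and semaphore_signal_all() passes only SEMAPHORE_SIGNAL_ALL, the difference can be seen from user space:

#include <mach/mach.h>
#include <mach/mach_error.h>
#include <mach/semaphore.h>
#include <mach/task.h>
#include <stdio.h>

int main(void)
{
	semaphore_t		sem;
	mach_timespec_t		zero = { 0, 0 };

	semaphore_create(mach_task_self(), &sem, SYNC_POLICY_FIFO, 0);

	/* prepost: with no waiter, the signal is saved as count == 1,
	 * so a later zero-timeout wait succeeds immediately */
	semaphore_signal(sem);
	printf("wait after signal:     %s\n",
	       mach_error_string(semaphore_timedwait(sem, zero))); /* KERN_SUCCESS */

	/* signal-all does not prepost: with no waiters it is a no-op,
	 * so a later zero-timeout wait times out */
	semaphore_signal_all(sem);
	printf("wait after signal_all: %s\n",
	       mach_error_string(semaphore_timedwait(sem, zero))); /* KERN_OPERATION_TIMED_OUT */

	semaphore_destroy(mach_task_self(), sem);
	return 0;
}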