Example #1
0
/*
 * This API is highly deprecated in version 1 implementation base on OCF.
 * It is actually _not_ an asynchronous call. TODO fix in version 2.
 *
 * Looks up the cluster node `nodeId` and delivers it synchronously via
 * the saClmClusterNodeGetCallback registered on the handle.
 * Returns SA_ERR_BAD_HANDLE, SA_ERR_INVALID_PARAM, SA_ERR_INIT, an
 * error from retrieve_node_buffer(), or SA_OK.
 */
SaErrorT
saClmClusterNodeGetAsync(const SaClmHandleT *clmHandle,
                         SaInvocationT invocation,
                         SaClmNodeIdT nodeId,
                         SaClmClusterNodeT *clusterNode)
{
	int ret;
	__clm_handle_t *hd = GET_CLM_HANDLE(clmHandle);

	if (!hd){
		return SA_ERR_BAD_HANDLE;
	}

	if (!clusterNode) {
		cl_log(LOG_ERR, "Invalid parameter clusterNode <%p>"
		,	clusterNode);
		return SA_ERR_INVALID_PARAM;
	}
	/*
	 * Fix: the callback is invoked unconditionally below; fail early
	 * instead of calling through a NULL function pointer when the
	 * caller never registered one.
	 */
	if (!hd->callbacks.saClmClusterNodeGetCallback) {
		cl_log(LOG_ERR, "saClmClusterNodeGetCallback not registered");
		return SA_ERR_INIT;
	}
	if (!__ccm_data) {
		cl_log(LOG_ERR, "__ccm_data is NULL");
		return SA_ERR_INIT;
	}
	/* Serialize against ccm_events() updating the cached membership. */
	pthread_lock();
	if ((ret = retrieve_node_buffer(nodeId, clusterNode)) != SA_OK) {
		cl_log(LOG_ERR, "retrieve_node_buffer error [%d]", ret);
		pthread_unlock();
		return ret;
	}
	pthread_unlock();

	/* Deliver the result in the caller's thread ("async" in name only). */
	hd->callbacks.saClmClusterNodeGetCallback(invocation, clusterNode
	,	SA_OK);

	return SA_OK;
}
Example #2
0
/*
 * Wait for thread `tid` to terminate and optionally collect its exit
 * value through `*status`.  Returns 0 on success, or EINVAL if no such
 * thread exists or the target is detached.  Must be called with
 * cancelation DEFERRED (see the pthread_testcancel calls below).
 */
int
pthread_join(pthread_t tid, void **status)
{
	pthread_thread_t	*joinee, *joiner;

	/* Map the external tid onto the internal thread structure. */
	if ((joinee = tidtothread(tid)) == NULL_THREADPTR)
		return EINVAL;
	
	joiner = CURPTHREAD();

	/* We may block below; must not already be in a critical region. */
	assert_preemption_enabled();
	disable_preemption();

	/* A detached thread can never be joined. */
	pthread_lock(&(joinee->lock));
	if (joinee->flags & THREAD_DETACHED) {
		pthread_unlock(&(joinee->lock));
		enable_preemption();
		return EINVAL;
	}
	pthread_unlock(&(joinee->lock));
	enable_preemption();

	/*
	 * Use a mutex here. This avoids specialized handling in the cancel
	 * and signal code. It works because the "dead" flag is independent,
	 * protected by a spinning mutex in the reaper code.
	 */
	pthread_mutex_lock(&joinee->mutex);
	while (!joinee->dead) {
		/*
		 * join must be called with cancelation DEFERRED!
		 * Honor a pending cancel before each wait.
		 */
		pthread_testcancel();

		/* Reaper signals this condition when the joinee dies. */
		pthread_cond_wait(&joinee->cond, &joinee->mutex);
	}

	/*
	 * Not allowed to detach the target thread if this thread is canceled.
	 */
	pthread_testcancel();

	disable_preemption();
	/* Hand the exit value back to the caller, if requested. */
	if (status)
		*status = (void *) joinee->exitval;

	pthread_mutex_unlock(&joinee->mutex);
	/* Reap the joinee; its structure is invalid after this call. */
	pthread_destroy_internal(joinee);
	enable_preemption();

	return 0;
}
Example #3
0
/*
 * Entry point executed by every newly created thread.
 */
void
pthread_start_thread(pthread_thread_t *pthread)
{
	/* The per-CPU current-thread pointer is unset on entry; set it. */
	SETCURPTHREAD(pthread);

	/* New threads are handed off with their schedlock held; drop it. */
	pthread_unlock(&pthread->schedlock);

	/*
	 * Interrupts and preemption both start out blocked; turn them
	 * back on before running user code.
	 */
	enable_interrupts();
	enable_preemption();

	DPRINTF("(%d): P:%p, T:%d, F:%p A:%p\n", THISCPU,
		pthread, (int) pthread->tid, pthread->func, pthread->cookie);

	/*
	 * Run the user function; whatever it returns is handed to
	 * pthread_exit as the thread's exit value.
	 */
	pthread_exit((*pthread->func)(pthread->cookie));

	/* NOTREACHED */
}
Example #4
0
/*
 * CCM membership event callback.  Caches the latest membership event
 * and data in the module globals (__ccm_cookie, __ccm_event,
 * __ccm_data) for the synchronous API calls and saClmDispatch.
 */
static void
ccm_events(oc_ed_t event, void *cookie, size_t size, const void *data)
{
	pthread_lock();

	/* dereference old cache */
	if (__ccm_cookie)
		oc_ev_callback_done(__ccm_cookie);

	/*
	 * Always record the event -- even EVICTED/NOT_PRIMARY -- because
	 * saClmDispatch inspects __ccm_event to report eviction.
	 */
	__ccm_cookie = cookie;
	__ccm_event = event;
	__ccm_data = (const oc_ev_membership_t *)data;

#if CLM_DEBUG
	/*
	 * Fix: print the pointer with %p; the old `(unsigned int)data`
	 * with %x truncates the pointer on LP64 platforms and mismatches
	 * the format specifier (undefined behavior).
	 */
	cl_log(LOG_DEBUG, "__ccm_data = <%p>", data);
#endif
	pthread_unlock();

	if (event == OC_EV_MS_EVICTED || event == OC_EV_MS_NOT_PRIMARY
	||	event == OC_EV_MS_PRIMARY_RESTORED) {
		/* We do not care about this info */
		return;
	}

	if (!data) {
		cl_log(LOG_ERR, "CCM event callback return NULL data");
		return;
	}

	/*
	 * Note: No need to worry about the buffer free problem, OCF
	 * callback mechanism did this for us.
	 */
}
Example #5
0
/*
 * Synchronously look up cluster node `nodeId`, waiting up to `timeout`
 * polls (one second each) for the CCM membership cache to appear.
 * NOTE(review): SaTimeT is nominally nanoseconds in the SAF spec, but
 * this implementation polls `timeout` times with sleep(1) -- confirm
 * the intended unit with callers.
 */
SaErrorT 
saClmClusterNodeGet(SaClmNodeIdT nodeId, SaTimeT timeout,
                    SaClmClusterNodeT *clusterNode)
{
	int i;
	SaErrorT ret;

	if (!clusterNode) {
		cl_log(LOG_ERR, "Invalid parameter clusterNode <%p>"
		,	clusterNode);
		return SA_ERR_INVALID_PARAM;
	}
	/* Poll until the CCM event callback has populated __ccm_data. */
	for (i = 0; i < timeout; i++) {
		if (__ccm_data){
			break;
		}
		sleep(1);
	}
	/*
	 * Fix: test the condition we actually waited for, not the loop
	 * counter.  The old `i == timeout` check reported SA_ERR_TIMEOUT
	 * when timeout == 0 even though __ccm_data was already available.
	 */
	if (!__ccm_data){
		return SA_ERR_TIMEOUT;
	}

	/* Serialize against ccm_events() updating the cached membership. */
	pthread_lock();
	ret = retrieve_node_buffer(nodeId, clusterNode);
	pthread_unlock();
	return ret;
}
Example #6
0
/*
 * Priority inheritance entrypoint. Donate CPU time to a thread being
 * waited for.
 *
 * The caller is queued on `queue` (if given), `plock` is released (if
 * given), and the CPU is handed to `waiting_on` when possible.
 * NOTE(review): assumes the caller holds its own pthread lock on entry
 * (it is passed to pthread_sched_reschedule below) -- confirm against
 * the callers.
 */
void
pthread_sched_thread_wait(pthread_thread_t *waiting_on,
			  queue_head_t *queue, pthread_lock_t *plock)
{
	pthread_thread_t	*pthread = CURPTHREAD();
	int			enabled;

	/* Must not be interrupted while manipulating scheduler state. */
	enabled = save_disable_interrupts();
	
	/* Park the current thread on the caller-supplied wait queue. */
	if (queue)
		queue_enter(queue, pthread, pthread_thread_t *, chain);

	/*
	 * No thread locking, but need to release the provided lock
	 * in case it was built with locks enabled.
	 */
	if (plock)
		pthread_unlock(plock);
	
/*	cpuprintf("pthread_sched_thread_wait: h:0x%x(%d) p:0x%x(%d)\n",
		  (int) waiting_on, waiting_on->tid,
		  (int) pthread, pthread->tid); */

	/*
	 * Try for the donation. If it fails, fall back to a simple
	 * reschedule. Note that the pthread lock is still locked.
	 */
	if (!pthread_sched_thread_donate(waiting_on, WAKEUP_NEVER, 0))
		pthread_sched_reschedule(RESCHED_BLOCK, &pthread->lock);

	restore_interrupt_enable(enabled);
}
Example #7
0
/*
 * Start cluster membership tracking on the handle.  Records the track
 * flags and notification buffer; if SA_TRACK_CURRENT is requested, the
 * current membership is delivered immediately via the track callback
 * (and the flag is cleared, so it is one-shot).
 */
SaErrorT
saClmClusterTrackStart(const SaClmHandleT *clmHandle,
                       SaUint8T trackFlags,
                       SaClmClusterNotificationT *notificationBuffer,
                       SaUint32T numberOfItems)
{
	__clm_handle_t *hd = GET_CLM_HANDLE(clmHandle);

	if (!hd){
		return SA_ERR_BAD_HANDLE;
	}

	/*
	 * Fix: tracking delivers results exclusively through the track
	 * callback (below and in saClmDispatch); fail early instead of
	 * calling through a NULL function pointer later.
	 */
	if (!hd->callbacks.saClmClusterTrackCallback) {
		cl_log(LOG_ERR, "saClmClusterTrackCallback not registered");
		return SA_ERR_INIT;
	}

	hd->trackflags = trackFlags;
	hd->itemnum = numberOfItems;
	hd->nbuf = notificationBuffer;

	if (trackFlags & SA_TRACK_CURRENT) {
		const oc_ev_membership_t *oc;
		SaUint32T itemnum;
		
		/* Clear SA_TRACK_CURRENT, it's no use since now. */
		hd->trackflags &= ~SA_TRACK_CURRENT;

		if (__ccm_data == NULL) {
			return SA_ERR_LIBRARY;
		}
		
		/* NOTE(review): oc fields are read without pthread_lock()
		 * here; a concurrent ccm_events() could swap __ccm_data.
		 * Confirm the dispatch threading model before relying on
		 * this. */
		oc = __ccm_data;
		itemnum = oc->m_n_member;
		if (itemnum > numberOfItems) {
			/* Caller's buffer is too small; report NO_SPACE. */
			hd->callbacks.saClmClusterTrackCallback(hd->nbuf
			,	hd->itemnum, oc->m_n_member, oc->m_instance
			,	SA_ERR_NO_SPACE);
			return SA_OK;
		}
		pthread_lock();
		retrieve_current_buffer(hd);
		pthread_unlock();
		hd->callbacks.saClmClusterTrackCallback(hd->nbuf, itemnum
		,	oc->m_n_member, oc->m_instance, SA_OK);
		return SA_OK;
	}

	return SA_OK;
}
Example #8
0
/*
 * Generic switch code. Find a new thread to run and switch to it.
 * Returns whatever pthread_sched_dispatch reports.
 */
int
pthread_sched_reschedule(resched_flags_t reason, pthread_lock_t *plock)
{
	int	intr_state;
	int	result;

	/* Scheduler state must not be touched with interrupts on. */
	intr_state = save_disable_interrupts();

	/*
	 * There is no thread locking here, but a caller-supplied lock
	 * must be dropped in case it was built with locks enabled.
	 */
	if (plock != NULL)
		pthread_unlock(plock);

	result = pthread_sched_dispatch(reason);

	restore_interrupt_enable(intr_state);
	return result;
}
Example #9
0
/*
 * Remove a Named Semaphore
 */
int
oskit_sem_unlink(const char *name)
{
	int i;
	sem_t *sem;

	/* Lock the semaphore name space */
	pthread_mutex_lock(&semn_space.semn_lock);

	/* Unlink the specified queue from the queue name space */
	for (i = 0 ; i < semn_space.semn_arraylen ; i++) {
		if (semn_space.semn_array[i]
		    && (strcmp(semn_space.semn_array[i]->sem_name, name)
			== 0)) {
			/* Found */
			sem = semn_space.semn_array[i];
			/* Lock the semaphore */
			pthread_lock(&sem->sem_lock);
			if (sem->sem_refcount != 0) {
				sem->sem_flag |= SEM_UNLINK_FLAG;
				pthread_unlock(&sem->sem_lock);
			} else {
				sem_remove(sem);
			}
			/*
			 * Unlink the association between the semaphore
			 * name space to the semaphore.
			 */
			semn_space.semn_array[i] = NULL;
			pthread_mutex_unlock(&semn_space.semn_lock);
			return 0;
		}
	}

	/* Unlock the queue name space */
	pthread_mutex_unlock(&semn_space.semn_lock);
	return ENOENT;
}
Example #10
0
SaErrorT
saClmDispatch(const SaClmHandleT *clmHandle, 
              SaDispatchFlagsT dispatchFlags)
{
	int ret;
	const oc_ev_membership_t *oc;
	uint itemnum;
	__clm_handle_t *hd = GET_CLM_HANDLE(clmHandle);

	if (!hd){
		return SA_ERR_BAD_HANDLE;
	}

	if ((ret = oc_ev_handle_event(hd->ev_token)) != 0) {
		if (ret == EINVAL){
			return SA_ERR_BAD_HANDLE;
		}

		/* else we must be evicted */
	}

	/* We did not lock for read here because other writers will set it
	 * with the same value (if there really exist some). Otherwise we
	 * need to lock here.
	 */
	if (__ccm_event == OC_EV_MS_EVICTED) {
		cl_log(LOG_WARNING
		,	"This node is evicted from the current partition!");
		return SA_ERR_LIBRARY;
	}
	if (__ccm_event == OC_EV_MS_NOT_PRIMARY
	||	__ccm_event == OC_EV_MS_PRIMARY_RESTORED) {
		cl_log(LOG_DEBUG, "Received not interested event [%d]"
		,	__ccm_event);
		return SA_OK;
	}
	if (!__ccm_data){
		return SA_ERR_INIT;
	}

	oc = __ccm_data;

	if(CLM_TRACK_STOP == hd->trackflags){
		return SA_OK;
	}

	/* SA_TRACK_CURRENT is cleared in saClmClusterTrackStart, hence we 
	 * needn't to deal with it now*/
	if (hd->trackflags & SA_TRACK_CHANGES) {
		itemnum = oc->m_n_member + oc->m_n_out;
		if (itemnum > hd->itemnum) {
			hd->callbacks.saClmClusterTrackCallback(hd->nbuf
			,	hd->itemnum, oc->m_n_member, oc->m_instance
			,	SA_ERR_NO_SPACE);
			return SA_OK;
		}
		pthread_lock();
		retrieve_changes_buffer(hd);
		pthread_unlock();
		hd->callbacks.saClmClusterTrackCallback(hd->nbuf, itemnum
		,	oc->m_n_member, oc->m_instance, SA_OK);
	} else if (hd->trackflags & SA_TRACK_CHANGES_ONLY) {
		itemnum = oc->m_n_in + oc->m_n_out;
		if (itemnum > hd->itemnum) {
			hd->callbacks.saClmClusterTrackCallback(hd->nbuf
			,	hd->itemnum, oc->m_n_member, oc->m_instance
			,	SA_ERR_NO_SPACE);
			return SA_OK;
		}
		pthread_lock();
		retrieve_changes_only_buffer(hd);
		pthread_unlock();
		hd->callbacks.saClmClusterTrackCallback(hd->nbuf, itemnum
		,	oc->m_n_member, oc->m_instance, SA_OK);

	} else {
		assert(0);
	}
	/* unlock */

	return SA_OK;
}