Example #1
/*
doc:	<routine name="rt_processor_new_private_queue" return_type="int" export="shared">
doc:		<summary> Create a new private queue whose supplier is this processor. </summary>
doc:		<param name="self" type="struct rt_processor*"> The processor object. Must not be NULL. </param>
doc:		<param name="result" type="struct rt_private_queue**"> A pointer to the location where the result shall be stored. Must not be NULL. </param>
doc:		<return> T_OK on success. T_NO_MORE_MEMORY or a mutex creation error code when a resource could not be allocated. </return>
doc:		<thread_safety> Safe. </thread_safety>
doc:		<synchronization> None required. </synchronization>
doc:	</routine>
*/
int rt_processor_new_private_queue (struct rt_processor* self, struct rt_private_queue** result)
{
	int error = T_OK;
	struct rt_private_queue* l_queue = NULL;

	REQUIRE ("self_not_nul", self);
	REQUIRE ("result_not_null", result);

	l_queue = (struct rt_private_queue*) malloc (sizeof (struct rt_private_queue));

	if (l_queue) {
		error = rt_private_queue_init (l_queue, self);

		if (T_OK == error) {

			RT_TRACE (eif_pthread_mutex_lock (self->generated_private_queues_mutex));

			error = private_queue_list_t_extend (&self->generated_private_queues, l_queue);

			RT_TRACE (eif_pthread_mutex_unlock (self->generated_private_queues_mutex));
		}

		if (T_OK == error) {
			*result = l_queue;
		} else {
				/* An error occurred. Free allocated resources and return. */
			rt_private_queue_deinit (l_queue);
			free (l_queue);
		}

	} else {
		error = T_NO_MORE_MEMORY;
	}
	return error;
}
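A minimal usage sketch for the routine above, assuming only the API shown here: a hypothetical caller asks a supplier processor for a private queue and propagates the T_OK / error-code convention. The helper name is illustrative and not part of the runtime.

/* Hypothetical caller of rt_processor_new_private_queue. */
static int example_acquire_private_queue (struct rt_processor* a_supplier)
{
	struct rt_private_queue* l_queue = NULL;
	int error = rt_processor_new_private_queue (a_supplier, &l_queue);

	if (T_OK == error) {
			/* The new queue can now be used to log calls to 'a_supplier'. */
	}
	return error;
}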
Example #2
/*
doc:	<routine name="rt_processor_publish_wait_condition" return_type="void" export="shared">
doc:		<summary> Notify all processors in the 'self->wait_condition_subscribers' vector that a wait condition has changed. </summary>
doc:		<param name="self" type="struct rt_processor*"> The processor with the subscribers list. Must not be NULL. </param>
doc:		<thread_safety> Not safe. </thread_safety>
doc:		<synchronization> The feature rt_processor_subscribe_wait_condition must only be called when the thread executing 'self' is synchronized with a client.
doc:			This ensures that rt_processor_publish_wait_condition cannot be executed at the same time. </synchronization>
doc:	</routine>
*/
rt_shared void rt_processor_publish_wait_condition (struct rt_processor* self)
{
	struct subscriber_list_t* subscribers = NULL;

	REQUIRE ("self_not_null", self);

	subscribers = &self->wait_condition_subscribers;

	while (0 != subscriber_list_t_count (subscribers)) {

		struct rt_processor* item = subscriber_list_t_last (subscribers);
		subscriber_list_t_remove_last (subscribers);

		if (item) {
				/* Lock the registered processor's condition variable mutex. */
			RT_TRACE (eif_pthread_mutex_lock (item->wait_condition_mutex));

				/* Send a signal. */
			RT_TRACE (eif_pthread_cond_signal (item->wait_condition));

				/* Release the lock. */
			RT_TRACE (eif_pthread_mutex_unlock (item->wait_condition_mutex));
		}
	}
}
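The routine above is the signalling half of a simple condition-variable protocol. Below is a sketch of the subscriber side, assuming the same 'wait_condition' / 'wait_condition_mutex' fields used above; the predicate wait_condition_has_changed() is a placeholder for whatever check the waiting processor actually performs.

/* Hypothetical waiter: hold the mutex while re-checking the condition, so the
 * signal sent by rt_processor_publish_wait_condition cannot be missed between
 * the check and the wait. */
static void example_wait_for_wait_condition (struct rt_processor* self)
{
	RT_TRACE (eif_pthread_mutex_lock (self->wait_condition_mutex));
	while (!wait_condition_has_changed (self)) {
		RT_TRACE (eif_pthread_cond_wait (self->wait_condition, self->wait_condition_mutex));
	}
	RT_TRACE (eif_pthread_mutex_unlock (self->wait_condition_mutex));
}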
/*
doc:	<routine name="rt_request_group_lock" return_type="void" export="shared">
doc:		<summary> Lock all processors in the request group. </summary>
doc:		<param name="self" type="struct rt_request_group*"> The request group struct. Must not be NULL. </param>
doc:		<thread_safety> Not safe. </thread_safety>
doc:		<synchronization> None. </synchronization>
doc:	</routine>
*/
rt_shared void rt_request_group_lock (struct rt_request_group* self)
{
    size_t i, l_count;

    REQUIRE ("self_not_null", self);
    REQUIRE ("not_locked", !self->is_locked);

    l_count = rt_request_group_count (self);

    /* We first need to sort the array based on the ID of the supplier processor.
     * Globally, this avoids deadlocks and enables the "atomic locking"
     * guarantee for multiple arguments. */
    if (!self->is_sorted) {

        /* The array is usually very small (1 to 5 items), so a
         * lightweight bubble sort is probably the most efficient choice. */
        bubble_sort (self->area, self->count);
        self->is_sorted = 1;
    }

    /* Temporarily lock the queue-of-queues of all suppliers. */
    for (i = 0; i < l_count; ++i) {
        struct rt_processor* l_supplier = rt_request_group_item (self, i)->supplier;
        RT_TRACE (eif_pthread_mutex_lock (l_supplier->queue_of_queues_mutex));
    }

    /* Add all private queues to the queue-of-queues. */
    for (i = 0; i < l_count; ++i) {
        rt_private_queue_lock (rt_request_group_item (self, i), self->client);
    }

    /* Release the queue-of-queues locks. */
    for (i = 0; i < l_count; ++i) {
        struct rt_processor* l_supplier = rt_request_group_item (self, i)->supplier;
        RT_TRACE (eif_pthread_mutex_unlock (l_supplier->queue_of_queues_mutex));
    }

    /* Synchronize with all passive processors. */
    for (i = 0; i < l_count; ++i) {
        struct rt_private_queue* l_queue = rt_request_group_item (self, i);
        if (l_queue->supplier->is_passive_region) {
            rt_private_queue_synchronize (l_queue, self->client);
        }
    }

    self->is_locked = 1;

    ENSURE ("sorted", self->is_sorted);
    ENSURE ("locked", self->is_locked);
}
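The bubble_sort() call above is not shown in this excerpt. Here is a sketch of what it could look like, assuming the request group's 'area' is an array of private queues and that each supplier exposes a numeric identifier (called 'pid' here; the real field name may differ): an in-place bubble sort ordered by supplier ID, which is cheap for the 1-to-5-item arrays mentioned in the comment.

/* Hypothetical implementation of the sorting step used by rt_request_group_lock. */
static void example_bubble_sort (struct rt_private_queue** area, size_t count)
{
	size_t i, j;

	for (i = 0; i + 1 < count; ++i) {
		for (j = 0; j + 1 < count - i; ++j) {
			if (area[j]->supplier->pid > area[j + 1]->supplier->pid) {
				struct rt_private_queue* tmp = area[j];
				area[j] = area[j + 1];
				area[j + 1] = tmp;
			}
		}
	}
}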
/* Write a formatted error message to `err' and, when possible, append the same
 * message to the `exception_trace.log' file in the starting working directory.
 * Returns the result of the last vfprintf call. */
int print_err_msg (FILE *err, char *StrFmt, ...)
{
	va_list ap;
	int r;
	FILE *exception_saved;
	char saved_cwd [PATH_MAX + 1];

	eif_show_console ();

		/* Write error to `err'. */
	va_start (ap, StrFmt);
	r = vfprintf (err, StrFmt, ap);
	va_end (ap);

		/* Now try to write error into `exception_trace.log' file */
#ifdef EIF_THREADS
		/* FIXME: This is not thread safe at all. */
	if (!eif_exception_trace_mutex) {
		RT_TRACE(eif_pthread_mutex_create(&eif_exception_trace_mutex));
	}

	if (eif_exception_trace_mutex) {
		RT_TRACE(eif_pthread_mutex_lock(eif_exception_trace_mutex));
	}
#endif

	getcwd(saved_cwd, PATH_MAX);
	chdir (starting_working_directory);

		/* If we are not allowed to write the exception, we don't do it */
	if ((exception_saved = fopen( "exception_trace.log", "at" )) != NULL) {
		va_start (ap, StrFmt);
		r = vfprintf (exception_saved, StrFmt, ap);
		va_end (ap);
		fclose (exception_saved);
	}
	chdir (saved_cwd);

#ifdef EIF_THREADS
	if (eif_exception_trace_mutex) {
		RT_TRACE(eif_pthread_mutex_unlock(eif_exception_trace_mutex));
	}
#endif
	return r;
}
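An illustrative call site for print_err_msg, assuming nothing beyond the signature above; the format string and arguments follow the usual printf conventions, and stderr is just one possible target stream. The wrapper name and its parameters are placeholders.

/* Hypothetical call site. */
static void example_report_open_failure (const char* a_file_name, int a_errno)
{
	print_err_msg (stderr, "Cannot open '%s' (errno %d).\n", a_file_name, a_errno);
}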
/*
doc:	<routine name="rt_request_group_wait" return_type="int" export="shared">
doc:		<summary>
doc:			Release all locks and wait for a change notification from any processor in the group.
doc:			This feature is usually called after a wait condition fails.
doc:			It can only be called when the request group is locked.
doc:			Note: The wait() operation is blocking! </summary>
doc:		<param name="self" type="struct rt_request_group*"> The request group struct. Must not be NULL. </param>
doc:		<return> T_OK on success. T_NO_MORE_MEMORY if memory allocation fails, in which case the request group remains locked. </return>
doc:		<thread_safety> Not safe. </thread_safety>
doc:		<synchronization> None. </synchronization>
doc:		<fixme> Instead of unlocking normally after the wait condition, we could have a special 'unlock-after-wait-condition-failure'.
doc:			That way we can avoid sending unnecessary notifications after the evaluation of a wait condition. </fixme>
doc:	</routine>
*/
rt_shared int rt_request_group_wait (struct rt_request_group* self)
{
    size_t i, l_count;
    struct rt_processor* l_client = NULL;
    int error = T_OK;

    REQUIRE ("self_not_null", self);
    REQUIRE ("sorted", self->is_sorted);
    REQUIRE ("locked", self->is_locked);

    l_count = rt_request_group_count (self);
    l_client = self->client;

    /* Register the current client with the suppliers, such that we
     * can later get a notification if a wait condition may have changed. */
    for (i = 0; i < l_count; ++i) {
        struct rt_private_queue* l_queue = rt_request_group_item (self, i);

        /* We only register on queues which are currently synchronized.
         * Those are the ones that have executed a query during the wait
         * condition, and thus the only ones that matter.
         * Moreover, because the suppliers are currently synchronized, we
         * know that they cannot access their notification queue at the
         * moment, so we can safely modify the list from this thread. */
        if (rt_private_queue_is_synchronized (l_queue)) {
            error = rt_private_queue_register_wait (l_queue, l_client);

            /* We bail out if we can't register for a wait condition change. */
            if (error != T_OK) {
                return error;
            }
        }
    }

    /* Inform the GC that we're about to be blocked. */
    EIF_ENTER_C;

    /* Before we unlock the synchronized queues, we have to acquire the
     * lock to our condition variable mutex. This has to happen before
     * rt_request_group_unlock to avoid missed signals. */
    RT_TRACE (eif_pthread_mutex_lock (l_client->wait_condition_mutex));

    /* Release the locks on the suppliers. After this statement they can
     * execute calls from other processors and signal back a wait condition
     * change. If we did not hold the lock acquired in the previous step,
     * we might miss those signals and thus remain stuck in a wait condition
     * forever. */
    rt_request_group_unlock (self);

    /* Now we perform the blocking wait on our condition variable.
     * This also releases the mutex, so that our suppliers can acquire it and signal us.
     * Note: Usually such wait operations are performed inside a loop that checks whether
     * the wait condition became true. Here that loop is compiler-generated,
     * which is why it is not visible in this code. */
    RT_TRACE (eif_pthread_cond_wait (l_client->wait_condition, l_client->wait_condition_mutex));

    /* After the wakeup signal, we can release the mutex.
     * We're not interested in any further signals, as we re-register anyway if the
     * wait condition fails again. */
    RT_TRACE (eif_pthread_mutex_unlock (l_client->wait_condition_mutex));

    /* Synchronize with the GC again. */
    EIF_EXIT_C;
    RTGC;

    /* Note: We do not clean up the registrations here, because it would involve
     * unnecessary locking and a risk of deadlocks. Instead, the suppliers delete
     * our registration during notification, and the GC will clean up any leftover registrations. */
    ENSURE ("not_locked", !self->is_locked);
    return error;
}
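The comment before eif_pthread_cond_wait notes that the retry loop around rt_request_group_wait is compiler-generated and therefore not visible in this file. Below is a conceptual sketch of that caller-side pattern, assuming only the API shown above; wait_condition_holds() is a placeholder for the generated wait-condition check.

/* Hypothetical caller-side wait-until loop. */
static int example_wait_until (struct rt_request_group* a_group)
{
	int error = T_OK;

	rt_request_group_lock (a_group);
	while (T_OK == error && !wait_condition_holds (a_group)) {
			/* rt_request_group_wait releases the locks, blocks until a supplier
			 * publishes a wait condition change, and leaves the group unlocked
			 * on success, so we re-lock before checking the condition again. */
		error = rt_request_group_wait (a_group);
		if (T_OK == error) {
			rt_request_group_lock (a_group);
		}
	}
	return error;
}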