/* Example #1 */
/*
doc:	<routine name="rt_processor_subscribe_wait_condition" return_type="int" export="shared">
doc:		<summary> Register for a notification when the memory region handled by processor 'self' may have changed.
doc:			This is used to implement wait condition change signalling.
doc:			The registration is only valid for a single notification and will be deleted by 'self' afterwards.
doc:			Note: This feature is executed by the 'client' processor (i.e. thread), and can only be called when the
doc:			supplier 'self' is synchronized with the client. </summary>
doc:		<param name="self" type="struct rt_processor*"> The processor that will send the notification in the future. Must not be NULL. </param>
doc:		<param name="client" type="struct rt_processor*"> The processor interested in wait condition changes. Must not be NULL. </param>
doc:		<return> T_OK on success. T_NO_MORE_MEMORY in case of a memory allocation failure. </return>
doc:		<thread_safety> Not safe. </thread_safety>
doc:		<synchronization> Only call when 'self' is synchronized with 'client'. </synchronization>
doc:	</routine>
*/
rt_shared int rt_processor_subscribe_wait_condition (struct rt_processor* self, struct rt_processor* client)
{
	int result;
#ifdef EIF_ASSERTIONS
	struct rt_private_queue* pq = NULL; /* Only used by the precondition checks below. */
#endif
	REQUIRE ("self_not_null", self);
	REQUIRE ("client_not_null", client);
	REQUIRE ("queue_available", T_OK == rt_queue_cache_retrieve (&client->cache, self, &pq));
	REQUIRE ("synchronized", rt_private_queue_is_synchronized (pq));

		/* Record 'client' in the supplier's one-shot notification list.
		 * The supplier removes the entry again when it sends the notification. */
	result = subscriber_list_t_extend (&self->wait_condition_subscribers, client);
	return result;
}
/*
doc:	<routine name="rt_request_group_wait" return_type="int" export="shared">
doc:		<summary>
doc:			Release all locks and wait for a change notification from any processor in the group.
doc:			This feature is usually called after a wait condition fails.
doc:			It can only be called when the request group is locked.
doc:			Note: The wait() operation is blocking! </summary>
doc:		<param name="self" type="struct rt_request_group*"> The request group struct. Must not be NULL. </param>
doc:		<return> T_OK on success. T_NO_MORE_MEMORY if memory allocation fails, in which case the request group remains locked. </return>
doc:		<thread_safety> Not safe. </thread_safety>
doc:		<synchronization> None. </synchronization>
doc:		<fixme> Instead of unlocking normally after the wait condition, we could have a special 'unlock-after-wait-condition-failure'.
doc:			That way we can avoid sending unnecessary notifications after the evaluation of a wait condition. </fixme>
doc:	</routine>
*/
rt_shared int rt_request_group_wait (struct rt_request_group* self)
{
	size_t i, l_count;
	struct rt_processor* l_client;
	int error = T_OK;

		/* Check the preconditions BEFORE touching 'self': the original code
		 * dereferenced 'self' in the initializers above, which made the
		 * "self_not_null" check useless (undefined behavior on NULL would
		 * already have occurred). */
	REQUIRE ("self_not_null", self);
	REQUIRE ("sorted", self->is_sorted);
	REQUIRE ("locked", self->is_locked);

	l_count = rt_request_group_count (self);
	l_client = self->client;

		/* Register the current client with the suppliers, such that we
		 * can later get a notification if a wait condition may have changed. */
	for (i = 0; i < l_count; ++i) {
		struct rt_private_queue* l_queue = rt_request_group_item (self, i);

			/* We only register on queues which are currently synchronized.
			 * Those are the ones that have executed a query during the wait
			 * condition, and thus the only ones that matter.
			 * Moreover, because the suppliers are currently synchronized, we
			 * know that they cannot access their notification queue at the
			 * moment, so we can safely modify the list from this thread. */
		if (rt_private_queue_is_synchronized (l_queue)) {
			error = rt_private_queue_register_wait (l_queue, l_client);

				/* We bail out if we can't register for a wait condition change.
				 * The request group stays locked in this case (see contract). */
			if (error != T_OK) {
				return error;
			}
		}
	}

		/* Inform the GC that we're about to be blocked. */
	EIF_ENTER_C;

		/* Before we unlock the synchronized queues, we have to acquire the
		 * lock to our condition variable mutex. This has to happen before
		 * rt_request_group_unlock to avoid missed signals. */
	RT_TRACE (eif_pthread_mutex_lock (l_client->wait_condition_mutex));

		/* Release the locks on the suppliers. After this statement they can
		 * execute calls from other processors and signal back a wait condition
		 * change. If we wouldn't hold the lock acquired in the previous step,
		 * we might miss those signals and thus remain stuck in a wait condition
		 * forever. */
	rt_request_group_unlock (self);

		/* Now we perform the blocking wait on our condition.
		 * This also releases the mutex, such that our suppliers may send signals to it.
		 * Note: Usually these wait operations are performed inside a loop that checks whether
		 * the wait condition became true. Our loop is compiler-generated however,
		 * that's why we don't see it here. */
	RT_TRACE (eif_pthread_cond_wait (l_client->wait_condition, l_client->wait_condition_mutex));

		/* After the wakeup signal, we can release the mutex.
		 * We're not interested in any further signals, as we re-register anyway if the
		 * wait condition fails again. */
	RT_TRACE (eif_pthread_mutex_unlock (l_client->wait_condition_mutex));

		/* Synchronize with the GC again. */
	EIF_EXIT_C;
	RTGC;

		/* Note: We do not clean up the registrations here, because it would involve
		 * unnecessary locking and a risk of deadlocks. Instead, the suppliers delete
		 * our registration during notification, and the GC will clean up any leftover registrations. */
	ENSURE ("not_locked", !self->is_locked);
	return error;
}