/* doc: <routine name="rt_processor_request_group_stack_remove" return_type="void" export="shared"> doc: <summary> Remove the last 'count' elements from the group stack and free any resources. Note: This feature also performs an unlock operation. </summary> doc: <param name="self" type="struct rt_processor*"> The processor that owns the group stack. Must not be NULL. </param> doc: <param name="count" type="size_t"> The number of elements to be removed. </param> doc: <thread_safety> Not safe. </thread_safety> doc: <synchronization> None. </synchronization> doc: </routine> */ rt_shared void rt_processor_request_group_stack_remove (struct rt_processor* self, size_t count) { struct rt_request_group* l_last = NULL; size_t i; REQUIRE ("self_not_null", self); REQUIRE ("enough_elements", request_group_stack_t_count (&self->request_group_stack) >= count); for (i = 0; i < count; ++i) { l_last = rt_processor_request_group_stack_last (self); rt_request_group_unlock (l_last, EIF_FALSE); rt_request_group_deinit (l_last); request_group_stack_t_remove_last (&self->request_group_stack); } }
/* doc: <routine name="rt_request_group_wait" return_type="int" export="shared"> doc: <summary> doc: Release all locks and wait for a change notification from any processor in the group. doc: This feature is usually called after a wait condition fails. doc: It can only be called when the request group is locked. doc: Note: The wait() operation is blocking! </summary> doc: <param name="self" type="struct rt_request_group*"> The request group struct. Must not be NULL. </param> doc: <return> T_OK on success. T_NO_MORE_MEMORY if memory allocation fails, in which case the request group remains locked. </return> doc: <thread_safety> Not safe. </thread_safety> doc: <synchronization> None. </synchronization> doc: <fixme> Instead of unlocking normally after the wait condition, we could have a special 'unlock-after-wait-condition-failure'. doc: That way we can avoid sending unnecessary notifications after the evaluation of a wait condition. </fixme> doc: </routine> */ rt_shared int rt_request_group_wait (struct rt_request_group* self) { size_t i, l_count = rt_request_group_count (self); struct rt_processor* l_client = self->client; int error = T_OK; REQUIRE ("self_not_null", self); REQUIRE ("sorted", self->is_sorted); REQUIRE ("locked", self->is_locked); /* Register the current client with the suppliers, such that we * can later get a notification if a wait condition may have changed. */ for (i = 0; i < l_count; ++i) { struct rt_private_queue* l_queue = rt_request_group_item (self, i); /* We only register on queues which are currently synchronized. * Those are the ones that have executed a query during the wait * condition, and thus the only ones that matter. * Moreover, because the suppliers are currently synchronized, we * know that they cannot access their notification queue at the * moment, so we can safely modify the list from this thread. */ if (rt_private_queue_is_synchronized (l_queue)) { error = rt_private_queue_register_wait (l_queue, l_client); /* We bail out if we can't register for a wait condition change. */ if (error != T_OK) { return error; } } } /* Inform the GC that we're about to be blocked. */ EIF_ENTER_C; /* Before we unlock the synchronized queues, we have to acquire the * lock to our condition variable mutex. This has to happen before * rt_request_group_unlock to avoid missed signals. */ RT_TRACE (eif_pthread_mutex_lock (l_client->wait_condition_mutex)); /* Release the locks on the suppliers. After this statement they can * execute calls from other processors and signal back a wait condition * change. If we wouldn't hold the lock acquired in the previous step, * we might miss those signals and thus remain stuck in a wait condition * forever. */ rt_request_group_unlock (self); /* Now we perform the blocking wait on our condition. * This also releases the mutex, such that our suppliers may send signals to it. * Note: Usually these wait operations are performed inside a loop that checks whether * the wait condition became true. Our loop is compiler-generated however, * that's why we don't see it here. */ RT_TRACE (eif_pthread_cond_wait (l_client->wait_condition, l_client->wait_condition_mutex)); /* After the wakeup signal, we can release the mutex. * We're not interested in any further signals, as we re-register anyway if the * wait condition fails again. */ RT_TRACE (eif_pthread_mutex_unlock (l_client->wait_condition_mutex)); /* Synchronize with the GC again. 
*/ EIF_EXIT_C; RTGC; /* Note: We do not clean up the registrations here, because it would involve * unnecessary locking and a risk of deadlocks. Instead, the suppliers delete * our registration during notification, and the GC will clean up any leftover registrations. */ ENSURE ("not_locked", !self->is_locked); return error; }
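
/* Illustrative sketch (guarded by '#if 0', never compiled): the compiler-generated
 * retry loop mentioned in the comments above. 'example_wait_loop' and
 * 'wait_condition_holds' are hypothetical placeholders for generated code, and
 * rt_request_group_lock is assumed to be the runtime's locking counterpart. */
#if 0
static int example_wait_loop (struct rt_request_group* l_group)
{
	int error = T_OK;

	rt_request_group_lock (l_group);
	while (!wait_condition_holds (l_group)) {
			/* Releases all locks and blocks until a supplier signals a change.
			 * On success the group is unlocked afterwards; on T_NO_MORE_MEMORY
			 * it remains locked and we propagate the error. */
		error = rt_request_group_wait (l_group);
		if (error != T_OK) {
			return error;
		}
		rt_request_group_lock (l_group);
	}
		/* The wait condition holds and the group is locked: execute the body here. */
	return error;
}
#endif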