void
sgen_gray_object_alloc_queue_section (SgenGrayQueue *queue)
{
	GrayQueueSection *section;

	HEAVY_STAT (stat_gray_queue_section_alloc ++);

	if (queue->alloc_prepare_func)
		queue->alloc_prepare_func (queue);

	if (queue->free_list) {
		/* Use the previously allocated queue sections if possible */
		section = queue->free_list;
		queue->free_list = section->next;
		STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_FREE_LIST, GRAY_QUEUE_SECTION_STATE_FLOATING);
	} else {
		/* Allocate a new section */
		section = (GrayQueueSection *)sgen_alloc_internal (INTERNAL_MEM_GRAY_QUEUE);
		STATE_SET (section, GRAY_QUEUE_SECTION_STATE_FLOATING);
	}

	section->size = SGEN_GRAY_QUEUE_SECTION_SIZE;

	STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_FLOATING, GRAY_QUEUE_SECTION_STATE_ENQUEUED);

	/* Link it with the others */
	section->next = queue->first;
	queue->first = section;
	queue->cursor = section->entries - 1;
}
void
sgen_gray_object_enqueue_section (SgenGrayQueue *queue, GrayQueueSection *section, gboolean is_parallel)
{
	STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_FLOATING, GRAY_QUEUE_SECTION_STATE_ENQUEUED);

	if (queue->first)
		queue->first->size = queue->cursor - queue->first->entries + 1;

	section->next = queue->first;
	section->prev = NULL;
	if (queue->first)
		queue->first->prev = section;
	else
		queue->last = section;

	queue->first = section;
	queue->cursor = queue->first->entries + queue->first->size - 1;
#ifdef SGEN_CHECK_GRAY_OBJECT_ENQUEUE
	if (queue->enqueue_check_func) {
		int i;
		for (i = 0; i < section->size; ++i)
			queue->enqueue_check_func (section->entries [i].obj);
	}
#endif
	if (is_parallel) {
		mono_memory_write_barrier ();
		mono_atomic_inc_i32 (&queue->num_sections);
	} else {
		queue->num_sections++;
	}
}
GrayQueueEntry
sgen_gray_object_dequeue (SgenGrayQueue *queue)
{
	GrayQueueEntry entry;

	HEAVY_STAT (stat_gray_queue_dequeue_slow_path ++);

	if (sgen_gray_object_queue_is_empty (queue)) {
		entry.obj = NULL;
		return entry;
	}

	STATE_ASSERT (queue->first, GRAY_QUEUE_SECTION_STATE_ENQUEUED);
	SGEN_ASSERT (9, queue->cursor >= GRAY_FIRST_CURSOR_POSITION (queue->first), "gray queue %p underflow", queue);

	entry = *queue->cursor--;

#ifdef SGEN_HEAVY_BINARY_PROTOCOL
	binary_protocol_gray_dequeue (queue, queue->cursor + 1, entry.obj);
#endif

	if (G_UNLIKELY (queue->cursor < GRAY_FIRST_CURSOR_POSITION (queue->first))) {
		GrayQueueSection *section = queue->first;
		queue->first = section->next;
		section->next = queue->free_list;

		STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_ENQUEUED, GRAY_QUEUE_SECTION_STATE_FREE_LIST);

		queue->free_list = section;
		queue->cursor = queue->first ? queue->first->entries + queue->first->size - 1 : NULL;
	}

	return entry;
}
GrayQueueSection*
sgen_gray_object_dequeue_section (SgenGrayQueue *queue)
{
	GrayQueueSection *section;

	if (!queue->first)
		return NULL;

	/* We never steal from this queue */
	queue->num_sections--;

	section = queue->first;
	queue->first = section->next;
	if (queue->first)
		queue->first->prev = NULL;
	else
		queue->last = NULL;
	section->next = NULL;
	section->size = queue->cursor - section->entries + 1;

	queue->cursor = queue->first ? queue->first->entries + queue->first->size - 1 : NULL;

	STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_ENQUEUED, GRAY_QUEUE_SECTION_STATE_FLOATING);

	return section;
}
void
sgen_gray_object_alloc_queue_section (SgenGrayQueue *queue, gboolean is_parallel)
{
	GrayQueueSection *section;

	if (queue->free_list) {
		/* Use the previously allocated queue sections if possible */
		section = queue->free_list;
		queue->free_list = section->next;
		STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_FREE_LIST, GRAY_QUEUE_SECTION_STATE_FLOATING);
	} else {
		HEAVY_STAT (stat_gray_queue_section_alloc ++);

		/* Allocate a new section */
		section = (GrayQueueSection *)sgen_alloc_internal (INTERNAL_MEM_GRAY_QUEUE);
		STATE_SET (section, GRAY_QUEUE_SECTION_STATE_FLOATING);
	}

	/* Section is empty */
	section->size = 0;

	STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_FLOATING, GRAY_QUEUE_SECTION_STATE_ENQUEUED);

	/* Link it with the others */
	section->next = queue->first;
	section->prev = NULL;
	if (queue->first)
		queue->first->prev = section;
	else
		queue->last = section;
	queue->first = section;
	queue->cursor = section->entries - 1;

	if (is_parallel) {
		mono_memory_write_barrier ();
		/*
		 * FIXME
		 * We could probably optimize the code to only rely on the write barrier
		 * for synchronization with the stealer thread. Additionally we could also
		 * do a write barrier once every other gray queue change, and require a
		 * minimum number of sections before stealing, to keep consistency.
		 */
		mono_atomic_inc_i32 (&queue->num_sections);
	} else {
		queue->num_sections++;
	}
}
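/*
 * A minimal, self-contained C11 sketch (hypothetical names, not Mono API) of the
 * ordering that matters in the parallel branch above: the owner must make the new
 * section fully visible *before* publishing the incremented num_sections,
 * otherwise a stealer that observes the larger count could read a
 * half-initialized section. Release/acquire atomics play the role of
 * mono_memory_write_barrier () paired with the reader's ordering.
 */
#include <stdatomic.h>

typedef struct node { int payload; } node;

static node slot;                        /* storage the "owner" initializes */
static _Atomic(int) published_count = 0; /* plays the role of num_sections */

static void
owner_publish (int value)
{
	slot.payload = value;	/* plain write: the section contents */
	/* the release ordering here is the write barrier before the increment */
	atomic_fetch_add_explicit (&published_count, 1, memory_order_release);
}

static int
stealer_consume (void)
{
	/* acquire ordering pairs with the release above */
	if (atomic_load_explicit (&published_count, memory_order_acquire) > 0)
		return slot.payload;	/* guaranteed to see the plain write */
	return -1;
}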
void
sgen_gray_object_free_queue_section (GrayQueueSection *section)
{
	HEAVY_STAT (stat_gray_queue_section_free ++);

	STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_FLOATING, GRAY_QUEUE_SECTION_STATE_FREED);
	sgen_free_internal (section, INTERNAL_MEM_GRAY_QUEUE);
}
GrayQueueEntry
sgen_gray_object_dequeue (SgenGrayQueue *queue, gboolean is_parallel)
{
	GrayQueueEntry entry;

	HEAVY_STAT (stat_gray_queue_dequeue_slow_path ++);

	if (sgen_gray_object_queue_is_empty (queue)) {
		entry.obj = NULL;
		return entry;
	}

	STATE_ASSERT (queue->first, GRAY_QUEUE_SECTION_STATE_ENQUEUED);
	SGEN_ASSERT (9, queue->cursor >= GRAY_FIRST_CURSOR_POSITION (queue->first), "gray queue %p underflow", queue);

	entry = *queue->cursor--;

#ifdef SGEN_HEAVY_BINARY_PROTOCOL
	binary_protocol_gray_dequeue (queue, queue->cursor + 1, entry.obj);
#endif

	if (G_UNLIKELY (queue->cursor < GRAY_FIRST_CURSOR_POSITION (queue->first))) {
		GrayQueueSection *section;
		gint32 old_num_sections = 0;

		if (is_parallel)
			old_num_sections = mono_atomic_dec_i32 (&queue->num_sections);
		else
			queue->num_sections--;

		if (is_parallel && old_num_sections <= 0) {
			mono_os_mutex_lock (&queue->steal_mutex);
		}

		section = queue->first;
		queue->first = section->next;
		if (queue->first) {
			queue->first->prev = NULL;
		} else {
			queue->last = NULL;
			SGEN_ASSERT (0, !old_num_sections, "Why do we have an inconsistent number of sections?");
		}
		section->next = queue->free_list;

		STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_ENQUEUED, GRAY_QUEUE_SECTION_STATE_FREE_LIST);

		queue->free_list = section;
		queue->cursor = queue->first ? queue->first->entries + queue->first->size - 1 : NULL;

		if (is_parallel && old_num_sections <= 0) {
			mono_os_mutex_unlock (&queue->steal_mutex);
		}
	}

	return entry;
}
void
sgen_gray_object_queue_deinit (SgenGrayQueue *queue)
{
	g_assert (!queue->first);

	while (queue->free_list) {
		GrayQueueSection *next = queue->free_list->next;
		STATE_TRANSITION (queue->free_list, GRAY_QUEUE_SECTION_STATE_FREE_LIST, GRAY_QUEUE_SECTION_STATE_FLOATING);
		sgen_gray_object_free_queue_section (queue->free_list);
		queue->free_list = next;
	}
}
GrayQueueSection*
sgen_gray_object_steal_section (SgenGrayQueue *queue)
{
	gint32 sections_remaining;
	GrayQueueSection *section = NULL;

	/*
	 * With each section pushed to or popped from the queue we adjust num_sections.
	 * Only one thread (the owner) accesses the head, but multiple workers may try
	 * to steal sections from the tail, so stealing needs the lock. A num_sections
	 * decrement by the owner reserves the first section, while a decrement by a
	 * stealer reserves the last one. If at least one section remains after our
	 * decrement, we cannot race with the other end. Otherwise the stealer abandons
	 * the pop and restores num_sections, while the owner takes the lock to make
	 * sure it is not racing with a stealer (the stealer might have popped a section
	 * and be in the middle of updating the section the owner is trying to pop).
	 */
	if (queue->num_sections <= 1)
		return NULL;

	/* Give up if there is contention on the last section */
	if (mono_os_mutex_trylock (&queue->steal_mutex) != 0)
		return NULL;

	sections_remaining = mono_atomic_dec_i32 (&queue->num_sections);
	if (sections_remaining <= 0) {
		/* The section that we tried to steal might be the head of the queue. */
		mono_atomic_inc_i32 (&queue->num_sections);
	} else {
		/* We have reserved for ourselves the tail section of the queue */
		section = queue->last;
		SGEN_ASSERT (0, section, "Why don't we have any sections to steal?");
		SGEN_ASSERT (0, !section->next, "Why aren't we stealing the tail?");
		queue->last = section->prev;
		section->prev = NULL;
		SGEN_ASSERT (0, queue->last, "Why are we stealing the last section?");
		queue->last->next = NULL;

		STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_ENQUEUED, GRAY_QUEUE_SECTION_STATE_FLOATING);
	}

	mono_os_mutex_unlock (&queue->steal_mutex);
	return section;
}
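/*
 * A compilable toy model (hypothetical names, not Mono API) of the reservation
 * protocol described in the comment above: both ends decrement the shared count
 * to reserve a section, and only fall back to the mutex when the decrement shows
 * the two ends might be touching the same, single remaining section.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

static _Atomic(int) num_sections = 0;
static pthread_mutex_t steal_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Stealer end: abandon the pop if we might be racing with the owner. */
static bool
stealer_reserve_tail (void)
{
	if (atomic_load (&num_sections) <= 1)
		return false;
	if (pthread_mutex_trylock (&steal_mutex) != 0)
		return false;	/* contention on the tail: give up */
	if (atomic_fetch_sub (&num_sections, 1) - 1 <= 0) {
		atomic_fetch_add (&num_sections, 1);	/* restore and abandon */
		pthread_mutex_unlock (&steal_mutex);
		return false;
	}
	/* ... unlink the tail section here, as sgen_gray_object_steal_section does ... */
	pthread_mutex_unlock (&steal_mutex);
	return true;
}

/* Owner end: only lock when the decrement says a stealer may hold the tail. */
static void
owner_pop_head (void)
{
	int remaining = atomic_fetch_sub (&num_sections, 1) - 1;
	if (remaining <= 0)
		pthread_mutex_lock (&steal_mutex);
	/* ... unlink the head section here ... */
	if (remaining <= 0)
		pthread_mutex_unlock (&steal_mutex);
}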
void
sgen_gray_object_queue_trim_free_list (SgenGrayQueue *queue)
{
	GrayQueueSection *section, *next;
	int i = 0;

	for (section = queue->free_list; section && i < GRAY_QUEUE_LENGTH_LIMIT - 1; section = section->next) {
		STATE_ASSERT (section, GRAY_QUEUE_SECTION_STATE_FREE_LIST);
		i ++;
	}
	if (!section)
		return;
	while (section->next) {
		next = section->next;
		section->next = next->next;
		STATE_TRANSITION (next, GRAY_QUEUE_SECTION_STATE_FREE_LIST, GRAY_QUEUE_SECTION_STATE_FLOATING);
		sgen_gray_object_free_queue_section (next);
	}
}
void
sgen_section_gray_queue_enqueue (SgenSectionGrayQueue *queue, GrayQueueSection *section)
{
	STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_FLOATING, GRAY_QUEUE_SECTION_STATE_ENQUEUED);

	lock_section_queue (queue);

	section->next = queue->first;
	queue->first = section;

#ifdef SGEN_CHECK_GRAY_OBJECT_ENQUEUE
	if (queue->enqueue_check_func) {
		int i;
		for (i = 0; i < section->size; ++i)
			queue->enqueue_check_func (section->entries [i].obj);
	}
#endif

	unlock_section_queue (queue);
}
void
sgen_gray_object_enqueue_section (SgenGrayQueue *queue, GrayQueueSection *section)
{
	STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_FLOATING, GRAY_QUEUE_SECTION_STATE_ENQUEUED);

	if (queue->first)
		queue->first->size = queue->cursor - queue->first->entries + 1;

	section->next = queue->first;
	queue->first = section;
	queue->cursor = queue->first->entries + queue->first->size - 1;
#ifdef SGEN_CHECK_GRAY_OBJECT_ENQUEUE
	if (queue->enqueue_check_func) {
		int i;
		for (i = 0; i < section->size; ++i)
			queue->enqueue_check_func (section->entries [i].obj);
	}
#endif
}
GrayQueueSection*
sgen_gray_object_dequeue_section (SgenGrayQueue *queue)
{
	GrayQueueSection *section;

	if (!queue->first)
		return NULL;

	section = queue->first;
	queue->first = section->next;
	section->next = NULL;
	section->size = queue->cursor - section->entries + 1;

	queue->cursor = queue->first ? queue->first->entries + queue->first->size - 1 : NULL;

	STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_ENQUEUED, GRAY_QUEUE_SECTION_STATE_FLOATING);

	return section;
}
GrayQueueSection*
sgen_section_gray_queue_dequeue (SgenSectionGrayQueue *queue)
{
	GrayQueueSection *section;

	lock_section_queue (queue);

	if (queue->first) {
		section = queue->first;
		queue->first = section->next;

		STATE_TRANSITION (section, GRAY_QUEUE_SECTION_STATE_ENQUEUED, GRAY_QUEUE_SECTION_STATE_FLOATING);

		section->next = NULL;
	} else {
		section = NULL;
	}

	unlock_section_queue (queue);

	return section;
}
static bool s_displayDirty = false;
static bool s_BlinkState = false;

/* Application state machine */
DEFINE_SM_STATES(DISPLAY, EDIT, WRITING);
DEFINE_SM_EVENTS(BTN_DIGIT_SELECT, BTN_UP, BTN_IDLE, WRITE_COMPLETE);

DEFINE_STATES(sm) = {
	{DISPLAY, NULL, NULL},
	{EDIT,    NULL, NULL},
	{WRITING, NULL, NULL}
};

DEFINE_STATE_TRANSITIONS(sm) = {
	STATE_TRANSITION(sm, DISPLAY, BTN_UP,           incDigit,   EDIT   ),
	STATE_TRANSITION(sm, DISPLAY, BTN_DIGIT_SELECT, NULL,       EDIT   ),
	STATE_TRANSITION(sm, EDIT,    BTN_UP,           incDigit,   EDIT   ),
	STATE_TRANSITION(sm, EDIT,    BTN_DIGIT_SELECT, NULL,       EDIT   ),
	STATE_TRANSITION(sm, EDIT,    BTN_IDLE,         startWrite, WRITING),
	STATE_TRANSITION(sm, WRITING, WRITE_COMPLETE,   NULL,       DISPLAY),
};

DEFINE_STATE_MACHINE(sm, DISPLAY);
/* END application state machine */

static UNIX_TIMESTAMP s_unixtime = COMPILE_TIME_INT;
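/*
 * The DEFINE_* macros above come from the application's state machine framework.
 * A plausible minimal expansion of the same table-driven pattern (hypothetical,
 * for illustration only) looks like this: each transition row pairs a
 * (state, event) key with an optional action and a next state, and the
 * dispatcher scans the table on every event.
 */
#include <stddef.h>

typedef enum { DISPLAY, EDIT, WRITING } sm_state_t;
typedef enum { BTN_DIGIT_SELECT, BTN_UP, BTN_IDLE, WRITE_COMPLETE } sm_event_t;

typedef struct {
	sm_state_t state;
	sm_event_t event;
	void (*action)(void);	/* optional, may be NULL */
	sm_state_t next;
} sm_transition_t;

static void incDigit(void)   { /* increment the digit being edited */ }
static void startWrite(void) { /* kick off the asynchronous write */ }

static const sm_transition_t sm_transitions[] = {
	{ DISPLAY, BTN_UP,           incDigit,   EDIT    },
	{ DISPLAY, BTN_DIGIT_SELECT, NULL,       EDIT    },
	{ EDIT,    BTN_UP,           incDigit,   EDIT    },
	{ EDIT,    BTN_DIGIT_SELECT, NULL,       EDIT    },
	{ EDIT,    BTN_IDLE,         startWrite, WRITING },
	{ WRITING, WRITE_COMPLETE,   NULL,       DISPLAY },
};

static sm_state_t sm_state = DISPLAY;	/* initial state, as in DEFINE_STATE_MACHINE */

static void sm_dispatch(sm_event_t event)
{
	for (size_t i = 0; i < sizeof(sm_transitions) / sizeof(sm_transitions[0]); i++) {
		if (sm_transitions[i].state == sm_state && sm_transitions[i].event == event) {
			if (sm_transitions[i].action) sm_transitions[i].action();
			sm_state = sm_transitions[i].next;
			return;
		}
	}
	/* events with no matching row are ignored in the current state */
}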
/** Signal that there's been an error on the connection
 *
 */
void fr_ldap_state_error(fr_ldap_connection_t *c)
{
	STATE_TRANSITION(FR_LDAP_STATE_ERROR);
	fr_ldap_state_next(c);
}
USES_APPLE_DEPRECATED_API

#include <freeradius-devel/ldap/base.h>

#define STATE_TRANSITION(_new) \
do { \
	DEBUG4("Changed state %s -> %s", \
	       fr_int2str(fr_ldap_connection_states, c->state, "<INVALID>"), \
	       fr_int2str(fr_ldap_connection_states, _new, "<INVALID>")); \
	c->state = _new; \
} while (0)

/** Move between LDAP connection states
 *
 * Bringing up an LDAP connection is quite complex, as we need to do multiple operations
 * before we can install the main mux/demux functions which do the work of sending
 * requests to the directory and processing the responses.
 *
 * This function moves the connection through different states, setting different I/O
 * handlers.
 *
 * If any of the states fail, the connection is transitioned to FR_LDAP_STATE_ERROR
 * and signalled to reconnect.
 */
fr_ldap_state_t fr_ldap_state_next(fr_ldap_connection_t *c)
{
again:
	switch (c->state) {
	/*
	 *	Start by negotiating TLS, or binding
	 */
	case FR_LDAP_STATE_INIT:
		if (c->config->start_tls) {
			if (fr_ldap_start_tls_async(c, NULL, NULL) < 0) {
				STATE_TRANSITION(FR_LDAP_STATE_ERROR);
				goto again;
			}
			STATE_TRANSITION(FR_LDAP_STATE_START_TLS);
			break;
		}
		/* FALL-THROUGH */

	/*
	 *	If we're successful in negotiating TLS,
	 *	bind to the server as the credentials
	 *	will now be protected.
	 */
	case FR_LDAP_STATE_START_TLS:
		STATE_TRANSITION(FR_LDAP_STATE_BIND);

		/*
		 *	SASL uses a different (and more complex) codepath
		 */
#ifdef WITH_SASL
		if (c->config->admin_sasl.mech) {
			if (fr_ldap_sasl_bind_async(c,
						    c->config->admin_sasl.mech,
						    c->config->admin_identity,
						    c->config->admin_password,
						    c->config->admin_sasl.proxy,
						    c->config->admin_sasl.realm,
						    NULL, NULL) < 0) {
				STATE_TRANSITION(FR_LDAP_STATE_ERROR);
				goto again;
			}
			break;
		}
#endif

		/*
		 *	Normal binds are just a simple request/response pair
		 */
		if (fr_ldap_bind_async(c,
				       c->config->admin_identity,
				       c->config->admin_password,
				       NULL, NULL) < 0) {
			STATE_TRANSITION(FR_LDAP_STATE_ERROR);
			goto again;
		}
		break;

	/*
	 *	After binding install the mux (write) and
	 *	demux (read) I/O functions.
	 */
	case FR_LDAP_STATE_BIND:
		STATE_TRANSITION(FR_LDAP_STATE_RUN);
		/*
		if (fr_ldap_mux_async(c) < 0) {
			STATE_TRANSITION(FR_LDAP_STATE_ERROR);
			goto again;
		}
		*/
		break;

	/*
	 *	Something went wrong
	 */
	case FR_LDAP_STATE_RUN:	/* There's no next state for run, so this is an error */
	case FR_LDAP_STATE_ERROR:
		STATE_TRANSITION(FR_LDAP_STATE_INIT);
		fr_connection_signal_reconnect(c->conn);
		break;
	}

	return c->state;
}
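/*
 * Note that the STATE_TRANSITION() macro above deliberately refers to a variable
 * named `c` that it does not take as a parameter, so it is only usable inside
 * functions that declare `fr_ldap_connection_t *c`. A self-contained sketch of
 * the same logging-transition pattern (hypothetical types, not the FreeRADIUS
 * API) makes that implicit dependency explicit:
 */
#include <stdio.h>

typedef enum { CONN_INIT, CONN_RUN, CONN_ERROR } conn_state_t;

typedef struct { conn_state_t state; } conn_t;

static const char *conn_state_name(conn_state_t s)
{
	switch (s) {
	case CONN_INIT:  return "INIT";
	case CONN_RUN:   return "RUN";
	case CONN_ERROR: return "ERROR";
	}
	return "<INVALID>";
}

/* Same shape as STATE_TRANSITION(), but `c` is an explicit parameter. */
#define CONN_TRANSITION(_c, _new) \
do { \
	printf("Changed state %s -> %s\n", \
	       conn_state_name((_c)->state), conn_state_name(_new)); \
	(_c)->state = _new; \
} while (0)

/* Usage: conn_t c = { CONN_INIT }; CONN_TRANSITION(&c, CONN_RUN); */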