Example No. 1
void thorium_message_multiplexer_set_worker(struct thorium_message_multiplexer *self,
                struct thorium_worker *worker)
{
    struct core_memory_pool *pool;

    self->worker = worker;
    pool = thorium_worker_get_memory_pool(self->worker,
                            MEMORY_POOL_NAME_WORKER_PERSISTENT);

#ifdef THORIUM_MULTIPLEXER_USE_TREE
    core_red_black_tree_init(&self->timeline, sizeof(uint64_t), sizeof(int),
                    pool);
    core_red_black_tree_use_uint64_t_keys(&self->timeline);

#elif defined(THORIUM_MULTIPLEXER_USE_HEAP)
    core_binary_heap_init(&self->timeline, sizeof(uint64_t), sizeof(int),
                    CORE_BINARY_HEAP_MIN | CORE_BINARY_HEAP_UINT64_T_KEYS);
    core_binary_heap_set_memory_pool(&self->timeline, pool);

#elif defined(THORIUM_MULTIPLEXER_USE_QUEUE)

    core_queue_init(&self->timeline, sizeof(int));

#endif

    if (thorium_node_name(self->node) == 0 && thorium_worker_name(self->worker) == 0
                    && thorium_node_must_print_data(self->node)) {
        thorium_printf("[thorium] message_multiplexer: disabled=%d buffer_size_in_bytes=%d timeout_in_nanoseconds=%d\n",
                            CORE_BITMAP_GET_FLAG(self->flags, FLAG_DISABLED),
                        self->buffer_size_in_bytes, self->timeout_in_nanoseconds);
    }
}
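
The timeline initialized above maps a uint64_t timestamp (key) to an int buffer index (value); whichever container is compiled in, the multiplexer only needs ordered insertion and retrieval of the earliest entry. Below is a minimal standalone sketch of that polling pattern over a plain sorted array instead of the core_* containers; the type and function names are illustrative, not Thorium API.

#include <stdint.h>
#include <stdio.h>

struct timeline_entry {
    uint64_t timestamp;  /* nanosecond time at which the buffer received its first message */
    int buffer_index;    /* which multiplexed buffer to flush */
};

/* Flush every entry whose timeout has expired; the printf stands in for a call
 * such as thorium_message_multiplexer_flush(). Entries are assumed to be sorted
 * by timestamp, which is what the tree, heap, or queue variant provides. */
int poll_timeline(const struct timeline_entry *entries, int count,
                uint64_t now, uint64_t timeout_in_nanoseconds)
{
    int flushed = 0;

    while (flushed < count
                    && now - entries[flushed].timestamp >= timeout_in_nanoseconds) {
        printf("flush buffer %d\n", entries[flushed].buffer_index);
        ++flushed;
    }

    return flushed;
}

int main(void)
{
    struct timeline_entry entries[] = { { 100, 0 }, { 250, 3 }, { 900, 1 } };

    /* With now = 1000 ns and a 300 ns timeout, the first two buffers are flushed. */
    poll_timeline(entries, 3, 1000, 300);

    return 0;
}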
Example No. 2
void thorium_worker_enable_profiler(struct thorium_worker *self)
{
    char file_name[100];

    core_bitmap_set_bit_uint32_t(&self->flags, FLAG_ENABLE_ACTOR_LOAD_PROFILER);

    sprintf(file_name, "node_%d_worker_%d_actor_load_profile.txt", thorium_node_name(self->node),
                    self->name);

    core_buffered_file_writer_init(&self->load_profile_writer, file_name);

    core_buffered_file_writer_printf(&self->load_profile_writer, "start_time\tend_time\tactor\tscript\taction\tcompute_time\twaiting_time\n");
}
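
The file name above is formatted with sprintf into a 100-byte stack buffer, which is large enough for two int fields but carries no explicit bound. A hedged standalone sketch of the same naming scheme using snprintf, so truncation is detected instead of overflowing; the helper name is illustrative.

#include <stdio.h>

/* Build "node_<node>_worker_<worker>_actor_load_profile.txt" into the caller's
 * buffer; returns 0 on success, -1 if the name would not fit. */
int build_profile_file_name(char *file_name, size_t size, int node, int worker)
{
    int written;

    written = snprintf(file_name, size, "node_%d_worker_%d_actor_load_profile.txt",
                    node, worker);

    if (written < 0 || (size_t)written >= size)
        return -1;

    return 0;
}

int main(void)
{
    char file_name[100];

    if (build_profile_file_name(file_name, sizeof(file_name), 0, 7) == 0)
        printf("%s\n", file_name);

    return 0;
}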
Example No. 3
void thorium_balancer_migrate(struct thorium_balancer *self, struct thorium_migration *migration)
{
    struct thorium_worker *old_worker_object;
    struct thorium_worker *new_worker_object;
    int old_worker;
    int new_worker;
    int actor_name;
    struct thorium_actor *actor;

    old_worker = thorium_migration_get_old_worker(migration);
    new_worker = thorium_migration_get_new_worker(migration);
    actor_name = thorium_migration_get_actor(migration);
    actor = thorium_node_get_actor_from_name(thorium_worker_pool_get_node(self->pool), actor_name);

#ifdef THORIUM_SCHEDULER_ENABLE_VERBOSITY
    printf("MIGRATION node %d migrated actor %d from worker %d to worker %d\n",
                    thorium_node_name(thorium_worker_pool_get_node(self->pool)), actor_name,
                    old_worker, new_worker);
#endif

    old_worker_object = thorium_worker_pool_get_worker(self->pool, old_worker);
    new_worker_object = thorium_worker_pool_get_worker(self->pool, new_worker);

    /* evict the actor from the old worker
     */
    thorium_worker_evict_actor(old_worker_object, actor_name);

    /* Redirect messages for this actor to the
     * new worker
     */
    thorium_actor_set_assigned_worker(actor, new_worker);

#ifdef THORIUM_WORKER_POOL_DEBUG_MIGRATION
    printf("ROUTE actor %d ->  worker %d\n", actor_name, new_worker);
#endif

    thorium_worker_enqueue_actor_special(new_worker_object, actor);

}
Example No. 4
void thorium_message_multiplexer_print_traffic_reduction(struct thorium_message_multiplexer *self)
{
    char buffer[1024];
    int position;
    int i;
    int size;
    struct thorium_multiplexed_buffer *multiplexed_buffer;
    int original_message_count;
    int real_message_count;
    float reduction;

    position = 0;

    position += sprintf(buffer + position, "[thorium] node %d worker %d multiplexer channels",
                    thorium_node_name(self->node), thorium_worker_name(self->worker));

    size = core_vector_size(&self->buffers);
    for (i = 0; i < size; ++i) {
        multiplexed_buffer = core_vector_at(&self->buffers, i);

        original_message_count = thorium_multiplexed_buffer_original_message_count(multiplexed_buffer);
        real_message_count = thorium_multiplexed_buffer_real_message_count(multiplexed_buffer);

        if (original_message_count == 0)
            continue;

        reduction = (0.0 + original_message_count - real_message_count) / original_message_count;
        reduction *= 100.0;

        position += sprintf(buffer + position, " [%d: %d %d %.2f%%]",
                        i, original_message_count, real_message_count,
                        reduction);
    }

    position += sprintf(buffer + position, "\n");

    thorium_printf("%s", buffer);
}
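
The per-channel percentage printed above is the usual traffic-reduction formula: (original - real) / original * 100. A small standalone check of that arithmetic with made-up message counts:

#include <stdio.h>

/* Returns the traffic reduction in percent, or 0 when nothing was sent. */
float traffic_reduction_percent(int original_message_count, int real_message_count)
{
    if (original_message_count == 0)
        return 0.0f;

    return (0.0f + original_message_count - real_message_count)
                    / original_message_count * 100.0f;
}

int main(void)
{
    /* 1000 application messages carried by 125 network messages: 87.50% reduction. */
    printf("%.2f%%\n", traffic_reduction_percent(1000, 125));

    return 0;
}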
Example No. 5
void thorium_worker_pool_print_load(struct thorium_worker_pool *self, int type)
{
    int count;
    int i;
    float epoch_load;
    struct thorium_worker *worker;
    float loop_load;

    uint64_t epoch_wake_up_count;
    uint64_t loop_wake_up_count;
    /*
    int scheduling_score;
    */
    int node_name;
    char *buffer;
    char *buffer_for_wake_up_events;
    char *buffer_for_future_timeline;
    int allocated;
    int offset;
    int offset_for_wake_up;
    int offset_for_future;
    int extra;
    time_t current_time;
    int elapsed;
    float selected_load;
    uint64_t selected_wake_up_count;
    float sum;
    char loop[] = "COMPUTATION";
    char epoch[] = "EPOCH";
    char *description;
    float load;

    description = NULL;

    if (type == THORIUM_WORKER_POOL_LOAD_LOOP) {
        description = loop;
    } else if (type == THORIUM_WORKER_POOL_LOAD_EPOCH) {
        description = epoch;
    } else {
        return;
    }

    current_time = time(NULL);
    elapsed = current_time - self->starting_time;

    extra = 100;

    count = thorium_worker_pool_worker_count(self);
    allocated = count * 20 + 20 + extra;

    buffer = core_memory_allocate(allocated, MEMORY_WORKER_POOL_KEY);
    buffer_for_wake_up_events = core_memory_allocate(allocated, MEMORY_WORKER_POOL_KEY);
    buffer_for_future_timeline = core_memory_allocate(allocated, MEMORY_WORKER_POOL_KEY);
    node_name = thorium_node_name(self->node);
    offset = 0;
    offset_for_wake_up = 0;
    offset_for_future = 0;
    i = 0;
    sum = 0;

    while (i < count && offset + extra < allocated) {

        worker = thorium_worker_pool_get_worker(self, i);

        epoch_load = thorium_worker_get_epoch_load(worker);
        loop_load = thorium_worker_get_loop_load(worker);
        epoch_wake_up_count = thorium_worker_get_epoch_wake_up_count(worker);
        loop_wake_up_count = thorium_worker_get_loop_wake_up_count(worker);

        selected_load = epoch_load;
        selected_wake_up_count = epoch_wake_up_count;

        if (type == THORIUM_WORKER_POOL_LOAD_EPOCH) {
            selected_load = epoch_load;
            selected_wake_up_count = epoch_wake_up_count;

        } else if (type == THORIUM_WORKER_POOL_LOAD_LOOP) {
            selected_load = loop_load;
            selected_wake_up_count = loop_wake_up_count;
        }

        /*
        offset += sprintf(buffer + offset, " [%d %d %.2f]", i,
                        scheduling_score,
                        selected_load);
                        */
        offset += sprintf(buffer + offset, " %.2f",
                        selected_load);

        offset_for_wake_up += sprintf(buffer_for_wake_up_events + offset_for_wake_up, " %" PRIu64 "",
                        selected_wake_up_count);

        offset_for_future += sprintf(buffer_for_future_timeline + offset_for_future, " %d",
                        thorium_worker_get_scheduled_actor_count(worker));

        sum += selected_load;

        ++i;
    }

    load = sum / count;

    printf("thorium_worker_pool: node/%d %s LOAD %d s %.2f/%d (%.2f)%s\n",
                    node_name,
                    description, elapsed,
                    sum, count, load, buffer);

    printf("thorium_worker_pool: node/%d %s FUTURE_TIMELINE %d s %s\n",
                    node_name,
                    description, elapsed,
                    buffer_for_future_timeline);

    printf("thorium_worker_pool: node/%d %s WAKE_UP_COUNT %d s %s\n",
                    node_name,
                    description, elapsed,
                    buffer_for_wake_up_events);

    core_memory_free(buffer, MEMORY_WORKER_POOL_KEY);
    core_memory_free(buffer_for_wake_up_events, MEMORY_WORKER_POOL_KEY);
    core_memory_free(buffer_for_future_timeline, MEMORY_WORKER_POOL_KEY);
}
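
The three report lines are assembled by repeated offset += sprintf(buffer + offset, ...) into buffers sized at roughly 20 bytes per worker plus a fixed slack, and the loop stops early when the slack would be consumed. A standalone sketch of the same accumulation pattern with an explicit bound checked on every append; the names are illustrative.

#include <stdio.h>

/* Append one value per worker, never writing past 'allocated' bytes.
 * Returns the number of bytes used (excluding the terminating '\0'). */
int format_loads(char *buffer, int allocated, const float *loads, int count)
{
    int offset = 0;
    int i;

    if (allocated > 0)
        buffer[0] = '\0';

    for (i = 0; i < count; ++i) {
        int written = snprintf(buffer + offset, allocated - offset, " %.2f", loads[i]);

        if (written < 0 || written >= allocated - offset)
            break;  /* would not fit: stop and keep what was already formatted */

        offset += written;
    }

    return offset;
}

int main(void)
{
    char buffer[64];
    float loads[] = { 0.95f, 0.50f, 0.99f, 0.10f };

    format_loads(buffer, sizeof(buffer), loads, 4);
    printf("loads:%s\n", buffer);

    return 0;
}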
Example No. 6
void thorium_message_multiplexer_flush(struct thorium_message_multiplexer *self, int index, int force)
{
    char *buffer;
    struct thorium_message message;
    int tag;
    int count;
    int current_size;
    int maximum_size;
    struct thorium_multiplexed_buffer *multiplexed_buffer;
    int destination_node;
    /*
    int elapsed;
    int message_count;
    */

    if (CORE_BITMAP_GET_FLAG(self->flags, FLAG_DISABLED)) {
        return;
    }

#ifdef THORIUM_MULTIPLEXER_TRACK_BUFFERS_WITH_CONTENT
#ifdef CORE_DEBUGGER_ASSERT_ENABLED
    if (!(core_set_find(&self->buffers_with_content, &index))) {
        multiplexed_buffer = core_vector_at(&self->buffers, index);
        thorium_printf("index %d has no content\n", index);

        thorium_multiplexed_buffer_print(multiplexed_buffer);
    }
#endif

    CORE_DEBUGGER_ASSERT(core_set_find(&self->buffers_with_content, &index));
#endif

    multiplexed_buffer = core_vector_at(&self->buffers, index);
    current_size = thorium_multiplexed_buffer_current_size(multiplexed_buffer);
    maximum_size = thorium_multiplexed_buffer_maximum_size(multiplexed_buffer);

    /*
     * The buffer was still in the timeline, but it was flushed elsewhere.
     */
    if (current_size == 0) {
        return;
    }

    /*
     * Old flush conditions, kept for reference:
     *
    if (force == FORCE_NO && current_size < maximum_size) {
        return;

    } else if (force == FORCE_YES_TIME) {
        elapsed = core_timer_get_nanoseconds(&self->timer) - multiplexed_buffer->timestamp_;

        if (elapsed < self->timeout_in_nanoseconds) {
            return;
        }
    } else if (force == FORCE_YES_DOA) {
        message_count = multiplexed_buffer->message_count_;

        if (message_count < self->degree_of_aggregation_limit) {
            return;
        }
    }
    */

#ifdef CORE_DEBUGGER_ASSERT_ENABLED
    if (current_size <= 0)
        thorium_printf("current_size %d maximum_size %d\n", current_size, maximum_size);
#endif

    CORE_DEBUGGER_ASSERT(current_size > 0);

    buffer = thorium_multiplexed_buffer_buffer(multiplexed_buffer);

    count = current_size + THORIUM_MESSAGE_METADATA_SIZE;

    tag = ACTION_MULTIPLEXER_MESSAGE;

    /*
     * This count does not include metadata for the final big message.
     *
     * Avoid this copy by using an array of pointers in the first place.
     */

    destination_node = index;

    thorium_message_init(&message, tag, count, buffer);
    thorium_message_set_destination(&message,
                    destination_node);
    thorium_message_set_source(&message,
            thorium_node_name(self->node));
    /*
     * Mark the message so that the buffer is eventually sent back here
     * for recycling.
     */
    thorium_message_set_worker(&message, thorium_worker_name(self->worker));

    thorium_message_write_metadata(&message);

#ifdef DEBUG_MULTIPLEXER_FLUSH
    thorium_printf("DEBUG_MULTIPLEXER thorium_message_multiplexer_flush index %d buffer %p force %d current_size %d maximum_size %d"
                    " destination_node %d\n",
                    index, buffer, force,
                    current_size, maximum_size,
                    thorium_message_destination_node(&message));

    thorium_printf("message in flush\n");
    thorium_multiplexed_buffer_print(multiplexed_buffer);
    thorium_message_print(&message);
#endif

    CORE_DEBUGGER_ASSERT_NOT_NULL(self->worker);

    /*
     * Make a copy of the buffer because the multiplexer does not have communication buffers.
     */
    thorium_worker_enqueue_outbound_message(self->worker, &message);

    /*
    thorium_printf("MULTIPLEXER FLUSH\n");
    */

    ++self->real_message_count;
    thorium_message_destroy(&message);

    thorium_multiplexed_buffer_reset(multiplexed_buffer);

#ifdef THORIUM_MULTIPLEXER_TRACK_BUFFERS_WITH_CONTENT
    core_set_delete(&self->buffers_with_content, &index);
#endif
}
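
The commented-out conditions above describe three flush triggers: FORCE_NO flushes only a full buffer, FORCE_YES_TIME flushes when the oldest message has waited at least the timeout, and FORCE_YES_DOA flushes once enough messages have been aggregated. A hedged standalone sketch of that decision, written against plain parameters rather than the thorium_multiplexed_buffer accessors; as shown above, the current code path flushes whenever current_size is non-zero.

#include <stdint.h>
#include <stdio.h>

enum flush_force { FORCE_NO, FORCE_YES_TIME, FORCE_YES_DOA };

/* Returns 1 when the buffer should be flushed now, 0 otherwise.
 * This mirrors the commented-out conditions kept above for reference. */
int should_flush(enum flush_force force, int current_size, int maximum_size,
                uint64_t elapsed_nanoseconds, uint64_t timeout_in_nanoseconds,
                int message_count, int degree_of_aggregation_limit)
{
    if (current_size == 0)
        return 0;

    switch (force) {
    case FORCE_NO:
        return current_size >= maximum_size;
    case FORCE_YES_TIME:
        return elapsed_nanoseconds >= timeout_in_nanoseconds;
    case FORCE_YES_DOA:
        return message_count >= degree_of_aggregation_limit;
    }

    return 1;
}

int main(void)
{
    /* A 512-byte buffer that has waited 2 ms against a 1 ms timeout: flush it. */
    printf("%d\n", should_flush(FORCE_YES_TIME, 512, 8192, 2000000, 1000000, 10, 64));

    return 0;
}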
Example No. 7
void thorium_message_multiplexer_destroy(struct thorium_message_multiplexer *self)
{
    int i;
    int size;
    struct thorium_multiplexed_buffer *multiplexed_buffer;
    float ratio;

    if (thorium_node_must_print_data(self->node)) {

        ratio = 0.0;

        if (self->original_message_count != 0) {
            ratio = self->real_message_count / (0.0 + self->original_message_count);

            /*
             * Jack M. Nilles
             * "Traffic reduction by telecommuting: A status review and selected bibliography"
             * Transportation Research Part A: General
             * Volume 22, Issue 4, July 1988, Pages 301–317.
             *
             * @see http://www.sciencedirect.com/science/article/pii/0191260788900088
             * @see http://ww2.cityofpasadena.net/councilagendas/2007%20agendas/feb_26_07/pasadena%20traffic%20reduction%20strategies%2011-20-06%20draft.pdf
             */
            thorium_printf("[thorium] node %d worker %d message_multiplexer:"
                            " original_message_count %d real_message_count %d (traffic reduction: %.2f%%)\n",
                            thorium_node_name(self->node), thorium_worker_name(self->worker),
                    self->original_message_count, self->real_message_count, (1.0 - ratio) * 100);

            thorium_message_multiplexer_print_traffic_reduction(self);
        }
    }

    size = core_vector_size(&self->buffers);

#ifdef THORIUM_MULTIPLEXER_TRACK_BUFFERS_WITH_CONTENT
    /*
     * There can be no messages that are not flushed already.
     */
    CORE_DEBUGGER_ASSERT(core_set_empty(&self->buffers_with_content));

    core_set_destroy(&self->buffers_with_content);
#endif

    for (i = 0; i < size; ++i) {
        multiplexed_buffer = core_vector_at(&self->buffers, i);

        CORE_DEBUGGER_ASSERT(thorium_multiplexed_buffer_current_size(multiplexed_buffer) == 0);

        thorium_multiplexed_buffer_destroy(multiplexed_buffer);
    }

    core_vector_destroy(&self->buffers);

    self->node = NULL;

    self->buffer_size_in_bytes = -1;
    self->timeout_in_nanoseconds = -1;

    core_timer_destroy(&self->timer);

#ifdef THORIUM_MULTIPLEXER_USE_TREE
    core_red_black_tree_destroy(&self->timeline);
#elif defined(THORIUM_MULTIPLEXER_USE_HEAP)
    core_binary_heap_destroy(&self->timeline);
#elif defined(THORIUM_MULTIPLEXER_USE_QUEUE)
    core_queue_destroy(&self->timeline);

#endif

    thorium_decision_maker_destroy(&self->decision_maker);

    thorium_router_destroy(&self->router);
}
Example No. 8
void thorium_worker_init(struct thorium_worker *worker, int name, struct thorium_node *node)
{
    int capacity;
    int ephemeral_memory_block_size;
    int injected_buffer_ring_size;
    int argc;
    char **argv;

    worker->tick_count = 0;

    thorium_load_profiler_init(&worker->profiler);

    argc = thorium_node_argc(node);
    argv = thorium_node_argv(node);

#ifdef THORIUM_WORKER_DEBUG_INJECTION
    worker->counter_allocated_outbound_buffers = 0;
    worker->counter_freed_outbound_buffers_from_self = 0;
    worker->counter_freed_outbound_buffers_from_other_workers = 0;
    worker->counter_injected_outbound_buffers_other_local_workers = 0;
    worker->counter_injected_inbound_buffers_from_thorium_core = 0;
#endif

    core_map_init(&worker->actor_received_messages, sizeof(int), sizeof(int));

    worker->waiting_is_enabled = 0;
    worker->waiting_start_time = 0;

    core_timer_init(&worker->timer);
    capacity = THORIUM_WORKER_RING_CAPACITY;
    /*worker->work_queue = work_queue;*/
    worker->node = node;
    worker->name = name;
    core_bitmap_clear_bit_uint32_t(&worker->flags, FLAG_DEAD);
    worker->last_warning = 0;

    worker->last_wake_up_count = 0;

    /*worker->work_queue = &worker->works;*/

    /* There are two options:
     * 1. enable atomic operations for change visibility
     * 2. Use volatile head and tail.
     */
    core_fast_ring_init(&worker->actors_to_schedule, capacity, sizeof(struct thorium_actor *));

#ifdef THORIUM_NODE_INJECT_CLEAN_WORKER_BUFFERS
    injected_buffer_ring_size = capacity;
    core_fast_ring_init(&worker->injected_clean_outbound_buffers,
                    injected_buffer_ring_size, sizeof(void *));

    core_fast_ring_init(&worker->clean_message_ring_for_triage,
                    injected_buffer_ring_size,
                    sizeof(struct thorium_message));

    core_fast_queue_init(&worker->clean_message_queue_for_triage,
                    sizeof(struct thorium_message));
#endif

    thorium_scheduler_init(&worker->scheduler, thorium_node_name(worker->node),
                    worker->name);
    core_map_init(&worker->actors, sizeof(int), sizeof(int));
    core_map_iterator_init(&worker->actor_iterator, &worker->actors);

    core_fast_ring_init(&worker->outbound_message_queue, capacity, sizeof(struct thorium_message));

    core_fast_queue_init(&worker->outbound_message_queue_buffer, sizeof(struct thorium_message));

    core_bitmap_clear_bit_uint32_t(&worker->flags, FLAG_DEBUG);
    core_bitmap_clear_bit_uint32_t(&worker->flags, FLAG_BUSY);
    core_bitmap_clear_bit_uint32_t(&worker->flags, FLAG_ENABLE_ACTOR_LOAD_PROFILER);

    worker->flags = 0;
    core_bitmap_clear_bit_uint32_t(&worker->flags, FLAG_DEBUG_ACTORS);

    if (core_command_has_argument(argc, argv, DEBUG_WORKER_OPTION)) {

#if 0
        printf("DEBUG has option %s\n", DEBUG_WORKER_OPTION);
#endif

        if (thorium_node_name(worker->node) == 0
                    && thorium_worker_name(worker) == 0) {

#if 0
            printf("DEBUG setting bit FLAG_DEBUG_ACTORS because %s\n", DEBUG_WORKER_OPTION);
#endif
            core_bitmap_set_bit_uint32_t(&worker->flags, FLAG_DEBUG_ACTORS);
        }
    }

    worker->epoch_used_nanoseconds = 0;
    worker->loop_used_nanoseconds = 0;
    worker->scheduling_epoch_used_nanoseconds = 0;

    worker->started_in_thread = 0;

/* 2 MiB is the default size for Linux huge pages.
 * \see https://wiki.debian.org/Hugepages
 * \see http://lwn.net/Articles/376606/
 */

    /*
     * 8 MiB
     */
    ephemeral_memory_block_size = 8388608;
    /*ephemeral_memory_block_size = 16777216;*/
    core_memory_pool_init(&worker->ephemeral_memory, ephemeral_memory_block_size,
                    MEMORY_POOL_NAME_WORKER_EPHEMERAL);

    core_memory_pool_disable_tracking(&worker->ephemeral_memory);
    core_memory_pool_enable_ephemeral_mode(&worker->ephemeral_memory);

#ifdef THORIUM_WORKER_ENABLE_LOCK
    core_lock_init(&worker->lock);
#endif

    core_set_init(&worker->evicted_actors, sizeof(int));

    core_memory_pool_init(&worker->outbound_message_memory_pool,
                    CORE_MEMORY_POOL_MESSAGE_BUFFER_BLOCK_SIZE, MEMORY_POOL_NAME_WORKER_OUTBOUND);

    /*
     * Disable the pool so that it uses allocate and free
     * directly.
     */

#ifdef CORE_MEMORY_POOL_DISABLE_MESSAGE_BUFFER_POOL
    core_memory_pool_disable(&worker->outbound_message_memory_pool);
#endif

    /*
     * Transport message buffers are fancy objects.
     */
    core_memory_pool_enable_normalization(&worker->outbound_message_memory_pool);
    core_memory_pool_enable_alignment(&worker->outbound_message_memory_pool);

    worker->ticks_without_production = 0;

    thorium_priority_assigner_init(&worker->assigner, thorium_worker_name(worker));

    /*
     * These variables should be set in
     * thorium_worker_start, but when running on 1 process with 1 thread,
     * thorium_worker_start is never called...
     */
    worker->last_report = time(NULL);
    worker->epoch_start_in_nanoseconds = core_timer_get_nanoseconds(&worker->timer);
    worker->loop_start_in_nanoseconds = worker->epoch_start_in_nanoseconds;
    worker->loop_end_in_nanoseconds = worker->loop_start_in_nanoseconds;
    worker->scheduling_epoch_start_in_nanoseconds = worker->epoch_start_in_nanoseconds;

    /*
     * Avoid valgrind warnings.
     */
    worker->epoch_load = 0;
}
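
Several of the containers initialized above (actors_to_schedule, outbound_message_queue, the injection rings) are fixed-capacity rings with one producer and one consumer; when a push fails because the ring is full, the caller falls back to a buffering queue, as thorium_worker_run does further down. A minimal single-threaded sketch of such a bounded ring for int elements; this illustrates the interface only, since the real core_fast_ring avoids a shared size counter so producer and consumer can run on different threads.

#include <stdio.h>
#include <string.h>

#define RING_CAPACITY 8

struct int_ring {
    int elements[RING_CAPACITY];
    int head;   /* next slot to pop  (consumer side) */
    int tail;   /* next slot to push (producer side) */
    int size;
};

void int_ring_init(struct int_ring *self)
{
    memset(self, 0, sizeof(*self));
}

/* Returns 1 on success, 0 when the ring is full and the caller must buffer. */
int int_ring_push(struct int_ring *self, int value)
{
    if (self->size == RING_CAPACITY)
        return 0;

    self->elements[self->tail] = value;
    self->tail = (self->tail + 1) % RING_CAPACITY;
    ++self->size;

    return 1;
}

/* Returns 1 and stores the value on success, 0 when the ring is empty. */
int int_ring_pop(struct int_ring *self, int *value)
{
    if (self->size == 0)
        return 0;

    *value = self->elements[self->head];
    self->head = (self->head + 1) % RING_CAPACITY;
    --self->size;

    return 1;
}

int main(void)
{
    struct int_ring ring;
    int value;

    int_ring_init(&ring);
    int_ring_push(&ring, 42);

    if (int_ring_pop(&ring, &value))
        printf("popped %d\n", value);

    return 0;
}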
Example No. 9
void thorium_worker_print_actors(struct thorium_worker *worker, struct thorium_balancer *scheduler)
{
    struct core_map_iterator iterator;
    int name;
    int count;
    struct thorium_actor *actor;
    int producers;
    int consumers;
    int received;
    int difference;
    int script;
    struct core_map distribution;
    int frequency;
    struct thorium_script *script_object;
    int dead;
    int node_name;
    int worker_name;
    int previous_amount;

    node_name = thorium_node_name(worker->node);
    worker_name = worker->name;

    core_map_iterator_init(&iterator, &worker->actors);

    printf("node/%d worker/%d %d queued messages, received: %d busy: %d load: %f ring: %d scheduled actors: %d/%d\n",
                    node_name, worker_name,
                    thorium_worker_get_scheduled_message_count(worker),
                    thorium_worker_get_sum_of_received_actor_messages(worker),
                    thorium_worker_is_busy(worker),
                    thorium_worker_get_scheduling_epoch_load(worker),
                    core_fast_ring_size_from_producer(&worker->actors_to_schedule),
                    thorium_scheduler_size(&worker->scheduler),
                    (int)core_map_size(&worker->actors));

    core_map_init(&distribution, sizeof(int), sizeof(int));

    while (core_map_iterator_get_next_key_and_value(&iterator, &name, NULL)) {

        actor = thorium_node_get_actor_from_name(worker->node, name);

        if (actor == NULL) {
            continue;
        }

        dead = thorium_actor_dead(actor);

        if (dead) {
            continue;
        }

        count = thorium_actor_get_mailbox_size(actor);
        received = thorium_actor_get_sum_of_received_messages(actor);
        producers = core_map_size(thorium_actor_get_received_messages(actor));
        consumers = core_map_size(thorium_actor_get_sent_messages(actor));
        previous_amount = 0;

        core_map_get_value(&worker->actor_received_messages, &name,
                        &previous_amount);
        difference = received - previous_amount;

        if (!core_map_update_value(&worker->actor_received_messages, &name,
                        &received)) {
            core_map_add_value(&worker->actor_received_messages, &name, &received);
        }

        printf("  [%s/%d] mailbox: %d received: %d (+%d) producers: %d consumers: %d\n",
                        thorium_actor_script_name(actor),
                        name, count, received,
                       difference,
                       producers, consumers);

        script = thorium_actor_script(actor);

        if (core_map_get_value(&distribution, &script, &frequency)) {
            ++frequency;
            core_map_update_value(&distribution, &script, &frequency);
        } else {
            frequency = 1;
            core_map_add_value(&distribution, &script, &frequency);
        }
    }

    /*printf("\n");*/
    core_map_iterator_destroy(&iterator);

    core_map_iterator_init(&iterator, &distribution);

    printf("node/%d worker/%d Frequency list\n", node_name, worker_name);

    while (core_map_iterator_get_next_key_and_value(&iterator, &script, &frequency)) {

        script_object = thorium_node_find_script(worker->node, script);

        CORE_DEBUGGER_ASSERT(script_object != NULL);

        printf("node/%d worker/%d Frequency %s => %d\n",
                        node_name,
                        worker->name,
                        thorium_script_name(script_object),
                        frequency);
    }

    core_map_iterator_destroy(&iterator);
    core_map_destroy(&distribution);
}
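
The per-script frequency table above is a core_map keyed by script identifier, updated with a get-then-update-or-add pattern. A standalone sketch of the same counting logic over a plain array of (script, frequency) pairs; the caller is responsible for providing enough capacity, and the names are illustrative.

#include <stdio.h>

struct script_frequency {
    int script;
    int frequency;
};

/* Increment the count for 'script', adding a new entry if needed.
 * Returns the new number of distinct scripts in the table. */
int count_script(struct script_frequency *table, int size, int script)
{
    int i;

    for (i = 0; i < size; ++i) {
        if (table[i].script == script) {
            ++table[i].frequency;
            return size;
        }
    }

    table[size].script = script;
    table[size].frequency = 1;

    return size + 1;
}

int main(void)
{
    struct script_frequency table[16];
    int scripts[] = { 42, 42, 7, 42, 7 };
    int size = 0;
    int i;

    for (i = 0; i < 5; ++i)
        size = count_script(table, size, scripts[i]);

    for (i = 0; i < size; ++i)
        printf("script %d => %d\n", table[i].script, table[i].frequency);

    return 0;
}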
Example No. 10
void thorium_worker_display(struct thorium_worker *worker)
{
    printf("[thorium_worker_main] node %i worker %i\n",
                    thorium_node_name(worker->node),
                    thorium_worker_name(worker));
}
Example No. 11
void thorium_worker_run(struct thorium_worker *worker)
{
    struct thorium_actor *actor;
    struct thorium_message other_message;

#ifdef THORIUM_NODE_INJECT_CLEAN_WORKER_BUFFERS
    void *buffer;
#endif

#ifdef THORIUM_NODE_ENABLE_INSTRUMENTATION
    time_t current_time;
    int elapsed;
    int period;
    uint64_t current_nanoseconds;
    uint64_t elapsed_nanoseconds;
#endif

#ifdef THORIUM_WORKER_DEBUG
    int tag;
    int destination;
    struct thorium_message *message;
#endif

#ifdef THORIUM_WORKER_ENABLE_LOCK
    thorium_worker_lock(worker);
#endif

#ifdef THORIUM_NODE_ENABLE_INSTRUMENTATION
    period = THORIUM_NODE_LOAD_PERIOD;
    current_time = time(NULL);

    elapsed = current_time - worker->last_report;

    if (elapsed >= period) {

        current_nanoseconds = core_timer_get_nanoseconds(&worker->timer);

#ifdef THORIUM_WORKER_DEBUG_LOAD
        printf("DEBUG Updating load report\n");
#endif
        elapsed_nanoseconds = current_nanoseconds - worker->epoch_start_in_nanoseconds;

        if (elapsed_nanoseconds > 0) {
            worker->epoch_load = (0.0 + worker->epoch_used_nanoseconds) / elapsed_nanoseconds;
            worker->epoch_used_nanoseconds = 0;
            worker->last_wake_up_count = core_thread_get_wake_up_count(&worker->thread);

            /* \see http://stackoverflow.com/questions/9657993/negative-zero-in-c
             */
            if (worker->epoch_load == 0) {
                worker->epoch_load = 0;
            }

            worker->epoch_start_in_nanoseconds = current_nanoseconds;
            worker->last_report = current_time;
        }

#ifdef THORIUM_WORKER_PRINT_SCHEDULING_QUEUE

        /*
        if (thorium_node_name(worker->node) == 0
                        && worker->name == 0) {
                        */

        thorium_scheduler_print(&worker->scheduler,
                        thorium_node_name(worker->node),
                        worker->name);
            /*
        }
        */
#endif

        if (core_bitmap_get_bit_uint32_t(&worker->flags, FLAG_DEBUG_ACTORS)) {
            thorium_worker_print_actors(worker, NULL);
        }
    }
#endif

#ifdef THORIUM_WORKER_DEBUG
    if (core_bitmap_get_bit_uint32_t(&worker->flags, FLAG_DEBUG)) {
        printf("DEBUG worker/%d thorium_worker_run\n",
                        thorium_worker_name(worker));
    }
#endif

    /* check for messages in inbound FIFO */
    if (thorium_worker_dequeue_actor(worker, &actor)) {

#ifdef THORIUM_WORKER_DEBUG
        message = biosal_work_message(&work);
        tag = thorium_message_action(message);
        destination = thorium_message_destination(message);

        if (tag == ACTION_ASK_TO_STOP) {
            printf("DEBUG pulled ACTION_ASK_TO_STOP for %d\n",
                            destination);
        }
#endif

        /*
         * Update the priority of the actor
         * before starting the timer because this is part of the
         * runtime system (RTS).
         */

#ifdef THORIUM_UPDATE_SCHEDULING_PRIORITIES
        thorium_priority_assigner_update(&worker->assigner, actor);
#endif

#ifdef THORIUM_NODE_ENABLE_INSTRUMENTATION
        core_timer_start(&worker->timer);
#endif

        core_bitmap_set_bit_uint32_t(&worker->flags, FLAG_BUSY);

        /*
         * Dispatch message to a worker
         */
        thorium_worker_work(worker, actor);

        core_bitmap_clear_bit_uint32_t(&worker->flags, FLAG_BUSY);

#ifdef THORIUM_NODE_ENABLE_INSTRUMENTATION
        core_timer_stop(&worker->timer);

        elapsed_nanoseconds = core_timer_get_elapsed_nanoseconds(&worker->timer);

        if (elapsed_nanoseconds >= THORIUM_GRANULARITY_WARNING_THRESHOLD) {
        }

        worker->epoch_used_nanoseconds += elapsed_nanoseconds;
        worker->loop_used_nanoseconds += elapsed_nanoseconds;
        worker->scheduling_epoch_used_nanoseconds += elapsed_nanoseconds;

        worker->last_elapsed_nanoseconds = elapsed_nanoseconds;
#endif
    }

    /* queue buffered message
     */
    if (core_fast_queue_dequeue(&worker->outbound_message_queue_buffer, &other_message)) {

        if (!core_fast_ring_push_from_producer(&worker->outbound_message_queue, &other_message)) {

#ifdef SHOW_FULL_RING_WARNINGS
            printf("thorium_worker: Warning: ring is full => outbound_message_queue\n");
#endif

            core_fast_queue_enqueue(&worker->outbound_message_queue_buffer, &other_message);
        }
    }

#ifdef THORIUM_NODE_INJECT_CLEAN_WORKER_BUFFERS
    /*
     * Free outbound buffers, if any
     */

    if (thorium_worker_fetch_clean_outbound_buffer(worker, &buffer)) {
        core_memory_pool_free(&worker->outbound_message_memory_pool, buffer);

#ifdef THORIUM_WORKER_DEBUG_INJECTION
        ++worker->counter_freed_outbound_buffers_from_other_workers;
#endif
    }
#endif

    /*
     * Transfer messages for triage
     */

    if (core_fast_queue_dequeue(&worker->clean_message_queue_for_triage, &other_message)) {

        CORE_DEBUGGER_ASSERT(thorium_message_buffer(&other_message) != NULL);
        thorium_worker_enqueue_message_for_triage(worker, &other_message);
    }

#ifdef THORIUM_WORKER_ENABLE_LOCK
    thorium_worker_unlock(worker);
#endif
}
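
The epoch load computed above is the fraction of wall-clock time the worker spent doing actor work during the reporting period: used nanoseconds divided by elapsed nanoseconds. A small standalone check of that formula:

#include <stdint.h>
#include <stdio.h>

/* Load over one reporting epoch; 1.0 means the worker was busy the whole time. */
float epoch_load(uint64_t used_nanoseconds, uint64_t elapsed_nanoseconds)
{
    if (elapsed_nanoseconds == 0)
        return 0.0f;

    return (0.0f + used_nanoseconds) / elapsed_nanoseconds;
}

int main(void)
{
    /* 750 ms of actor work over a 1 s epoch gives a load of 0.75. */
    printf("%.2f\n", epoch_load(750000000ULL, 1000000000ULL));

    return 0;
}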
Example No. 12
void thorium_balancer_generate_symmetric_migrations(struct thorium_balancer *self, struct core_map *symmetric_actor_scripts,
                struct core_vector *migrations)
{
    int i;
    int worker_count;
    struct thorium_worker *worker;
    struct core_map *set;
    struct core_map_iterator iterator;
    struct thorium_migration migration;
    struct core_map script_current_worker;
    struct core_map script_current_worker_actor_count;
    int frequency;
    int current_worker;
    int current_worker_actor_count;
    int old_worker;
#ifdef THORIUM_SCHEDULER_ENABLE_VERBOSITY
    struct thorium_script *actual_script;
#endif
    struct thorium_node *node;
    int actor_name;
    int script;
    int new_worker;
    struct thorium_actor *actor;
    int enabled;

    /* Gather symmetric actors:
     */

#ifdef THORIUM_SCHEDULER_ENABLE_SYMMETRIC_SCHEDULING
    enabled = 1;
#else
    enabled = 0;
#endif

    core_map_init(&script_current_worker, sizeof(int), sizeof(int));
    core_map_init(&script_current_worker_actor_count, sizeof(int), sizeof(int));

    node = thorium_worker_pool_get_node(self->pool);
    worker_count = thorium_worker_pool_worker_count(self->pool);

    for (i = 0; i < worker_count; i++) {

        worker = thorium_worker_pool_get_worker(self->pool, i);

        set = thorium_worker_get_actors(worker);

        core_map_iterator_init(&iterator, set);

        while (core_map_iterator_get_next_key_and_value(&iterator, &actor_name, NULL)) {
            actor = thorium_node_get_actor_from_name(node, actor_name);

            if (actor == NULL) {
                continue;
            }

            script = thorium_actor_script(actor);

            /*
             * Check if the actor is symmetric
             */
            if (core_map_get_value(symmetric_actor_scripts, &script, &frequency)) {

                current_worker = 0;
                if (!core_map_get_value(&script_current_worker, &script, &current_worker)) {
                    core_map_add_value(&script_current_worker, &script, &current_worker);
                }
                current_worker_actor_count = 0;
                if (!core_map_get_value(&script_current_worker_actor_count, &script, &current_worker_actor_count)) {
                    core_map_add_value(&script_current_worker_actor_count, &script, &current_worker_actor_count);
                }

                /*
                 * Emit migration instruction
                 */

                old_worker = thorium_balancer_get_actor_worker(self, actor_name);
                new_worker = current_worker;
#ifdef THORIUM_SCHEDULER_ENABLE_VERBOSITY
                actual_script = thorium_node_find_script(node, script);
#endif

                if (enabled && old_worker != new_worker) {
                    thorium_migration_init(&migration, actor_name, old_worker, new_worker);
                    core_vector_push_back(migrations, &migration);
                    thorium_migration_destroy(&migration);

#ifdef THORIUM_SCHEDULER_ENABLE_VERBOSITY
                    printf("[EMIT] ");
#endif
                } else {
#ifdef THORIUM_SCHEDULER_ENABLE_VERBOSITY
                    printf("[MOCK] ");
#endif
                }

#ifdef THORIUM_SCHEDULER_ENABLE_VERBOSITY
                printf("SCHEDULER -> symmetric placement... %s/%d scheduled for execution on worker/%d of node/%d\n",
                                thorium_script_description(actual_script),
                                actor_name,
                                new_worker,
                                thorium_node_name(node));
#endif

                ++current_worker_actor_count;
                core_map_update_value(&script_current_worker_actor_count, &script, &current_worker_actor_count);

                /* The current worker is full.
                 * Increment the current worker and set the
                 * worker actor count to 0.
                 */
                if (current_worker_actor_count == frequency) {
                    ++current_worker;
                    core_map_update_value(&script_current_worker, &script, &current_worker);
                    current_worker_actor_count = 0;
                    core_map_update_value(&script_current_worker_actor_count, &script, &current_worker_actor_count);
                }
            }

        }

        core_map_iterator_destroy(&iterator);
    }

    core_map_destroy(&script_current_worker);
    core_map_destroy(&script_current_worker_actor_count);
}
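
For each symmetric script, the balancer walks the actors in order and assigns them to workers round-robin, giving each worker its share of actors of that script (the frequency value stored in symmetric_actor_scripts) before moving on to the next worker. A standalone sketch of that placement loop, mirroring the current_worker / current_worker_actor_count bookkeeping above, with a wrap added so the sketch never runs past the last worker; the names are illustrative.

#include <stdio.h>

/* Assign 'actor_count' actors of one script to 'worker_count' workers,
 * 'actors_per_worker' at a time. */
void place_symmetric_actors(int actor_count, int worker_count, int actors_per_worker)
{
    int current_worker = 0;
    int current_worker_actor_count = 0;
    int i;

    for (i = 0; i < actor_count; ++i) {
        printf("actor %d -> worker %d\n", i, current_worker);

        ++current_worker_actor_count;

        /* The current worker has its share; move to the next one. */
        if (current_worker_actor_count == actors_per_worker) {
            current_worker = (current_worker + 1) % worker_count;
            current_worker_actor_count = 0;
        }
    }
}

int main(void)
{
    /* 8 actors, 4 workers, 2 actors of this script per worker. */
    place_symmetric_actors(8, 4, 2);

    return 0;
}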
Example No. 13
int thorium_balancer_select_worker_least_busy(
                struct thorium_balancer *self, int *worker_score)
{
    int to_check;
    int score;
    int best_score;
    struct thorium_worker *worker;
    struct thorium_worker *best_worker;
    int selected_worker;

#if 0
    int last_worker_score;
#endif

#ifdef THORIUM_WORKER_DEBUG
    int tag;
    int destination;
    struct thorium_message *message;
#endif

    best_worker = NULL;
    best_score = 99;

    to_check = THORIUM_SCHEDULER_WORK_SCHEDULING_WINDOW;

    while (to_check--) {

        /*
         * get the worker to test for this iteration.
         */
        worker = thorium_worker_pool_get_worker(self->pool, self->worker_for_work);

        score = thorium_worker_get_epoch_load(worker);

#ifdef THORIUM_WORKER_POOL_DEBUG_ISSUE_334
        if (score >= THORIUM_WORKER_WARNING_THRESHOLD
                        && (self->last_scheduling_warning == 0
                             || score >= self->last_scheduling_warning + THORIUM_WORKER_WARNING_THRESHOLD_STRIDE)) {
            printf("Warning: node %d worker %d has a scheduling score of %d\n",
                            thorium_node_name(thorium_worker_pool_get_node(self->pool)),
                            self->worker_for_work, score);

            self->last_scheduling_warning = score;
        }
#endif

        /* if the worker is not busy and it has no work to do,
         * select it right away...
         */
        if (score == 0) {
            best_worker = worker;
            best_score = 0;
            break;
        }

        /* Otherwise, test the worker
         */
        if (best_worker == NULL || score < best_score) {
            best_worker = worker;
            best_score = score;
        }

        /*
         * assign the next worker
         */
        self->worker_for_work = thorium_worker_pool_next_worker(self->pool, self->worker_for_work);
    }

#ifdef THORIUM_WORKER_POOL_DEBUG
    message = biosal_work_message(work);
    tag = thorium_message_action(message);
    destination = thorium_message_destination(message);

    if (tag == ACTION_ASK_TO_STOP) {
        printf("DEBUG dispatching ACTION_ASK_TO_STOP for actor %d to worker %d\n",
                        destination, *start);
    }


#endif

    selected_worker = self->worker_for_work;

    /*
     * assign the next worker
     */
    self->worker_for_work = thorium_worker_pool_next_worker(self->pool, self->worker_for_work);

    *worker_score = best_score;
    /* This is a best effort algorithm
     */
    return selected_worker;
}