Example 1
int thorium_worker_get_producer_count(struct thorium_worker *worker, struct thorium_balancer *scheduler)
{
    struct core_map_iterator iterator;
    int name;
    struct thorium_actor *actor;
    int count;

    count = 0;
    core_map_iterator_init(&iterator, &worker->actors);

    while (core_map_iterator_get_next_key_and_value(&iterator, &name, NULL)) {

        actor = thorium_node_get_actor_from_name(worker->node, name);

        if (actor == NULL) {
            continue;
        }

        if (thorium_balancer_get_actor_production(scheduler, actor) > 0) {
            ++count;
        }

    }

    core_map_iterator_destroy(&iterator);
    return count;
}
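
All of the routines in this file walk a core_map the same way: initialize an iterator on the map, pull key/value pairs into caller-provided buffers (either output may be NULL), then destroy the iterator. A minimal sketch of that protocol, using only the iterator calls shown in these examples and a hypothetical int-to-int map; the helper name is a placeholder:

/*
 * Count the entries of an int -> int map whose value exceeds a
 * threshold. Hypothetical helper; it only uses the iterator calls
 * shown in the examples in this file.
 */
int count_entries_above(struct core_map *map, int threshold)
{
    struct core_map_iterator iterator;
    int key;
    int value;
    int count;

    count = 0;
    core_map_iterator_init(&iterator, map);

    /* Both outputs are copied into caller-provided buffers;
     * passing NULL skips an output, as in the examples above.
     */
    while (core_map_iterator_get_next_key_and_value(&iterator, &key, &value)) {
        if (value > threshold) {
            ++count;
        }
    }

    core_map_iterator_destroy(&iterator);

    return count;
}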
Example 2
int thorium_worker_get_sum_of_received_actor_messages(struct thorium_worker *worker)
{
    int value;
    struct core_map_iterator iterator;
    int actor_name;
    int messages;
    struct thorium_actor *actor;

    core_map_iterator_init(&iterator, &worker->actors);

    value = 0;
    while (core_map_iterator_get_next_key_and_value(&iterator, &actor_name, NULL)) {

        actor = thorium_node_get_actor_from_name(worker->node, actor_name);

        if (actor == NULL) {
            continue;
        }

        messages = thorium_actor_get_sum_of_received_messages(actor);

        value += messages;
    }

    core_map_iterator_destroy(&iterator);

    return value;
}
Example 3
void thorium_worker_evict_actor(struct thorium_worker *worker, int actor_name)
{
    struct thorium_actor *actor;
    int name;
    struct core_fast_queue saved_actors;
    int count;
    int value;

    core_set_add(&worker->evicted_actors, &actor_name);
    core_map_delete(&worker->actors, &actor_name);
    core_fast_queue_init(&saved_actors, sizeof(struct thorium_actor *));

    /* evict the actor from the scheduling queue
     */
    while (thorium_scheduler_dequeue(&worker->scheduler, &actor)) {

        name = thorium_actor_name(actor);

        if (name != actor_name) {

            core_fast_queue_enqueue(&saved_actors,
                            &actor);
        }
    }

    while (core_fast_queue_dequeue(&saved_actors, &actor)) {
        thorium_scheduler_enqueue(&worker->scheduler, actor);
    }

    core_fast_queue_destroy(&saved_actors);

    /* Evict the actor from the ring
     */

    count = core_fast_ring_size_from_consumer(&worker->actors_to_schedule);

    while (count-- && core_fast_ring_pop_from_consumer(&worker->actors_to_schedule,
                            &actor)) {

        name = thorium_actor_name(actor);

        if (name != actor_name) {

            /*
             * Logically, this cannot fail.
             */
            value = core_fast_ring_push_from_producer(&worker->actors_to_schedule,
                            &actor);

            CORE_DEBUGGER_ASSERT(value);
        }
    }

    core_map_iterator_destroy(&worker->actor_iterator);
    core_map_iterator_init(&worker->actor_iterator, &worker->actors);
}
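
The second half of thorium_worker_evict_actor uses a bounded drain-and-requeue idiom on the ring: snapshot the size from the consumer side, pop that many entries, and push the survivors back from the producer side so re-pushed entries are not visited twice. A sketch of that idiom in isolation, under the same single-consumer assumption as the code above; keep() is a hypothetical predicate:

/*
 * Keep only the ring entries that satisfy a predicate.
 * Hypothetical helper built from the ring calls used above.
 */
void ring_filter_actors(struct core_fast_ring *ring,
                int (*keep)(struct thorium_actor *actor))
{
    struct thorium_actor *actor;
    int count;
    int pushed;

    /* Snapshot the size so that re-pushed entries are not
     * popped a second time.
     */
    count = core_fast_ring_size_from_consumer(ring);

    while (count-- && core_fast_ring_pop_from_consumer(ring, &actor)) {

        if (keep(actor)) {
            /* Logically, this cannot fail: at least one slot
             * was just freed by the pop.
             */
            pushed = core_fast_ring_push_from_producer(ring, &actor);

            CORE_DEBUGGER_ASSERT(pushed);
        }
    }
}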
Example 4
void biosal_input_controller_destroy(struct thorium_actor *actor)
{
    struct biosal_input_controller *concrete_actor;
    int i;
    char *pointer;
    struct core_map_iterator iterator;
    struct core_vector *vector;

    concrete_actor = (struct biosal_input_controller *)thorium_actor_concrete_actor(actor);

    core_timer_destroy(&concrete_actor->input_timer);
    core_timer_destroy(&concrete_actor->counting_timer);
    core_timer_destroy(&concrete_actor->distribution_timer);

    biosal_dna_codec_destroy(&concrete_actor->codec);

    for (i = 0; i < core_vector_size(&concrete_actor->files); i++) {
        pointer = *(char **)core_vector_at(&concrete_actor->files, i);
        core_memory_free(pointer, MEMORY_CONTROLLER);
    }

    core_vector_destroy(&concrete_actor->mega_block_vector);
    core_vector_destroy(&concrete_actor->counting_streams);
    core_vector_destroy(&concrete_actor->reading_streams);
    core_vector_destroy(&concrete_actor->partition_commands);
    core_vector_destroy(&concrete_actor->consumer_active_requests);
    core_vector_destroy(&concrete_actor->stream_consumers);
    core_vector_destroy(&concrete_actor->files);
    core_vector_destroy(&concrete_actor->spawners);
    core_vector_destroy(&concrete_actor->counts);
    core_vector_destroy(&concrete_actor->consumers);
    core_vector_destroy(&concrete_actor->stores_per_spawner);
    core_queue_destroy(&concrete_actor->unprepared_spawners);

    core_map_iterator_init(&iterator, &concrete_actor->mega_blocks);
    while (core_map_iterator_has_next(&iterator)) {
        core_map_iterator_next(&iterator, NULL, (void **)&vector);

        core_vector_destroy(vector);
    }
    core_map_iterator_destroy(&iterator);
    core_map_destroy(&concrete_actor->mega_blocks);
    core_map_destroy(&concrete_actor->assigned_blocks);
}
Example 5
void biosal_coverage_distribution_ask_to_stop(struct thorium_actor *self, struct thorium_message *message)
{
    struct core_map_iterator iterator;
    struct core_vector coverage_values;
    struct biosal_coverage_distribution *concrete_actor;

    uint64_t *frequency;
    int *coverage;

    concrete_actor = (struct biosal_coverage_distribution *)thorium_actor_concrete_actor(self);
    core_map_iterator_init(&iterator, &concrete_actor->distribution);

    core_vector_init(&coverage_values, sizeof(int));

    while (core_map_iterator_has_next(&iterator)) {

#if 0
        thorium_actor_log(self, "DEBUG EMIT iterator\n");
#endif
        core_map_iterator_next(&iterator, (void **)&coverage,
                        (void **)&frequency);

#ifdef CORE_DEBUGGER_ASSERT_ENABLED
        if (coverage == NULL) {
            thorium_actor_log(self, "DEBUG map has %d buckets\n", (int)core_map_size(&concrete_actor->distribution));
        }
#endif
        CORE_DEBUGGER_ASSERT(coverage != NULL);

        core_vector_push_back(&coverage_values, coverage);
    }

    core_vector_sort_int(&coverage_values);

    core_map_iterator_destroy(&iterator);

    core_vector_destroy(&coverage_values);

    thorium_actor_ask_to_stop(self, message);
}
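
The collect-then-sort step above (copy the int keys of a map into a vector, then core_vector_sort_int) reappears in biosal_coverage_distribution_write_distribution further below. A sketch of that step as a reusable helper, using only calls from these listings; the caller owns and must destroy the vector, and the helper name is a placeholder:

/*
 * Copy the int keys of a map into a vector and sort them.
 * Hypothetical helper; same calls as the examples in this file.
 */
void map_collect_sorted_int_keys(struct core_map *map, struct core_vector *keys)
{
    struct core_map_iterator iterator;
    int key;

    core_vector_init(keys, sizeof(int));
    core_map_iterator_init(&iterator, map);

    /* NULL skips the value output. */
    while (core_map_iterator_get_next_key_and_value(&iterator, &key, NULL)) {
        core_vector_push_back(keys, &key);
    }

    core_map_iterator_destroy(&iterator);

    core_vector_sort_int(keys);
}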
Example 6
void thorium_balancer_balance(struct thorium_balancer *self)
{
    /*
     * The 95th percentile is useful:
     * \see http://en.wikipedia.org/wiki/Burstable_billing
     * \see http://www.init7.net/en/backbone/95-percent-rule
     */
    int load_percentile_50;
    struct core_timer timer;

    int i;
    struct core_vector loads;
    struct core_vector loads_unsorted;
    struct core_vector burdened_workers;
    struct core_vector stalled_workers;
    struct thorium_worker *worker;
    struct thorium_node *node;

    /*struct core_set *set;*/
    struct core_pair pair;
    struct core_vector_iterator vector_iterator;
    int old_worker;
    int actor_name;
    int messages;
    int maximum;
    int with_maximum;
    struct core_map *set;
    struct core_map_iterator set_iterator;
    int stalled_index;
    int stalled_count;
    int new_worker_index;
    struct core_vector migrations;
    struct thorium_migration migration;
    struct thorium_migration *migration_to_do;
    struct thorium_actor *actor;
    int candidates;

    int load_value;
    int remaining_load;
    int projected_load;

    struct core_vector actors_to_migrate;
    int total;
    int with_messages;
    int stalled_percentile;
    int burdened_percentile;

    int old_total;
    int old_load;
    int new_load;
    int predicted_new_load;
    struct core_pair *pair_pointer;
    struct thorium_worker *new_worker;
    /*int new_total;*/
    int actor_load;

    int test_stalled_index;
    int tests;
    int found_match;
    int spawned_actors;
    int killed_actors;
    int perfect;

#ifdef THORIUM_SCHEDULER_ENABLE_SYMMETRIC_SCHEDULING
    struct core_map symmetric_actor_scripts;
    int script;
#endif

    node = thorium_worker_pool_get_node(self->pool);

    spawned_actors = thorium_node_get_counter(node, CORE_COUNTER_SPAWNED_ACTORS);

    /* There is nothing to balance...
     */
    if (spawned_actors == 0) {
        return;
    }

    killed_actors = thorium_node_get_counter(node, CORE_COUNTER_KILLED_ACTORS);

    /*
     * The system probably cannot be balanced into
     * a better shape anyway.
     */
    if (spawned_actors == self->last_spawned_actors
                    && killed_actors == self->last_killed_actors
                    && self->last_migrations == 0) {

        printf("SCHEDULER: balance can not be improved because nothing changed.\n");
        return;
    }

    /* Check if we have perfection
     */

    perfect = 1;
    for (i = 0; i < thorium_worker_pool_worker_count(self->pool); i++) {
        worker = thorium_worker_pool_get_worker(self->pool, i);

        load_value = thorium_worker_get_epoch_load(worker) * 100;

        if (load_value != 100) {
            perfect = 0;
            break;
        }
    }

    if (perfect) {
        printf("SCHEDULER: perfect balance can not be improved.\n");
        return;
    }

    /* update counters
     */
    self->last_spawned_actors = spawned_actors;
    self->last_killed_actors = killed_actors;

    /* Otherwise, try to balance things
     */
    core_timer_init(&timer);

    core_timer_start(&timer);

#ifdef THORIUM_SCHEDULER_ENABLE_SYMMETRIC_SCHEDULING
    core_map_init(&symmetric_actor_scripts, sizeof(int), sizeof(int));

    thorium_balancer_detect_symmetric_scripts(self, &symmetric_actor_scripts);
#endif

#ifdef THORIUM_WORKER_ENABLE_LOCK
    /* Lock all workers first
     */
    for (i = 0; i < thorium_worker_pool_worker_count(self->pool); i++) {
        worker = thorium_worker_pool_get_worker(self->pool, i);

        thorium_worker_lock(worker);
    }
#endif

    core_vector_init(&migrations, sizeof(struct thorium_migration));

#ifdef THORIUM_SCHEDULER_ENABLE_VERBOSITY
    printf("BALANCING\n");
#endif

    core_vector_init(&loads, sizeof(int));
    core_vector_init(&loads_unsorted, sizeof(int));
    core_vector_init(&burdened_workers, sizeof(struct core_pair));
    core_vector_init(&stalled_workers, sizeof(struct core_pair));

    core_vector_init(&actors_to_migrate, sizeof(struct core_pair));

    for (i = 0; i < thorium_worker_pool_worker_count(self->pool); i++) {
        worker = thorium_worker_pool_get_worker(self->pool, i);
        load_value = thorium_worker_get_scheduling_epoch_load(worker) * SCHEDULER_PRECISION;

#if 0
        printf("DEBUG LOAD %d %d\n", i, load_value);
#endif

        core_vector_push_back(&loads, &load_value);
        core_vector_push_back(&loads_unsorted, &load_value);
    }

    core_vector_sort_int(&loads);

    stalled_percentile = core_statistics_get_percentile_int(&loads, SCHEDULER_WINDOW);
    /*load_percentile_25 = core_statistics_get_percentile_int(&loads, 25);*/
    load_percentile_50 = core_statistics_get_percentile_int(&loads, 50);
    /*load_percentile_75 = core_statistics_get_percentile_int(&loads, 75);*/
    burdened_percentile = core_statistics_get_percentile_int(&loads, 100 - SCHEDULER_WINDOW);

#ifdef THORIUM_SCHEDULER_ENABLE_VERBOSITY
    printf("Percentiles for epoch loads: ");
    core_statistics_print_percentiles_int(&loads);
#endif

    for (i = 0; i < thorium_worker_pool_worker_count(self->pool); i++) {
        worker = thorium_worker_pool_get_worker(self->pool, i);
        load_value = core_vector_at_as_int(&loads_unsorted, i);

        set = thorium_worker_get_actors(worker);

        if (stalled_percentile == burdened_percentile) {

#ifdef THORIUM_SCHEDULER_ENABLE_VERBOSITY
            printf("scheduling_class:%s ",
                            THORIUM_CLASS_NORMAL_STRING);
#endif

        } else if (load_value <= stalled_percentile) {

#ifdef THORIUM_SCHEDULER_ENABLE_VERBOSITY
            printf("scheduling_class:%s ",
                            THORIUM_CLASS_STALLED_STRING);
#endif

            core_pair_init(&pair, load_value, i);
            core_vector_push_back(&stalled_workers, &pair);

        } else if (load_value >= burdened_percentile) {

#ifdef THORIUM_SCHEDULER_ENABLE_VERBOSITY
            printf("scheduling_class:%s ",
                            THORIUM_CLASS_BURDENED_STRING);
#endif

            core_pair_init(&pair, load_value, i);
            core_vector_push_back(&burdened_workers, &pair);
        } else {
#ifdef THORIUM_SCHEDULER_ENABLE_VERBOSITY
            printf("scheduling_class:%s ",
                            THORIUM_CLASS_NORMAL_STRING);
#endif
        }

#ifdef THORIUM_SCHEDULER_ENABLE_VERBOSITY
        thorium_worker_print_actors(worker, self);
#endif

    }

    core_vector_sort_int_reverse(&burdened_workers);
    core_vector_sort_int(&stalled_workers);

    stalled_count = core_vector_size(&stalled_workers);

#ifdef THORIUM_SCHEDULER_ENABLE_VERBOSITY
    printf("MIGRATIONS (stalled: %d, burdened: %d)\n", (int)core_vector_size(&stalled_workers),
                    (int)core_vector_size(&burdened_workers));
#endif

    stalled_index = 0;
    core_vector_iterator_init(&vector_iterator, &burdened_workers);

    while (stalled_count > 0
                    && core_vector_iterator_get_next_value(&vector_iterator, &pair)) {

        old_worker = core_pair_get_second(&pair);

        worker = thorium_worker_pool_get_worker(self->pool, old_worker);
        set = thorium_worker_get_actors(worker);

        /*
        thorium_worker_print_actors(worker);
        printf("\n");
        */

        /*
         * Lock the worker and try to select actors for migration
         */
        core_map_iterator_init(&set_iterator, set);

        maximum = -1;
        with_maximum = 0;
        total = 0;
        with_messages = 0;

        while (core_map_iterator_get_next_key_and_value(&set_iterator, &actor_name, NULL)) {

            actor = thorium_node_get_actor_from_name(thorium_worker_pool_get_node(self->pool), actor_name);
            messages = thorium_balancer_get_actor_production(self, actor);

            if (maximum == -1 || messages > maximum) {
                maximum = messages;
                with_maximum = 1;
            } else if (messages == maximum) {
                with_maximum++;
            }

            if (messages > 0) {
                ++with_messages;
            }

            total += messages;
        }

        core_map_iterator_destroy(&set_iterator);

        core_map_iterator_init(&set_iterator, set);

        --with_maximum;

        candidates = 0;
        load_value = thorium_worker_get_scheduling_epoch_load(worker) * SCHEDULER_PRECISION;

        remaining_load = load_value;

#if 0
        printf("maximum %d with_maximum %d\n", maximum, with_maximum);
#endif

        while (core_map_iterator_get_next_key_and_value(&set_iterator, &actor_name, NULL)) {

            actor = thorium_node_get_actor_from_name(thorium_worker_pool_get_node(self->pool), actor_name);

            if (actor == NULL) {
                continue;
            }
            messages = thorium_balancer_get_actor_production(self, actor);

#ifdef THORIUM_SCHEDULER_ENABLE_SYMMETRIC_SCHEDULING
            script = thorium_actor_script(actor);


            /* symmetric actors are migrated elsewhere.
             */
            if (core_map_get_value(&symmetric_actor_scripts, &script, NULL)) {
                continue;
            }
#endif

            /* Simulate the remaining load
             */
            projected_load = remaining_load;
            projected_load -= ((0.0 + messages) / total) * load_value;

#ifdef THORIUM_SCHEDULER_DEBUG
            printf(" TESTING actor %d, production was %d, projected_load is %d (- %d * (1 - %d/%d)\n",
                            actor_name, messages, projected_load,
                            load_value, messages, total);
#endif

            /* An actor without any queued messages should not be migrated
             */
            if (messages > 0
                            && ((with_maximum > 0 && messages == maximum) || messages < maximum)
                /*
                 * Avoid removing too many actors because
                 * generating a stalled one is not desired
                 */
                    && (projected_load >= load_percentile_50

                /*
                 * The previous rule does not apply when there
                 * are 2 actors.
                 */
                   || with_messages == 2) ) {

                remaining_load = projected_load;

                candidates++;

                if (messages == maximum) {
                    --with_maximum;
                }


                core_pair_init(&pair, messages, actor_name);
                core_vector_push_back(&actors_to_migrate, &pair);

#ifdef THORIUM_SCHEDULER_DEBUG
                printf("early CANDIDATE for migration: actor %d, worker %d\n",
                                actor_name, old_worker);
#endif
            }
        }
        core_map_iterator_destroy(&set_iterator);

    }

    core_vector_iterator_destroy(&vector_iterator);

    /* Sort the candidates
     */

    /*
    core_vector_sort_int(&actors_to_migrate);

    printf("Percentiles for production: ");
    core_statistics_print_percentiles_int(&actors_to_migrate);
    */

    /* Sort them in reverse order.
     */
    core_vector_sort_int_reverse(&actors_to_migrate);

    core_vector_iterator_init(&vector_iterator, &actors_to_migrate);

    /* For each highly active actor,
     * try to match it with a stalled worker
     */
    while (core_vector_iterator_get_next_value(&vector_iterator, &pair)) {

        actor_name = core_pair_get_second(&pair);

        actor = thorium_node_get_actor_from_name(thorium_worker_pool_get_node(self->pool), actor_name);

        if (actor == NULL) {
           continue;
        }

        messages = thorium_balancer_get_actor_production(self, actor);
        old_worker = thorium_actor_assigned_worker(actor);

        worker = thorium_worker_pool_get_worker(self->pool, old_worker);

        /* old_total cannot be 0 because otherwise the worker would not
         * be burdened.
         */
        old_total = thorium_worker_get_production(worker, self);
        with_messages = thorium_worker_get_producer_count(worker, self);
        old_load = thorium_worker_get_scheduling_epoch_load(worker) * SCHEDULER_PRECISION;
        actor_load = ((0.0 + messages) / old_total) * old_load;

        /* Try to find a stalled worker that can take it.
         */

        test_stalled_index = stalled_index;
        tests = 0;
        predicted_new_load = 0;

        found_match = 0;
        while (tests < stalled_count) {

            core_vector_get_value(&stalled_workers, test_stalled_index, &pair);
            new_worker_index = core_pair_get_second(&pair);

            new_worker = thorium_worker_pool_get_worker(self->pool, new_worker_index);
            new_load = thorium_worker_get_scheduling_epoch_load(new_worker) * SCHEDULER_PRECISION;
        /*new_total = thorium_worker_get_production(new_worker);*/

            predicted_new_load = new_load + actor_load;

            if (predicted_new_load > SCHEDULER_PRECISION /* && with_messages != 2 */) {
#ifdef THORIUM_SCHEDULER_DEBUG
                printf("Scheduler: skipping actor %d, predicted load is %d >= 100\n",
                           actor_name, predicted_new_load);
#endif

                ++tests;
                ++test_stalled_index;

                if (test_stalled_index == stalled_count) {
                    test_stalled_index = 0;
                }
                continue;
            }

            /* Otherwise, this stalled worker is fine...
             */
            stalled_index = test_stalled_index;
            found_match = 1;

            break;
        }

        /* This actor cannot be migrated to any stalled worker.
         */
        if (!found_match) {
            continue;
        }

        /* Otherwise, update the load of the stalled one and go forward with the change.
         */

        pair_pointer = (struct core_pair *)core_vector_at(&stalled_workers, stalled_index);

        core_pair_set_first(pair_pointer, predicted_new_load);

        ++stalled_index;

        if (stalled_index == stalled_count) {
            stalled_index = 0;
        }


#if 0
        new_worker = thorium_worker_pool_get_worker(pool, new_worker_index);
        printf(" CANDIDATE: actor %d old worker %d (%d - %d = %d) new worker %d (%d + %d = %d)\n",
                        actor_name,
                        old_worker, value, messages, new_score,
                        new_worker_index, new_worker_old_score, messages, new_worker_new_score);
#endif

        thorium_migration_init(&migration, actor_name, old_worker, new_worker_index);
        core_vector_push_back(&migrations, &migration);
        thorium_migration_destroy(&migration);

    }

    core_vector_iterator_destroy(&vector_iterator);

    core_vector_destroy(&stalled_workers);
    core_vector_destroy(&burdened_workers);
    core_vector_destroy(&loads);
    core_vector_destroy(&loads_unsorted);
    core_vector_destroy(&actors_to_migrate);

    /* Update the last values
     */
    for (i = 0; i < thorium_worker_pool_worker_count(self->pool); i++) {

        worker = thorium_worker_pool_get_worker(self->pool, i);

        set = thorium_worker_get_actors(worker);

        core_map_iterator_init(&set_iterator, set);

        while (core_map_iterator_get_next_key_and_value(&set_iterator, &actor_name, NULL)) {
            actor = thorium_node_get_actor_from_name(thorium_worker_pool_get_node(self->pool), actor_name);
            thorium_balancer_update_actor_production(self, actor);
        }
        core_map_iterator_destroy(&set_iterator);

        thorium_worker_reset_scheduling_epoch(worker);
    }

#ifdef THORIUM_SCHEDULER_ENABLE_SYMMETRIC_SCHEDULING
    /* Generate migrations for symmetric actors.
     */

    thorium_balancer_generate_symmetric_migrations(self, &symmetric_actor_scripts, &migrations);
#endif

    /* Actually do the migrations
     */
    core_vector_iterator_init(&vector_iterator, &migrations);

    while (core_vector_iterator_next(&vector_iterator, (void **)&migration_to_do)) {

        thorium_balancer_migrate(self, migration_to_do);
    }

    core_vector_iterator_destroy(&vector_iterator);

    self->last_migrations = core_vector_size(&migrations);

    core_vector_destroy(&migrations);

#ifdef THORIUM_WORKER_ENABLE_LOCK
    /* Unlock all workers
     */
    for (i = 0; i < thorium_worker_pool_worker_count(self->pool); i++) {
        worker = thorium_worker_pool_get_worker(self->pool, i);

        thorium_worker_unlock(worker);
    }
#endif

#ifdef THORIUM_SCHEDULER_ENABLE_SYMMETRIC_SCHEDULING
    core_map_destroy(&symmetric_actor_scripts);
#endif

    core_timer_stop(&timer);

    printf("SCHEDULER: elapsed time for balancing: %d us, %d migrations performed\n",
                    (int)(core_timer_get_elapsed_nanoseconds(&timer) / 1000),
                    self->last_migrations);
}
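
The classification step in the middle of thorium_balancer_balance can be read in isolation: a worker is stalled when its load is at or below the SCHEDULER_WINDOW percentile, burdened when at or above the (100 - SCHEDULER_WINDOW) percentile, and normal otherwise, or whenever the two thresholds coincide. A sketch of just that decision, assuming the loads vector was already sorted with core_vector_sort_int as above; the helper name is a placeholder and it returns the class strings printed above:

/*
 * Classify one epoch load against the percentile window.
 * Hypothetical helper; only uses calls from the function above.
 */
const char *classify_epoch_load(struct core_vector *sorted_loads, int load_value)
{
    int stalled_percentile;
    int burdened_percentile;

    stalled_percentile = core_statistics_get_percentile_int(sorted_loads, SCHEDULER_WINDOW);
    burdened_percentile = core_statistics_get_percentile_int(sorted_loads, 100 - SCHEDULER_WINDOW);

    /* Degenerate window: every worker is considered normal. */
    if (stalled_percentile == burdened_percentile) {
        return THORIUM_CLASS_NORMAL_STRING;
    }

    if (load_value <= stalled_percentile) {
        return THORIUM_CLASS_STALLED_STRING;
    }

    if (load_value >= burdened_percentile) {
        return THORIUM_CLASS_BURDENED_STRING;
    }

    return THORIUM_CLASS_NORMAL_STRING;
}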
Example 7
void biosal_assembly_graph_store_yield_reply_summary(struct thorium_actor *self, struct thorium_message *message)
{
    struct biosal_assembly_graph_store *concrete_self;
    int limit;
    int processed;
    int new_count;
    void *new_buffer;
    struct biosal_assembly_vertex *vertex;
    int coverage;
    int parent_count;
    int child_count;
    /*int child_count;*/

    concrete_self = thorium_actor_concrete_actor(self);

    limit = 4321;
    processed = 0;

    /*
     * This loop gathers canonical information only.
     */
    while (processed < limit
                    && core_map_iterator_has_next(&concrete_self->iterator)) {

        core_map_iterator_next(&concrete_self->iterator, NULL, (void **)&vertex);

        coverage = biosal_assembly_vertex_coverage_depth(vertex);

        parent_count = biosal_assembly_vertex_parent_count(vertex);
        child_count = biosal_assembly_vertex_child_count(vertex);

        /*
         * Don't count any real arc twice.
         */
        /*
        child_count = biosal_assembly_vertex_child_count(vertex);
        concrete_self->arc_count += child_count;
        */

        /*
         * Gather degree information too!
         */

        biosal_assembly_graph_summary_add(&concrete_self->graph_summary, coverage, parent_count, child_count);
        biosal_assembly_graph_summary_add(&concrete_self->graph_summary, coverage, child_count, parent_count);

        ++processed;
    }

    if (core_map_iterator_has_next(&concrete_self->iterator)) {

        thorium_actor_send_to_self_empty(self, ACTION_YIELD);

    } else {

        /*
         * Send the answer
         */

        new_count = biosal_assembly_graph_summary_pack_size(&concrete_self->graph_summary);
        new_buffer = thorium_actor_allocate(self, new_count);
        biosal_assembly_graph_summary_pack(&concrete_self->graph_summary, new_buffer);
        thorium_actor_send_buffer(self, concrete_self->source_for_summary,
                        ACTION_ASSEMBLY_GET_SUMMARY_REPLY, new_count, new_buffer);

        /*
         * Reset the iterator.
         */

        core_map_iterator_destroy(&concrete_self->iterator);
        core_map_iterator_init(&concrete_self->iterator, &concrete_self->table);
    }
}
Example 8
void biosal_assembly_graph_store_push_kmer_block(struct thorium_actor *self, struct thorium_message *message)
{
    struct core_memory_pool *ephemeral_memory;
    struct biosal_dna_kmer_frequency_block block;
    struct biosal_assembly_vertex *bucket;
    void *packed_kmer;
    struct core_map_iterator iterator;
    struct biosal_assembly_graph_store *concrete_self;
    /*int tag;*/
    void *key;
    struct core_map *kmers;
    struct biosal_dna_kmer kmer;
    void *buffer;
    int count;
    struct biosal_dna_kmer encoded_kmer;
    char *raw_kmer;
    int period;
    struct biosal_dna_kmer *kmer_pointer;
    int *frequency;

    ephemeral_memory = thorium_actor_get_ephemeral_memory(self);
    concrete_self = thorium_actor_concrete_actor(self);
    /*tag = thorium_message_action(message);*/
    buffer = thorium_message_buffer(message);
    count = thorium_message_count(message);

    /*
     * Handler for PUSH_DATA
     */

    biosal_dna_kmer_frequency_block_init(&block, concrete_self->kmer_length,
                    ephemeral_memory, &concrete_self->transport_codec, 0);

    biosal_dna_kmer_frequency_block_unpack(&block, buffer, ephemeral_memory,
                    &concrete_self->transport_codec);

    key = core_memory_pool_allocate(ephemeral_memory, concrete_self->key_length_in_bytes);

    kmers = biosal_dna_kmer_frequency_block_kmers(&block);
    core_map_iterator_init(&iterator, kmers);

    period = 2500000;

    raw_kmer = core_memory_pool_allocate(thorium_actor_get_ephemeral_memory(self),
                    concrete_self->kmer_length + 1);

    if (!concrete_self->printed_vertex_size) {

        printf("DEBUG VERTEX DELIVERY %d bytes\n", count);

        concrete_self->printed_vertex_size = 1;
    }

    while (core_map_iterator_has_next(&iterator)) {

        /*
         * add kmers to store
         */
        core_map_iterator_next(&iterator, (void **)&packed_kmer, (void **)&frequency);

        /* Store the kmer in 2 bit encoding
         */

        biosal_dna_kmer_init_empty(&kmer);
        biosal_dna_kmer_unpack(&kmer, packed_kmer, concrete_self->kmer_length,
                    ephemeral_memory,
                    &concrete_self->transport_codec);

        kmer_pointer = &kmer;

        if (concrete_self->codec_are_different) {
            /*
             * Get a copy of the sequence
             */
            biosal_dna_kmer_get_sequence(kmer_pointer, raw_kmer, concrete_self->kmer_length,
                        &concrete_self->transport_codec);


            biosal_dna_kmer_init(&encoded_kmer, raw_kmer, &concrete_self->storage_codec,
                        thorium_actor_get_ephemeral_memory(self));
            kmer_pointer = &encoded_kmer;
        }

        biosal_dna_kmer_pack_store_key(kmer_pointer, key,
                        concrete_self->kmer_length, &concrete_self->storage_codec,
                        thorium_actor_get_ephemeral_memory(self));

#ifdef BIOSAL_DEBUG_ISSUE_540
        if (strcmp(raw_kmer, "AGCTGGTAGTCATCACCAGACTGGAACAG") == 0
                        || strcmp(raw_kmer, "CGCGATCTGTTGCTGGGCCTAACGTGGTA") == 0
                        || strcmp(raw_kmer, "TACCACGTTAGGCCCAGCAACAGATCGCG") == 0) {
            printf("Examine store key for %s\n", raw_kmer);

            core_debugger_examine(key, concrete_self->key_length_in_bytes);
        }
#endif

        bucket = core_map_get(&concrete_self->table, key);

        if (bucket == NULL) {
            /* This is the first time that this kmer is seen.
             */
            bucket = core_map_add(&concrete_self->table, key);

            biosal_assembly_vertex_init(bucket);

#if 0
            printf("DEBUG303 ADD_KEY");
            biosal_dna_kmer_print(&encoded_kmer, concrete_self->kmer_length,
                            &concrete_self->storage_codec, ephemeral_memory);
#endif
        }

        if (concrete_self->codec_are_different) {
            biosal_dna_kmer_destroy(&encoded_kmer,
                        thorium_actor_get_ephemeral_memory(self));
        }

        biosal_dna_kmer_destroy(&kmer, ephemeral_memory);

        biosal_assembly_vertex_increase_coverage_depth(bucket, *frequency);

        if (concrete_self->received >= concrete_self->last_received + period) {
            printf("%s/%d received %" PRIu64 " kmers so far,"
                            " store has %" PRIu64 " canonical kmers, %" PRIu64 " kmers\n",
                        thorium_actor_script_name(self),
                            thorium_actor_name(self), concrete_self->received,
                            core_map_size(&concrete_self->table),
                            2 * core_map_size(&concrete_self->table));

            concrete_self->last_received = concrete_self->received;
        }

        concrete_self->received += *frequency;
    }

    core_memory_pool_free(ephemeral_memory, key);
    core_memory_pool_free(ephemeral_memory, raw_kmer);

    core_map_iterator_destroy(&iterator);
    biosal_dna_kmer_frequency_block_destroy(&block, thorium_actor_get_ephemeral_memory(self));

    thorium_actor_send_reply_empty(self, ACTION_PUSH_KMER_BLOCK_REPLY);
}
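
The vertex lookup above follows a get-or-create idiom: core_map_get returns NULL for an unseen key, and core_map_add allocates the bucket, which the caller must then initialize. The same idiom appears in biosal_assembly_graph_store_yield_reply below for the coverage counters. A sketch of the idiom for a map of uint64_t counters; the helper name is hypothetical:

/*
 * Return the counter bucket for a key, creating and zeroing it on
 * first sight. Hypothetical helper using only core_map_get/add.
 */
uint64_t *map_get_or_create_counter(struct core_map *map, void *key)
{
    uint64_t *bucket;

    bucket = (uint64_t *)core_map_get(map, key);

    if (bucket == NULL) {
        /* This is the first time that this key is seen. */
        bucket = (uint64_t *)core_map_add(map, key);

        (*bucket) = 0;
    }

    return bucket;
}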
Example 9
void biosal_assembly_graph_store_yield_reply(struct thorium_actor *self, struct thorium_message *message)
{
    struct biosal_dna_kmer kmer;
    void *key;
    struct biosal_assembly_vertex *value;
    int coverage;
    int customer;
    uint64_t *count;
    int new_count;
    void *new_buffer;
    struct thorium_message new_message;
    struct core_memory_pool *ephemeral_memory;
    struct biosal_assembly_graph_store *concrete_self;
    int i;
    int max;

    ephemeral_memory = thorium_actor_get_ephemeral_memory(self);
    concrete_self = thorium_actor_concrete_actor(self);
    customer = concrete_self->customer;

#if 0
    printf("YIELD REPLY\n");
#endif

    i = 0;
    max = 1024;

    key = NULL;
    value = NULL;

    while (i < max
                    && core_map_iterator_has_next(&concrete_self->iterator)) {

        core_map_iterator_next(&concrete_self->iterator, (void **)&key, (void **)&value);

        biosal_dna_kmer_init_empty(&kmer);
        biosal_dna_kmer_unpack(&kmer, key, concrete_self->kmer_length,
                        ephemeral_memory,
                        &concrete_self->storage_codec);

        coverage = biosal_assembly_vertex_coverage_depth(value);

        count = (uint64_t *)core_map_get(&concrete_self->coverage_distribution, &coverage);

        if (count == NULL) {

            count = (uint64_t *)core_map_add(&concrete_self->coverage_distribution, &coverage);

            (*count) = 0;
        }

        /* increment for the lowest kmer (canonical) */
        (*count)++;

        biosal_dna_kmer_destroy(&kmer, ephemeral_memory);

        ++i;
    }

    /* yield again if the iterator is not at the end
     */
    if (core_map_iterator_has_next(&concrete_self->iterator)) {

#if 0
        printf("yield ! %d\n", i);
#endif

        thorium_actor_send_to_self_empty(self, ACTION_YIELD);

        return;
    }

    /*
    printf("ready...\n");
    */

    core_map_iterator_destroy(&concrete_self->iterator);

    new_count = core_map_pack_size(&concrete_self->coverage_distribution);

    new_buffer = thorium_actor_allocate(self, new_count);

    core_map_pack(&concrete_self->coverage_distribution, new_buffer);

    printf("SENDING %s/%d sends map to %d, %d bytes / %d entries\n",
                        thorium_actor_script_name(self),
                    thorium_actor_name(self),
                    customer, new_count,
                    (int)core_map_size(&concrete_self->coverage_distribution));

    thorium_message_init(&new_message, ACTION_PUSH_DATA, new_count, new_buffer);

    thorium_actor_send(self, customer, &new_message);
    thorium_message_destroy(&new_message);

    core_map_destroy(&concrete_self->coverage_distribution);

    thorium_actor_send_empty(self, concrete_self->source,
                            ACTION_PUSH_DATA_REPLY);
}
Example 10
void biosal_assembly_graph_store_print(struct thorium_actor *self)
{
    struct core_map_iterator iterator;
    struct biosal_dna_kmer kmer;
    void *key;
    struct biosal_assembly_vertex *value;
    int coverage;
    char *sequence;
    struct biosal_assembly_graph_store *concrete_self;
    int maximum_length;
    int length;
    struct core_memory_pool *ephemeral_memory;

    ephemeral_memory = thorium_actor_get_ephemeral_memory(self);
    concrete_self = thorium_actor_concrete_actor(self);

    core_map_iterator_init(&iterator, &concrete_self->table);

    printf("map size %d\n", (int)core_map_size(&concrete_self->table));

    maximum_length = 0;

    while (core_map_iterator_has_next(&iterator)) {
        core_map_iterator_next(&iterator, (void **)&key, (void **)&value);

        biosal_dna_kmer_init_empty(&kmer);
        biosal_dna_kmer_unpack(&kmer, key, concrete_self->kmer_length,
                        thorium_actor_get_ephemeral_memory(self),
                        &concrete_self->storage_codec);

        length = biosal_dna_kmer_length(&kmer, concrete_self->kmer_length);

        /*
        printf("length %d\n", length);
        */
        if (length > maximum_length) {
            maximum_length = length;
        }
        biosal_dna_kmer_destroy(&kmer, thorium_actor_get_ephemeral_memory(self));
    }

    /*
    printf("MAx length %d\n", maximum_length);
    */

    sequence = core_memory_pool_allocate(ephemeral_memory, maximum_length + 1);
    sequence[0] = '\0';
    core_map_iterator_destroy(&iterator);
    core_map_iterator_init(&iterator, &concrete_self->table);

    while (core_map_iterator_has_next(&iterator)) {
        core_map_iterator_next(&iterator, (void **)&key, (void **)&value);

        biosal_dna_kmer_init_empty(&kmer);
        biosal_dna_kmer_unpack(&kmer, key, concrete_self->kmer_length,
                        thorium_actor_get_ephemeral_memory(self),
                        &concrete_self->storage_codec);

        biosal_dna_kmer_get_sequence(&kmer, sequence, concrete_self->kmer_length,
                        &concrete_self->storage_codec);

        coverage = biosal_assembly_vertex_coverage_depth(value);

        printf("Sequence %s Coverage %d\n", sequence, coverage);

        biosal_dna_kmer_destroy(&kmer, thorium_actor_get_ephemeral_memory(self));
    }

    core_map_iterator_destroy(&iterator);
    core_memory_pool_free(ephemeral_memory, sequence);
}
Example 11
void biosal_coverage_distribution_receive(struct thorium_actor *self, struct thorium_message *message)
{
    int tag;
    struct core_map map;
    struct core_map_iterator iterator;
    int *coverage_from_message;
    uint64_t *count_from_message;
    uint64_t *frequency;
    int count;
    void *buffer;
    struct biosal_coverage_distribution *concrete_actor;
    int name;
    int source;
    struct core_memory_pool *ephemeral_memory;

    ephemeral_memory = thorium_actor_get_ephemeral_memory(self);
    name = thorium_actor_name(self);
    source = thorium_message_source(message);
    concrete_actor = (struct biosal_coverage_distribution *)thorium_actor_concrete_actor(self);
    tag = thorium_message_action(message);
    count = thorium_message_count(message);
    buffer = thorium_message_buffer(message);

    if (tag == ACTION_PUSH_DATA) {

        core_map_init(&map, 0, 0);
        core_map_set_memory_pool(&map, ephemeral_memory);
        core_map_unpack(&map, buffer);

        core_map_iterator_init(&iterator, &map);


        while (core_map_iterator_has_next(&iterator)) {

            core_map_iterator_next(&iterator, (void **)&coverage_from_message,
                            (void **)&count_from_message);

#ifdef BIOSAL_COVERAGE_DISTRIBUTION_DEBUG
            thorium_actor_log(self, "DEBUG DATA %d %d\n", (int)*coverage_from_message, (int)*count_from_message);
#endif

            frequency = core_map_get(&concrete_actor->distribution, coverage_from_message);

            if (frequency == NULL) {

                frequency = core_map_add(&concrete_actor->distribution, coverage_from_message);

                (*frequency) = 0;
            }

            (*frequency) += (*count_from_message);
        }

        core_map_iterator_destroy(&iterator);

        thorium_actor_send_reply_empty(self, ACTION_PUSH_DATA_REPLY);

        concrete_actor->actual++;

        thorium_actor_log(self, "distribution/%d receives coverage data from producer/%d, %d entries / %d bytes %d/%d\n",
                        name, source, (int)core_map_size(&map), count,
                        concrete_actor->actual, concrete_actor->expected);

        if (concrete_actor->expected != 0 && concrete_actor->expected == concrete_actor->actual) {

            thorium_actor_log(self, "received everything %d/%d\n", concrete_actor->actual, concrete_actor->expected);

            biosal_coverage_distribution_write_distribution(self);

            thorium_actor_send_empty(self, concrete_actor->source,
                            ACTION_NOTIFY);
        }

        core_map_destroy(&map);

    } else if (tag == ACTION_ASK_TO_STOP) {

        biosal_coverage_distribution_ask_to_stop(self, message);

    } else if (tag == ACTION_SET_EXPECTED_MESSAGE_COUNT) {

        concrete_actor->source = source;
        thorium_message_unpack_int(message, 0, &concrete_actor->expected);

        thorium_actor_log(self, "distribution %d expects %d messages\n",
                        thorium_actor_name(self),
                        concrete_actor->expected);

        thorium_actor_send_reply_empty(self, ACTION_SET_EXPECTED_MESSAGE_COUNT_REPLY);
    }
}
Example 12
void thorium_balancer_detect_symmetric_scripts(struct thorium_balancer *self, struct core_map *symmetric_actor_scripts)
{
    int i;
    struct thorium_worker *worker;
    struct thorium_actor *actor;
    struct core_map_iterator iterator;
    struct core_map *set;
    int actor_name;
    struct thorium_node *node;
    int script;
    int frequency;
    struct core_map frequencies;
    int worker_count;
    int population_per_worker;
#ifdef THORIUM_SCHEDULER_ENABLE_VERBOSITY
    struct thorium_script *actual_script;
#endif

    worker_count = thorium_worker_pool_worker_count(self->pool);
    core_map_init(&frequencies, sizeof(int), sizeof(int));

    node = thorium_worker_pool_get_node(self->pool);

    /* Gather frequencies
     */
    for (i = 0; i < worker_count; i++) {

        worker = thorium_worker_pool_get_worker(self->pool, i);

        set = thorium_worker_get_actors(worker);

        core_map_iterator_init(&iterator, set);

        while (core_map_iterator_get_next_key_and_value(&iterator, &actor_name, NULL)) {
            actor = thorium_node_get_actor_from_name(node, actor_name);

            if (actor == NULL) {
                continue;
            }
            script = thorium_actor_script(actor);

            frequency = 0;

            if (!core_map_get_value(&frequencies, &script, &frequency)) {
                core_map_add_value(&frequencies, &script, &frequency);
            }

            ++frequency;

            core_map_update_value(&frequencies, &script, &frequency);
        }

        core_map_iterator_destroy(&iterator);
    }

    /*
     * Detect symmetric scripts
     */
    core_map_iterator_init(&iterator, &frequencies);

    while (core_map_iterator_get_next_key_and_value(&iterator, &script, &frequency)) {

#ifdef THORIUM_SCHEDULER_ENABLE_VERBOSITY
        actual_script = thorium_node_find_script(node, script);
#endif

#ifdef THORIUM_SCHEDULER_ENABLE_VERBOSITY
        printf("SCHEDULER test symmetry %s %d\n",
                        thorium_script_description(actual_script),
                        frequency);
#endif

        if (frequency % worker_count == 0) {
            population_per_worker = frequency / worker_count;

            core_map_add_value(symmetric_actor_scripts, &script, &population_per_worker);

#ifdef THORIUM_SCHEDULER_ENABLE_VERBOSITY
            printf("SCHEDULER: script %s is symmetric, worker_count: %d, population_per_worker: %d\n",
                            thorium_script_description(actual_script),
                            worker_count,
                            population_per_worker);
#endif
        }
    }

    core_map_iterator_destroy(&iterator);

    core_map_destroy(&frequencies);
}
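
The frequency-gathering loop above relies on the usual counter idiom for a core_map with int keys and int values: try core_map_get_value, insert a zero entry with core_map_add_value on a miss, then write the incremented count back with core_map_update_value. A sketch of that idiom as a standalone helper; the name is hypothetical:

/*
 * Increment the int counter stored under an int key, inserting a
 * zero entry on first sight. Hypothetical helper; the map must have
 * been initialized with sizeof(int) keys and sizeof(int) values.
 */
void map_increment_int_counter(struct core_map *map, int key)
{
    int count;

    count = 0;

    if (!core_map_get_value(map, &key, &count)) {
        core_map_add_value(map, &key, &count);
    }

    ++count;

    core_map_update_value(map, &key, &count);
}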
Example 13
void biosal_coverage_distribution_write_distribution(struct thorium_actor *self)
{
    struct core_map_iterator iterator;
    int *coverage;
    uint64_t *canonical_frequency;
    uint64_t frequency;
    struct biosal_coverage_distribution *concrete_actor;
    struct core_vector coverage_values;
    struct core_vector_iterator vector_iterator;
    struct core_buffered_file_writer descriptor;
    struct core_buffered_file_writer descriptor_canonical;
    struct core_string file_name;
    struct core_string canonical_file_name;
    int argc;
    char **argv;
    int name;
    char *directory_name;

    name = thorium_actor_name(self);
    argc = thorium_actor_argc(self);
    argv = thorium_actor_argv(self);

    directory_name = biosal_command_get_output_directory(argc, argv);

    /* Create the directory if it does not exist
     */

    if (!core_directory_verify_existence(directory_name)) {

        core_directory_create(directory_name);
    }

    core_string_init(&file_name, "");
    core_string_append(&file_name, directory_name);
    core_string_append(&file_name, "/");
    core_string_append(&file_name, BIOSAL_COVERAGE_DISTRIBUTION_DEFAULT_OUTPUT_FILE);

    core_string_init(&canonical_file_name, "");
    core_string_append(&canonical_file_name, directory_name);
    core_string_append(&canonical_file_name, "/");
    core_string_append(&canonical_file_name, BIOSAL_COVERAGE_DISTRIBUTION_DEFAULT_OUTPUT_FILE_CANONICAL);

    core_buffered_file_writer_init(&descriptor, core_string_get(&file_name));
    core_buffered_file_writer_init(&descriptor_canonical, core_string_get(&canonical_file_name));

    concrete_actor = (struct biosal_coverage_distribution *)thorium_actor_concrete_actor(self);

    core_vector_init(&coverage_values, sizeof(int));
    core_map_iterator_init(&iterator, &concrete_actor->distribution);

#ifdef BIOSAL_COVERAGE_DISTRIBUTION_DEBUG
    thorium_actor_log(self, "map size %d\n", (int)core_map_size(&concrete_actor->distribution));
#endif

    while (core_map_iterator_has_next(&iterator)) {
        core_map_iterator_next(&iterator, (void **)&coverage, (void **)&canonical_frequency);

#ifdef BIOSAL_COVERAGE_DISTRIBUTION_DEBUG
        thorium_actor_log(self, "DEBUG COVERAGE %d FREQUENCY %" PRIu64 "\n", *coverage, *frequency);
#endif

        core_vector_push_back(&coverage_values, coverage);
    }

    core_map_iterator_destroy(&iterator);

    core_vector_sort_int(&coverage_values);

#ifdef BIOSAL_COVERAGE_DISTRIBUTION_DEBUG
    thorium_actor_log(self, "after sort ");
    core_vector_print_int(&coverage_values);
    thorium_actor_log(self, "\n");
#endif

    core_vector_iterator_init(&vector_iterator, &coverage_values);

#if 0
    core_buffered_file_writer_printf(&descriptor_canonical, "Coverage\tFrequency\n");
#endif

    core_buffered_file_writer_printf(&descriptor, "Coverage\tFrequency\n");

    while (core_vector_iterator_has_next(&vector_iterator)) {

        core_vector_iterator_next(&vector_iterator, (void **)&coverage);

        canonical_frequency = (uint64_t *)core_map_get(&concrete_actor->distribution, coverage);

        frequency = 2 * *canonical_frequency;

        core_buffered_file_writer_printf(&descriptor_canonical, "%d %" PRIu64 "\n",
                        *coverage,
                        *canonical_frequency);

        core_buffered_file_writer_printf(&descriptor, "%d\t%" PRIu64 "\n",
                        *coverage,
                        frequency);
    }

    core_vector_destroy(&coverage_values);
    core_vector_iterator_destroy(&vector_iterator);

    thorium_actor_log(self, "distribution %d wrote %s\n", name, core_string_get(&file_name));
    thorium_actor_log(self, "distribution %d wrote %s\n", name, core_string_get(&canonical_file_name));

    core_buffered_file_writer_destroy(&descriptor);
    core_buffered_file_writer_destroy(&descriptor_canonical);

    core_string_destroy(&file_name);
    core_string_destroy(&canonical_file_name);
}
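
The file-output boilerplate above (ensure the directory exists, build the path with core_string, open a core_buffered_file_writer, print, tear down) is independent of the distribution logic. A sketch of just that sequence, using only calls from the function above; the file name "example.tsv" and the helper name are placeholders:

/*
 * Create a directory if needed and write a header line into a file
 * inside it. Hypothetical helper using the calls from the function
 * above.
 */
void write_output_header(char *directory_name)
{
    struct core_string file_name;
    struct core_buffered_file_writer descriptor;

    if (!core_directory_verify_existence(directory_name)) {
        core_directory_create(directory_name);
    }

    core_string_init(&file_name, "");
    core_string_append(&file_name, directory_name);
    core_string_append(&file_name, "/");
    core_string_append(&file_name, "example.tsv");

    core_buffered_file_writer_init(&descriptor, core_string_get(&file_name));

    core_buffered_file_writer_printf(&descriptor, "Coverage\tFrequency\n");

    core_buffered_file_writer_destroy(&descriptor);
    core_string_destroy(&file_name);
}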
Example 14
void thorium_balancer_generate_symmetric_migrations(struct thorium_balancer *self, struct core_map *symmetric_actor_scripts,
                struct core_vector *migrations)
{
    int i;
    int worker_count;
    struct thorium_worker *worker;
    struct core_map *set;
    struct core_map_iterator iterator;
    struct thorium_migration migration;
    struct core_map script_current_worker;
    struct core_map script_current_worker_actor_count;
    int frequency;
    int current_worker;
    int current_worker_actor_count;
    int old_worker;
#ifdef THORIUM_SCHEDULER_ENABLE_VERBOSITY
    struct thorium_script *actual_script;
#endif
    struct thorium_node *node;
    int actor_name;
    int script;
    int new_worker;
    struct thorium_actor *actor;
    int enabled;

    /* Gather symmetric actors:
     */

#ifdef THORIUM_SCHEDULER_ENABLE_SYMMETRIC_SCHEDULING
    enabled = 1;
#else
    enabled = 0;
#endif

    core_map_init(&script_current_worker, sizeof(int), sizeof(int));
    core_map_init(&script_current_worker_actor_count, sizeof(int), sizeof(int));

    node = thorium_worker_pool_get_node(self->pool);
    worker_count = thorium_worker_pool_worker_count(self->pool);

    for (i = 0; i < worker_count; i++) {

        worker = thorium_worker_pool_get_worker(self->pool, i);

        set = thorium_worker_get_actors(worker);

        core_map_iterator_init(&iterator, set);

        while (core_map_iterator_get_next_key_and_value(&iterator, &actor_name, NULL)) {
            actor = thorium_node_get_actor_from_name(node, actor_name);

            if (actor == NULL) {
                continue;
            }

            script = thorium_actor_script(actor);

            /*
             * Check if the actor is symmetric
             */
            if (core_map_get_value(symmetric_actor_scripts, &script, &frequency)) {

                current_worker = 0;
                if (!core_map_get_value(&script_current_worker, &script, &current_worker)) {
                    core_map_add_value(&script_current_worker, &script, &current_worker);
                }
                current_worker_actor_count = 0;
                if (!core_map_get_value(&script_current_worker_actor_count, &script, &current_worker_actor_count)) {
                    core_map_add_value(&script_current_worker_actor_count, &script, &current_worker_actor_count);
                }

                /*
                 * Emit migration instruction
                 */

                old_worker = thorium_balancer_get_actor_worker(self, actor_name);
                new_worker = current_worker;
#ifdef THORIUM_SCHEDULER_ENABLE_VERBOSITY
                actual_script = thorium_node_find_script(node, script);
#endif

                if (enabled && old_worker != new_worker) {
                    thorium_migration_init(&migration, actor_name, old_worker, new_worker);
                    core_vector_push_back(migrations, &migration);
                    thorium_migration_destroy(&migration);

#ifdef THORIUM_SCHEDULER_ENABLE_VERBOSITY
                    printf("[EMIT] ");
#endif
                } else {
#ifdef THORIUM_SCHEDULER_ENABLE_VERBOSITY
                    printf("[MOCK] ");
#endif
                }

#ifdef THORIUM_SCHEDULER_ENABLE_VERBOSITY
                printf("SCHEDULER -> symmetric placement... %s/%d scheduled for execution on worker/%d of node/%d\n",
                                thorium_script_description(actual_script),
                                actor_name,
                                new_worker,
                                thorium_node_name(node));
#endif

                ++current_worker_actor_count;
                core_map_update_value(&script_current_worker_actor_count, &script, &current_worker_actor_count);

                /* The current worker is full.
                 * Increment the current worker and set the
                 * worker actor count to 0.
                 */
                if (current_worker_actor_count == frequency) {
                    ++current_worker;
                    core_map_update_value(&script_current_worker, &script, &current_worker);
                    current_worker_actor_count = 0;
                    core_map_update_value(&script_current_worker_actor_count, &script, &current_worker_actor_count);
                }
            }

        }

        core_map_iterator_destroy(&iterator);
    }

    core_map_destroy(&script_current_worker);
    core_map_destroy(&script_current_worker_actor_count);
}
Example 15
void thorium_worker_print_actors(struct thorium_worker *worker, struct thorium_balancer *scheduler)
{
    struct core_map_iterator iterator;
    int name;
    int count;
    struct thorium_actor *actor;
    int producers;
    int consumers;
    int received;
    int difference;
    int script;
    struct core_map distribution;
    int frequency;
    struct thorium_script *script_object;
    int dead;
    int node_name;
    int worker_name;
    int previous_amount;

    node_name = thorium_node_name(worker->node);
    worker_name = worker->name;

    core_map_iterator_init(&iterator, &worker->actors);

    printf("node/%d worker/%d %d queued messages, received: %d busy: %d load: %f ring: %d scheduled actors: %d/%d\n",
                    node_name, worker_name,
                    thorium_worker_get_scheduled_message_count(worker),
                    thorium_worker_get_sum_of_received_actor_messages(worker),
                    thorium_worker_is_busy(worker),
                    thorium_worker_get_scheduling_epoch_load(worker),
                    core_fast_ring_size_from_producer(&worker->actors_to_schedule),
                    thorium_scheduler_size(&worker->scheduler),
                    (int)core_map_size(&worker->actors));

    core_map_init(&distribution, sizeof(int), sizeof(int));

    while (core_map_iterator_get_next_key_and_value(&iterator, &name, NULL)) {

        actor = thorium_node_get_actor_from_name(worker->node, name);

        if (actor == NULL) {
            continue;
        }

        dead = thorium_actor_dead(actor);

        if (dead) {
            continue;
        }

        count = thorium_actor_get_mailbox_size(actor);
        received = thorium_actor_get_sum_of_received_messages(actor);
        producers = core_map_size(thorium_actor_get_received_messages(actor));
        consumers = core_map_size(thorium_actor_get_sent_messages(actor));
        previous_amount = 0;

        core_map_get_value(&worker->actor_received_messages, &name,
                        &previous_amount);
        difference = received - previous_amount;

        if (!core_map_update_value(&worker->actor_received_messages, &name,
                        &received)) {
            core_map_add_value(&worker->actor_received_messages, &name, &received);
        }

        printf("  [%s/%d] mailbox: %d received: %d (+%d) producers: %d consumers: %d\n",
                        thorium_actor_script_name(actor),
                        name, count, received,
                       difference,
                       producers, consumers);

        script = thorium_actor_script(actor);

        if (core_map_get_value(&distribution, &script, &frequency)) {
            ++frequency;
            core_map_update_value(&distribution, &script, &frequency);
        } else {
            frequency = 1;
            core_map_add_value(&distribution, &script, &frequency);
        }
    }

    /*printf("\n");*/
    core_map_iterator_destroy(&iterator);

    core_map_iterator_init(&iterator, &distribution);

    printf("node/%d worker/%d Frequency list\n", node_name, worker_name);

    while (core_map_iterator_get_next_key_and_value(&iterator, &script, &frequency)) {

        script_object = thorium_node_find_script(worker->node, script);

        CORE_DEBUGGER_ASSERT(script_object != NULL);

        printf("node/%d worker/%d Frequency %s => %d\n",
                        node_name,
                        worker->name,
                        thorium_script_name(script_object),
                        frequency);
    }

    core_map_iterator_destroy(&iterator);
    core_map_destroy(&distribution);
}
Example 16
/* This can only be called from the CONSUMER
 */
int thorium_worker_dequeue_actor(struct thorium_worker *worker, struct thorium_actor **actor)
{
    int value;
    int name;
    struct thorium_actor *other_actor;
    int other_name;
    int operations;
    int status;
    int mailbox_size;

    operations = 4;
    other_actor = NULL;

    /* Move an actor from the ring to the real actor scheduling queue
     */
    while (operations--
                    && core_fast_ring_pop_from_consumer(&worker->actors_to_schedule, &other_actor)) {

#ifdef CORE_DEBUGGER_ENABLE_ASSERT
        if (other_actor == NULL) {
            printf("NULL pointer pulled from ring, operations %d ring size %d\n",
                            operations, core_fast_ring_size_from_consumer(&worker->actors_to_schedule));
        }
#endif

        CORE_DEBUGGER_ASSERT(other_actor != NULL);

        other_name = thorium_actor_name(other_actor);

#ifdef THORIUM_WORKER_DEBUG_SCHEDULER
        printf("ring.DEQUEUE %d\n", other_name);
#endif

        if (core_set_find(&worker->evicted_actors, &other_name)) {

#ifdef THORIUM_WORKER_DEBUG_SCHEDULER
            printf("ALREADY EVICTED\n");
#endif
            continue;
        }

        if (!core_map_get_value(&worker->actors, &other_name, &status)) {
            /* Add the actor to the list of actors.
             * This does nothing if it is already in the list.
             */

            status = STATUS_IDLE;
            core_map_add_value(&worker->actors, &other_name, &status);

            core_map_iterator_destroy(&worker->actor_iterator);
            core_map_iterator_init(&worker->actor_iterator, &worker->actors);
        }

        /* If the actor is not queued, queue it
         */
        if (status == STATUS_IDLE) {
            status = STATUS_QUEUED;

            core_map_update_value(&worker->actors, &other_name, &status);

            thorium_scheduler_enqueue(&worker->scheduler, other_actor);
        } else {

#ifdef THORIUM_WORKER_DEBUG_SCHEDULER
            printf("SCHEDULER %d already scheduled to run, scheduled: %d\n", other_name,
                            (int)core_set_size(&worker->queued_actors));
#endif
        }
    }

    /* Now, dequeue an actor from the real queue.
     * If it has more than 1 message, re-enqueue it
     */
    value = thorium_scheduler_dequeue(&worker->scheduler, actor);

    /* Set the name to nobody;
     * check_production at the end uses both value and name;
     * if value is false, check_production does not use name anyway.
     */
    name = THORIUM_ACTOR_NOBODY;

    /* an actor is ready to be run and it was dequeued from the scheduling queue.
     */
    if (value) {
        name = thorium_actor_name(*actor);

#ifdef THORIUM_WORKER_DEBUG_SCHEDULER
        printf("scheduler.DEQUEUE actor %d, removed from queued actors...\n", name);
#endif

        mailbox_size = thorium_actor_get_mailbox_size(*actor);

        /* The actor has only one message and it is going to
         * be processed now.
         */
        if (mailbox_size == 1) {
#ifdef THORIUM_WORKER_DEBUG_SCHEDULER
            printf("SCHEDULER %d has no message to schedule...\n", name);
#endif
            /* Set the status of the actor to STATUS_IDLE.
             *
             * TODO: the ring's new tail might not be visible yet.
             * That could possibly be a problem...
             */
            status = STATUS_IDLE;
            core_map_update_value(&worker->actors, &name, &status);

        /* The actor still has a lot of messages
         * to process. Keep them coming.
         */
        } else if (mailbox_size >= 2) {

            /* Add the actor to the scheduling queue if it
             * still has messages
             */

#ifdef THORIUM_WORKER_DEBUG_SCHEDULER
            printf("Scheduling actor %d again, messages: %d\n",
                        name,
                        thorium_actor_get_mailbox_size(*actor));
#endif

            /* The status is still STATUS_QUEUED
             */
            thorium_scheduler_enqueue(&worker->scheduler, *actor);


        /* The actor is scheduled to run, but the new mailbox tail is
         * apparently not yet visible: it can take a few cycles for cache
         * changes to propagate across cores (MESIF protocol).
         *
         * The actor is therefore marked idle below, and
         * thorium_worker_check_production eventually re-enqueues it once
         * a message is visible in its mailbox.
         */
        } else /* if (mailbox_size == 0) */ {

            status = STATUS_IDLE;
            core_map_update_value(&worker->actors, &name, &status);

            value = 0;
        }

    }

    thorium_worker_check_production(worker, value, name);

    return value;
}
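
The dequeue path above is a two-stage handoff: producers push actors into a lock-free ring, and the consumer drains the ring into the per-worker scheduling queue, guarded by a per-actor STATUS_IDLE/STATUS_QUEUED flag so an actor never sits in that queue twice. Below is a minimal sketch of that guard, assuming the core_map, thorium_scheduler, and status constants shown above are in scope; mark_queued_once is a hypothetical helper, not a Thorium function.

/* Hypothetical helper: enqueue an actor only if its recorded status is
 * STATUS_IDLE, so it can never be placed in the scheduling queue twice.
 */
void mark_queued_once(struct thorium_worker *worker, struct thorium_actor *actor)
{
    int name;
    int status;

    name = thorium_actor_name(actor);

    /* Actors not yet in the table start as idle. */
    if (!core_map_get_value(&worker->actors, &name, &status)) {
        status = STATUS_IDLE;
        core_map_add_value(&worker->actors, &name, &status);
    }

    /* Only an idle actor may enter the scheduling queue. */
    if (status == STATUS_IDLE) {
        status = STATUS_QUEUED;
        core_map_update_value(&worker->actors, &name, &status);

        thorium_scheduler_enqueue(&worker->scheduler, actor);
    }
}

Since worker->actors is only touched from the consumer side (see the comment at the top of Example no. 16), this guard needs no lock.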
Example no. 17
void thorium_worker_destroy(struct thorium_worker *worker)
{
    void *buffer;

    thorium_load_profiler_destroy(&worker->profiler);

    if (core_bitmap_get_bit_uint32_t(&worker->flags, FLAG_ENABLE_ACTOR_LOAD_PROFILER)) {
        core_buffered_file_writer_destroy(&worker->load_profile_writer);
    }

    core_map_destroy(&worker->actor_received_messages);

    /*
    thorium_worker_print_balance(worker);
    */
    while (thorium_worker_fetch_clean_outbound_buffer(worker, &buffer)) {

        core_memory_pool_free(&worker->outbound_message_memory_pool, buffer);

#ifdef THORIUM_WORKER_DEBUG_INJECTION
        ++worker->counter_freed_outbound_buffers_from_other_workers;
#endif
    }

#ifdef THORIUM_WORKER_DEBUG_INJECTION
    printf("AFTER COLLECTION\n");
    thorium_worker_print_balance(worker);

    printf("THORIUM-> clean_message_queue_for_triage has %d items\n",
                    core_fast_queue_size(&worker->clean_message_queue_for_triage));
    printf("THORIUM-> clean_message_ring_for_triage has %d items\n",
                    core_fast_ring_size_from_producer(&worker->clean_message_ring_for_triage));
    printf("THORIUM-> injected_clean_outbound_buffers has %d items\n",
                    core_fast_ring_size_from_producer(&worker->injected_clean_outbound_buffers));
#endif

    core_timer_destroy(&worker->timer);

#ifdef THORIUM_WORKER_ENABLE_LOCK
    core_lock_destroy(&worker->lock);
#endif

    core_fast_ring_destroy(&worker->actors_to_schedule);

#ifdef THORIUM_NODE_INJECT_CLEAN_WORKER_BUFFERS
    core_fast_ring_destroy(&worker->injected_clean_outbound_buffers);

    core_fast_ring_destroy(&worker->clean_message_ring_for_triage);
    core_fast_queue_destroy(&worker->clean_message_queue_for_triage);
#endif

    thorium_scheduler_destroy(&worker->scheduler);
    core_fast_ring_destroy(&worker->outbound_message_queue);
    core_fast_queue_destroy(&worker->outbound_message_queue_buffer);

    core_map_destroy(&worker->actors);
    core_map_iterator_destroy(&worker->actor_iterator);
    core_set_destroy(&worker->evicted_actors);

    worker->node = NULL;

    worker->name = -1;
    core_bitmap_set_bit_uint32_t(&worker->flags, FLAG_DEAD);

    core_memory_pool_destroy(&worker->ephemeral_memory);
    core_memory_pool_destroy(&worker->outbound_message_memory_pool);

    thorium_priority_assigner_destroy(&worker->assigner);
}
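
Note the teardown order above: outstanding outbound buffers are returned to their memory pool before the rings and the pool itself are destroyed. Below is a minimal sketch of that drain-then-destroy ordering, assuming the core_fast_ring and core_memory_pool declarations from this codebase are in scope; drain_and_destroy is a hypothetical helper, not a Thorium function.

/* Hypothetical helper: return every in-flight buffer to its pool, then
 * tear the containers down. Destroying the pool first would strand the
 * buffers still sitting in the ring.
 */
void drain_and_destroy(struct core_fast_ring *ring, struct core_memory_pool *pool)
{
    void *buffer;

    /* Give every outstanding buffer back to its pool first. */
    while (core_fast_ring_pop_from_consumer(ring, &buffer)) {
        core_memory_pool_free(pool, buffer);
    }

    /* Only then is it safe to destroy the containers and the pool. */
    core_fast_ring_destroy(ring);
    core_memory_pool_destroy(pool);
}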
Example no. 18
void thorium_worker_check_production(struct thorium_worker *worker, int value, int name)
{
    uint64_t time;
    uint64_t elapsed;
    struct thorium_actor *other_actor;
    int mailbox_size;
    int status;
    uint64_t threshold;

    /*
     * If no actor is scheduled to run, the worker is spinning without
     * doing useful work, and this is bad for business.
     *
     * So, here, an inactive actor with pending messages gets poked.
     */
    if (!value) {
        ++worker->ticks_without_production;
    } else {
        worker->ticks_without_production = 0;
    }

    /*
     * If too many cycles were spent doing nothing, re-check the actors'
     * mailboxes, since there could be an issue with the cache coherency
     * of the CPU, even with the memory fences.
     *
     * In theory, this should not happen.
     */
    if (worker->ticks_without_production >= THORIUM_WORKER_UNPRODUCTIVE_TICK_LIMIT) {

        if (core_map_iterator_get_next_key_and_value(&worker->actor_iterator, &name, NULL)) {

            other_actor = thorium_node_get_actor_from_name(worker->node, name);

            mailbox_size = 0;
            if (other_actor != NULL) {
                mailbox_size = thorium_actor_get_mailbox_size(other_actor);
            }

            if (mailbox_size > 0) {
                thorium_scheduler_enqueue(&worker->scheduler, other_actor);

                status = STATUS_QUEUED;
                core_map_update_value(&worker->actors, &name, &status);
            }
        } else {

            /* Rewind the iterator.
             */
            core_map_iterator_destroy(&worker->actor_iterator);
            core_map_iterator_init(&worker->actor_iterator, &worker->actors);

            /*worker->ticks_without_production = 0;*/
        }

        /*
         * If there is still nothing, tell the operating system that the
         * thread needs to sleep.
         *
         * The operating system is one of:
         * - Linux on Cray XE6,
         * - Linux on Cray XC30,
         * - IBM Compute Node Kernel (CNK) on IBM Blue Gene/Q.
         */
        if (worker->waiting_is_enabled) {

            /* This is a first warning
             */
            if (worker->waiting_start_time == 0) {
                worker->waiting_start_time = core_timer_get_nanoseconds(&worker->timer);

            } else {

                time = core_timer_get_nanoseconds(&worker->timer);

                elapsed = time - worker->waiting_start_time;

                threshold = THORIUM_WORKER_UNPRODUCTIVE_MICROSECONDS_FOR_WAIT;

                /* Convert microseconds to nanoseconds
                 */
                threshold *= 1000;

                /* Verify the elapsed time.
                 */
                if (elapsed >= threshold) {
                    /*
                     * Here, the worker will wait until it receives a signal.
                     * Such a signal will mean that something is ready to be consumed.
                     */

                    /* Reset the time
                     */
                    worker->waiting_start_time = 0;

#ifdef THORIUM_WORKER_DEBUG_WAIT_SIGNAL
                    printf("DEBUG worker/%d will wait, elapsed %d\n",
                                    worker->name, (int)elapsed);
#endif

                    thorium_worker_wait(worker);
                }
            }
        }
    }
}
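
The wait decision above boils down to one piece of arithmetic: the timer counts nanoseconds while the configured limit is in microseconds, so the limit is scaled by 1000 before the comparison. Below is a minimal sketch of that check, with a hypothetical limit of 100 microseconds standing in for THORIUM_WORKER_UNPRODUCTIVE_MICROSECONDS_FOR_WAIT.

#include <stdint.h>

/* Hypothetical stand-in for the configured limit, in microseconds. */
#define UNPRODUCTIVE_MICROSECONDS_FOR_WAIT 100

/* Return 1 when enough unproductive time has elapsed to justify a wait. */
static int should_wait(uint64_t start_nanoseconds, uint64_t now_nanoseconds)
{
    uint64_t threshold;

    threshold = UNPRODUCTIVE_MICROSECONDS_FOR_WAIT;

    /* Convert microseconds to nanoseconds. */
    threshold *= 1000;

    return (now_nanoseconds - start_nanoseconds) >= threshold;
}

With the hypothetical limit above, a worker that last produced work 150 000 nanoseconds ago (150 microseconds) would wait, while one at 50 000 nanoseconds would keep spinning.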