/*
 * Populate @cset with every core that has a counter allocated in @pa.
 *
 * A core participates when its entry in pa->cores_counters is
 * non-negative; all other cores are left out of the set.
 */
static void perfmon_setup_alloc_core_set(const struct perfmon_alloc *pa, struct core_set *cset)
{
	int core;

	core_set_init(cset);

	for (core = 0; core < num_cores; core++) {
		if (pa->cores_counters[core] < 0)
			continue;
		core_set_setcpu(cset, core);
	}
}
void thorium_multiplexer_policy_init(struct thorium_multiplexer_policy *self) { self->threshold_buffer_size_in_bytes = THORIUM_MESSAGE_MULTIPLEXER_SIZE_THRESHOLD_IN_BYTES; /*self->threshold_time_in_nanoseconds = THORIUM_DYNAMIC_TIMEOUT;*/ self->threshold_time_in_nanoseconds = THORIUM_MESSAGE_MULTIPLEXER_TIME_THRESHOLD_IN_NANOSECONDS; core_set_init(&self->actions_to_skip, sizeof(int)); /* * We don't want to slow down things so the following actions * are not multiplexed. */ core_set_add_int(&self->actions_to_skip, ACTION_MULTIPLEXER_MESSAGE); core_set_add_int(&self->actions_to_skip, ACTION_THORIUM_NODE_START); core_set_add_int(&self->actions_to_skip, ACTION_THORIUM_NODE_ADD_INITIAL_ACTOR); core_set_add_int(&self->actions_to_skip, ACTION_THORIUM_NODE_ADD_INITIAL_ACTORS); core_set_add_int(&self->actions_to_skip, ACTION_THORIUM_NODE_ADD_INITIAL_ACTORS_REPLY); core_set_add_int(&self->actions_to_skip, ACTION_SPAWN); core_set_add_int(&self->actions_to_skip, ACTION_SPAWN_REPLY); self->disabled = 1; }
/*
 * Initialize the message multiplexer for a node.
 *
 * Sets the size/time thresholds from the policy, allocates one
 * multiplexed buffer slot per destination node (the actual payload
 * buffers are lazily allocated later), and decides whether the
 * multiplexer is disabled: by policy, by a too-small node count,
 * or by the OPTION_DISABLE_MULTIPLEXER command-line flag.
 */
void thorium_message_multiplexer_init(struct thorium_message_multiplexer *self,
                struct thorium_node *node, struct thorium_multiplexer_policy *policy)
{
    int size;
    int i;
    /* int bytes; */
    int position;
    struct thorium_multiplexed_buffer *multiplexed_buffer;
    int argc;
    char **argv;

    thorium_decision_maker_init(&self->decision_maker);

    self->policy = policy;
    self->original_message_count = 0;
    self->real_message_count = 0;

    /* Start with a clean flag word; the multiplexer is enabled by default. */
    CORE_BITMAP_CLEAR_FLAGS(self->flags);
    CORE_BITMAP_CLEAR_FLAG(self->flags, FLAG_DISABLED);

#ifdef THORIUM_MULTIPLEXER_TRACK_BUFFERS_WITH_CONTENT
    core_set_init(&self->buffers_with_content, sizeof(int));
#endif

    core_timer_init(&self->timer);

    self->buffer_size_in_bytes = thorium_multiplexer_policy_size_threshold(self->policy);

    /*
     * The flush timeout comes either from the adaptive decision maker or
     * straight from the policy, depending on the build configuration.
     */
#ifdef CONFIG_MULTIPLEXER_USE_DECISION_MAKER
    self->timeout_in_nanoseconds = thorium_decision_maker_get_best_timeout(&self->decision_maker,
                    THORIUM_TIMEOUT_NO_VALUE);
#else
    self->timeout_in_nanoseconds = self->policy->threshold_time_in_nanoseconds;
#endif

    CORE_DEBUGGER_ASSERT(self->timeout_in_nanoseconds >= 0);

    self->node = node;

    /* One multiplexed buffer slot per destination node. */
    core_vector_init(&self->buffers, sizeof(struct thorium_multiplexed_buffer));

    size = thorium_node_nodes(self->node);
    core_vector_resize(&self->buffers, size);

    /* bytes = size * self->buffer_size_in_bytes; */

    /*
     * NOTE(review): with DEBUG_MULTIPLEXER defined this printf references
     * "bytes", whose declaration and assignment are commented out above,
     * so the debug build would not compile — confirm and fix if the
     * macro is ever re-enabled.
     */
#ifdef DEBUG_MULTIPLEXER
    thorium_printf("DEBUG_MULTIPLEXER size %d bytes %d\n", size, bytes);
#endif

    /*
     * NOTE(review): "position" is accumulated below but never read after
     * the loop; it appears to be a leftover from an earlier scheme that
     * carved buffers out of one large allocation.
     */
    position = 0;

    for (i = 0; i < size; ++i) {
        multiplexed_buffer = core_vector_at(&self->buffers, i);

        CORE_DEBUGGER_ASSERT(multiplexed_buffer != NULL);

        /*
         * Initially, these multiplexed buffers have a NULL buffer.
         * It is only allocated when needed because each worker is an exporter
         * of small messages for a subset of all the destination nodes.
         */
        thorium_multiplexed_buffer_init(multiplexed_buffer, self->buffer_size_in_bytes,
                        self->timeout_in_nanoseconds);

        position += self->buffer_size_in_bytes;

        /*
         * NOTE(review): this debug printf references "buffer", which is not
         * declared in this function — a DEBUG_MULTIPLEXER1 build would not
         * compile.
         */
#ifdef DEBUG_MULTIPLEXER1
        thorium_printf("DEBUG_MULTIPLEXER thorium_message_multiplexer_init index %d buffer %p\n",
                        i, buffer);
#endif

#ifdef DEBUG_MULTIPLEXER
        thorium_printf("DEBUG_MULTIPLEXER thorium_message_multiplexer_init (after) index %d buffer %p\n",
                        i, core_vector_at(&self->buffers, i));
#endif
    }

    /* The policy itself can disable multiplexing outright. */
    if (thorium_multiplexer_policy_is_disabled(self->policy)) {
        CORE_BITMAP_SET_FLAG(self->flags, FLAG_DISABLED);
    }

    /* Multiplexing is pointless below a minimum number of nodes. */
    if (thorium_node_nodes(self->node) < thorium_multiplexer_policy_minimum_node_count(self->policy)) {
        CORE_BITMAP_SET_FLAG(self->flags, FLAG_DISABLED);
    }

    self->worker = NULL;

    argc = node->argc;
    argv = node->argv;

    /*
     * Aside from the policy, the end user can also disable the multiplexer code path
     */
    if (core_command_has_argument(argc, argv, OPTION_DISABLE_MULTIPLEXER)) {
        CORE_BITMAP_SET_FLAG(self->flags, FLAG_DISABLED);
    }

    /* Baseline counters/timestamps for throughput accounting. */
    self->last_send_event_count = 0;
    self->last_time = core_timer_get_nanoseconds(&self->timer);
    self->last_update_time = time(NULL);

    self->degree_of_aggregation_limit = self->policy->degree_of_aggregation_limit;

    thorium_router_init(&self->router, self->node->nodes, TOPOLOGY_POLYTOPE);

    if (thorium_node_must_print_data(self->node)) {
        thorium_router_print(&self->router);
    }
}
/*
 * Initialize a worker thread's state for the given node.
 *
 * Sets up the worker's rings and queues (actor scheduling, outbound
 * messages, buffer injection/triage), its scheduler, its two memory
 * pools (ephemeral scratch memory and outbound message buffers), its
 * flags, and its load-accounting timestamps.
 *
 * @param worker  worker to initialize (storage owned by the caller)
 * @param name    worker identifier within the node
 * @param node    owning node; retained by pointer, not copied
 */
void thorium_worker_init(struct thorium_worker *worker, int name, struct thorium_node *node)
{
    int capacity;
    int ephemeral_memory_block_size;
    int injected_buffer_ring_size;
    int argc;
    char **argv;

    worker->tick_count = 0;

    thorium_load_profiler_init(&worker->profiler);

    argc = thorium_node_argc(node);
    argv = thorium_node_argv(node);

#ifdef THORIUM_WORKER_DEBUG_INJECTION
    worker->counter_allocated_outbound_buffers = 0;
    worker->counter_freed_outbound_buffers_from_self = 0;
    worker->counter_freed_outbound_buffers_from_other_workers = 0;
    worker->counter_injected_outbound_buffers_other_local_workers= 0;
    worker->counter_injected_inbound_buffers_from_thorium_core = 0;
#endif

    /* Per-actor received-message counters (actor name -> count). */
    core_map_init(&worker->actor_received_messages, sizeof(int), sizeof(int));

    worker->waiting_is_enabled = 0;
    worker->waiting_start_time = 0;

    core_timer_init(&worker->timer);
    capacity = THORIUM_WORKER_RING_CAPACITY;

    /*worker->work_queue = work_queue;*/
    worker->node = node;
    worker->name = name;
    core_bitmap_clear_bit_uint32_t(&worker->flags, FLAG_DEAD);
    worker->last_warning = 0;
    worker->last_wake_up_count = 0;

    /*worker->work_queue = &worker->works;*/

    /* There are two options:
     * 1. enable atomic operations for change visibility
     * 2. Use volatile head and tail.
     */
    core_fast_ring_init(&worker->actors_to_schedule, capacity, sizeof(struct thorium_actor *));

#ifdef THORIUM_NODE_INJECT_CLEAN_WORKER_BUFFERS
    /* Rings used to recycle clean buffers and triage finished messages. */
    injected_buffer_ring_size = capacity;
    core_fast_ring_init(&worker->injected_clean_outbound_buffers,
                    injected_buffer_ring_size, sizeof(void *));

    core_fast_ring_init(&worker->clean_message_ring_for_triage,
                    injected_buffer_ring_size, sizeof(struct thorium_message));

    /* Overflow queue for when the triage ring above is full. */
    core_fast_queue_init(&worker->clean_message_queue_for_triage,
                    sizeof(struct thorium_message));
#endif

    thorium_scheduler_init(&worker->scheduler, thorium_node_name(worker->node),
                    worker->name);

    core_map_init(&worker->actors, sizeof(int), sizeof(int));
    core_map_iterator_init(&worker->actor_iterator, &worker->actors);

    core_fast_ring_init(&worker->outbound_message_queue, capacity, sizeof(struct thorium_message));

    /* Overflow queue for when the outbound ring is full. */
    core_fast_queue_init(&worker->outbound_message_queue_buffer, sizeof(struct thorium_message));

    core_bitmap_clear_bit_uint32_t(&worker->flags, FLAG_DEBUG);
    core_bitmap_clear_bit_uint32_t(&worker->flags, FLAG_BUSY);
    /*
     * NOTE(review): this clears a bit in node->flags, not worker->flags —
     * it mutates the shared node from worker initialization and looks like
     * a copy/paste slip; confirm against the owner of
     * FLAG_ENABLE_ACTOR_LOAD_PROFILER.
     */
    core_bitmap_clear_bit_uint32_t(&node->flags, FLAG_ENABLE_ACTOR_LOAD_PROFILER);
    /*
     * NOTE(review): this wholesale reset makes the three worker->flags
     * bit-clears above redundant (they clear bits that are zeroed here
     * anyway) — intent worth confirming.
     */
    worker->flags = 0;

    core_bitmap_clear_bit_uint32_t(&worker->flags, FLAG_DEBUG_ACTORS);

    /*
     * Actor debugging is only turned on for worker 0 of node 0 when the
     * debug option is present on the command line.
     */
    if (core_command_has_argument(argc, argv, DEBUG_WORKER_OPTION)) {
#if 0
        printf("DEBUG has option %s\n", DEBUG_WORKER_OPTION);
#endif
        if (thorium_node_name(worker->node) == 0 && thorium_worker_name(worker) == 0) {
#if 0
            printf("DEBUG setting bit FLAG_DEBUG_ACTORS because %s\n", DEBUG_WORKER_OPTION);
#endif
            core_bitmap_set_bit_uint32_t(&worker->flags, FLAG_DEBUG_ACTORS);
        }
    }

    worker->epoch_used_nanoseconds = 0;
    worker->loop_used_nanoseconds = 0;
    worker->scheduling_epoch_used_nanoseconds = 0;

    worker->started_in_thread = 0;

    /* 2 MiB is the default size for Linux huge pages.
     * \see https://wiki.debian.org/Hugepages
     * \see http://lwn.net/Articles/376606/
     */
    /*
     * 8 MiB
     */
    ephemeral_memory_block_size = 8388608;
    /*ephemeral_memory_block_size = 16777216;*/
    core_memory_pool_init(&worker->ephemeral_memory, ephemeral_memory_block_size,
                    MEMORY_POOL_NAME_WORKER_EPHEMERAL);
    core_memory_pool_disable_tracking(&worker->ephemeral_memory);
    core_memory_pool_enable_ephemeral_mode(&worker->ephemeral_memory);

#ifdef THORIUM_WORKER_ENABLE_LOCK
    core_lock_init(&worker->lock);
#endif

    core_set_init(&worker->evicted_actors, sizeof(int));

    core_memory_pool_init(&worker->outbound_message_memory_pool,
                    CORE_MEMORY_POOL_MESSAGE_BUFFER_BLOCK_SIZE,
                    MEMORY_POOL_NAME_WORKER_OUTBOUND);

    /*
     * Disable the pool so that it uses allocate and free
     * directly.
     */
#ifdef CORE_MEMORY_POOL_DISABLE_MESSAGE_BUFFER_POOL
    core_memory_pool_disable(&worker->outbound_message_memory_pool);
#endif

    /*
     * Transport message buffers are fancy objects.
     */
    core_memory_pool_enable_normalization(&worker->outbound_message_memory_pool);
    core_memory_pool_enable_alignment(&worker->outbound_message_memory_pool);

    worker->ticks_without_production = 0;

    thorium_priority_assigner_init(&worker->assigner, thorium_worker_name(worker));

    /*
     * This variables should be set in
     * thorium_worker_start, but when running on 1 process with 1 thread,
     * thorium_worker_start is never called...
     */
    worker->last_report = time(NULL);
    worker->epoch_start_in_nanoseconds = core_timer_get_nanoseconds(&worker->timer);
    worker->loop_start_in_nanoseconds = worker->epoch_start_in_nanoseconds;
    worker->loop_end_in_nanoseconds = worker->loop_start_in_nanoseconds;
    worker->scheduling_epoch_start_in_nanoseconds = worker->epoch_start_in_nanoseconds;

    /*
     * Avoid valgrind warnings.
     */
    worker->epoch_load = 0;
}