void ABTD_thread_func_wrapper(int func_upper, int func_lower, int arg_upper, int arg_lower) { void (*thread_func)(void *); void *p_arg; size_t ptr_size, int_size; ptr_size = sizeof(void *); int_size = sizeof(int); if (ptr_size == int_size) { thread_func = (void (*)(void *))(uintptr_t)func_lower; p_arg = (void *)(uintptr_t)arg_lower; } else if (ptr_size == int_size * 2) { uintptr_t shift_bits = CHAR_BIT * int_size; uintptr_t mask = ((uintptr_t)1 << shift_bits) - 1; thread_func = (void (*)(void *))( ((uintptr_t)func_upper << shift_bits) | ((uintptr_t)func_lower & mask)); p_arg = (void *)( ((uintptr_t)arg_upper << shift_bits) | ((uintptr_t)arg_lower & mask)); } else { ABTI_ASSERT(0); } thread_func(p_arg); /* Now, the ULT has finished its job. Terminate the ULT. * We don't need to use the atomic operation here because the ULT will be * terminated regardless of other requests. */ ABTI_thread *p_thread = ABTI_local_get_thread(); p_thread->request |= ABTI_THREAD_REQ_TERMINATE; }
void ABTI_log_pool_remove(ABTI_pool *p_pool, ABT_unit unit,
                          ABTI_xstream *p_consumer)
{
    /* Emit a log record for a unit being removed from a pool; no-op when
     * logging is disabled. */
    if (gp_ABTI_global->use_logging == ABT_FALSE)
        return;

    switch (p_pool->u_get_type(unit)) {
        case ABT_UNIT_TYPE_THREAD: {
            ABTI_thread *p_thread =
                ABTI_thread_get_ptr(p_pool->u_get_thread(unit));
            if (p_thread->p_last_xstream == NULL) {
                /* The ULT has not run on any ES yet. */
                LOG_EVENT("[U%" PRIu64 "] removed from P%" PRIu64
                          " (consumer: E%" PRIu64 ")\n",
                          ABTI_thread_get_id(p_thread), p_pool->id,
                          p_consumer->rank);
            } else {
                LOG_EVENT("[U%" PRIu64 ":E%" PRIu64 "] removed from P%" PRIu64
                          " (consumer: E%" PRIu64 ")\n",
                          ABTI_thread_get_id(p_thread),
                          p_thread->p_last_xstream->rank, p_pool->id,
                          p_consumer->rank);
            }
            break;
        }
        case ABT_UNIT_TYPE_TASK: {
            ABTI_task *p_task = ABTI_task_get_ptr(p_pool->u_get_task(unit));
            if (p_task->p_xstream == NULL) {
                /* The tasklet has not run on any ES yet. */
                LOG_EVENT("[T%" PRIu64 "] removed from P%" PRIu64
                          " (consumer: E%" PRIu64 ")\n",
                          ABTI_task_get_id(p_task), p_pool->id,
                          p_consumer->rank);
            } else {
                LOG_EVENT("[T%" PRIu64 ":E%" PRIu64 "] removed from P%" PRIu64
                          " (consumer: E%" PRIu64 ")\n",
                          ABTI_task_get_id(p_task), p_task->p_xstream->rank,
                          p_pool->id, p_consumer->rank);
            }
            break;
        }
        default:
            ABTI_ASSERT(0);
            break;
    }
}
/* Decide whether the scheduler p_sched running on p_xstream must stop.
 * Returns ABT_TRUE when the scheduler has been marked TERMINATED (exit
 * request, or finish request with an empty pool set).  NOTE(review): on the
 * terminating paths, top_sched_mutex is acquired and deliberately left held;
 * releasing it appears to be the caller's responsibility — confirm against
 * callers. */
ABT_bool ABTI_sched_has_to_stop(ABTI_sched *p_sched, ABTI_xstream *p_xstream)
{
    ABT_bool stop = ABT_FALSE;
    size_t size;

    /* Check exit request */
    if (p_sched->request & ABTI_SCHED_REQ_EXIT) {
        ABTI_mutex_spinlock(&p_xstream->top_sched_mutex);
        p_sched->state = ABT_SCHED_STATE_TERMINATED;
        stop = ABT_TRUE;
        goto fn_exit;
    }

    size = ABTI_sched_get_effective_size(p_sched);
    if (size == 0) {
        if (p_sched->request & ABTI_SCHED_REQ_FINISH) {
            /* Check join request */
            /* We need to lock in case someone wants to migrate to this
             * scheduler */
            ABTI_mutex_spinlock(&p_xstream->top_sched_mutex);
            /* Re-check the size under the lock; fix: reuse the outer 'size'
             * instead of shadowing it with a second declaration. */
            size = ABTI_sched_get_effective_size(p_sched);
            if (size == 0) {
                p_sched->state = ABT_SCHED_STATE_TERMINATED;
                stop = ABT_TRUE;
            } else {
                /* A unit arrived in the meantime; keep scheduling. */
                ABTI_mutex_unlock(&p_xstream->top_sched_mutex);
            }
        } else if (p_sched->used == ABTI_SCHED_IN_POOL) {
            /* If the scheduler is a stacked one, we have to escape from the
             * scheduling function. The scheduler will be stopped if it is a
             * tasklet type. However, if the scheduler is a ULT type, we
             * context switch to the parent scheduler. */
            if (p_sched->type == ABT_SCHED_TYPE_TASK) {
                p_sched->state = ABT_SCHED_STATE_TERMINATED;
                stop = ABT_TRUE;
            } else {
                ABTI_ASSERT(p_sched->type == ABT_SCHED_TYPE_ULT);
                ABTI_sched *p_par_sched;
                p_par_sched = ABTI_xstream_get_parent_sched(p_xstream);
                ABTD_thread_context_switch(p_sched->p_ctx, p_par_sched->p_ctx);
            }
        }
    }

fn_exit:
    return stop;
}
void ABTD_env_init(ABTI_global *p_global) { char *env; /* Get the number of available cores in the system */ p_global->num_cores = sysconf(_SC_NPROCESSORS_ONLN); /* By default, we use the CPU affinity */ p_global->set_affinity = ABT_TRUE; env = getenv("ABT_ENV_SET_AFFINITY"); if (env != NULL) { if (strcmp(env, "0") == 0 || strcmp(env, "NO") == 0 || strcmp(env, "no") == 0 || strcmp(env, "No") == 0) { p_global->set_affinity = ABT_FALSE; } } if (p_global->set_affinity == ABT_TRUE) { ABTD_affinity_init(); } #ifdef ABT_CONFIG_USE_DEBUG_LOG /* If the debug logging is set in configure, logging is turned on by * default. */ p_global->use_logging = ABT_TRUE; p_global->use_debug = ABT_TRUE; #else /* Otherwise, logging is not turned on by default. */ p_global->use_logging = ABT_FALSE; p_global->use_debug = ABT_FALSE; #endif env = getenv("ABT_ENV_USE_LOG"); if (env != NULL) { if (strcmp(env, "0") == 0 || strcmp(env, "NO") == 0 || strcmp(env, "no") == 0 || strcmp(env, "No") == 0) { p_global->use_logging = ABT_FALSE; } else { p_global->use_logging = ABT_TRUE; } } env = getenv("ABT_ENV_USE_DEBUG"); if (env != NULL) { if (strcmp(env, "0") == 0 || strcmp(env, "NO") == 0 || strcmp(env, "no") == 0 || strcmp(env, "No") == 0) { p_global->use_debug = ABT_FALSE; } else { p_global->use_debug = ABT_TRUE; } } /* Maximum size of the internal ES array */ env = getenv("ABT_ENV_MAX_NUM_XSTREAMS"); if (env != NULL) { p_global->max_xstreams = atoi(env); } else { p_global->max_xstreams = p_global->num_cores; } /* Default key table size */ env = getenv("ABT_ENV_KEY_TABLE_SIZE"); if (env != NULL) { p_global->key_table_size = (int)atoi(env); } else { p_global->key_table_size = ABTD_KEY_TABLE_DEFAULT_SIZE; } /* Default stack size for ULT */ env = getenv("ABT_ENV_THREAD_STACKSIZE"); if (env != NULL) { p_global->thread_stacksize = (size_t)atol(env); ABTI_ASSERT(p_global->thread_stacksize >= 512); } else { p_global->thread_stacksize = ABTD_THREAD_DEFAULT_STACKSIZE; } /* Default stack size for scheduler 
*/ env = getenv("ABT_ENV_SCHED_STACKSIZE"); if (env != NULL) { p_global->sched_stacksize = (size_t)atol(env); ABTI_ASSERT(p_global->sched_stacksize >= 512); } else { p_global->sched_stacksize = ABTD_SCHED_DEFAULT_STACKSIZE; } /* Default frequency for event checking by the scheduler */ env = getenv("ABT_ENV_SCHED_EVENT_FREQ"); if (env != NULL) { p_global->sched_event_freq = (uint32_t)atol(env); ABTI_ASSERT(p_global->sched_event_freq >= 1); } else { p_global->sched_event_freq = ABTD_SCHED_EVENT_FREQ; } /* Cache line size */ env = getenv("ABT_ENV_CACHE_LINE_SIZE"); if (env != NULL) { p_global->cache_line_size = (uint32_t)atol(env); } else { p_global->cache_line_size = ABTD_CACHE_LINE_SIZE; } /* OS page size */ env = getenv("ABT_ENV_OS_PAGE_SIZE"); if (env != NULL) { p_global->os_page_size = (uint32_t)atol(env); } else { p_global->os_page_size = ABTD_OS_PAGE_SIZE; } /* Huge page size */ env = getenv("ABT_ENV_HUGE_PAGE_SIZE"); if (env != NULL) { p_global->huge_page_size = (uint32_t)atol(env); } else { p_global->huge_page_size = ABTD_HUGE_PAGE_SIZE; } #ifdef ABT_CONFIG_USE_MEM_POOL /* Page size for memory allocation */ env = getenv("ABT_ENV_MEM_PAGE_SIZE"); if (env != NULL) { p_global->mem_page_size = (uint32_t)atol(env); } else { p_global->mem_page_size = ABTD_MEM_PAGE_SIZE; } /* Stack page size for memory allocation */ env = getenv("ABT_ENV_MEM_STACK_PAGE_SIZE"); if (env != NULL) { p_global->mem_sp_size = (size_t)atol(env); } else { p_global->mem_sp_size = ABTD_MEM_STACK_PAGE_SIZE; } /* Maximum number of stacks that each ES can keep during execution */ env = getenv("ABT_ENV_MEM_MAX_NUM_STACKS"); if (env != NULL) { p_global->mem_max_stacks = (uint32_t)atol(env); } else { p_global->mem_max_stacks = ABTD_MEM_MAX_NUM_STACKS; } /* How to allocate large pages. The default is to use mmap() for huge * pages and then to fall back to allocate regular pages using mmap() when * huge pages are run out of. 
*/ env = getenv("ABT_ENV_MEM_LP_ALLOC"); #if defined(HAVE_MAP_ANONYMOUS) || defined(HAVE_MAP_ANON) int default_lp_alloc = ABTI_MEM_LP_MMAP_HP_RP; #else int default_lp_alloc = ABTI_MEM_LP_MALLOC; #endif if (env != NULL) { if (strcasecmp(env, "malloc") == 0) { p_global->mem_lp_alloc = ABTI_MEM_LP_MALLOC; #if defined(HAVE_MAP_ANONYMOUS) || defined(HAVE_MAP_ANON) } else if (strcasecmp(env, "mmap_rp") == 0) { p_global->mem_lp_alloc = ABTI_MEM_LP_MMAP_RP; } else if (strcasecmp(env, "mmap_hp_rp") == 0) { p_global->mem_lp_alloc = ABTI_MEM_LP_MMAP_HP_RP; } else if (strcasecmp(env, "mmap_hp_thp") == 0) { p_global->mem_lp_alloc = ABTI_MEM_LP_MMAP_HP_THP; #endif } else if (strcasecmp(env, "thp") == 0) { p_global->mem_lp_alloc = ABTI_MEM_LP_THP; } else { p_global->mem_lp_alloc = default_lp_alloc; } } else { p_global->mem_lp_alloc = default_lp_alloc; } #endif #ifdef ABT_CONFIG_HANDLE_POWER_EVENT /* Hostname for power management daemon */ env = getenv("ABT_ENV_POWER_EVENT_HOSTNAME"); p_global->pm_host = (env != NULL) ? env : "localhost"; /* Port number for power management daemon */ env = getenv("ABT_ENV_POWER_EVENT_PORT"); p_global->pm_port = (env != NULL) ? atoi(env) : 60439; #endif #ifdef ABT_CONFIG_PUBLISH_INFO /* Do we need to publish exec. information? */ env = getenv("ABT_ENV_PUBLISH_INFO"); if (env != NULL) { if (strcmp(env, "0") == 0 || strcmp(env, "NO") == 0 || strcmp(env, "no") == 0 || strcmp(env, "No") == 0) { p_global->pub_needed = ABT_FALSE; } else { p_global->pub_needed = ABT_TRUE; } } else { p_global->pub_needed = ABT_TRUE; } /* Filename for exec. information publishing */ env = getenv("ABT_ENV_PUBLISH_FILENAME"); p_global->pub_filename = env ? env : ABT_CONFIG_DEFAULT_PUB_FILENAME; /* Time interval for exec. information publishing */ env = getenv("ABT_ENV_PUBLISH_INTERVAL"); p_global->pub_interval = env ? 
atof(env) : 1.0; #endif /* Whether to print the configuration on ABT_init() */ env = getenv("ABT_ENV_PRINT_CONFIG"); if (env != NULL) { if (strcmp(env, "1") == 0 || strcasecmp(env, "yes") == 0 || strcasecmp(env, "y") == 0) { p_global->print_config = ABT_TRUE; } else { p_global->print_config = ABT_FALSE; } } else { p_global->print_config = ABT_FALSE; } /* Init timer */ ABTD_time_init(); }