/**
 * @ingroup COND
 * @brief   Free the condition variable.
 *
 * \c ABT_cond_free() deallocates the memory used for the condition variable
 * object associated with the handle \c cond. If it is successfully processed,
 * \c cond is set to \c ABT_COND_NULL.
 *
 * @param[in,out] cond  handle to the condition variable
 * @return Error code
 * @retval ABT_SUCCESS on success
 */
int ABT_cond_free(ABT_cond *cond)
{
    int abt_errno = ABT_SUCCESS;
    ABTI_cond *p_cond = ABTI_cond_get_ptr(*cond);
    ABTI_CHECK_NULL_COND_PTR(p_cond);

    /* Freeing is not allowed while ULTs are still waiting on it. */
    ABTI_CHECK_TRUE(p_cond->num_waiters == 0, ABT_ERR_COND);

    /* Take the internal lock so nobody else is touching the structure.
     * There is no matching unlock: the whole object is deallocated here. */
    ABTI_mutex_spinlock(&p_cond->mutex);
    ABTU_free(p_cond);

    /* Return value */
    *cond = ABT_COND_NULL;

  fn_exit:
    return abt_errno;

  fn_fail:
    HANDLE_ERROR_FUNC_WITH_CODE(abt_errno);
    goto fn_exit;
}
/**
 * @ingroup SCHED
 * @brief   Check if the scheduler needs to stop
 *
 * Check if there has been an exit or a finish request and if the conditions
 * are respected (empty pool for a finish request).
 * If we are on the primary ES, we will jump back to the main ULT,
 * if the scheduler has nothing to do.
 *
 * It is the user's responsibility to take proper measures to stop the
 * scheduling loop, depending on the value given by stop.
 *
 * @param[in]  sched  handle to the target scheduler
 * @param[out] stop   indicate if the scheduler has to stop
 * @return Error code
 * @retval ABT_SUCCESS on success
 */
int ABT_sched_has_to_stop(ABT_sched sched, ABT_bool *stop)
{
    int abt_errno = ABT_SUCCESS;

    *stop = ABT_FALSE;

    /* An external thread (e.g., pthread) has no local ES context. */
    if (lp_ABTI_local == NULL) {
        abt_errno = ABT_ERR_INV_XSTREAM;
        goto fn_exit;
    }

    ABTI_sched *p_sched = ABTI_sched_get_ptr(sched);
    ABTI_CHECK_NULL_SCHED_PTR(p_sched);

    *stop = ABTI_sched_has_to_stop(p_sched, ABTI_local_get_xstream());

  fn_exit:
    return abt_errno;

  fn_fail:
    HANDLE_ERROR_FUNC_WITH_CODE(abt_errno);
    goto fn_exit;
}
/**
 * @ingroup SELF
 * @brief   Check if the caller's ES is the primary ES.
 *
 * \c ABT_self_on_primary_xstream() checks whether the caller work unit is
 * associated with the primary ES. If the caller is running on the primary ES,
 * \c flag is set to \c ABT_TRUE. Otherwise, \c flag is set to \c ABT_FALSE.
 *
 * @param[out] flag  result (<tt>ABT_TRUE</tt>: primary ES,
 *                   <tt>ABT_FALSE</tt>: not)
 * @return Error code
 * @retval ABT_SUCCESS              on success
 * @retval ABT_ERR_UNINITIALIZED    Argobots has not been initialized
 * @retval ABT_ERR_INV_XSTREAM      called by an external thread, e.g., pthread
 */
int ABT_self_on_primary_xstream(ABT_bool *flag)
{
    int abt_errno = ABT_SUCCESS;

    /* Without initialization there is no primary ES; report ABT_FALSE. */
    if (gp_ABTI_global == NULL) {
        abt_errno = ABT_ERR_UNINITIALIZED;
        *flag = ABT_FALSE;
        goto fn_exit;
    }

    /* An external thread has no local ES data; report ABT_FALSE. */
    if (lp_ABTI_local == NULL) {
        abt_errno = ABT_ERR_INV_XSTREAM;
        *flag = ABT_FALSE;
        goto fn_exit;
    }

    ABTI_xstream *p_xstream = ABTI_local_get_xstream();
    ABTI_CHECK_NULL_XSTREAM_PTR(p_xstream);

    /* Return value */
    *flag = (p_xstream->type == ABTI_XSTREAM_TYPE_PRIMARY)
          ? ABT_TRUE : ABT_FALSE;

  fn_exit:
    return abt_errno;

  fn_fail:
    HANDLE_ERROR_FUNC_WITH_CODE(abt_errno);
    goto fn_exit;
}
/**
 * @ingroup COND
 * @brief   Broadcast a condition.
 *
 * \c ABT_cond_broadcast() signals all ULTs that are waiting on the
 * condition variable.
 * This routine shall have no effect if no ULTs are currently blocked on the
 * condition variable.
 *
 * @param[in] cond  handle to the condition variable
 * @return Error code
 * @retval ABT_SUCCESS on success
 */
int ABT_cond_broadcast(ABT_cond cond)
{
    int abt_errno = ABT_SUCCESS;
    ABTI_cond *p_cond = ABTI_cond_get_ptr(cond);
    ABTI_CHECK_NULL_COND_PTR(p_cond);

    ABTI_mutex_spinlock(&p_cond->mutex);

    if (p_cond->num_waiters == 0) {
        /* Nobody is waiting: nothing to do. */
        ABTI_mutex_unlock(&p_cond->mutex);
        goto fn_exit;
    }

    /* Walk the circular waiter list once, waking every unit. */
    ABTI_unit *p_head = p_cond->p_head;
    ABTI_unit *p_cur = p_head;
    do {
        /* Read the successor before the unit is detached. */
        ABTI_unit *p_next = p_cur->p_next;

        p_cur->p_prev = NULL;
        p_cur->p_next = NULL;

        if (p_cur->type == ABT_UNIT_TYPE_THREAD) {
            ABTI_thread *p_thread = ABTI_thread_get_ptr(p_cur->thread);
            ABTI_thread_set_ready(p_thread);
        } else {
            /* An external thread busy-polls this signal word. */
            volatile int *p_ext_signal = (volatile int *)p_cur->pool;
            *p_ext_signal = 1;
        }

        p_cur = p_next;
    } while (p_cur != p_head);

    /* Reset the waiter list. */
    p_cond->p_waiter_mutex = NULL;
    p_cond->num_waiters = 0;
    p_cond->p_head = NULL;
    p_cond->p_tail = NULL;

    ABTI_mutex_unlock(&p_cond->mutex);

  fn_exit:
    return abt_errno;

  fn_fail:
    HANDLE_ERROR_FUNC_WITH_CODE(abt_errno);
    goto fn_exit;
}
/* Free the scheduler object itself, releasing (and, for automatic pools,
 * freeing) its pools and its associated work unit.  The scheduler must not
 * be in use (ABTI_SCHED_NOT_USED); otherwise ABT_ERR_SCHED is returned. */
int ABTI_sched_free(ABTI_sched *p_sched)
{
    int abt_errno = ABT_SUCCESS;
    int p;

    /* If sched is currently used, free is not allowed. */
    if (p_sched->used != ABTI_SCHED_NOT_USED) {
        abt_errno = ABT_ERR_SCHED;
        goto fn_fail;
    }

    /* If sched is a default provided one, it should free its pool here.
     * Otherwise, freeing the pool is the user's responsibility. */
    for (p = 0; p < p_sched->num_pools; p++) {
        ABTI_pool *p_pool = ABTI_pool_get_ptr(p_sched->pools[p]);
        /* Drop this scheduler's reference; the pool is freed only when it is
         * automatic and no scheduler references it anymore. */
        int32_t num_scheds = ABTI_pool_release(p_pool);
        if (p_pool->automatic == ABT_TRUE && num_scheds == 0) {
            abt_errno = ABT_pool_free(p_sched->pools+p);
            ABTI_CHECK_ERROR(abt_errno);
        }
    }
    ABTU_free(p_sched->pools);

    /* Free the associated work unit */
    if (p_sched->type == ABT_SCHED_TYPE_ULT) {
        if (p_sched->p_thread) {
            /* The main scheduler's ULT needs its dedicated free routine. */
            if (p_sched->p_thread->type == ABTI_THREAD_TYPE_MAIN_SCHED) {
                ABTI_thread_free_main_sched(p_sched->p_thread);
            } else {
                ABTI_thread_free(p_sched->p_thread);
            }
        }
    } else if (p_sched->type == ABT_SCHED_TYPE_TASK) {
        if (p_sched->p_task) {
            ABTI_task_free(p_sched->p_task);
        }
    }

    LOG_EVENT("[S%" PRIu64 "] freed\n", p_sched->id);

    /* Invoke the scheduler-specific free callback before releasing the
     * scheduler structure itself. */
    p_sched->free(ABTI_sched_get_handle(p_sched));
    p_sched->data = NULL;

    ABTU_free(p_sched);

  fn_exit:
    return abt_errno;

  fn_fail:
    HANDLE_ERROR_FUNC_WITH_CODE(abt_errno);
    goto fn_exit;
}
/**
 * @ingroup SCHED
 * @brief   Set the specific data of the target user-defined scheduler
 *
 * This function will be called by the user during the initialization of his
 * user-defined scheduler.
 *
 * @param[in] sched  handle to the scheduler
 * @param[in] data   specific data of the scheduler
 * @return Error code
 * @retval ABT_SUCCESS on success
 */
int ABT_sched_set_data(ABT_sched sched, void *data)
{
    int abt_errno = ABT_SUCCESS;

    ABTI_sched *p_sched = ABTI_sched_get_ptr(sched);
    ABTI_CHECK_NULL_SCHED_PTR(p_sched);

    /* Store the user's opaque pointer; no ownership is taken. */
    p_sched->data = data;

  fn_exit:
    return abt_errno;

  fn_fail:
    HANDLE_ERROR_FUNC_WITH_CODE(abt_errno);
    goto fn_exit;
}
/**
 * @ingroup SCHED
 * @brief   Ask a scheduler to stop as soon as possible
 *
 * The scheduler will stop even if its pools are not empty. It is the user's
 * responsibility to ensure that the remaining work will be done by another
 * scheduler.
 *
 * @param[in] sched  handle to the target scheduler
 * @return Error code
 * @retval ABT_SUCCESS on success
 */
int ABT_sched_exit(ABT_sched sched)
{
    int abt_errno = ABT_SUCCESS;

    ABTI_sched *p_sched = ABTI_sched_get_ptr(sched);
    ABTI_CHECK_NULL_SCHED_PTR(p_sched);

    /* Post the exit request; the scheduling loop observes it later. */
    ABTI_sched_set_request(p_sched, ABTI_SCHED_REQ_EXIT);

  fn_exit:
    return abt_errno;

  fn_fail:
    HANDLE_ERROR_FUNC_WITH_CODE(abt_errno);
    goto fn_exit;
}
/**
 * @ingroup SCHED
 * @brief   Get the number of pools associated with scheduler.
 *
 * \c ABT_sched_get_num_pools returns the number of pools associated with
 * the target scheduler \c sched through \c num_pools.
 *
 * @param[in]  sched      handle to the target scheduler
 * @param[out] num_pools  the number of all pools associated with \c sched
 * @return Error code
 * @retval ABT_SUCCESS        on success
 * @retval ABT_ERR_INV_SCHED  invalid scheduler
 */
int ABT_sched_get_num_pools(ABT_sched sched, int *num_pools)
{
    int abt_errno = ABT_SUCCESS;

    ABTI_sched *p_sched = ABTI_sched_get_ptr(sched);
    ABTI_CHECK_NULL_SCHED_PTR(p_sched);

    /* Return value */
    *num_pools = p_sched->num_pools;

  fn_exit:
    return abt_errno;

  fn_fail:
    HANDLE_ERROR_FUNC_WITH_CODE(abt_errno);
    goto fn_exit;
}
/* Release the calling ES's thread-local data.  Finalizing twice (or before
 * initialization) yields ABT_ERR_OTHER. */
int ABTI_local_finalize(void)
{
    int abt_errno = ABT_SUCCESS;

    ABTI_CHECK_TRUE(lp_ABTI_local != NULL, ABT_ERR_OTHER);

    ABTU_free(lp_ABTI_local);
    /* Clear the pointer so a stale reference cannot be used afterwards. */
    lp_ABTI_local = NULL;

    ABTI_LOG_FINALIZE();

  fn_exit:
    return abt_errno;

  fn_fail:
    HANDLE_ERROR_FUNC_WITH_CODE(abt_errno);
    goto fn_exit;
}
/**
 * @ingroup SCHED
 * @brief   Get the sum of the sizes of the pool of \c sched.
 *
 * The size includes the blocked and migrating ULTs.
 *
 * @param[in]  sched  handle to the scheduler
 * @param[out] size   total number of work units
 * @return Error code
 * @retval ABT_SUCCESS on success
 */
int ABT_sched_get_total_size(ABT_sched sched, size_t *size)
{
    int abt_errno = ABT_SUCCESS;
    size_t total = 0;

    ABTI_sched *p_sched = ABTI_sched_get_ptr(sched);
    ABTI_CHECK_NULL_SCHED_PTR(p_sched);

    total = ABTI_sched_get_total_size(p_sched);

  fn_exit:
    /* Executed on the failure path as well, so *size is 0 on error. */
    *size = total;
    return abt_errno;

  fn_fail:
    HANDLE_ERROR_FUNC_WITH_CODE(abt_errno);
    goto fn_exit;
}
int ABTI_local_init(void) { int abt_errno = ABT_SUCCESS; ABTI_CHECK_TRUE(lp_ABTI_local == NULL, ABT_ERR_OTHER); lp_ABTI_local = (ABTI_local *)ABTU_malloc(sizeof(ABTI_local)); lp_ABTI_local->p_xstream = NULL; lp_ABTI_local->p_thread = NULL; lp_ABTI_local->p_task = NULL; ABTI_LOG_INIT(); fn_exit: return abt_errno; fn_fail: HANDLE_ERROR_FUNC_WITH_CODE(abt_errno); goto fn_exit; }
/**
 * @ingroup SCHED
 * @brief   Release the scheduler object associated with sched handle.
 *
 * If this routine successfully returns, sched is set as ABT_SCHED_NULL. The
 * scheduler will be automatically freed.
 * If \c sched is currently being used by an ES or in a pool, freeing \c sched
 * is not allowed. In this case, this routine fails and returns \c
 * ABT_ERR_SCHED.
 *
 * @param[in,out] sched  handle to the target scheduler
 * @return Error code
 * @retval ABT_SUCCESS on success
 */
int ABT_sched_free(ABT_sched *sched)
{
    int abt_errno = ABT_SUCCESS;

    ABTI_sched *p_sched = ABTI_sched_get_ptr(*sched);
    ABTI_CHECK_NULL_SCHED_PTR(p_sched);

    /* Delegate the actual release to the internal routine. */
    abt_errno = ABTI_sched_free(p_sched);
    ABTI_CHECK_ERROR(abt_errno);

    /* Return value */
    *sched = ABT_SCHED_NULL;

  fn_exit:
    return abt_errno;

  fn_fail:
    HANDLE_ERROR_FUNC_WITH_CODE(abt_errno);
    goto fn_exit;
}
/**
 * @ingroup SCHED
 * @brief   Get the pools of the scheduler \c sched.
 *
 * @param[in]  sched      handle to the target scheduler
 * @param[in]  max_pools  maximum number of pools to get
 * @param[in]  idx        index of the first pool to get
 * @param[out] pools      array of handles to the pools
 * @return Error code
 * @retval ABT_SUCCESS on success
 */
int ABT_sched_get_pools(ABT_sched sched, int max_pools, int idx,
                        ABT_pool *pools)
{
    int abt_errno = ABT_SUCCESS;

    ABTI_sched *p_sched = ABTI_sched_get_ptr(sched);
    ABTI_CHECK_NULL_SCHED_PTR(p_sched);

    /* The requested window [idx, idx+max_pools) must fit in the pool list. */
    ABTI_CHECK_TRUE(idx+max_pools <= p_sched->num_pools, ABT_ERR_SCHED);

    int i;
    for (i = 0; i < max_pools; i++) {
        pools[i] = p_sched->pools[idx + i];
    }

  fn_exit:
    return abt_errno;

  fn_fail:
    HANDLE_ERROR_FUNC_WITH_CODE(abt_errno);
    goto fn_exit;
}
/* Get the pool suitable for receiving a migrating ULT */
int ABTI_sched_get_migration_pool(ABTI_sched *p_sched, ABTI_pool *source_pool,
                                  ABTI_pool **pp_pool)
{
    int abt_errno = ABT_SUCCESS;
    ABTI_pool *p_pool;

    /* A terminated scheduler cannot receive work. */
    ABTI_CHECK_TRUE(p_sched->state != ABT_SCHED_STATE_TERMINATED,
                    ABT_ERR_INV_SCHED);

    /* Pick a pool: use the scheduler's callback when defined, otherwise
     * fall back to the first pool (or NULL when there is none). */
    if (p_sched->get_migr_pool != NULL) {
        p_pool = p_sched->get_migr_pool(ABTI_sched_get_handle(p_sched));
    } else if (p_sched->num_pools == 0) {
        p_pool = NULL;
    } else {
        p_pool = p_sched->pools[0];
    }

    /* The chosen pool must accept migration from the source pool. */
    ABTI_CHECK_TRUE(ABTI_pool_accept_migration(p_pool, source_pool)
                    == ABT_TRUE, ABT_ERR_INV_POOL_ACCESS);
    *pp_pool = p_pool;

  fn_exit:
    return abt_errno;

  fn_fail:
    *pp_pool = NULL;
    HANDLE_ERROR_FUNC_WITH_CODE(abt_errno);
    goto fn_exit;
}
/** * @ingroup SCHED * @brief Create a new user-defined scheduler and return its handle through * newsched. * * The pools used by the new scheduler are provided by \c pools. The contents * of this array is copied, so it can be freed. If a pool in the array is * ABT_POOL_NULL, the corresponding pool is automatically created. * The config must have been created by ABT_sched_config_create, and will be * used as argument in the initialization. If no specific configuration is * required, the parameter will be ABT_CONFIG_NULL. * * @param[in] def definition required for scheduler creation * @param[in] num_pools number of pools associated with this scheduler * @param[in] pools pools associated with this scheduler * @param[in] config specific config used during the scheduler creation * @param[out] newsched handle to a new scheduler * @return Error code * @retval ABT_SUCCESS on success */ int ABT_sched_create(ABT_sched_def *def, int num_pools, ABT_pool *pools, ABT_sched_config config, ABT_sched *newsched) { int abt_errno = ABT_SUCCESS; ABTI_sched *p_sched; int p; ABTI_CHECK_TRUE(newsched != NULL, ABT_ERR_SCHED); p_sched = (ABTI_sched *)ABTU_malloc(sizeof(ABTI_sched)); /* Copy of the contents of pools */ ABT_pool *pool_list; pool_list = (ABT_pool *)ABTU_malloc(num_pools*sizeof(ABT_pool)); for (p = 0; p < num_pools; p++) { if (pools[p] == ABT_POOL_NULL) { abt_errno = ABT_pool_create_basic(ABT_POOL_FIFO, ABT_POOL_ACCESS_MPSC, ABT_TRUE, &pool_list[p]); ABTI_CHECK_ERROR(abt_errno); } else { pool_list[p] = pools[p]; } } /* Check if the pools are available */ for (p = 0; p < num_pools; p++) { ABTI_pool_retain(ABTI_pool_get_ptr(pool_list[p])); } p_sched->used = ABTI_SCHED_NOT_USED; p_sched->automatic = ABT_FALSE; p_sched->kind = ABTI_sched_get_kind(def); p_sched->state = ABT_SCHED_STATE_READY; p_sched->request = 0; p_sched->pools = pool_list; p_sched->num_pools = num_pools; p_sched->type = def->type; p_sched->p_thread = NULL; p_sched->p_task = NULL; p_sched->p_ctx = NULL; 
p_sched->init = def->init; p_sched->run = def->run; p_sched->free = def->free; p_sched->get_migr_pool = def->get_migr_pool; #ifdef ABT_CONFIG_USE_DEBUG_LOG p_sched->id = ABTI_sched_get_new_id(); #endif LOG_EVENT("[S%" PRIu64 "] created\n", p_sched->id); /* Return value */ *newsched = ABTI_sched_get_handle(p_sched); /* Specific initialization */ p_sched->init(*newsched, config); fn_exit: return abt_errno; fn_fail: *newsched = ABT_SCHED_NULL; HANDLE_ERROR_FUNC_WITH_CODE(abt_errno); goto fn_exit; }
/**
 * @ingroup COND
 * @brief   Wait on the condition.
 *
 * The ULT calling \c ABT_cond_timedwait() waits on the condition variable
 * until it is signaled or the absolute time specified by \c abstime passes.
 * If system time equals or exceeds \c abstime before \c cond is signaled,
 * the error code \c ABT_ERR_COND_TIMEDOUT is returned.
 *
 * The user should call this routine while the mutex specified as \c mutex is
 * locked. The mutex will be automatically released while waiting. After signal
 * is received and the waiting ULT is awakened, the mutex will be
 * automatically locked for use by the ULT. The user is then responsible for
 * unlocking mutex when the ULT is finished with it.
 *
 * @param[in] cond     handle to the condition variable
 * @param[in] mutex    handle to the mutex
 * @param[in] abstime  absolute time for timeout
 * @return Error code
 * @retval ABT_SUCCESS             on success
 * @retval ABT_ERR_COND_TIMEDOUT   timeout
 */
int ABT_cond_timedwait(ABT_cond cond, ABT_mutex mutex,
                       const struct timespec *abstime)
{
    int abt_errno = ABT_SUCCESS;
    ABTI_cond *p_cond = ABTI_cond_get_ptr(cond);
    ABTI_CHECK_NULL_COND_PTR(p_cond);
    ABTI_mutex *p_mutex = ABTI_mutex_get_ptr(mutex);
    ABTI_CHECK_NULL_MUTEX_PTR(p_mutex);

    double tar_time = convert_timespec_to_sec(abstime);

    /* The timed wait always uses an external-type unit that polls
     * ext_signal, so the same code path serves ULTs and external threads. */
    ABTI_unit *p_unit;
    volatile int ext_signal = 0;

    p_unit = (ABTI_unit *)ABTU_calloc(1, sizeof(ABTI_unit));
    p_unit->pool = (ABT_pool)&ext_signal;
    p_unit->type = ABT_UNIT_TYPE_EXT;

    ABTI_mutex_spinlock(&p_cond->mutex);

    /* All waiters of one condition variable must use the same mutex. */
    if (p_cond->p_waiter_mutex == NULL) {
        p_cond->p_waiter_mutex = p_mutex;
    } else {
        ABT_bool result = ABTI_mutex_equal(p_cond->p_waiter_mutex, p_mutex);
        if (result == ABT_FALSE) {
            ABTI_mutex_unlock(&p_cond->mutex);
            /* Fix: free the waiter unit allocated above; it was leaked on
             * this error path. */
            ABTU_free(p_unit);
            abt_errno = ABT_ERR_INV_MUTEX;
            goto fn_fail;
        }
    }

    /* Enqueue p_unit in the circular doubly-linked waiter list. */
    if (p_cond->num_waiters == 0) {
        p_unit->p_prev = p_unit;
        p_unit->p_next = p_unit;
        p_cond->p_head = p_unit;
        p_cond->p_tail = p_unit;
    } else {
        p_cond->p_tail->p_next = p_unit;
        p_cond->p_head->p_prev = p_unit;
        p_unit->p_prev = p_cond->p_tail;
        p_unit->p_next = p_cond->p_head;
        p_cond->p_tail = p_unit;
    }

    p_cond->num_waiters++;

    ABTI_mutex_unlock(&p_cond->mutex);

    /* Unlock the mutex that the calling ULT is holding */
    ABTI_mutex_unlock(p_mutex);

    /* Poll until signaled or abstime passes (busy wait with yields).
     * NOTE(review): a signal arriving between the time check and
     * remove_unit() is presumably handled inside remove_unit — confirm. */
    while (!ext_signal) {
        double cur_time = get_cur_time();
        if (cur_time >= tar_time) {
            remove_unit(p_cond, p_unit);
            abt_errno = ABT_ERR_COND_TIMEDOUT;
            break;
        }
        ABT_thread_yield();
    }

    ABTU_free(p_unit);

    /* Lock the mutex again */
    ABTI_mutex_spinlock(p_mutex);

  fn_exit:
    return abt_errno;

  fn_fail:
    HANDLE_ERROR_FUNC_WITH_CODE(abt_errno);
    goto fn_exit;
}
/**
 * @ingroup SCHED
 * @brief   Create a predefined scheduler.
 *
 * \c ABT_sched_create_basic() creates a predefined scheduler and returns its
 * handle through \c newsched. The pools used by the new scheduler are
 * provided by \c pools. The contents of this array is copied, so it can be
 * freed. If a pool in the array is \c ABT_POOL_NULL, the corresponding pool is
 * automatically created. The pool array can be \c NULL. In this case, all
 * the pools will be created automatically. The config must have been created
 * by \c ABT_sched_config_create(), and will be used as argument in the
 * initialization. If no specific configuration is required, the parameter can
 * be \c ABT_CONFIG_NULL.
 *
 * NOTE: The new scheduler will be automatically freed when it is not used
 * anymore or its associated ES is terminated. Accordingly, the pools
 * associated with the new scheduler will be automatically freed if they are
 * exclusive to the scheduler and are not user-defined ones (i.e., created by
 * \c ABT_pool_create_basic() or implicitly created because \c pools is \c NULL
 * or a pool in the \c pools array is \c ABT_POOL_NULL). If the pools were
 * created using \c ABT_pool_create() by the user, they have to be freed
 * explicitly with \c ABT_pool_free().
 *
 * @param[in]  predef     predefined scheduler
 * @param[in]  num_pools  number of pools associated with this scheduler
 * @param[in]  pools      pools associated with this scheduler
 * @param[in]  config     specific config used during the scheduler creation
 * @param[out] newsched   handle to a new scheduler
 * @return Error code
 * @retval ABT_SUCCESS on success
 */
int ABT_sched_create_basic(ABT_sched_predef predef, int num_pools,
                           ABT_pool *pools, ABT_sched_config config,
                           ABT_sched *newsched)
{
    int abt_errno = ABT_SUCCESS;
    ABT_pool_access access;
    ABT_bool automatic;
    int p;

    /* We set the access to the default one */
    access = ABT_POOL_ACCESS_MPSC;
    automatic = ABT_TRUE; /* fix: stray double semicolon removed */

    /* We read the config and set the configured parameters */
    abt_errno = ABTI_sched_config_read_global(config, &access, &automatic);
    ABTI_CHECK_ERROR(abt_errno);

    /* A pool array is provided, predef has to be compatible */
    if (pools != NULL) {
        /* Copy of the contents of pools */
        ABT_pool *pool_list;
        pool_list = (ABT_pool *)ABTU_malloc(num_pools*sizeof(ABT_pool));
        for (p = 0; p < num_pools; p++) {
            if (pools[p] == ABT_POOL_NULL) {
                abt_errno = ABT_pool_create_basic(ABT_POOL_FIFO, access,
                                                  ABT_TRUE, &pool_list[p]);
                ABTI_CHECK_ERROR(abt_errno);
            } else {
                pool_list[p] = pools[p];
            }
        }

        /* Creation of the scheduler */
        /* NOTE(review): this branch passes ABT_SCHED_CONFIG_NULL while the
         * no-pool branch below forwards `config`; confirm the asymmetry is
         * intended. */
        switch (predef) {
            case ABT_SCHED_DEFAULT:
            case ABT_SCHED_BASIC:
                abt_errno = ABT_sched_create(ABTI_sched_get_basic_def(),
                                             num_pools, pool_list,
                                             ABT_SCHED_CONFIG_NULL, newsched);
                break;
            case ABT_SCHED_PRIO:
                abt_errno = ABT_sched_create(ABTI_sched_get_prio_def(),
                                             num_pools, pool_list,
                                             ABT_SCHED_CONFIG_NULL, newsched);
                break;
            case ABT_SCHED_RANDWS:
                abt_errno = ABT_sched_create(ABTI_sched_get_randws_def(),
                                             num_pools, pool_list,
                                             ABT_SCHED_CONFIG_NULL, newsched);
                break;
            default:
                abt_errno = ABT_ERR_INV_SCHED_PREDEF;
                break;
        }
        ABTI_CHECK_ERROR(abt_errno);
        ABTU_free(pool_list);
    }
    /* No pool array is provided, predef has to be compatible */
    else {
        /* Set the number of pools */
        switch (predef) {
            case ABT_SCHED_DEFAULT:
            case ABT_SCHED_BASIC:
                num_pools = 1;
                break;
            case ABT_SCHED_PRIO:
                num_pools = ABTI_SCHED_NUM_PRIO;
                break;
            case ABT_SCHED_RANDWS:
                num_pools = 1;
                break;
            default:
                abt_errno = ABT_ERR_INV_SCHED_PREDEF;
                ABTI_CHECK_ERROR(abt_errno);
                break;
        }

        /* Creation of the pools */
        /* To avoid the malloc overhead, we use a stack array. */
        ABT_pool pool_list[ABTI_SCHED_NUM_PRIO];
        /* fix: inner `int p;` removed — it shadowed the function-scope one */
        for (p = 0; p < num_pools; p++) {
            abt_errno = ABT_pool_create_basic(ABT_POOL_FIFO, access,
                                              ABT_TRUE, pool_list+p);
            ABTI_CHECK_ERROR(abt_errno);
        }

        /* Creation of the scheduler */
        switch (predef) {
            case ABT_SCHED_DEFAULT:
            case ABT_SCHED_BASIC:
                abt_errno = ABT_sched_create(ABTI_sched_get_basic_def(),
                                             num_pools, pool_list,
                                             config, newsched);
                break;
            case ABT_SCHED_PRIO:
                abt_errno = ABT_sched_create(ABTI_sched_get_prio_def(),
                                             num_pools, pool_list,
                                             config, newsched);
                break;
            case ABT_SCHED_RANDWS:
                abt_errno = ABT_sched_create(ABTI_sched_get_randws_def(),
                                             num_pools, pool_list,
                                             config, newsched);
                break;
            default:
                abt_errno = ABT_ERR_INV_SCHED_PREDEF;
                ABTI_CHECK_ERROR(abt_errno);
                break;
        }
    }
    ABTI_CHECK_ERROR(abt_errno);

    /* Mark the new scheduler as automatic (or not) per the global config. */
    ABTI_sched *p_sched = ABTI_sched_get_ptr(*newsched);
    p_sched->automatic = automatic;

  fn_exit:
    return abt_errno;

  fn_fail:
    HANDLE_ERROR_FUNC_WITH_CODE(abt_errno);
    /* Fix: guard against a NULL output pointer before clearing it. */
    if (newsched != NULL) *newsched = ABT_SCHED_NULL;
    goto fn_exit;
}
/**
 * @ingroup COND
 * @brief   Wait on the condition.
 *
 * The ULT calling \c ABT_cond_wait() waits on the condition variable until
 * it is signaled.
 * The user should call this routine while the mutex specified as \c mutex is
 * locked. The mutex will be automatically released while waiting. After signal
 * is received and the waiting ULT is awakened, the mutex will be
 * automatically locked for use by the ULT. The user is then responsible for
 * unlocking mutex when the ULT is finished with it.
 *
 * @param[in] cond   handle to the condition variable
 * @param[in] mutex  handle to the mutex
 * @return Error code
 * @retval ABT_SUCCESS on success
 */
int ABT_cond_wait(ABT_cond cond, ABT_mutex mutex)
{
    int abt_errno = ABT_SUCCESS;
    ABTI_cond *p_cond = ABTI_cond_get_ptr(cond);
    ABTI_CHECK_NULL_COND_PTR(p_cond);
    ABTI_mutex *p_mutex = ABTI_mutex_get_ptr(mutex);
    ABTI_CHECK_NULL_MUTEX_PTR(p_mutex);

    ABTI_thread *p_thread;
    ABTI_unit *p_unit;
    ABT_unit_type type;
    volatile int ext_signal = 0;

    if (lp_ABTI_local != NULL) {
        /* The caller is a ULT: reuse its embedded unit descriptor. */
        p_thread = ABTI_local_get_thread();
        ABTI_CHECK_TRUE(p_thread != NULL, ABT_ERR_COND);

        type = ABT_UNIT_TYPE_THREAD;
        p_unit = &p_thread->unit_def;
        p_unit->thread = ABTI_thread_get_handle(p_thread);
        p_unit->type = type;
    } else {
        /* external thread: allocate a unit that polls ext_signal */
        type = ABT_UNIT_TYPE_EXT;
        p_unit = (ABTI_unit *)ABTU_calloc(1, sizeof(ABTI_unit));
        p_unit->pool = (ABT_pool)&ext_signal;
        p_unit->type = type;
    }

    ABTI_mutex_spinlock(&p_cond->mutex);

    /* All waiters of one condition variable must use the same mutex. */
    if (p_cond->p_waiter_mutex == NULL) {
        p_cond->p_waiter_mutex = p_mutex;
    } else {
        ABT_bool result = ABTI_mutex_equal(p_cond->p_waiter_mutex, p_mutex);
        if (result == ABT_FALSE) {
            ABTI_mutex_unlock(&p_cond->mutex);
            /* Fix: free the unit allocated for an external thread; it was
             * leaked on this error path.  A ULT's unit is embedded in the
             * thread structure and must not be freed. */
            if (type == ABT_UNIT_TYPE_EXT) {
                ABTU_free(p_unit);
            }
            abt_errno = ABT_ERR_INV_MUTEX;
            goto fn_fail;
        }
    }

    /* Enqueue p_unit in the circular doubly-linked waiter list. */
    if (p_cond->num_waiters == 0) {
        p_unit->p_prev = p_unit;
        p_unit->p_next = p_unit;
        p_cond->p_head = p_unit;
        p_cond->p_tail = p_unit;
    } else {
        p_cond->p_tail->p_next = p_unit;
        p_cond->p_head->p_prev = p_unit;
        p_unit->p_prev = p_cond->p_tail;
        p_unit->p_next = p_cond->p_head;
        p_cond->p_tail = p_unit;
    }

    p_cond->num_waiters++;

    if (type == ABT_UNIT_TYPE_THREAD) {
        /* Change the ULT's state to BLOCKED */
        ABTI_thread_set_blocked(p_thread);

        ABTI_mutex_unlock(&p_cond->mutex);

        /* Unlock the mutex that the calling ULT is holding */
        /* FIXME: should check if mutex was locked by the calling ULT */
        ABTI_mutex_unlock(p_mutex);

        /* Suspend the current ULT */
        ABTI_thread_suspend(p_thread);
    } else { /* TYPE == ABT_UNIT_TYPE_EXT */
        ABTI_mutex_unlock(&p_cond->mutex);
        ABTI_mutex_unlock(p_mutex);

        /* External thread is waiting here polling ext_signal. */
        /* FIXME: need a better implementation */
        while (!ext_signal) {
        }
        ABTU_free(p_unit);
    }

    /* Lock the mutex again */
    ABTI_mutex_spinlock(p_mutex);

  fn_exit:
    return abt_errno;

  fn_fail:
    HANDLE_ERROR_FUNC_WITH_CODE(abt_errno);
    goto fn_exit;
}