/*---------------------------------------------------------------------------*/
na_return_t
NA_Context_destroy(na_class_t *na_class, na_context_t *context)
{
    struct na_private_context *na_private_context =
        (struct na_private_context *) context;
    na_return_t ret = NA_SUCCESS;

    if (!na_class) {
        NA_LOG_ERROR("NULL NA class");
        ret = NA_INVALID_PARAM;
        goto done;
    }
    if (!context) goto done;

    /* Check that completion queue is empty now */
    hg_thread_mutex_lock(&na_private_context->completion_queue_mutex);

    if (!hg_queue_is_empty(na_private_context->completion_queue)) {
        NA_LOG_ERROR("Completion queue should be empty");
        ret = NA_PROTOCOL_ERROR;
        hg_thread_mutex_unlock(&na_private_context->completion_queue_mutex);
        goto done;
    }

    if (na_class->context_destroy) {
        ret = na_class->context_destroy(na_class,
            na_private_context->context.plugin_context);
        if (ret != NA_SUCCESS) {
            /* Do not leave the completion queue mutex locked on error */
            hg_thread_mutex_unlock(
                &na_private_context->completion_queue_mutex);
            goto done;
        }
    }

    /* Destroy completion queue */
    hg_queue_free(na_private_context->completion_queue);
    na_private_context->completion_queue = NULL;

    hg_thread_mutex_unlock(&na_private_context->completion_queue_mutex);

    /* Destroy completion queue mutex/cond */
    hg_thread_mutex_destroy(&na_private_context->completion_queue_mutex);
    hg_thread_cond_destroy(&na_private_context->completion_queue_cond);

    /* Destroy progress mutex/cond */
    hg_thread_mutex_destroy(&na_private_context->progress_mutex);
    hg_thread_cond_destroy(&na_private_context->progress_cond);

    free(na_private_context);

done:
    return ret;
}
int
main(int argc, char *argv[])
{
    hg_thread_t thread[MERCURY_TESTING_NUM_THREADS];
    int ret = EXIT_SUCCESS;
    int i;

    (void) argc;
    (void) argv;

    for (i = 0; i < MERCURY_TESTING_NUM_THREADS; i++)
        hg_thread_init(&thread[i]);
    hg_thread_mutex_init(&thread_mutex);
    hg_thread_cond_init(&thread_cond);

    for (i = 0; i < MERCURY_TESTING_NUM_THREADS; i++)
        hg_thread_create(&thread[i], thread_cb_cond, NULL);
    for (i = 0; i < MERCURY_TESTING_NUM_THREADS; i++)
        hg_thread_join(thread[i]);

    working = 1;
    for (i = 0; i < MERCURY_TESTING_NUM_THREADS; i++)
        hg_thread_create(&thread[i], thread_cb_cond_all, NULL);

    hg_thread_mutex_lock(&thread_mutex);
    working = 0;
    hg_thread_cond_broadcast(&thread_cond);
    hg_thread_mutex_unlock(&thread_mutex);

    for (i = 0; i < MERCURY_TESTING_NUM_THREADS; i++)
        hg_thread_join(thread[i]);

    return ret;
}
/*---------------------------------------------------------------------------*/
na_return_t
na_cb_completion_add(na_context_t *context, na_cb_t callback,
    struct na_cb_info *callback_info, na_plugin_cb_t plugin_callback,
    void *plugin_callback_args)
{
    struct na_private_context *na_private_context =
        (struct na_private_context *) context;
    na_return_t ret = NA_SUCCESS;
    struct na_cb_completion_data *completion_data = NULL;

    assert(context);

    completion_data = (struct na_cb_completion_data *)
        malloc(sizeof(struct na_cb_completion_data));
    if (!completion_data) {
        NA_LOG_ERROR("Could not allocate completion data struct");
        ret = NA_NOMEM_ERROR;
        goto done;
    }
    completion_data->callback = callback;
    completion_data->callback_info = callback_info;
    completion_data->plugin_callback = plugin_callback;
    completion_data->plugin_callback_args = plugin_callback_args;

    hg_thread_mutex_lock(&na_private_context->completion_queue_mutex);

    if (!hg_queue_push_head(na_private_context->completion_queue,
            (hg_queue_value_t) completion_data)) {
        NA_LOG_ERROR("Could not push completion data to completion queue");
        ret = NA_NOMEM_ERROR;
        hg_thread_mutex_unlock(
            &na_private_context->completion_queue_mutex);
        free(completion_data);
        goto done;
    }

    /* Callback is pushed to the completion queue when something completes
     * so wake up anyone waiting in the trigger */
    hg_thread_cond_signal(&na_private_context->completion_queue_cond);

    hg_thread_mutex_unlock(&na_private_context->completion_queue_mutex);

done:
    return ret;
}
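/*---------------------------------------------------------------------------*/
/* Illustrative sketch (not plugin code from this repository): how an NA
 * plugin might hand a completed lookup to na_cb_completion_add(). The plugin
 * allocates the na_cb_info, fills in the user's argument and the result, and
 * supplies a plugin callback to release its own state once the user callback
 * has run. The plugin callback signature is inferred from how NA_Trigger()
 * invokes it below; the "type" field name and the helper names are
 * assumptions made for the example only. */
static void
na_plugin_release(struct na_cb_info *callback_info, void *plugin_callback_args)
{
    /* Free plugin operation state and the callback info once triggered */
    free(plugin_callback_args);
    free(callback_info);
}

static na_return_t
na_plugin_lookup_complete(na_context_t *context, na_cb_t callback,
    void *user_arg, na_addr_t addr, void *plugin_op)
{
    struct na_cb_info *callback_info = NULL;
    na_return_t ret = NA_SUCCESS;

    callback_info = (struct na_cb_info *) malloc(sizeof(struct na_cb_info));
    if (!callback_info) {
        NA_LOG_ERROR("Could not allocate callback info");
        ret = NA_NOMEM_ERROR;
        goto done;
    }
    callback_info->arg = user_arg;          /* argument passed at lookup time */
    callback_info->ret = NA_SUCCESS;        /* completion status */
    callback_info->type = NA_CB_LOOKUP;     /* assumed field name for cb type */
    callback_info->info.lookup.addr = addr; /* result read by the lookup cb */

    /* Queue the callback; na_plugin_release runs after the user callback */
    ret = na_cb_completion_add(context, callback, callback_info,
        na_plugin_release, plugin_op);

done:
    return ret;
}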
static HG_THREAD_RETURN_TYPE
thread_cb_cond(void *arg)
{
    hg_thread_ret_t thread_ret = (hg_thread_ret_t) 0;

    (void) arg;

    hg_thread_mutex_lock(&thread_mutex);
    while (working)
        hg_thread_cond_wait(&thread_cond, &thread_mutex);
    working = 1;
    hg_thread_mutex_unlock(&thread_mutex);

    hg_thread_mutex_lock(&thread_mutex);
    working = 0;
    hg_thread_cond_signal(&thread_cond);
    hg_thread_mutex_unlock(&thread_mutex);

    hg_thread_exit(thread_ret);

    return thread_ret;
}
static HG_THREAD_RETURN_TYPE
thread_cb_cond_all(void *arg)
{
    hg_thread_ret_t thread_ret = (hg_thread_ret_t) 0;

    (void) arg;

    hg_thread_mutex_lock(&thread_mutex);
    while (working)
        hg_thread_cond_timedwait(&thread_cond, &thread_mutex, 1000);
    hg_thread_mutex_unlock(&thread_mutex);

    hg_thread_exit(thread_ret);

    return thread_ret;
}
/*---------------------------------------------------------------------------*/
static na_return_t
na_addr_lookup_cb(const struct na_cb_info *callback_info)
{
    na_addr_t *addr_ptr = (na_addr_t *) callback_info->arg;
    na_return_t ret = NA_SUCCESS;

    if (callback_info->ret != NA_SUCCESS) {
        return ret;
    }

    hg_thread_mutex_lock(&na_addr_lookup_mutex_g);

    *addr_ptr = callback_info->info.lookup.addr;

    hg_thread_mutex_unlock(&na_addr_lookup_mutex_g);

    return ret;
}
/*---------------------------------------------------------------------------*/
HG_TEST_RPC_CB(hg_test_perf_bulk, handle)
{
    hg_return_t ret = HG_SUCCESS;
    struct hg_info *hg_info = NULL;
    hg_bulk_t origin_bulk_handle = HG_BULK_NULL;
    hg_bulk_t local_bulk_handle = HG_BULK_NULL;
    struct hg_test_bulk_args *bulk_args = NULL;
    bulk_write_in_t in_struct;

    bulk_args = (struct hg_test_bulk_args *) malloc(
        sizeof(struct hg_test_bulk_args));

    /* Keep handle to pass to callback */
    bulk_args->handle = handle;

    /* Get info from handle */
    hg_info = HG_Get_info(handle);

    /* Get input struct */
    ret = HG_Get_input(handle, &in_struct);
    if (ret != HG_SUCCESS) {
        fprintf(stderr, "Could not get input struct\n");
        return ret;
    }

    origin_bulk_handle = in_struct.bulk_handle;
    hg_atomic_set32(&bulk_args->completed_transfers, 0);

    /* Create a new bulk handle to read the data */
    bulk_args->nbytes = HG_Bulk_get_size(origin_bulk_handle);
    bulk_args->fildes = in_struct.fildes;

#ifdef MERCURY_TESTING_USE_LOCAL_BULK
    /* Create a new bulk handle to read the data */
    HG_Bulk_create(hg_info->hg_class, 1, NULL, &bulk_args->nbytes,
        HG_BULK_READWRITE, &local_bulk_handle);
#else
#ifdef MERCURY_TESTING_HAS_THREAD_POOL
    hg_thread_mutex_lock(&hg_test_local_bulk_handle_mutex_g);
#endif
    local_bulk_handle = hg_test_local_bulk_handle_g;
#endif

    /* Pull bulk data */
    ret = HG_Bulk_transfer(hg_info->context, hg_test_perf_bulk_transfer_cb,
        bulk_args, HG_BULK_PULL, hg_info->addr, origin_bulk_handle, 0,
        local_bulk_handle, 0, bulk_args->nbytes, HG_OP_ID_IGNORE);
    if (ret != HG_SUCCESS) {
        fprintf(stderr, "Could not read bulk data\n");
        return ret;
    }

#ifndef MERCURY_TESTING_USE_LOCAL_BULK
#ifdef MERCURY_TESTING_HAS_THREAD_POOL
    hg_thread_mutex_unlock(&hg_test_local_bulk_handle_mutex_g);
#endif
#endif

    HG_Free_input(handle, &in_struct);

    return ret;
}
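/*---------------------------------------------------------------------------*/
/* The transfer completion callback hg_test_perf_bulk_transfer_cb is
 * referenced above but defined elsewhere in the test suite. The sketch below
 * is illustrative only and is NOT the repository's implementation: it shows
 * the typical shape of such a callback, i.e. send the RPC response back to
 * the origin and free the per-call state. The bulk_write_out_t "ret" field
 * and the cleanup steps are assumptions made for the example. */
static hg_return_t
hg_test_perf_bulk_transfer_cb(const struct hg_cb_info *callback_info)
{
    struct hg_test_bulk_args *bulk_args =
        (struct hg_test_bulk_args *) callback_info->arg;
    bulk_write_out_t out_struct;
    hg_return_t ret = HG_SUCCESS;

    if (callback_info->ret != HG_SUCCESS) {
        fprintf(stderr, "Bulk transfer failed\n");
        goto done;
    }

    /* Report how many bytes were pulled (assumed output field) */
    out_struct.ret = bulk_args->nbytes;

    /* Send response back to origin */
    ret = HG_Respond(bulk_args->handle, NULL, NULL, &out_struct);
    if (ret != HG_SUCCESS)
        fprintf(stderr, "Could not respond\n");

done:
    free(bulk_args);
    return ret;
}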
/*---------------------------------------------------------------------------*/
na_return_t
NA_Trigger(na_context_t *context, unsigned int timeout, unsigned int max_count,
    unsigned int *actual_count)
{
    struct na_private_context *na_private_context =
        (struct na_private_context *) context;
    na_return_t ret = NA_SUCCESS;
    na_bool_t completion_queue_empty = 0;
    struct na_cb_completion_data *completion_data = NULL;
    unsigned int count = 0;

    if (!context) {
        NA_LOG_ERROR("NULL context");
        ret = NA_INVALID_PARAM;
        goto done;
    }

    while (count < max_count) {
        hg_thread_mutex_lock(&na_private_context->completion_queue_mutex);

        /* Is completion queue empty */
        completion_queue_empty = (na_bool_t) hg_queue_is_empty(
            na_private_context->completion_queue);

        while (completion_queue_empty) {
            /* TODO needed ? */
            /* If queue is empty and already triggered something, just leave */
            if (count) {
                hg_thread_mutex_unlock(
                    &na_private_context->completion_queue_mutex);
                goto done;
            }
            if (!timeout) {
                /* Timeout is 0 so leave */
                ret = NA_TIMEOUT;
                hg_thread_mutex_unlock(
                    &na_private_context->completion_queue_mutex);
                goto done;
            }
            /* Otherwise wait timeout ms */
            if (hg_thread_cond_timedwait(
                    &na_private_context->completion_queue_cond,
                    &na_private_context->completion_queue_mutex,
                    timeout) != HG_UTIL_SUCCESS) {
                /* Timeout occurred so leave */
                ret = NA_TIMEOUT;
                hg_thread_mutex_unlock(
                    &na_private_context->completion_queue_mutex);
                goto done;
            }
            /* Re-check the predicate after being woken up */
            completion_queue_empty = (na_bool_t) hg_queue_is_empty(
                na_private_context->completion_queue);
        }

        /* Completion queue should not be empty now */
        completion_data = (struct na_cb_completion_data *)
            hg_queue_pop_tail(na_private_context->completion_queue);
        if (!completion_data) {
            NA_LOG_ERROR("NULL completion data");
            ret = NA_INVALID_PARAM;
            hg_thread_mutex_unlock(
                &na_private_context->completion_queue_mutex);
            goto done;
        }

        /* Unlock now so that other threads can eventually add callbacks
         * to the queue while callback gets executed */
        hg_thread_mutex_unlock(&na_private_context->completion_queue_mutex);

        /* Execute callback */
        if (completion_data->callback) {
            /* TODO should return error from callback ? */
            completion_data->callback(completion_data->callback_info);
        }

        /* Execute plugin callback (free resources etc) */
        if (completion_data->plugin_callback)
            completion_data->plugin_callback(completion_data->callback_info,
                completion_data->plugin_callback_args);

        free(completion_data);
        count++;
    }

    if (actual_count)
        *actual_count = count;

done:
    return ret;
}
/*---------------------------------------------------------------------------*/
na_return_t
NA_Progress(na_class_t *na_class, na_context_t *context, unsigned int timeout)
{
    struct na_private_context *na_private_context =
        (struct na_private_context *) context;
    double remaining = timeout / 1000.0; /* Convert timeout in ms into seconds */
    na_return_t ret = NA_SUCCESS;

    if (!na_class) {
        NA_LOG_ERROR("NULL NA class");
        ret = NA_INVALID_PARAM;
        goto done;
    }
    if (!context) {
        NA_LOG_ERROR("NULL context");
        ret = NA_INVALID_PARAM;
        goto done;
    }
    if (!na_class->progress) {
        NA_LOG_ERROR("progress plugin callback is not defined");
        ret = NA_PROTOCOL_ERROR;
        goto done;
    }

    /* TODO option for concurrent progress */
    /* Prevent multiple threads from concurrently calling progress on the same
     * context */
    hg_thread_mutex_lock(&na_private_context->progress_mutex);

    while (na_private_context->progressing) {
        hg_time_t t1, t2;

        if (remaining <= 0) {
            /* Timeout is 0 so leave */
            hg_thread_mutex_unlock(&na_private_context->progress_mutex);
            ret = NA_TIMEOUT;
            goto done;
        }

        hg_time_get_current(&t1);
        if (hg_thread_cond_timedwait(&na_private_context->progress_cond,
                &na_private_context->progress_mutex,
                (unsigned int) (remaining * 1000)) != HG_UTIL_SUCCESS) {
            /* Timeout occurred so leave */
            hg_thread_mutex_unlock(&na_private_context->progress_mutex);
            ret = NA_TIMEOUT;
            goto done;
        }
        hg_time_get_current(&t2);
        remaining -= hg_time_to_double(hg_time_subtract(t2, t1));
        if (remaining < 0) {
            /* Give a chance to call progress with timeout of 0 if
             * progressing is NA_FALSE */
            remaining = 0;
        }
    }
    na_private_context->progressing = NA_TRUE;

    hg_thread_mutex_unlock(&na_private_context->progress_mutex);

    /* Try to make progress for remaining time */
    ret = na_class->progress(na_class, context,
        (unsigned int) (remaining * 1000));

    hg_thread_mutex_lock(&na_private_context->progress_mutex);

    /* At this point, either progress succeeded or failed with NA_TIMEOUT,
     * meaning remaining time is now 0, so wake up other threads waiting */
    na_private_context->progressing = NA_FALSE;
    hg_thread_cond_signal(&na_private_context->progress_cond);

    hg_thread_mutex_unlock(&na_private_context->progress_mutex);

done:
    return ret;
}
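/*---------------------------------------------------------------------------*/
/* Usage sketch only: NA_Progress() and NA_Trigger() are meant to be driven
 * from a caller-side event loop, first draining the completion queue, then
 * letting the plugin make network progress (this mirrors the loop used by
 * NA_Addr_lookup_wait() below). The poll_until_done() name, the 100 ms
 * timeout and the done flag are placeholders, not part of the NA API. */
static void
poll_until_done(na_class_t *na_class, na_context_t *context, na_bool_t *done)
{
    while (!*done) {
        na_return_t ret;
        unsigned int actual_count = 0;

        /* Drain the completion queue: timeout 0, trigger at most one
         * callback per call, stop as soon as nothing is left */
        do {
            ret = NA_Trigger(context, 0, 1, &actual_count);
        } while ((ret == NA_SUCCESS) && actual_count);

        /* Let the plugin progress for up to 100 ms (placeholder value);
         * NA_TIMEOUT simply means nothing completed in that window */
        ret = NA_Progress(na_class, context, 100);
        if (ret != NA_SUCCESS && ret != NA_TIMEOUT)
            break;
    }
}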
/*---------------------------------------------------------------------------*/
na_return_t
NA_Addr_lookup_wait(na_class_t *na_class, const char *name, na_addr_t *addr)
{
    na_addr_t new_addr = NULL;
    na_bool_t lookup_completed = NA_FALSE;
    na_context_t *context = NULL;
    na_return_t ret = NA_SUCCESS;

    assert(na_class);

    if (!addr) {
        NA_LOG_ERROR("NULL pointer to na_addr_t");
        ret = NA_INVALID_PARAM;
        goto done;
    }

    context = NA_Context_create(na_class);
    if (!context) {
        NA_LOG_ERROR("Could not create context");
        ret = NA_NOMEM_ERROR;
        goto done;
    }

    ret = NA_Addr_lookup(na_class, context, &na_addr_lookup_cb, &new_addr,
        name, NA_OP_ID_IGNORE);
    if (ret != NA_SUCCESS) {
        NA_LOG_ERROR("Could not start NA_Addr_lookup");
        goto done;
    }

    while (!lookup_completed) {
        na_return_t trigger_ret;
        unsigned int actual_count = 0;

        do {
            trigger_ret = NA_Trigger(context, 0, 1, &actual_count);
        } while ((trigger_ret == NA_SUCCESS) && actual_count);

        hg_thread_mutex_lock(&na_addr_lookup_mutex_g);
        if (new_addr) {
            lookup_completed = NA_TRUE;
            *addr = new_addr;
        }
        hg_thread_mutex_unlock(&na_addr_lookup_mutex_g);
        if (lookup_completed) break;

        ret = NA_Progress(na_class, context, NA_MAX_IDLE_TIME);
        if (ret != NA_SUCCESS) {
            NA_LOG_ERROR("Could not make progress");
            goto done;
        }
    }

    ret = NA_Context_destroy(na_class, context);
    if (ret != NA_SUCCESS) {
        NA_LOG_ERROR("Could not destroy context");
        goto done;
    }

done:
    return ret;
}
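/*---------------------------------------------------------------------------*/
/* Usage sketch only: a blocking lookup at startup, before any caller-managed
 * context or progress loop exists. The "tcp://localhost:4444" info string
 * and the function name are purely illustrative; the actual string depends
 * on the NA plugin being used. */
static void
lookup_target_example(void)
{
    na_class_t *na_class = NULL;
    na_addr_t target_addr = NA_ADDR_NULL;

    na_class = NA_Initialize("tcp://localhost:4444", NA_FALSE);
    if (!na_class)
        return;

    /* Internally creates a temporary context and drives progress/trigger
     * until na_addr_lookup_cb has stored the resolved address */
    if (NA_Addr_lookup_wait(na_class, "tcp://localhost:4444", &target_addr)
            != NA_SUCCESS) {
        fprintf(stderr, "Could not look up target address\n");
    }

    /* ... use target_addr ... */

    if (target_addr != NA_ADDR_NULL)
        NA_Addr_free(na_class, target_addr);
    NA_Finalize(na_class);
}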