Example #1
File: test_atomic.c Project: carns/mercury
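This test callback combines hg_atomic_incr32() with hg_atomic_cas32(): every thread increments the shared counter, and the single thread whose compare-and-swap of 2 for 99 succeeds increments it once more.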
static HG_THREAD_RETURN_TYPE
thread_cb_cas32(void *arg)
{
    hg_thread_ret_t thread_ret = (hg_thread_ret_t) 0;
    hg_atomic_int32_t *atomic_int32 = (hg_atomic_int32_t *) arg;

    hg_atomic_incr32(atomic_int32);

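    /* Atomically swap the counter from 2 to 99; only the thread that
     * observes the value 2 succeeds and performs the extra increment */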
    if (HG_UTIL_TRUE == hg_atomic_cas32(atomic_int32, 2, 99)) {
        hg_atomic_incr32(atomic_int32);
    }

    hg_thread_exit(thread_ret);
    return thread_ret;
}
Example #2
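hg_bulk_complete() marks a bulk operation as completed with hg_atomic_incr32(), then either triggers the completion entry directly (eager mode, to avoid deadlocks) or adds it to the context's completion queue.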
/*---------------------------------------------------------------------------*/
static hg_return_t
hg_bulk_complete(struct hg_bulk_op_id *hg_bulk_op_id)
{
    hg_context_t *context = hg_bulk_op_id->context;
    hg_return_t ret = HG_SUCCESS;

    /* Mark operation as completed */
    hg_atomic_incr32(&hg_bulk_op_id->completed);

    if (hg_bulk_op_id->hg_bulk_origin->eager_mode) {
        /* In the case of eager bulk transfer, directly trigger the operation
         * to avoid potential deadlocks */
        ret = hg_bulk_trigger_entry(hg_bulk_op_id);
        if (ret != HG_SUCCESS) {
            HG_LOG_ERROR("Could not trigger completion entry");
            goto done;
        }
    } else {
        struct hg_completion_entry *hg_completion_entry =
            &hg_bulk_op_id->hg_completion_entry;

        hg_completion_entry->op_type = HG_BULK;
        hg_completion_entry->op_id.hg_bulk_op_id = hg_bulk_op_id;

        ret = hg_core_completion_add(context, hg_completion_entry,
            hg_bulk_op_id->is_self);
        if (ret != HG_SUCCESS) {
            HG_LOG_ERROR("Could not add HG completion entry to completion queue");
            goto done;
        }
    }

done:
    return ret;
}
Example #3
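This NA transfer callback counts completed transfers with hg_atomic_incr32() and, once the count reaches op_count, completes the whole bulk operation.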
/*---------------------------------------------------------------------------*/
static int
hg_bulk_transfer_cb(const struct na_cb_info *callback_info)
{
    struct hg_bulk_op_id *hg_bulk_op_id =
        (struct hg_bulk_op_id *) callback_info->arg;
    na_return_t na_ret = NA_SUCCESS;
    int ret = 0;

    if (callback_info->ret == NA_CANCELED) {
        /* If canceled, mark handle as canceled */
        hg_atomic_cas32(&hg_bulk_op_id->canceled, 0, 1);
    } else if (callback_info->ret != NA_SUCCESS) {
        HG_LOG_ERROR("Error in NA callback: %s",
            NA_Error_to_string(callback_info->ret));
        na_ret = NA_PROTOCOL_ERROR;
        goto done;
    }

    /* When all NA transfers that correspond to the bulk operation have
     * completed, add the HG user callback to the completion queue
     */
    if ((unsigned int) hg_atomic_incr32(&hg_bulk_op_id->op_completed_count)
        == hg_bulk_op_id->op_count) {
        hg_bulk_complete(hg_bulk_op_id);
        ret++;
    }

done:
    (void) na_ret;
    return ret;
}
Example #4
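This perf-test forward callback completes the request once the atomically incremented completion count reaches the expected operation count.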
static hg_return_t
hg_test_perf_forward_cb(const struct hg_cb_info *callback_info)
{
    struct hg_test_perf_args *args =
        (struct hg_test_perf_args *) callback_info->arg;

    if ((unsigned int) hg_atomic_incr32(&args->op_completed_count)
        == args->op_count) {
        hg_request_complete(args->request);
    }

    return HG_SUCCESS;
}
Example #5
File: test_atomic.c Project: carns/mercury
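This test callback pairs hg_atomic_incr32() with hg_atomic_decr32() and checks the values returned by each call.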
static HG_THREAD_RETURN_TYPE
thread_cb_incr32(void *arg)
{
    hg_thread_ret_t thread_ret = (hg_thread_ret_t) 0;
    hg_atomic_int32_t *atomic_int32 = (hg_atomic_int32_t *) arg;
    hg_util_int32_t incr;

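    /* Increment then decrement; both calls return the updated value */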
    incr = hg_atomic_incr32(atomic_int32);
    if (!incr)
        fprintf(stderr, "Error: incr is %d\n", incr);
    incr = hg_atomic_decr32(atomic_int32);
    if (incr)
        fprintf(stderr, "Error: incr is %d\n", incr);

    hg_thread_exit(thread_ret);
    return thread_ret;
}
Example #6
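This bulk transfer callback uses hg_atomic_incr32() to wait for the second completed transfer before calling bulk_write() and sending the response.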
/*---------------------------------------------------------------------------*/
static hg_return_t
hg_test_bulk_seg_transfer_cb(const struct hg_cb_info *hg_cb_info)
{
    struct hg_test_bulk_args *bulk_args = (struct hg_test_bulk_args *)
            hg_cb_info->arg;
    hg_bulk_t local_bulk_handle = hg_cb_info->info.bulk.local_handle;
    hg_return_t ret = HG_SUCCESS;

    bulk_write_out_t out_struct;

    void *buf;
    size_t write_ret;

    if (hg_atomic_incr32(&bulk_args->completed_transfers) != 2)
        goto done;

    /* Call bulk_write */
    HG_Bulk_access(local_bulk_handle, 0, bulk_args->nbytes, HG_BULK_READWRITE,
            1, &buf, NULL, NULL);

    write_ret = bulk_write(bulk_args->fildes, buf, 0,
        bulk_args->nbytes, 1);

    /* Fill output structure */
    out_struct.ret = write_ret;

    /* Free bulk handle */
    ret = HG_Bulk_free(local_bulk_handle);
    if (ret != HG_SUCCESS) {
        fprintf(stderr, "Could not free HG bulk handle\n");
        return ret;
    }

    /* Send response back */
    ret = HG_Respond(bulk_args->handle, NULL, NULL, &out_struct);
    if (ret != HG_SUCCESS) {
        fprintf(stderr, "Could not respond\n");
        return ret;
    }

    HG_Destroy(bulk_args->handle);
    free(bulk_args);

done:
    return ret;
}
Example #7
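This finalize RPC callback atomically bumps the global finalizing counter before responding and destroying the handle.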
/*---------------------------------------------------------------------------*/
static hg_return_t
hg_test_finalize2_cb(hg_handle_t handle)
{
    hg_return_t ret = HG_SUCCESS;

    hg_atomic_incr32(&hg_test_finalizing_count_g);

    /* Send response back, then destroy the handle */
    ret = HG_Respond(handle, NULL, NULL, NULL);
    if (ret != HG_SUCCESS) {
        fprintf(stderr, "Could not respond\n");
        return ret;
    }

    HG_Destroy(handle);

    return ret;
}
Example #8
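HG_Bulk_ref_incr() is the public reference-count increment for bulk handles; hg_atomic_incr32() keeps the count thread-safe.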
/*---------------------------------------------------------------------------*/
hg_return_t
HG_Bulk_ref_incr(hg_bulk_t handle)
{
    struct hg_bulk *hg_bulk = (struct hg_bulk *) handle;
    hg_return_t ret = HG_SUCCESS;

    if (!hg_bulk) {
        HG_LOG_ERROR("NULL memory handle passed");
        ret = HG_INVALID_PARAM;
        goto done;
    }

    /* Increment ref count */
    hg_atomic_incr32(&hg_bulk->ref_count);

done:
    return ret;
}
Example #9
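hg_bulk_transfer() is the core bulk transfer path; hg_atomic_incr32() takes a reference on both the origin and local bulk descriptors while the operation ID is being set up.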
/*---------------------------------------------------------------------------*/
static hg_return_t
hg_bulk_transfer(hg_context_t *context, hg_cb_t callback, void *arg,
    hg_bulk_op_t op, struct hg_addr *origin_addr,
    struct hg_bulk *hg_bulk_origin, hg_size_t origin_offset,
    struct hg_bulk *hg_bulk_local, hg_size_t local_offset, hg_size_t size,
    hg_op_id_t *op_id)
{
    hg_uint32_t origin_segment_start_index = 0, local_segment_start_index = 0;
    hg_size_t origin_segment_start_offset = origin_offset,
        local_segment_start_offset = local_offset;
    struct hg_bulk_op_id *hg_bulk_op_id = NULL;
    na_bulk_op_t na_bulk_op;
    na_addr_t na_origin_addr = HG_Core_addr_get_na(origin_addr);
    na_class_t *na_class = HG_Core_class_get_na(hg_bulk_origin->hg_class);
    hg_bool_t is_self = NA_Addr_is_self(na_class, na_origin_addr);
    hg_bool_t scatter_gather =
        (na_class->mem_handle_create_segments && !is_self) ? HG_TRUE : HG_FALSE;
    hg_return_t ret = HG_SUCCESS;
    unsigned int i;

    /* Map op to NA op */
    switch (op) {
        case HG_BULK_PUSH:
            na_bulk_op = (is_self) ? hg_bulk_memcpy_put : hg_bulk_na_put;
            break;
        case HG_BULK_PULL:
            /* Eager mode can only be used when data is pulled from origin */
            na_bulk_op = (is_self || hg_bulk_origin->eager_mode) ?
                hg_bulk_memcpy_get : hg_bulk_na_get;
            break;
        default:
            HG_LOG_ERROR("Unknown bulk operation");
            ret = HG_INVALID_PARAM;
            goto done;
    }

    /* Allocate op_id */
    hg_bulk_op_id = (struct hg_bulk_op_id *) malloc(
        sizeof(struct hg_bulk_op_id));
    if (!hg_bulk_op_id) {
        HG_LOG_ERROR("Could not allocate HG Bulk operation ID");
        ret = HG_NOMEM_ERROR;
        goto done;
    }
    hg_bulk_op_id->hg_class = hg_bulk_origin->hg_class;
    hg_bulk_op_id->context = context;
    hg_bulk_op_id->callback = callback;
    hg_bulk_op_id->arg = arg;
    hg_atomic_set32(&hg_bulk_op_id->completed, 0);
    hg_atomic_set32(&hg_bulk_op_id->canceled, 0);
    hg_bulk_op_id->op_count = 1; /* Default */
    hg_atomic_set32(&hg_bulk_op_id->op_completed_count, 0);
    hg_bulk_op_id->op = op;
    hg_bulk_op_id->hg_bulk_origin = hg_bulk_origin;
    hg_atomic_incr32(&hg_bulk_origin->ref_count); /* Increment ref count */
    hg_bulk_op_id->hg_bulk_local = hg_bulk_local;
    hg_atomic_incr32(&hg_bulk_local->ref_count); /* Increment ref count */
    hg_bulk_op_id->na_op_ids = NULL;
    hg_bulk_op_id->is_self = is_self;

    /* Translate bulk_offset */
    if (origin_offset && !scatter_gather)
        hg_bulk_offset_translate(hg_bulk_origin, origin_offset,
            &origin_segment_start_index, &origin_segment_start_offset);

    /* Translate block offset */
    if (local_offset && !scatter_gather)
        hg_bulk_offset_translate(hg_bulk_local, local_offset,
            &local_segment_start_index, &local_segment_start_offset);

    /* Figure out number of NA operations required */
    if (!scatter_gather) {
        hg_bulk_transfer_pieces(NULL, NA_ADDR_NULL, hg_bulk_origin,
            origin_segment_start_index, origin_segment_start_offset,
            hg_bulk_local, local_segment_start_index,
            local_segment_start_offset, size, HG_FALSE, NULL,
            &hg_bulk_op_id->op_count);
        if (!hg_bulk_op_id->op_count) {
            HG_LOG_ERROR("Could not get bulk op_count");
            ret = HG_INVALID_PARAM;
            goto done;
        }
    }

    /* Allocate memory for NA operation IDs */
    hg_bulk_op_id->na_op_ids =
        malloc(sizeof(na_op_id_t) * hg_bulk_op_id->op_count);
    if (!hg_bulk_op_id->na_op_ids) {
        HG_LOG_ERROR("Could not allocate memory for op_ids");
        ret = HG_NOMEM_ERROR;
        goto done;
    }
    for (i = 0; i < hg_bulk_op_id->op_count; i++)
        hg_bulk_op_id->na_op_ids[i] = NA_OP_ID_NULL;

    /* Assign op_id */
    if (op_id && op_id != HG_OP_ID_IGNORE) *op_id = (hg_op_id_t) hg_bulk_op_id;

    /* Do actual transfer */
    ret = hg_bulk_transfer_pieces(na_bulk_op, na_origin_addr, hg_bulk_origin,
        origin_segment_start_index, origin_segment_start_offset, hg_bulk_local,
        local_segment_start_index, local_segment_start_offset, size,
        scatter_gather, hg_bulk_op_id, NULL);
    if (ret != HG_SUCCESS) {
        HG_LOG_ERROR("Could not transfer data pieces");
        goto done;
    }

done:
    if (ret != HG_SUCCESS && hg_bulk_op_id) {
        free(hg_bulk_op_id->na_op_ids);
        free(hg_bulk_op_id);
    }
    return ret;
}
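To close the listing, here is a minimal driver sketch, not taken from the mercury sources, showing how the thread callbacks above are typically launched. It reuses thread_cb_incr32 from Example #5 and assumes the hg_thread_create()/hg_thread_join() helpers from mercury_thread.h and the hg_atomic_set32()/hg_atomic_get32() calls from mercury_atomic.h; the thread count of 4 is an arbitrary choice for illustration.

#include <stdio.h>

#include "mercury_atomic.h"
#include "mercury_thread.h"

#define NUM_THREADS 4 /* arbitrary thread count for this sketch */

int
main(void)
{
    hg_thread_t threads[NUM_THREADS];
    hg_atomic_int32_t atomic_int32;
    int i;

    /* Shared counter starts at 0 */
    hg_atomic_set32(&atomic_int32, 0);

    /* Run the incr/decr callback from Example #5 on every thread */
    for (i = 0; i < NUM_THREADS; i++)
        hg_thread_create(&threads[i], thread_cb_incr32, &atomic_int32);
    for (i = 0; i < NUM_THREADS; i++)
        hg_thread_join(threads[i]);

    /* Every increment was matched by a decrement, so this prints 0 */
    printf("Final value: %d\n", (int) hg_atomic_get32(&atomic_int32));

    return 0;
}

Note that the callbacks end with hg_thread_exit() followed by a return statement; the exit call terminates the thread, while the trailing return only satisfies the HG_THREAD_RETURN_TYPE signature.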