/*
 * Common initialization for a yalla PML request: mark the embedded OMPI
 * request as PML-type with no cancel/completion callbacks installed and
 * no convertor attached yet.
 */
static void init_base_req(mca_pml_yalla_base_request_t *req)
{
    /* Base (non-persistent) request initialization. */
    OMPI_REQUEST_INIT(&req->ompi, false);

    req->ompi.req_type             = OMPI_REQUEST_PML;
    req->ompi.req_cancel           = NULL;
    req->ompi.req_complete_cb      = NULL;
    req->ompi.req_complete_cb_data = NULL;
    req->convertor                 = NULL;
}
/*
 * OBJ constructor for ompi_request_t: put a freshly allocated request
 * into a known blank state before first use.
 */
static void ompi_request_construct(ompi_request_t *req)
{
    /* Base (non-persistent) request initialization. */
    OMPI_REQUEST_INIT(req, false);

    req->req_free            = NULL;
    req->req_cancel          = NULL;
    req->req_complete_cb     = NULL;
    req->req_f_to_c_index    = MPI_UNDEFINED;  /* no Fortran handle assigned yet */
    req->req_mpi_object.comm = (struct ompi_communicator_t *) NULL;
}
/*
 * Launch a non-blocking ML barrier: obtain a payload buffer and a
 * collective-progress descriptor, initialize the embedded ompi_request,
 * and hand the operation to the generic collectives launcher.
 *
 * @param ml_module  the coll/ml module owning the buffers and descriptors
 * @param req        [out] receives a pointer to the active request handle
 * @return           return code of mca_coll_ml_generic_collectives_launcher()
 */
static int mca_coll_ml_barrier_launch(mca_coll_ml_module_t *ml_module,
                                      ompi_request_t **req)
{
    int rc;
    ompi_free_list_item_t *item;
    mca_coll_ml_collective_operation_progress_t *coll_op;
    ml_payload_buffer_desc_t *src_buffer_desc = NULL;

    /* allocate an ml buffer for signaling purposes -- spin on
     * opal_progress() until one becomes available (blocking) */
    src_buffer_desc = mca_coll_ml_alloc_buffer(ml_module);
    while (NULL == src_buffer_desc) {
        opal_progress();
        src_buffer_desc = mca_coll_ml_alloc_buffer(ml_module);
    }

    /* Blocking call on fragment allocation (Maybe we want to make it non blocking ?) */
    OMPI_FREE_LIST_WAIT(&(ml_module->coll_ml_collective_descriptors), item, rc);
    /* NOTE(review): rc from OMPI_FREE_LIST_WAIT is never checked and the
     * assert below only guards debug builds -- confirm WAIT cannot fail. */
    coll_op = (mca_coll_ml_collective_operation_progress_t *) item;
    assert(NULL != coll_op);

    ML_VERBOSE(10, ("Get coll request %p", coll_op));

    /* Barrier carries no user data: zero counts, NULL buffers. */
    MCA_COLL_ML_OP_BASIC_SETUP(coll_op, 0, 0, NULL, NULL,
                               ml_module->coll_ml_barrier_function);

    coll_op->fragment_data.buffer_desc = src_buffer_desc;
    coll_op->dag_description.num_tasks_completed = 0;
    coll_op->variable_fn_params.buffer_index = src_buffer_desc->buffer_index;
    /* Atomically fetch a sequence number to order concurrent collectives. */
    coll_op->variable_fn_params.sequence_num =
        OPAL_THREAD_ADD64(&(ml_module->collective_sequence_num), 1);

    /* Pointer to a coll finalize function */
    coll_op->process_fn = NULL;

    /* Expose the embedded request to the caller and mark it active. */
    (*req) = &coll_op->full_message.super;
    OMPI_REQUEST_INIT((*req), false);
    (*req)->req_status._cancelled = 0;
    (*req)->req_state = OMPI_REQUEST_ACTIVE;
    (*req)->req_status.MPI_ERROR = OMPI_SUCCESS;

    /* Set order info if there is a bcol needs ordering */
    MCA_COLL_ML_SET_ORDER_INFO(coll_op, 1);

    return mca_coll_ml_generic_collectives_launcher(coll_op,
                                                    mca_coll_ml_barrier_task_setup);
}
/*
 * Grab a request object from the hcoll component free list (MT-safe
 * wait) and initialize it for use as an opaque collective handle.
 * Returns NULL if the free-list wait fails.
 */
static void *get_coll_handle(void)
{
    ompi_free_list_item_t *list_item;
    ompi_request_t *req;

    OMPI_FREE_LIST_WAIT_MT(&(mca_coll_hcoll_component.requests), list_item);
    if (OPAL_UNLIKELY(NULL == list_item)) {
        HCOL_ERROR("Wait for free list failed.\n");
        return NULL;
    }

    req = (ompi_request_t *) list_item;
    OMPI_REQUEST_INIT(req, false);
    return (void *) req;
}
static void* get_coll_handle(void) { ompi_request_t *ompi_req; opal_free_list_item_t *item; item = opal_free_list_wait (&(mca_coll_hcoll_component.requests)); if (OPAL_UNLIKELY(NULL == item)) { HCOL_ERROR("Wait for free list failed.\n"); return NULL; } ompi_req = (ompi_request_t *)item; OMPI_REQUEST_INIT(ompi_req,false); ompi_req->req_complete_cb = NULL; ompi_req->req_status.MPI_ERROR = MPI_SUCCESS; ompi_req->req_free = request_free; return (void *)ompi_req; }
/*
 * Return a module-specific IO MPI_Request.
 *
 * Allocates an mca_io_base_request_t for 'file', preferring the
 * per-file cache of recycled requests and falling back to the global
 * free list.  On the free-list path the module's per-request hooks are
 * wired up and its once-per-request init function (if any) is invoked.
 * The request's OMPI status fields are (re)initialized before returning.
 *
 * @param file  the MPI file object the request belongs to
 * @param req   [out] receives the allocated request
 * @return      OMPI_SUCCESS, OMPI_ERR_NOT_IMPLEMENTED for an unknown
 *              module version, or the module init function's error code
 */
int mca_io_base_request_alloc(ompi_file_t *file,
                              mca_io_base_request_t **req)
{
    int err;
    mca_io_base_module_request_once_init_fn_t func;
    ompi_free_list_item_t *item;

    /* See if we've got a request on the module's freelist (which is
       cached on the file, since there's only one module per MPI_File).
       Use a quick-but-not-entirely-accurate (but good enough) check as
       a slight optimization to potentially having to avoid locking and
       unlocking. */
    if (opal_list_get_size(&file->f_io_requests) > 0) {
        OPAL_THREAD_LOCK(&file->f_io_requests_lock);
        /* Re-check under the lock: another thread may have drained the
           list between the unlocked peek above and acquiring the lock. */
        if (opal_list_get_size(&file->f_io_requests) > 0) {
            *req = (mca_io_base_request_t*)
                opal_list_remove_first(&file->f_io_requests);
            (*req)->free_called = false;
        } else {
            *req = NULL;
        }
        OPAL_THREAD_UNLOCK(&file->f_io_requests_lock);
    } else {
        *req = NULL;
    }

    /* Nope, we didn't have one on the file freelist, so let's get one
       off the global freelist */
    if (NULL == *req) {
        OMPI_FREE_LIST_GET(&mca_io_base_requests, item, err);
        /* NOTE(review): 'err' from OMPI_FREE_LIST_GET is never checked
           and 'item' is used unconditionally below -- confirm GET cannot
           fail here (or that failure is handled inside the macro). */
        *req = (mca_io_base_request_t*) item;

        /* Call the per-use init function, if it exists */
        switch (file->f_io_version) {
        case MCA_IO_BASE_V_2_0_0:
            /* These can be set once for this request since this request
               will always be used with the same module (and therefore,
               the same MPI_File).  Note that (*req)->req_ompi.rq_type
               is already set by the constructor. */
            (*req)->req_file = file;
            (*req)->req_ver = file->f_io_version;
            (*req)->free_called = false;
            (*req)->super.req_free =
                file->f_io_selected_module.v2_0_0.io_module_request_free;
            (*req)->super.req_cancel =
                file->f_io_selected_module.v2_0_0.io_module_request_cancel;

            /* Call the module's once-per process init, if it exists */
            func = file->f_io_selected_module.v2_0_0.io_module_request_once_init;
            if (NULL != func) {
                if (OMPI_SUCCESS !=
                    (err = func(&file->f_io_selected_module, *req))) {
                    /* Module init failed: give the item back to the
                       global free list and propagate the error. */
                    OMPI_FREE_LIST_RETURN(&mca_io_base_requests, item);
                    return err;
                }
            }
            break;

        default:
            /* Unknown module interface version: return the item and bail. */
            OMPI_FREE_LIST_RETURN(&mca_io_base_requests, item);
            return OMPI_ERR_NOT_IMPLEMENTED;
            break;
        }
    }

    /* Initialize the request */
    OMPI_REQUEST_INIT(&((*req)->super), false);
    (*req)->super.req_mpi_object.file = file;

    /*
     * Copied from ompi/mca/pml/base/pml_base_recvreq.h:
     * always set the req_status.MPI_TAG to ANY_TAG before starting the
     * request. This field is used if cancelled to find out if the request
     * has been matched or not.
     */
    (*req)->super.req_status.MPI_TAG = MPI_ANY_TAG;
    (*req)->super.req_status.MPI_ERROR = OMPI_SUCCESS;
    (*req)->super.req_status._count = 0;
    (*req)->super.req_status._cancelled = 0;

    /* All done */
    return OMPI_SUCCESS;
}