int mca_pml_cm_enable(bool enable) { /* BWB - FIX ME - need to have this actually do something, maybe? */ ompi_free_list_init_new(&mca_pml_base_send_requests, sizeof(mca_pml_cm_hvy_send_request_t) + ompi_mtl->mtl_request_size, opal_cache_line_size, OBJ_CLASS(mca_pml_cm_hvy_send_request_t), 0,opal_cache_line_size, ompi_pml_cm.free_list_num, ompi_pml_cm.free_list_max, ompi_pml_cm.free_list_inc, NULL); ompi_free_list_init_new(&mca_pml_base_recv_requests, sizeof(mca_pml_cm_hvy_recv_request_t) + ompi_mtl->mtl_request_size, opal_cache_line_size, OBJ_CLASS(mca_pml_cm_hvy_recv_request_t), 0,opal_cache_line_size, ompi_pml_cm.free_list_num, ompi_pml_cm.free_list_max, ompi_pml_cm.free_list_inc, NULL); return OMPI_SUCCESS; }
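Calls like the one above hand ten positional arguments to ompi_free_list_init_new() with nothing at the call site to indicate their roles. The minimal sketch below restates the send-request call with per-argument labels; the labels are assumptions inferred from the call sites in this section and from the fl_* fields set in ompi_free_list_construct() further down, not text quoted from the header.

/* Annotated sketch (assumed argument roles) of the send-request list setup above. */
static int init_cm_send_request_list_sketch(void)
{
    return ompi_free_list_init_new(
        &mca_pml_base_send_requests,
        sizeof(mca_pml_cm_hvy_send_request_t) + ompi_mtl->mtl_request_size, /* size of each list item */
        opal_cache_line_size,                     /* item alignment */
        OBJ_CLASS(mca_pml_cm_hvy_send_request_t), /* class used to construct each item */
        0,                                        /* extra payload buffer bytes per item */
        opal_cache_line_size,                     /* payload buffer alignment */
        ompi_pml_cm.free_list_num,                /* items allocated up front */
        ompi_pml_cm.free_list_max,                /* upper bound on items (-1 = unlimited) */
        ompi_pml_cm.free_list_inc,                /* items added per additional allocation */
        NULL);                                    /* mpool used for payload registration, if any */
}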
int mca_spml_ikrit_enable(bool enable) { SPML_VERBOSE(50, "*** ikrit ENABLED ****"); if (false == enable) { return OSHMEM_SUCCESS; } opal_free_list_init (&mca_spml_base_put_requests, sizeof(mca_spml_ikrit_put_request_t), opal_cache_line_size, OBJ_CLASS(opal_free_list_item_t), 0, opal_cache_line_size, mca_spml_ikrit.free_list_num, mca_spml_ikrit.free_list_max, mca_spml_ikrit.free_list_inc, NULL, 0, NULL, NULL, NULL); opal_free_list_init (&mca_spml_base_get_requests, sizeof(mca_spml_ikrit_get_request_t), opal_cache_line_size, OBJ_CLASS(opal_free_list_item_t), 0, opal_cache_line_size, mca_spml_ikrit.free_list_num, mca_spml_ikrit.free_list_max, mca_spml_ikrit.free_list_inc, NULL, 0, NULL, NULL, NULL); mca_spml_ikrit.enabled = true; return OSHMEM_SUCCESS; }
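The newer opal_free_list_init() used by ikrit takes fourteen positional arguments. Here is the same put-request call restated with labels; the labels are assumptions based on the per-argument comments in the Java initFreeList() snippet later in this section and on the fields assigned in opal_free_list_construct(), so treat them as a reading aid rather than the header's parameter names.

/* Annotated sketch (assumed argument roles) of the put-request list setup above. */
static int init_ikrit_put_request_list_sketch(void)
{
    return opal_free_list_init(
        &mca_spml_base_put_requests,
        sizeof(mca_spml_ikrit_put_request_t), /* size of each list item */
        opal_cache_line_size,                 /* item alignment */
        OBJ_CLASS(opal_free_list_item_t),     /* class used to construct each item */
        0,                                    /* payload buffer size */
        opal_cache_line_size,                 /* payload buffer alignment */
        mca_spml_ikrit.free_list_num,         /* items allocated up front */
        mca_spml_ikrit.free_list_max,         /* upper bound on items (-1 = unlimited) */
        mca_spml_ikrit.free_list_inc,         /* items added per additional allocation */
        NULL,                                 /* mpool */
        0,                                    /* mpool registration flags */
        NULL,                                 /* unused */
        NULL,                                 /* per-item init callback */
        NULL);                                /* context passed to that callback */
}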
int mca_pml_dr_enable(bool enable) { if( false == enable ) return OMPI_SUCCESS; /* requests */ ompi_free_list_init_new( &mca_pml_base_send_requests, sizeof(mca_pml_dr_send_request_t), opal_cache_line_size, OBJ_CLASS(mca_pml_dr_send_request_t), 0,opal_cache_line_size, mca_pml_dr.free_list_num, mca_pml_dr.free_list_max, mca_pml_dr.free_list_inc, NULL ); ompi_free_list_init_new( &mca_pml_base_recv_requests, sizeof(mca_pml_dr_recv_request_t), opal_cache_line_size, OBJ_CLASS(mca_pml_dr_recv_request_t), 0,opal_cache_line_size, mca_pml_dr.free_list_num, mca_pml_dr.free_list_max, mca_pml_dr.free_list_inc, NULL ); /* fragments */ OBJ_CONSTRUCT(&mca_pml_dr.recv_frags, ompi_free_list_t); ompi_free_list_init_new( &mca_pml_dr.recv_frags, sizeof(mca_pml_dr_recv_frag_t), opal_cache_line_size, OBJ_CLASS(mca_pml_dr_recv_frag_t), 0,opal_cache_line_size, mca_pml_dr.free_list_num, mca_pml_dr.free_list_max, mca_pml_dr.free_list_inc, NULL ); OBJ_CONSTRUCT(&mca_pml_dr.vfrags, ompi_free_list_t); ompi_free_list_init_new( &mca_pml_dr.vfrags, sizeof(mca_pml_dr_vfrag_t), opal_cache_line_size, OBJ_CLASS(mca_pml_dr_vfrag_t), 0,opal_cache_line_size, mca_pml_dr.free_list_num, mca_pml_dr.free_list_max, mca_pml_dr.free_list_inc, NULL ); OBJ_CONSTRUCT(&mca_pml_dr.send_pending, opal_list_t); OBJ_CONSTRUCT(&mca_pml_dr.send_active, opal_list_t); OBJ_CONSTRUCT(&mca_pml_dr.acks_pending, opal_list_t); OBJ_CONSTRUCT(&mca_pml_dr.buffers, ompi_free_list_t); OBJ_CONSTRUCT(&mca_pml_dr.endpoints, opal_pointer_array_t); OBJ_CONSTRUCT(&mca_pml_dr.lock, opal_mutex_t); mca_pml_dr.enabled = true; return OMPI_SUCCESS; }
int mca_spml_yoda_enable(bool enable) { SPML_VERBOSE(50, "*** yoda ENABLED ****"); if (false == enable) { return OSHMEM_SUCCESS; } OBJ_CONSTRUCT(&mca_spml_yoda.lock, opal_mutex_t); /** * If we get here, this is the SPML that was selected for the run. We * should take ownership of the put and get request lists and * initialize them with the size of our own requests. */ opal_free_list_init (&mca_spml_base_put_requests, sizeof(mca_spml_yoda_put_request_t), opal_cache_line_size, OBJ_CLASS(mca_spml_yoda_put_request_t), 0, opal_cache_line_size, mca_spml_yoda.free_list_num, mca_spml_yoda.free_list_max, mca_spml_yoda.free_list_inc, NULL, 0, NULL, NULL, NULL); opal_free_list_init (&mca_spml_base_get_requests, sizeof(mca_spml_yoda_get_request_t), opal_cache_line_size, OBJ_CLASS(mca_spml_yoda_get_request_t), 0, opal_cache_line_size, mca_spml_yoda.free_list_num, mca_spml_yoda.free_list_max, mca_spml_yoda.free_list_inc, NULL, 0, NULL, NULL, NULL); mca_spml_yoda.enabled = true; /* The following line resolves an issue between the tcp BTL and SPML yoda. In that case the * atomic_basic_lock(root_rank) function may behave like a DoS attack on root_rank, since * all the processes will do shmem_int_get from root_rank. These calls go through * bml active messaging and trigger replies in libevent on the root rank. If the flag * OPAL_EVLOOP_ONCE is not set, libevent will keep progressing the constantly * incoming events, causing root_rank to get stuck in the libevent loop. */ opal_progress_set_event_flag(OPAL_EVLOOP_NONBLOCK | OPAL_EVLOOP_ONCE); #if OSHMEM_WAIT_COMPLETION_DEBUG == 1 condition_dbg_init(); #endif return OSHMEM_SUCCESS; }
static int mca_btl_scif_setup_mpools (mca_btl_scif_module_t *scif_module) { struct mca_mpool_base_resources_t mpool_resources; int rc; /* initialize the grdma mpool */ mpool_resources.pool_name = "scif"; mpool_resources.reg_data = (void *) scif_module; mpool_resources.sizeof_reg = sizeof (mca_btl_scif_reg_t); mpool_resources.register_mem = scif_reg_mem; mpool_resources.deregister_mem = scif_dereg_mem; scif_module->super.btl_mpool = mca_mpool_base_module_create("grdma", scif_module, &mpool_resources); if (NULL == scif_module->super.btl_mpool) { BTL_ERROR(("error creating grdma mpool")); return OMPI_ERROR; } /* setup free lists for fragments. dma fragments will be used for * rma operations and in-place sends. eager frags will be used for * buffered sends. */ rc = ompi_free_list_init_new (&scif_module->dma_frags, sizeof (mca_btl_scif_dma_frag_t), 64, OBJ_CLASS(mca_btl_scif_dma_frag_t), 128, getpagesize (), mca_btl_scif_component.scif_free_list_num, mca_btl_scif_component.scif_free_list_max, mca_btl_scif_component.scif_free_list_inc, NULL); if (OPAL_UNLIKELY(OMPI_SUCCESS != rc)) { return rc; } rc = ompi_free_list_init_new (&scif_module->eager_frags, sizeof (mca_btl_scif_eager_frag_t), 8, OBJ_CLASS(mca_btl_scif_eager_frag_t), 128 + scif_module->super.btl_eager_limit, 64, mca_btl_scif_component.scif_free_list_num, mca_btl_scif_component.scif_free_list_max, mca_btl_scif_component.scif_free_list_inc, NULL); if (OPAL_UNLIKELY(OMPI_SUCCESS != rc)) { BTL_ERROR(("error creating eager receive fragment free list")); return rc; } return OMPI_SUCCESS; }
/* * Initializes the mpool module. */ void mca_mpool_grdma_module_init(mca_mpool_grdma_module_t* mpool, mca_mpool_grdma_pool_t *pool) { OBJ_RETAIN(pool); mpool->pool = pool; mpool->super.mpool_component = &mca_mpool_grdma_component.super; mpool->super.mpool_base = NULL; /* no base .. */ mpool->super.mpool_alloc = mca_mpool_grdma_alloc; mpool->super.mpool_realloc = mca_mpool_grdma_realloc; mpool->super.mpool_free = mca_mpool_grdma_free; mpool->super.mpool_register = mca_mpool_grdma_register; mpool->super.mpool_find = mca_mpool_grdma_find; mpool->super.mpool_deregister = mca_mpool_grdma_deregister; mpool->super.mpool_release_memory = mca_mpool_grdma_release_memory; mpool->super.mpool_finalize = mca_mpool_grdma_finalize; mpool->super.mpool_ft_event = mca_mpool_grdma_ft_event; mpool->super.flags = MCA_MPOOL_FLAGS_MPI_ALLOC_MEM; mpool->super.rcache = pool->rcache; mpool->stat_cache_hit = mpool->stat_cache_miss = mpool->stat_evicted = 0; mpool->stat_cache_found = mpool->stat_cache_notfound = 0; OBJ_CONSTRUCT(&mpool->reg_list, ompi_free_list_t); ompi_free_list_init_new(&mpool->reg_list, mpool->resources.sizeof_reg, opal_cache_line_size, OBJ_CLASS(mca_mpool_base_registration_t), 0, opal_cache_line_size, 0, -1, 32, NULL); }
/* * Initializes the mpool module. */ void mca_mpool_rgpusm_module_init(mca_mpool_rgpusm_module_t* mpool) { mpool->super.mpool_component = &mca_mpool_rgpusm_component.super; mpool->super.mpool_base = NULL; /* no base .. */ mpool->super.mpool_alloc = NULL; mpool->super.mpool_realloc = NULL; mpool->super.mpool_free = mca_mpool_rgpusm_free; mpool->super.mpool_register = mca_mpool_rgpusm_register; mpool->super.mpool_find = mca_mpool_rgpusm_find; mpool->super.mpool_deregister = mca_mpool_rgpusm_deregister; mpool->super.mpool_release_memory = NULL; mpool->super.mpool_finalize = mca_mpool_rgpusm_finalize; mpool->super.mpool_ft_event = mca_mpool_rgpusm_ft_event; mpool->super.rcache = mca_rcache_base_module_create(mca_mpool_rgpusm_component.rcache_name); mpool->super.flags = 0; mpool->resources.reg_data = NULL; mpool->resources.sizeof_reg = sizeof(struct mca_mpool_common_cuda_reg_t); mpool->resources.register_mem = cuda_openmemhandle; mpool->resources.deregister_mem = cuda_closememhandle; OBJ_CONSTRUCT(&mpool->reg_list, ompi_free_list_t); ompi_free_list_init_new(&mpool->reg_list, mpool->resources.sizeof_reg, opal_cache_line_size, OBJ_CLASS(mca_mpool_base_registration_t), 0,opal_cache_line_size, 0, -1, 32, NULL); OBJ_CONSTRUCT(&mpool->lru_list, opal_list_t); mpool->stat_cache_hit = mpool->stat_cache_miss = mpool->stat_evicted = 0; mpool->stat_cache_found = mpool->stat_cache_notfound = 0; mpool->stat_cache_valid = mpool->stat_cache_invalid = 0; }
int32_t opal_datatype_init( void ) { const opal_datatype_t* datatype; int32_t i; opal_arch_compute_local_id( &opal_local_arch ); /** * Force the initialization of the opal_datatype_t class. This will allow us to * call OBJ_DESTRUCT without going too deep into the initialization process. */ opal_class_initialize(OBJ_CLASS(opal_datatype_t)); for( i = OPAL_DATATYPE_FIRST_TYPE; i < OPAL_DATATYPE_MAX_PREDEFINED; i++ ) { datatype = opal_datatype_basicDatatypes[i]; /* None of the predefined OPAL types have any GAPS! */ datatype->desc.desc[0].elem.common.flags = OPAL_DATATYPE_FLAG_PREDEFINED | OPAL_DATATYPE_FLAG_DATA | OPAL_DATATYPE_FLAG_CONTIGUOUS | OPAL_DATATYPE_FLAG_NO_GAPS; datatype->desc.desc[0].elem.common.type = i; /* datatype->desc.desc[0].elem.blocklen XXX not set at the moment, it will be needed later */ datatype->desc.desc[0].elem.count = 1; datatype->desc.desc[0].elem.disp = 0; datatype->desc.desc[0].elem.extent = datatype->size; datatype->desc.desc[1].end_loop.common.flags = 0; datatype->desc.desc[1].end_loop.common.type = OPAL_DATATYPE_END_LOOP; datatype->desc.desc[1].end_loop.items = 1; datatype->desc.desc[1].end_loop.first_elem_disp = datatype->desc.desc[0].elem.disp; datatype->desc.desc[1].end_loop.size = datatype->size; } return OPAL_SUCCESS; }
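After this loop, every predefined type is described by one contiguous element followed by an end-loop marker. The helper below is a hypothetical sanity check that only re-reads the fields assigned above; it illustrates the resulting layout and is not part of opal_datatype_init().

/* Hypothetical check of the two-entry description built by opal_datatype_init(). */
static int32_t check_predefined_desc_sketch(const opal_datatype_t *datatype)
{
    if (1 != datatype->desc.desc[0].elem.count ||
        0 != datatype->desc.desc[0].elem.disp ||
        datatype->size != (size_t) datatype->desc.desc[0].elem.extent) {
        return OPAL_ERROR; /* element entry should cover one item of datatype->size bytes at disp 0 */
    }
    if (OPAL_DATATYPE_END_LOOP != datatype->desc.desc[1].end_loop.common.type ||
        datatype->size != datatype->desc.desc[1].end_loop.size) {
        return OPAL_ERROR; /* end-loop entry should close over the same size */
    }
    return OPAL_SUCCESS;
}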
static int mca_bcol_iboffload_alloc_reg_qp_resource(int qp_index, mca_bcol_iboffload_device_t *device) { int length; mca_bcol_iboffload_component_t *cm = &mca_bcol_iboffload_component; ompi_free_list_t *frags_free = &device->frags_free[qp_index]; OBJ_CONSTRUCT(frags_free, ompi_free_list_t); length = cm->qp_infos[qp_index].size; IBOFFLOAD_VERBOSE(10, ("free list len %d\n", length)); if (OMPI_SUCCESS != ompi_free_list_init_ex_new(frags_free, sizeof(mca_bcol_iboffload_frag_t), MCA_IBOFFLOAD_CACHE_LINE_SIZE, OBJ_CLASS(mca_bcol_iboffload_frag_t), length, cm->buffer_alignment, cm->free_list_num, cm->free_list_max, cm->free_list_inc, device->mpool, mca_bcol_iboffload_frag_init, (void *) &cm->qp_infos[qp_index].qp_index)) { IBOFFLOAD_ERROR(("Failed to allocate frags_free")); return OMPI_ERROR; } return OMPI_SUCCESS; }
static int portals4_open(void) { int ret; mca_coll_portals4_component.ni_h = PTL_INVALID_HANDLE; mca_coll_portals4_component.uid = PTL_UID_ANY; mca_coll_portals4_component.pt_idx = -1; mca_coll_portals4_component.finish_pt_idx = -1; mca_coll_portals4_component.eq_h = PTL_INVALID_HANDLE; mca_coll_portals4_component.unex_me_h = PTL_INVALID_HANDLE; mca_coll_portals4_component.finish_me_h = PTL_INVALID_HANDLE; mca_coll_portals4_component.zero_md_h = PTL_INVALID_HANDLE; mca_coll_portals4_component.data_md_h = PTL_INVALID_HANDLE; OBJ_CONSTRUCT(&mca_coll_portals4_component.requests, opal_free_list_t); ret = opal_free_list_init(&mca_coll_portals4_component.requests, sizeof(ompi_coll_portals4_request_t), opal_cache_line_size, OBJ_CLASS(ompi_coll_portals4_request_t), 0, 0, 8, 0, 8, NULL, 0, NULL, NULL, NULL); if (OMPI_SUCCESS != ret) { opal_output_verbose(1, ompi_coll_base_framework.framework_output, "%s:%d: opal_free_list_init failed: %d\n", __FILE__, __LINE__, ret); return ret; } return OMPI_SUCCESS; }
static void initFreeList(void) { mca_base_var_register("oshmem", "shmem", "java", "eager", "Java buffers eager size", MCA_BASE_VAR_TYPE_INT, NULL, 0, 0, OPAL_INFO_LVL_5, MCA_BASE_VAR_SCOPE_READONLY, &oshmem_shmem_java_eager); OBJ_CONSTRUCT(&shmem_java_buffers, opal_free_list_t); int r = opal_free_list_init(&shmem_java_buffers, sizeof(shmem_java_buffer_t), opal_cache_line_size, OBJ_CLASS(shmem_java_buffer_t), 0, /* payload size */ 0, /* payload align */ 2, /* initial elements to alloc */ -1, /* max elements */ 2, /* num elements per alloc */ NULL, /* mpool */ 0, /* mpool reg flags */ NULL, /* unused0 */ NULL, /* item_init */ NULL /* item_init context */); if(r != OPAL_SUCCESS) { fprintf(stderr, "Unable to initialize shmem_java_buffers.\n"); exit(1); } }
/* * Initializes the mpool module. */ void mca_mpool_gpusm_module_init(mca_mpool_gpusm_module_t* mpool) { mpool->super.mpool_component = &mca_mpool_gpusm_component.super; mpool->super.mpool_base = NULL; mpool->super.mpool_alloc = NULL; mpool->super.mpool_realloc = NULL; mpool->super.mpool_free = NULL; mpool->super.mpool_register = mca_mpool_gpusm_register; mpool->super.mpool_find = mca_mpool_gpusm_find; mpool->super.mpool_deregister = mca_mpool_gpusm_deregister; mpool->super.mpool_release_memory = NULL; mpool->super.mpool_finalize = mca_mpool_gpusm_finalize; mpool->super.mpool_ft_event = mca_mpool_gpusm_ft_event; mpool->super.rcache = NULL; mpool->super.flags = 0; mpool->resources.reg_data = NULL; mpool->resources.sizeof_reg = sizeof(struct mca_mpool_common_cuda_reg_t); mpool->resources.register_mem = cuda_getmemhandle; mpool->resources.deregister_mem = cuda_ungetmemhandle; OBJ_CONSTRUCT(&mpool->reg_list, ompi_free_list_t); /* Start with 0 entries in the free list since CUDA may not have * been initialized when this free list is created and there are * some CUDA-specific activities that need to be done. */ ompi_free_list_init_new(&mpool->reg_list, mpool->resources.sizeof_reg, opal_cache_line_size, OBJ_CLASS(mca_mpool_gpusm_registration_t), 0,opal_cache_line_size, 0, -1, 64, NULL); }
/* * Initializes the mpool module. */ void mca_mpool_rdma_module_init(mca_mpool_rdma_module_t* mpool) { mpool->super.mpool_component = &mca_mpool_rdma_component.super; mpool->super.mpool_base = NULL; /* no base .. */ mpool->super.mpool_alloc = mca_mpool_rdma_alloc; mpool->super.mpool_realloc = mca_mpool_rdma_realloc; mpool->super.mpool_free = mca_mpool_rdma_free; mpool->super.mpool_register = mca_mpool_rdma_register; mpool->super.mpool_find = mca_mpool_rdma_find; mpool->super.mpool_deregister = mca_mpool_rdma_deregister; mpool->super.mpool_release_memory = mca_mpool_rdma_release_memory; mpool->super.mpool_finalize = mca_mpool_rdma_finalize; mpool->super.mpool_ft_event = mca_mpool_rdma_ft_event; mpool->super.rcache = mca_rcache_base_module_create(mca_mpool_rdma_component.rcache_name); mpool->super.flags = MCA_MPOOL_FLAGS_MPI_ALLOC_MEM; OBJ_CONSTRUCT(&mpool->reg_list, ompi_free_list_t); ompi_free_list_init_new(&mpool->reg_list, mpool->resources.sizeof_reg, opal_cache_line_size, OBJ_CLASS(mca_mpool_base_registration_t), 0,opal_cache_line_size, 0, -1, 32, NULL); OBJ_CONSTRUCT(&mpool->mru_list, opal_list_t); OBJ_CONSTRUCT(&mpool->gc_list, opal_list_t); mpool->stat_cache_hit = mpool->stat_cache_miss = mpool->stat_evicted = 0; mpool->stat_cache_found = mpool->stat_cache_notfound = 0; /* Set this here (vs in component.c) because ompi_mpi_leave_pinned* may have been set after MCA params were read (e.g., by the openib btl) */ mca_mpool_rdma_component.leave_pinned = (int) (1 == ompi_mpi_leave_pinned || ompi_mpi_leave_pinned_pipeline); }
/* * Initializes the mpool module. */ void mca_mpool_rdma_module_init(mca_mpool_rdma_module_t* mpool) { mpool->super.mpool_component = &mca_mpool_rdma_component.super; mpool->super.mpool_base = NULL; /* no base .. */ mpool->super.mpool_alloc = mca_mpool_rdma_alloc; mpool->super.mpool_realloc = mca_mpool_rdma_realloc; mpool->super.mpool_free = mca_mpool_rdma_free; mpool->super.mpool_register = mca_mpool_rdma_register; mpool->super.mpool_find = mca_mpool_rdma_find; mpool->super.mpool_deregister = mca_mpool_rdma_deregister; mpool->super.mpool_release_memory = mca_mpool_rdma_release_memory; if(mca_mpool_rdma_component.print_stats == true) mpool->super.mpool_finalize = mca_mpool_rdma_finalize; else mpool->super.mpool_finalize = NULL; mpool->super.rcache = mca_rcache_base_module_create(mca_mpool_rdma_component.rcache_name); mpool->super.flags = MCA_MPOOL_FLAGS_MPI_ALLOC_MEM; OBJ_CONSTRUCT(&mpool->reg_list, ompi_free_list_t); ompi_free_list_init(&mpool->reg_list, mpool->resources.sizeof_reg, OBJ_CLASS(mca_mpool_base_registration_t), 0, -1, 32, NULL); OBJ_CONSTRUCT(&mpool->mru_list, opal_list_t); mpool->stat_cache_hit = mpool->stat_cache_miss = mpool->stat_evicted = 0; mpool->stat_cache_found = mpool->stat_cache_notfound = 0; }
static int vader_btl_first_time_init(mca_btl_vader_t *vader_btl, int n) { mca_btl_vader_component_t *component = &mca_btl_vader_component; int rc; /* generate the endpoints */ component->endpoints = (struct mca_btl_base_endpoint_t *) calloc (n, sizeof (struct mca_btl_base_endpoint_t)); component->segment_offset = (n - 1) * MCA_BTL_VADER_FBOX_PEER_SIZE + 4096; /* initialize fragment descriptor free lists */ /* initialize free list for put/get/single copy/inline fragments */ rc = ompi_free_list_init_ex_new(&component->vader_frags_user, sizeof(mca_btl_vader_frag_t), opal_cache_line_size, OBJ_CLASS(mca_btl_vader_frag_t), 0, opal_cache_line_size, component->vader_free_list_num, component->vader_free_list_max, component->vader_free_list_inc, NULL, mca_btl_vader_frag_init, (void *) (sizeof(mca_btl_vader_hdr_t) + mca_btl_vader_component.max_inline_send)); if (OMPI_SUCCESS != rc) { return rc; } /* initialize free list for buffered send fragments */ rc = ompi_free_list_init_ex_new(&component->vader_frags_eager, sizeof (mca_btl_vader_frag_t), opal_cache_line_size, OBJ_CLASS(mca_btl_vader_frag_t), 0, opal_cache_line_size, component->vader_free_list_num, component->vader_free_list_max, component->vader_free_list_inc, NULL, mca_btl_vader_frag_init, (void *) (sizeof (mca_btl_vader_hdr_t) + mca_btl_vader.super.btl_eager_limit)); if (OMPI_SUCCESS != rc) { return rc; } /* set flag indicating btl has been inited */ vader_btl->btl_inited = true; return OMPI_SUCCESS; }
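The _ex_new variants used here (and in the iboffload snippet above) take a per-item initializer plus an opaque context; vader passes the header-plus-inline-send size cast to a pointer. The sketch below shows the general shape of such a callback; example_frag_t, its fields, and the assumed (item, ctx) signature are illustrative only and are not the real mca_btl_vader_frag_init.

/* Hypothetical per-item initializer in the style used above. The free list is
 * assumed to invoke it once for each newly constructed item, passing the ctx
 * that was handed to ompi_free_list_init_ex_new(). Assumes <stdint.h>. */
typedef struct example_frag_t {
    ompi_free_list_item_t super; /* free-list bookkeeping must come first */
    size_t reserve;              /* e.g. header + inline-send space */
} example_frag_t;

static void example_frag_init(ompi_free_list_item_t *item, void *ctx)
{
    example_frag_t *frag = (example_frag_t *) item;
    /* the callers above smuggle a byte count through the ctx pointer */
    frag->reserve = (size_t) (uintptr_t) ctx;
}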
int mca_spml_yoda_enable(bool enable) { SPML_VERBOSE(50, "*** yoda ENABLED ****"); if (false == enable) { return OSHMEM_SUCCESS; } OBJ_CONSTRUCT(&mca_spml_yoda.lock, opal_mutex_t); /** * If we get here, this is the SPML that was selected for the run. We * should take ownership of the put and get request lists and * initialize them with the size of our own requests. */ ompi_free_list_init_new(&mca_spml_base_put_requests, sizeof(mca_spml_yoda_put_request_t), opal_cache_line_size, OBJ_CLASS(mca_spml_yoda_put_request_t), 0, opal_cache_line_size, mca_spml_yoda.free_list_num, mca_spml_yoda.free_list_max, mca_spml_yoda.free_list_inc, NULL ); ompi_free_list_init_new(&mca_spml_base_get_requests, sizeof(mca_spml_yoda_get_request_t), opal_cache_line_size, OBJ_CLASS(mca_spml_yoda_get_request_t), 0, opal_cache_line_size, mca_spml_yoda.free_list_num, mca_spml_yoda.free_list_max, mca_spml_yoda.free_list_inc, NULL ); mca_spml_yoda.enabled = true; #if OSHMEM_WAIT_COMPLETION_DEBUG == 1 condition_dbg_init(); #endif return OSHMEM_SUCCESS; }
/** * the constructor function. creates the free list to get the nodes from * * @param object the tree that is to be used * * @retval NONE */ static void opal_rb_tree_construct(opal_object_t * object) { opal_rb_tree_t * tree = (opal_rb_tree_t *) object; tree->root_ptr = NULL; OBJ_CONSTRUCT(&(tree->free_list), opal_free_list_t); opal_free_list_init (&(tree->free_list), sizeof(opal_rb_tree_node_t), opal_cache_line_size, OBJ_CLASS(opal_rb_tree_node_t), 0,opal_cache_line_size, 0, -1 , 128, NULL, 0, NULL, NULL, NULL); }
static void initFreeList(void) { OBJ_CONSTRUCT(&ompi_java_buffers, opal_free_list_t); int r = opal_free_list_init(&ompi_java_buffers, sizeof(ompi_java_buffer_t), OBJ_CLASS(ompi_java_buffer_t), 2, -1, 2); if(r != OPAL_SUCCESS) { fprintf(stderr, "Unable to initialize ompi_java_buffers.\n"); exit(1); } }
/* * Setup the freelist of IO requests. This does not need to be * protected with a lock because it's called during MPI_INIT. */ int mca_io_base_request_create_freelist(void) { opal_list_item_t *p; const mca_base_component_t *component; const mca_io_base_component_2_0_0_t *v200; size_t size = 0; int i, init, incr; /* Find the maximum additional number of bytes required by all io components for requests and make that the request size */ for (p = opal_list_get_first(&mca_io_base_components_available); p != opal_list_get_end(&mca_io_base_components_available); p = opal_list_get_next(p)) { component = ((mca_base_component_priority_list_item_t *) p)->super.cli_component; /* Only know how to handle v2.0.0 components for now */ if (component->mca_type_major_version == 2 && component->mca_type_minor_version == 0 && component->mca_type_release_version == 0) { v200 = (mca_io_base_component_2_0_0_t *) component; if (v200->io_request_bytes > size) { size = v200->io_request_bytes; } } } /* Construct and initialize the freelist of IO requests. */ OBJ_CONSTRUCT(&mca_io_base_requests, ompi_free_list_t); mca_io_base_requests_valid = true; i = mca_base_param_find("io", NULL, "base_freelist_initial_size"); mca_base_param_lookup_int(i, &init); i = mca_base_param_find("io", NULL, "base_freelist_increment"); mca_base_param_lookup_int(i, &incr); ompi_free_list_init_new(&mca_io_base_requests, sizeof(mca_io_base_request_t) + size, CACHE_LINE_SIZE, OBJ_CLASS(mca_io_base_request_t), 0,CACHE_LINE_SIZE, init, -1, incr, NULL); /* All done */ return OMPI_SUCCESS; }
int ompi_osc_pt2pt_component_init(bool enable_progress_threads, bool enable_mpi_threads) { size_t aligned_size; OBJ_CONSTRUCT(&mca_osc_pt2pt_component.p2p_c_sendreqs, opal_free_list_t); opal_free_list_init(&mca_osc_pt2pt_component.p2p_c_sendreqs, sizeof(ompi_osc_pt2pt_sendreq_t), OBJ_CLASS(ompi_osc_pt2pt_sendreq_t), 1, -1, 1); OBJ_CONSTRUCT(&mca_osc_pt2pt_component.p2p_c_replyreqs, opal_free_list_t); opal_free_list_init(&mca_osc_pt2pt_component.p2p_c_replyreqs, sizeof(ompi_osc_pt2pt_replyreq_t), OBJ_CLASS(ompi_osc_pt2pt_replyreq_t), 1, -1, 1); OBJ_CONSTRUCT(&mca_osc_pt2pt_component.p2p_c_longreqs, opal_free_list_t); opal_free_list_init(&mca_osc_pt2pt_component.p2p_c_longreqs, sizeof(ompi_osc_pt2pt_longreq_t), OBJ_CLASS(ompi_osc_pt2pt_longreq_t), 1, -1, 1); /* adjust size to be multiple of ompi_ptr_t to avoid alignment issues*/ aligned_size = sizeof(ompi_osc_pt2pt_buffer_t) + (sizeof(ompi_osc_pt2pt_buffer_t) % sizeof(ompi_ptr_t)) + mca_osc_pt2pt_component.p2p_c_eager_size; OBJ_CONSTRUCT(&mca_osc_pt2pt_component.p2p_c_buffers, opal_free_list_t); opal_free_list_init(&mca_osc_pt2pt_component.p2p_c_buffers, aligned_size, OBJ_CLASS(ompi_osc_pt2pt_buffer_t), 1, -1, 1); return OMPI_SUCCESS; }
static void ompi_free_list_construct(ompi_free_list_t* fl) { OBJ_CONSTRUCT(&fl->fl_lock, opal_mutex_t); OBJ_CONSTRUCT(&fl->fl_condition, opal_condition_t); fl->fl_max_to_alloc = 0; fl->fl_num_allocated = 0; fl->fl_num_per_alloc = 0; fl->fl_num_waiting = 0; fl->fl_elem_size = sizeof(ompi_free_list_item_t); fl->fl_elem_class = OBJ_CLASS(ompi_free_list_item_t); fl->fl_header_space = 0; fl->fl_alignment = 0; fl->fl_mpool = 0; OBJ_CONSTRUCT(&(fl->fl_allocations), opal_list_t); }
/* * initialize the rb tree */ int mca_mpool_base_tree_init(void) { int rc; OBJ_CONSTRUCT(&mca_mpool_base_tree, opal_rb_tree_t); OBJ_CONSTRUCT(&mca_mpool_base_tree_item_free_list, opal_free_list_t); OBJ_CONSTRUCT(&tree_lock, opal_mutex_t); rc = opal_free_list_init (&mca_mpool_base_tree_item_free_list, sizeof(mca_mpool_base_tree_item_t), opal_cache_line_size, OBJ_CLASS(mca_mpool_base_tree_item_t), 0,opal_cache_line_size, 0, -1 , 4, NULL, 0, NULL, NULL, NULL); if(OPAL_SUCCESS == rc) { rc = opal_rb_tree_init(&mca_mpool_base_tree, mca_mpool_base_tree_node_compare); } return rc; }
/* * initialize the rb tree */ int mca_mpool_base_tree_init(void) { int rc; OBJ_CONSTRUCT(&mca_mpool_base_tree, ompi_rb_tree_t); OBJ_CONSTRUCT(&mca_mpool_base_tree_item_free_list, ompi_free_list_t); OBJ_CONSTRUCT(&tree_lock, opal_mutex_t); rc = ompi_free_list_init_new(&mca_mpool_base_tree_item_free_list, sizeof(mca_mpool_base_tree_item_t), CACHE_LINE_SIZE, OBJ_CLASS(mca_mpool_base_tree_item_t), 0,CACHE_LINE_SIZE, 0, -1 , 4, NULL); if(OMPI_SUCCESS == rc) { rc = ompi_rb_tree_init(&mca_mpool_base_tree, mca_mpool_base_tree_node_compare); } return rc; }
mca_pml_base_module_t* mca_pml_crcpw_component_init(int* priority, bool enable_progress_threads, bool enable_mpi_threads) { /* We use the PML_SELECT_WRAPPER_PRIORITY to indicate when this * component should wrap around what is already selected. * If it is not set to this sentinel value, then we are doing a * normal selection operation */ if(*priority == PML_SELECT_WRAPPER_PRIORITY ) { opal_output_verbose( 20, mca_pml_crcpw_component.output_handle, "pml:crcpw: component_init: Wrap the selected component %s", mca_pml_base_selected_component.pmlm_version.mca_component_name); mca_pml_crcpw_module.wrapped_pml_component = mca_pml_base_selected_component; mca_pml_crcpw_module.wrapped_pml_module = mca_pml; mca_pml_crcpw_component.pml_crcp_wrapped = true; opal_output_verbose( 20, mca_pml_crcpw_component.output_handle, "pml:crcpw: component_init: Initialize Wrapper"); OBJ_CONSTRUCT(&pml_state_list, ompi_free_list_t); ompi_free_list_init_new( &pml_state_list, sizeof(ompi_crcp_base_pml_state_t), opal_cache_line_size, OBJ_CLASS(ompi_crcp_base_pml_state_t), 0,opal_cache_line_size, 5, /* Initial number */ -1, /* Max = Unlimited */ 64, /* Increment by */ NULL); } else { opal_output_verbose( 20, mca_pml_crcpw_component.output_handle, "pml:crcpw: component_init: Priority %d", mca_pml_crcpw_component.priority); } *priority = mca_pml_crcpw_component.priority; pml_crcpw_is_finalized = false; return &mca_pml_crcpw_module.super; }
static void opal_free_list_construct(opal_free_list_t* fl) { OBJ_CONSTRUCT(&fl->fl_lock, opal_mutex_t); OBJ_CONSTRUCT(&fl->fl_condition, opal_condition_t); fl->fl_max_to_alloc = 0; fl->fl_num_allocated = 0; fl->fl_num_per_alloc = 0; fl->fl_num_waiting = 0; fl->fl_frag_size = sizeof(opal_free_list_item_t); fl->fl_frag_alignment = 0; fl->fl_payload_buffer_size = 0; fl->fl_payload_buffer_alignment = 0; fl->fl_frag_class = OBJ_CLASS(opal_free_list_item_t); fl->fl_mpool = NULL; /* default flags */ fl->fl_mpool_reg_flags = MCA_MPOOL_FLAGS_CACHE_BYPASS | MCA_MPOOL_FLAGS_CUDA_REGISTER_MEM; fl->ctx = NULL; OBJ_CONSTRUCT(&(fl->fl_allocations), opal_list_t); }
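Once a list is constructed and initialized, items are checked out and later handed back. A minimal consumption sketch follows, assuming the opal_free_list_get()/opal_free_list_return() accessors that accompany this constructor in the same header (older trees use OMPI_FREE_LIST_GET/RETURN-style macros instead).

/* Minimal sketch, assuming opal_free_list_get()/opal_free_list_return(). */
static int use_free_list_once_sketch(opal_free_list_t *fl)
{
    opal_free_list_item_t *item = opal_free_list_get(fl);
    if (NULL == item) {
        return OPAL_ERR_OUT_OF_RESOURCE; /* list empty and fl_max_to_alloc reached */
    }
    /* ... use the item as whatever fl_frag_class describes ... */
    opal_free_list_return(fl, item);
    return OPAL_SUCCESS;
}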
/* * Initializes the rcache module. */ void mca_rcache_rgpusm_module_init(mca_rcache_rgpusm_module_t* rcache) { rcache->super.rcache_component = &mca_rcache_rgpusm_component.super; rcache->super.rcache_register = mca_rcache_rgpusm_register; rcache->super.rcache_find = mca_rcache_rgpusm_find; rcache->super.rcache_deregister = mca_rcache_rgpusm_deregister; rcache->super.rcache_finalize = mca_rcache_rgpusm_finalize; rcache->vma_module = mca_rcache_base_vma_module_alloc (); OBJ_CONSTRUCT(&rcache->reg_list, opal_free_list_t); opal_free_list_init (&rcache->reg_list, sizeof(struct mca_rcache_common_cuda_reg_t), opal_cache_line_size, OBJ_CLASS(mca_rcache_base_registration_t), 0,opal_cache_line_size, 0, -1, 32, NULL, 0, NULL, NULL, NULL); OBJ_CONSTRUCT(&rcache->lru_list, opal_list_t); rcache->stat_cache_hit = rcache->stat_cache_miss = rcache->stat_evicted = 0; rcache->stat_cache_found = rcache->stat_cache_notfound = 0; rcache->stat_cache_valid = rcache->stat_cache_invalid = 0; }
void hcoll_rte_fns_setup(void) { init_module_fns(); OBJ_CONSTRUCT(&mca_coll_hcoll_component.requests, ompi_free_list_t); ompi_free_list_init_ex_new( &(mca_coll_hcoll_component.requests), sizeof(ompi_request_t), /* no special alignment needed */ 8, OBJ_CLASS(ompi_request_t), /* no payload data */ 0, 0, /* NOTE: hack - need to parametrize this */ 10, 50, 10, /* No Mpool */ NULL, NULL, NULL ); }
int ofi_comp_list_init(opal_free_list_t *comp_list) { int rc; OBJ_CONSTRUCT(comp_list, opal_free_list_t); rc = opal_free_list_init(comp_list, sizeof(mca_btl_ofi_completion_t), opal_cache_line_size, OBJ_CLASS(mca_btl_ofi_completion_t), 0, 0, 128, -1, 128, NULL, 0, NULL, NULL, NULL); if (rc != OPAL_SUCCESS) { BTL_VERBOSE(("cannot allocate completion freelist")); } return rc; }
/* * Initializes the rcache module. */ void mca_rcache_grdma_module_init(mca_rcache_grdma_module_t* rcache, mca_rcache_grdma_cache_t *cache) { OBJ_RETAIN(cache); rcache->cache = cache; rcache->super.rcache_component = &mca_rcache_grdma_component.super; rcache->super.rcache_register = mca_rcache_grdma_register; rcache->super.rcache_find = mca_rcache_grdma_find; rcache->super.rcache_deregister = mca_rcache_grdma_deregister; rcache->super.rcache_invalidate_range = mca_rcache_grdma_invalidate_range; rcache->super.rcache_finalize = mca_rcache_grdma_finalize; rcache->super.rcache_evict = mca_rcache_grdma_evict; rcache->stat_cache_hit = rcache->stat_cache_miss = rcache->stat_evicted = 0; rcache->stat_cache_found = rcache->stat_cache_notfound = 0; OBJ_CONSTRUCT(&rcache->reg_list, opal_free_list_t); opal_free_list_init (&rcache->reg_list, rcache->resources.sizeof_reg, opal_cache_line_size, OBJ_CLASS(mca_rcache_base_registration_t), 0, opal_cache_line_size, 0, -1, 32, NULL, 0, NULL, NULL, NULL); }
/** VPROTOCOL level functions (same as the PML ones) */ static mca_vprotocol_base_module_t *mca_vprotocol_pessimist_component_init( int* priority, bool enable_progress_threads, bool enable_mpi_threads) { V_OUTPUT_VERBOSE(500, "vprotocol_pessimist: component_init"); *priority = _priority; /* sanity check */ if(enable_mpi_threads) { opal_output(0, "vprotocol_pessimist: component_init: threads are enabled, but not supported by the vprotocol pessimist fault tolerant layer; it will not load"); return NULL; } mca_vprotocol_pessimist.clock = 1; mca_vprotocol_pessimist.replay = false; OBJ_CONSTRUCT(&mca_vprotocol_pessimist.replay_events, opal_list_t); OBJ_CONSTRUCT(&mca_vprotocol_pessimist.pending_events, opal_list_t); OBJ_CONSTRUCT(&mca_vprotocol_pessimist.events_pool, opal_free_list_t); opal_free_list_init (&mca_vprotocol_pessimist.events_pool, sizeof(mca_vprotocol_pessimist_event_t), opal_cache_line_size, OBJ_CLASS(mca_vprotocol_pessimist_event_t), 0,opal_cache_line_size, _free_list_num, _free_list_max, _free_list_inc, NULL, 0, NULL, NULL, NULL); mca_vprotocol_pessimist.event_buffer_max_length = _event_buffer_size / sizeof(vprotocol_pessimist_mem_event_t); mca_vprotocol_pessimist.event_buffer_length = 0; mca_vprotocol_pessimist.event_buffer = (vprotocol_pessimist_mem_event_t *) malloc(_event_buffer_size); mca_vprotocol_pessimist.el_comm = MPI_COMM_NULL; return &mca_vprotocol_pessimist.super; }