/* ////////////////////////////////////////////////////////////////////////// */

/**
 * Attach to a shared-memory segment that was previously created with
 * opal_shmem_segment_create() and wrap it in a freshly allocated
 * mca_common_sm_module_t.
 *
 * @param shmem_bufp         segment descriptor, already initialized by
 *                           opal_shmem_segment_create
 * @param size               NOTE(review): not referenced in the portion of
 *                           this function visible here — confirm against the
 *                           full definition
 * @param size_ctl_structure number of bytes at the start of the segment
 *                           reserved for the control structure
 * @param data_seg_alignment required alignment of the data segment that
 *                           follows the control structure; 0 means the
 *                           segment has no data portion
 * @param first_call         NOTE(review): not referenced in the visible
 *                           portion — confirm against the full definition
 *
 * @return the new module on success, or NULL on failure (the segment is
 *         detached before returning NULL).
 *
 * NOTE(review): this chunk ends before the function body does — the
 * remainder of the definition (including the success return) lies outside
 * this view.
 */
static mca_common_sm_module_t *
attach_and_init(opal_shmem_ds_t *shmem_bufp,
                size_t size,
                size_t size_ctl_structure,
                size_t data_seg_alignment,
                bool first_call)
{
    mca_common_sm_module_t *map = NULL;
    mca_common_sm_seg_header_t *seg = NULL;
    unsigned char *addr = NULL;

    /* attach to the specified segment. note that at this point, the contents of
     * *shmem_bufp have already been initialized via opal_shmem_segment_create. */
    if (NULL == (seg = (mca_common_sm_seg_header_t *)
                           opal_shmem_segment_attach(shmem_bufp))) {
        return NULL;
    }

    /* read barrier: make sure we observe the creator's writes to the segment
     * before reading its contents */
    opal_atomic_rmb();

    if (NULL == (map = OBJ_NEW(mca_common_sm_module_t))) {
        OPAL_ERROR_LOG(OPAL_ERR_OUT_OF_RESOURCE);
        /* detach result deliberately ignored — we are already failing */
        (void)opal_shmem_segment_detach(shmem_bufp);
        return NULL;
    }

    /* copy meta information into common sm module
     * from ====> to */
    if (OPAL_SUCCESS != opal_shmem_ds_copy(shmem_bufp, &map->shmem_ds)) {
        (void)opal_shmem_segment_detach(shmem_bufp);
        /* NOTE(review): map was allocated with OBJ_NEW, but is released here
         * with free(), which bypasses the OPAL object destructor/refcount
         * machinery. OBJ_RELEASE(map) looks like the intended call — confirm
         * against the mca_common_sm_module_t class definition before
         * changing. Same applies to the free(map) in the alignment-overflow
         * path below. */
        free(map);
        return NULL;
    }

    /* the first entry in the file is the control structure. the first
     * entry in the control structure is an mca_common_sm_seg_header_t
     * element. */
    map->module_seg = seg;

    /* the data portion (if any) starts right after the control structure */
    addr = ((unsigned char *)seg) + size_ctl_structure;

    /* if we have a data segment (i.e., if 0 != data_seg_alignment),
     * then make it the first aligned address after the control
     * structure. IF THIS HAPPENS, THIS IS A PROGRAMMING ERROR IN
     * OPEN MPI! */
    if (0 != data_seg_alignment) {
        addr = OPAL_ALIGN_PTR(addr, data_seg_alignment, unsigned char *);

        /* is addr past end of the shared memory segment? */
        if ((unsigned char *)seg + shmem_bufp->seg_size < addr) {
            opal_show_help("help-mpi-common-sm.txt", "mmap too small", 1,
                           opal_proc_local_get()->proc_hostname,
                           (unsigned long)shmem_bufp->seg_size,
                           (unsigned long)size_ctl_structure,
                           (unsigned long)data_seg_alignment);
            (void)opal_shmem_segment_detach(shmem_bufp);
            free(map);
            return NULL;
        }
    }
static int vader_finalize(struct mca_btl_base_module_t *btl) { mca_btl_vader_component_t *component = &mca_btl_vader_component; mca_btl_vader_t *vader_btl = (mca_btl_vader_t *) btl; if (!vader_btl->btl_inited) { return OPAL_SUCCESS; } for (int i = 0 ; i < 1 + MCA_BTL_VADER_NUM_LOCAL_PEERS ; ++i) { fini_vader_endpoint (component->endpoints + i); } free (component->endpoints); component->endpoints = NULL; vader_btl->btl_inited = false; free (component->fbox_in_endpoints); component->fbox_in_endpoints = NULL; if (MCA_BTL_VADER_XPMEM != mca_btl_vader_component.single_copy_mechanism) { opal_shmem_unlink (&mca_btl_vader_component.seg_ds); opal_shmem_segment_detach (&mca_btl_vader_component.seg_ds); } return OPAL_SUCCESS; }
/**
 * OBJ destructor for a vader endpoint: drains the pending-frag list, tears
 * down whichever single-copy attachment the endpoint holds (XPMEM
 * registration cache + apid, or a plain shmem segment), and clears the
 * cached buffer/fifo pointers.
 *
 * NOTE(review): the `} else` immediately before the #endif pairs with the
 * `if` that follows it, so when OPAL_BTL_VADER_HAVE_XPMEM is defined the
 * seg_ds branch is the else-arm of the XPMEM check; when it is not defined
 * the seg_ds branch stands alone. Be careful when editing around the
 * preprocessor boundary.
 */
static void mca_btl_vader_endpoint_destructor (mca_btl_vader_endpoint_t *ep)
{
    OBJ_DESTRUCT(&ep->pending_frags);

#if OPAL_BTL_VADER_HAVE_XPMEM
    if (MCA_BTL_VADER_XPMEM == mca_btl_vader_component.single_copy_mechanism) {
        if (ep->segment_data.xpmem.rcache) {
            /* clean out the registration cache: pull registrations out in
             * batches of nregs until a batch comes back short */
            const int nregs = 100;
            mca_mpool_base_registration_t *regs[nregs];
            int reg_cnt;

            do {
                reg_cnt = ep->segment_data.xpmem.rcache->rcache_find_all(ep->segment_data.xpmem.rcache, 0, (size_t)-1,
                                                                         regs, nregs);

                for (int i = 0 ; i < reg_cnt ; ++i) {
                    /* otherwise dereg will fail on assert */
                    regs[i]->ref_count = 0;
                    OBJ_RELEASE(regs[i]);
                }
            } while (reg_cnt == nregs);  /* a full batch may mean more remain */

            ep->segment_data.xpmem.rcache = NULL;
        }

        /* release our XPMEM attachment to the peer's address space */
        if (ep->segment_base) {
            xpmem_release (ep->segment_data.xpmem.apid);
            ep->segment_data.xpmem.apid = 0;
        }
    } else
#endif
    if (ep->segment_data.other.seg_ds) {
        opal_shmem_ds_t seg_ds;

        /* opal_shmem_segment_detach expects a opal_shmem_ds_t and will
         * stomp past the end of the seg_ds if it is too small (which
         * ep->seg_ds probably is) */
        memcpy (&seg_ds, ep->segment_data.other.seg_ds, opal_shmem_sizeof_shmem_ds (ep->segment_data.other.seg_ds));
        /* the heap copy is no longer needed once we have the full-size copy */
        free (ep->segment_data.other.seg_ds);
        ep->segment_data.other.seg_ds = NULL;

        /* disconnect from the peer's segment */
        opal_shmem_segment_detach (&seg_ds);
    }

    /* drop cached pointers into memory we no longer own */
    ep->fbox_in.buffer = ep->fbox_out.buffer = NULL;
    ep->segment_base = NULL;
    ep->fifo = NULL;
}