Example #1
int oshmem_proc_finalize(void)
{
    opal_list_item_t *item;

    /* Destroy all groups */
    oshmem_proc_group_finalize();

    /* remove all items from list and destroy them. Since we cannot know
     * the reference count of the procs for certain, it is possible that
     * a single OBJ_RELEASE won't drive the count to zero, and hence will
     * not release the memory. Accordingly, we cycle through the list here,
     * calling release on each item.
     *
     * This will cycle until it forces the reference count of each item
     * to zero, thus causing the destructor to run - which will remove
     * the item from the list!
     *
     * We cannot do this under the thread lock as the destructor will
     * call it when removing the item from the list. However, this function
     * is ONLY called from MPI_Finalize, and all threads are prohibited from
     * calling an MPI function once ANY thread has called MPI_Finalize. Of
     * course, multiple threads are allowed to call MPI_Finalize, so this
     * function may get called multiple times by various threads. We believe
     * it is thread safe to do so...though it may not -appear- to be so
     * without walking through the entire list/destructor sequence.
     */
    while (opal_list_get_end(&oshmem_proc_list)
            != (item = opal_list_get_first(&oshmem_proc_list))) {
        OBJ_RELEASE(item);
    }
    OBJ_RELEASE(oshmem_shmem_local_convertor);
    /* now destruct the list and thread lock */
    OBJ_DESTRUCT(&oshmem_proc_list);
    OBJ_DESTRUCT(&oshmem_proc_lock);

    return OSHMEM_SUCCESS;
}
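
The drain loop above depends on OPAL's reference-counted objects: OBJ_RELEASE() only runs the destructor, which unlinks the item from the list, once the reference count reaches zero, so releasing the head repeatedly eventually empties the list. A minimal self-contained sketch of that pattern (proc_t, proc_release() and the list handling below are hypothetical stand-ins for the OPAL object machinery, not OSHMEM APIs):

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for an opal_list_item_t with a reference count. */
typedef struct proc {
    int refcount;          /* holders still referencing this proc          */
    struct proc *next;     /* singly linked list in place of opal_list_t   */
} proc_t;

static proc_t *proc_list;  /* head of the list being torn down */

/* Destructor: unlink the item from the list, then free it. */
static void proc_destruct(proc_t *p)
{
    proc_t **cur = &proc_list;
    while (*cur && *cur != p) {
        cur = &(*cur)->next;
    }
    if (*cur) {
        *cur = p->next;
    }
    free(p);
}

/* Release one reference; the destructor runs only when the count hits zero,
 * which is what OBJ_RELEASE() does for real OPAL objects. */
static void proc_release(proc_t *p)
{
    if (--p->refcount == 0) {
        proc_destruct(p);
    }
}

int main(void)
{
    /* Two-element list; the second proc is still referenced twice. */
    proc_t *a = calloc(1, sizeof(*a));
    proc_t *b = calloc(1, sizeof(*b));
    a->refcount = 1;
    b->refcount = 2;
    a->next = b;
    proc_list = a;

    /* Same shape as the loop in oshmem_proc_finalize(): keep releasing the
     * first item until its count reaches zero and the destructor unlinks it. */
    while (proc_list != NULL) {
        proc_release(proc_list);
    }
    puts("list drained");
    return 0;
}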
Example #2
static int _shmem_finalize(void)
{
    int ret = OSHMEM_SUCCESS;

    shmem_barrier_all();

    shmem_lock_finalize();

    /* Finalize preconnect framework */
    if (OSHMEM_SUCCESS != (ret = oshmem_shmem_preconnect_all_finalize())) {
        return ret;
    }

    /* free requests */
    if (OSHMEM_SUCCESS != (ret = oshmem_request_finalize())) {
        return ret;
    }
    /* must free cached groups before we kill collectives */
    if (OSHMEM_SUCCESS != (ret = oshmem_group_cache_list_free())) {
        return ret;
    }
    /* We need to call mca_scoll_base_group_unselect explicitly for each group
     * that is not freed by oshmem_group_cache_list_free(); their collectives
     * can only be released at this point. */
    mca_scoll_base_group_unselect(oshmem_group_all);
    mca_scoll_base_group_unselect(oshmem_group_self);

    /* Close down MCA modules */

    if (OSHMEM_SUCCESS != (ret = mca_base_framework_close(&oshmem_atomic_base_framework) ) ) {
        return ret;
    }

    if (OSHMEM_SUCCESS != (ret = mca_base_framework_close(&oshmem_scoll_base_framework) ) ) {
        return ret;
    }

    if (OSHMEM_SUCCESS != (ret = mca_base_framework_close(&oshmem_memheap_base_framework) ) ) {
        return ret;
    }

    if (OSHMEM_SUCCESS != (ret = mca_base_framework_close(&oshmem_sshmem_base_framework) ) ) {
        return ret;
    }

    /* Remove all processes from the SPML before it is torn down */
    if (OSHMEM_SUCCESS != (ret = MCA_SPML_CALL(del_procs(oshmem_group_all->proc_array,
                                                         oshmem_group_all->proc_count)))) {
        return ret;
    }

    oshmem_shmem_barrier();

    /* free spml resource */
    if (OSHMEM_SUCCESS != (ret = mca_spml_base_finalize())) {
        return ret;
    }

    if (OSHMEM_SUCCESS != (ret = mca_base_framework_close(&oshmem_spml_base_framework) ) ) {
        return ret;
    }

    /* free op resources */
    if (OSHMEM_SUCCESS != (ret = oshmem_op_finalize())) {
        return ret;
    }

    /* free proc_group resources */
    if (OSHMEM_SUCCESS != (ret = oshmem_proc_group_finalize())) {
        return ret;
    }

    /* free proc resources */
    if (OSHMEM_SUCCESS != (ret = oshmem_proc_finalize())) {
        return ret;
    }

    /* free info resources */
    if (OSHMEM_SUCCESS != (ret = oshmem_info_finalize())) {
        return ret;
    }

    return ret;
}
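
The four mca_base_framework_close() calls in this example repeat the same check-and-return pattern. The same order of operations could be expressed once over a table of frameworks; the sketch below assumes it sits in the same translation unit as _shmem_finalize(), so the framework globals and OSHMEM_SUCCESS are already in scope (the close_frameworks() helper itself is illustrative, not part of the OSHMEM sources):

/* Illustrative helper: close the frameworks in the same order as the
 * hand-written checks above, stopping at the first failure. */
static int close_frameworks(void)
{
    mca_base_framework_t *frameworks[] = {
        &oshmem_atomic_base_framework,
        &oshmem_scoll_base_framework,
        &oshmem_memheap_base_framework,
        &oshmem_sshmem_base_framework,
    };
    int ret;
    size_t i;

    for (i = 0; i < sizeof(frameworks) / sizeof(frameworks[0]); ++i) {
        if (OSHMEM_SUCCESS != (ret = mca_base_framework_close(frameworks[i]))) {
            return ret;   /* propagate the first error, as in the original */
        }
    }
    return OSHMEM_SUCCESS;
}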