/*
 * Front-end function to delete every attribute attached to an MPI
 * object.  Deletion happens in the reverse of the order in which the
 * attributes were set (required by MPI-2.2 8.7.1 for MPI_COMM_SELF;
 * applied uniformly here for simplicity).
 */
int ompi_attr_delete_all(ompi_attribute_type_t type, void *object,
                         opal_hash_table_t *attr_hash)
{
    int rc, idx, count;
    uint32_t hash_key;
    void *iter_node, *prev_node, *value;
    attribute_value_t **snapshot;

    /* Nothing attached at all -> nothing to do */
    if (NULL == attr_hash) {
        return MPI_SUCCESS;
    }

    OPAL_THREAD_LOCK(&attribute_lock);

    count = opal_hash_table_get_size(attr_hash);
    if (0 == count) {
        OPAL_THREAD_UNLOCK(&attribute_lock);
        return MPI_SUCCESS;
    }

    /* Snapshot every attribute into a flat array so we can order the
       deletions; the hash table itself gives no ordering guarantee. */
    snapshot = malloc(sizeof(attribute_value_t *) * count);
    if (NULL == snapshot) {
        OPAL_THREAD_UNLOCK(&attribute_lock);
        return OMPI_ERR_OUT_OF_RESOURCE;
    }

    idx = 0;
    rc = opal_hash_table_get_first_key_uint32(attr_hash, &hash_key,
                                              &value, &iter_node);
    while (OMPI_SUCCESS == rc) {
        snapshot[idx++] = value;
        prev_node = iter_node;
        rc = opal_hash_table_get_next_key_uint32(attr_hash, &hash_key,
                                                 &value, prev_node,
                                                 &iter_node);
    }

    /* Order the snapshot by the sequence in which attributes were set */
    qsort(snapshot, count, sizeof(attribute_value_t *),
          compare_attr_sequence);

    /* Walk the array backwards so the most recently set attribute is
       deleted first; stop at the first failure and report it. */
    for (idx = count - 1; idx >= 0; idx--) {
        rc = ompi_attr_delete_impl(type, object, attr_hash,
                                   snapshot[idx]->av_key, true);
        if (OMPI_SUCCESS != rc) {
            break;
        }
    }

    free(snapshot);
    opal_atomic_wmb();
    OPAL_THREAD_UNLOCK(&attribute_lock);
    return rc;
}
/** * Function to pack all the entries in the SOS table and send it * over to the HNP. * * @return OPAL_SUCCESS Upon success * @return OPAL_FAILURE Upon failure * * ADK: Presently, we simply rely on orte_show_help to do the aggregation on * a per-error basis. */ static int opal_sos_send_table(void) { opal_sos_error_t *opal_error; opal_buffer_t *buf; uint32_t key; int rc; size_t table_size; void *prev_error, *next_error; next_error = NULL; buf = OBJ_NEW(opal_buffer_t); if (NULL == buf) { return ORTE_ERR_OUT_OF_RESOURCE; } OPAL_THREAD_LOCK(&opal_sos_table_lock); table_size = opal_hash_table_get_size(&opal_sos_table); /* Pack the size of the SOS error table */ if (ORTE_SUCCESS != (rc = opal_dss.pack(buf, &table_size, 1, OPAL_SIZE))) { ORTE_ERROR_LOG(rc); goto error; } if (OPAL_SUCCESS != opal_hash_table_get_first_key_uint32(&opal_sos_table, &key, (void**)&opal_error, &prev_error)) { rc = ORTE_ERROR; goto error; } /* Pack the sos error object */ if (ORTE_SUCCESS != (rc = opal_dss_pack_sos_error(buf, opal_error))) { ORTE_ERROR_LOG(rc); goto error; } while (OPAL_SUCCESS == opal_hash_table_get_next_key_uint32(&opal_sos_table, &key, (void**)&opal_error, &prev_error, &next_error)) { if (ORTE_SUCCESS != (rc = opal_dss_pack_sos_error(buf, opal_error))) { ORTE_ERROR_LOG(rc); goto error; } } OPAL_THREAD_UNLOCK(&opal_sos_table_lock); /* Now send the buffer (rc = number of bytes sent) */ rc = orte_rml.send_buffer(ORTE_PROC_MY_HNP, buf, ORTE_RML_TAG_NOTIFIER_HNP, 0); if (rc <= 0) { ORTE_ERROR_LOG(rc); OBJ_RELEASE(buf); return rc; } return ORTE_SUCCESS; error: OPAL_THREAD_UNLOCK(&opal_sos_table_lock); OBJ_RELEASE(buf); return rc; }
/*
 * Tear down an RDMA one-sided window module: barrier with the peers,
 * deregister from the component, destruct all member objects, and
 * release every buffer the module owns.  Returns the first error seen
 * (the barrier's, or the hash-table removal's).
 */
int ompi_osc_rdma_module_free(ompi_win_t *win)
{
    int ret = OMPI_SUCCESS;
    int tmp, i;
    ompi_osc_rdma_module_t *module = GET_MODULE(win);

    opal_output_verbose(1, ompi_osc_base_output,
                        "rdma component destroying window with id %d",
                        ompi_comm_get_cid(module->m_comm));

    /* finish with a barrier */
    if (ompi_group_size(win->w_group) > 1) {
        ret = module->m_comm->c_coll.coll_barrier(module->m_comm,
                                                  module->m_comm->c_coll.coll_barrier_module);
    }

    /* remove from component information */
    OPAL_THREAD_LOCK(&mca_osc_rdma_component.c_lock);
    tmp = opal_hash_table_remove_value_uint32(&mca_osc_rdma_component.c_modules,
                                              ompi_comm_get_cid(module->m_comm));
    /* only take the output of hash_table_remove if there wasn't already
       an error */
    ret = (ret != OMPI_SUCCESS) ? ret : tmp;

    /* last module gone: stop the progress machinery */
    if (0 == opal_hash_table_get_size(&mca_osc_rdma_component.c_modules)) {
#if OPAL_ENABLE_PROGRESS_THREADS
        void *foo;

        mca_osc_rdma_component.c_thread_run = false;
        opal_condition_broadcast(&ompi_request_cond);
        opal_thread_join(&mca_osc_rdma_component.c_thread, &foo);
#else
        opal_progress_unregister(ompi_osc_rdma_component_progress);
#endif
    }
    OPAL_THREAD_UNLOCK(&mca_osc_rdma_component.c_lock);

    win->w_osc_module = NULL;

    OBJ_DESTRUCT(&module->m_unlocks_pending);
    OBJ_DESTRUCT(&module->m_locks_pending);
    OBJ_DESTRUCT(&module->m_queued_sendreqs);
    OBJ_DESTRUCT(&module->m_copy_pending_sendreqs);
    OBJ_DESTRUCT(&module->m_pending_sendreqs);
    OBJ_DESTRUCT(&module->m_acc_lock);
    OBJ_DESTRUCT(&module->m_cond);
    OBJ_DESTRUCT(&module->m_lock);

    /* free(NULL) is a no-op, so the former per-pointer NULL guards
       were redundant and have been dropped */
    free(module->m_sc_remote_ranks);
    free(module->m_sc_remote_active_ranks);
    free(module->m_pending_buffers);
    free(module->m_fence_coll_counts);
    free(module->m_copy_num_pending_sendreqs);
    free(module->m_num_pending_sendreqs);

    if (NULL != module->m_peer_info) {
        for (i = 0; i < ompi_comm_size(module->m_comm); ++i) {
            ompi_osc_rdma_peer_info_free(&module->m_peer_info[i]);
        }
        free(module->m_peer_info);
    }

    if (NULL != module->m_comm) {
        ompi_comm_free(&module->m_comm);
    }

    /* module was already dereferenced above, so the old
       "if (NULL != module)" guard was meaningless */
    free(module);

    return ret;
}